| column | type | min | max |
|--------------|---------------|-----|--------|
| id | stringlengths | 2 | 115 |
| lastModified | stringlengths | 24 | 24 |
| tags | list | | |
| author | stringlengths | 2 | 42 |
| description | stringlengths | 0 | 6.67k |
| citation | stringlengths | 0 | 10.7k |
| likes | int64 | 0 | 3.66k |
| downloads | int64 | 0 | 8.89M |
| created | timestamp[us] | | |
| card | stringlengths | 11 | 977k |
| card_len | int64 | 11 | 977k |
| embeddings | list | | |
chloecchng/biomedical_cpgQA
2023-10-24T17:37:28.000Z
[ "task_categories:question-answering", "size_categories:1K<n<10K", "language:en", "license:apache-2.0", "biology", "medical", "region:us" ]
chloecchng
null
null
2
100
2023-10-09T09:58:21
--- license: apache-2.0 task_categories: - question-answering language: - en tags: - biology - medical size_categories: - 1K<n<10K --- # Dataset Card for the Biomedical Domain ### Dataset Summary This dataset was obtained from GitHub (https://github.com/mmahbub/cpgQA/blob/main/dataset/cpgQA-v1.0.csv?plain=1) and uploaded to Hugging Face for easier access while fine-tuning. ### Languages English (en) ## Dataset Structure The dataset is in CSV format, with each row representing a single question-answer pair. The following columns are included: * **Title:** Categorises the question-answer pair. * **Context:** The context passage for the question. * **Question:** The question asked. * **Answer:** The expected and appropriate answer to the question asked.
713
[ [ -0.021026611328125, -0.047943115234375, 0.01439666748046875, -0.0006198883056640625, -0.02471923828125, 0.01291656494140625, 0.024444580078125, -0.0182342529296875, 0.04718017578125, 0.040496826171875, -0.04833984375, -0.064453125, -0.02410888671875, 0.00906...
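The card above describes a four-column CSV (Title, Context, Question, Answer). A minimal sketch of loading and inspecting it with the `datasets` library, assuming the repo id in this record still resolves and that the split name and column casing match the card (both are assumptions, not verified here):

```python
from datasets import load_dataset

# Hypothetical usage: the repo id comes from this record; the split name and
# column casing are assumed from the card's description of the CSV layout.
ds = load_dataset("chloecchng/biomedical_cpgQA", split="train")

example = ds[0]
print(example["Question"])  # the question asked
print(example["Answer"])    # the expected answer
```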
portafolio/llamadas-celular-es-01
2023-10-18T17:58:08.000Z
[ "region:us" ]
portafolio
null
null
0
100
2023-10-18T17:56:33
Entry not found
15
[ [ -0.0214080810546875, -0.01497650146484375, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.016998291015625, -0.05206298828125, -0.01496124267578125, -0.06036376953125, 0.0379...
result-kand2-sdxl-wuerst-karlo/877f2204
2023-10-28T21:09:41.000Z
[ "region:us" ]
result-kand2-sdxl-wuerst-karlo
null
null
0
100
2023-10-28T21:09:40
--- dataset_info: features: - name: result dtype: string - name: id dtype: int64 splits: - name: train num_bytes: 185 num_examples: 10 download_size: 1372 dataset_size: 185 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "877f2204" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
455
[ [ -0.046417236328125, -0.00379180908203125, 0.0167999267578125, 0.0181121826171875, -0.0231781005859375, -0.013702392578125, 0.0302276611328125, -0.019683837890625, 0.05047607421875, 0.041290283203125, -0.056121826171875, -0.047271728515625, -0.0404052734375, ...
result-kand2-sdxl-wuerst-karlo/144daf3b
2023-10-29T13:49:52.000Z
[ "region:us" ]
result-kand2-sdxl-wuerst-karlo
null
null
0
100
2023-10-29T13:49:52
--- dataset_info: features: - name: result dtype: string - name: id dtype: int64 splits: - name: train num_bytes: 174 num_examples: 10 download_size: 1351 dataset_size: 174 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "144daf3b" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
455
[ [ -0.05328369140625, -0.021484375, 0.0198974609375, 0.037200927734375, -0.01239776611328125, -0.004863739013671875, 0.0272369384765625, -0.0175628662109375, 0.0484619140625, 0.032073974609375, -0.05413818359375, -0.04205322265625, -0.035400390625, -0.008064270...
result-kand2-sdxl-wuerst-karlo/1abdaff0
2023-10-29T16:21:52.000Z
[ "region:us" ]
result-kand2-sdxl-wuerst-karlo
null
null
0
100
2023-10-29T16:21:52
--- dataset_info: features: - name: result dtype: string - name: id dtype: int64 splits: - name: train num_bytes: 208 num_examples: 10 download_size: 1389 dataset_size: 208 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "1abdaff0" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
455
[ [ -0.051361083984375, -0.0238494873046875, 0.007633209228515625, 0.034576416015625, -0.016754150390625, -0.0048828125, 0.029388427734375, -0.0159454345703125, 0.06597900390625, 0.0276641845703125, -0.05999755859375, -0.045562744140625, -0.04608154296875, -0.01...
bnl_newspapers
2023-01-25T14:27:26.000Z
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:language-modeling", "task_ids:masked-language-modeling", "annotations_creators:no-annotation", "language_creators:found", "multilinguality:multilingual", "size_categories:100K<n<1M", "source_datasets:original", "language:ar"...
null
Digitised historic newspapers from the Bibliothèque nationale (BnL) - the National Library of Luxembourg.
@misc{bnl_newspapers, title={Historical Newspapers}, url={https://data.bnl.lu/data/historical-newspapers/}, author={ Bibliothèque nationale du Luxembourg},
1
99
2022-03-02T23:29:22
--- annotations_creators: - no-annotation language_creators: - found language: - ar - da - de - fi - fr - lb - nl - pt license: - cc0-1.0 multilinguality: - multilingual size_categories: - 100K<n<1M source_datasets: - original task_categories: - text-generation - fill-mask task_ids: - language-modeling - masked-language-modeling pretty_name: BnL Historical Newspapers dataset_info: features: - name: id dtype: string - name: source dtype: string - name: url dtype: string - name: title dtype: string - name: ispartof dtype: string - name: text dtype: string - name: pub_date dtype: timestamp[s] - name: publisher dtype: string - name: language dtype: string - name: article_type dtype: class_label: names: '0': ADVERTISEMENT_SECTION '1': BIBLIOGRAPHY '2': CHAPTER '3': INDEX '4': CONTRIBUTION '5': TABLE_OF_CONTENTS '6': WEATHER '7': SHIPPING '8': SECTION '9': ARTICLE '10': TITLE_SECTION '11': DEATH_NOTICE '12': SUPPLEMENT '13': TABLE '14': ADVERTISEMENT '15': CHART_DIAGRAM '16': ILLUSTRATION '17': ISSUE - name: extent dtype: int32 config_name: processed splits: - name: train num_bytes: 1611620156 num_examples: 537558 download_size: 1224029060 dataset_size: 1611620156 --- # Dataset Card for BnL Historical Newspapers ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://data.bnl.lu/data/historical-newspapers/ - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** opendata@bnl.etat.lu ### Dataset Summary The BnL has digitised over 800.000 pages of Luxembourg newspapers. This dataset currently has one configuration covering a subset of these newspapers, which sit under the "Processed Datasets" collection. The BNL: > processed all newspapers and monographs that are in the public domain and extracted the full text and associated meta data of every single article, section, advertisement… The result is a large number of small, easy to use XML files formatted using Dublin Core. [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure The dataset currently contains a single configuration. ### Data Instances An example instance from the datasets: ``` python {'id': 'https://persist.lu/ark:/70795/wx8r4c/articles/DTL47', 'article_type': 8, 'extent': 49, 'ispartof': 'Luxemburger Wort', 'pub_date': datetime.datetime(1853, 3, 23, 0, 0), 'publisher': 'Verl. der St-Paulus-Druckerei', 'source': 'newspaper/luxwort/1853-03-23', 'text': 'Asien. 
Eine neue Nedcrland-Post ist angekommen mil Nachrichten aus Calcutta bis zum 5. Febr.; Vom» vay, 12. Febr. ; Nangun und HongKong, 13. Jan. Die durch die letzte Post gebrachle Nachricht, der König von Ava sei durch seinen Bruder enlhronl worden, wird bestätigt. (K. Z.) Verantwortl. Herausgeber, F. Schümann.', 'title': 'Asien.', 'url': 'http://www.eluxemburgensia.lu/webclient/DeliveryManager?pid=209701#panel:pp|issue:209701|article:DTL47', 'language': 'de' } ``` ### Data Fields - 'id': This is a unique and persistent identifier using ARK. - 'article_type': The type of the exported data, possible values ('ADVERTISEMENT_SECTION', 'BIBLIOGRAPHY', 'CHAPTER', 'INDEX', 'CONTRIBUTION', 'TABLE_OF_CONTENTS', 'WEATHER', 'SHIPPING', 'SECTION', 'ARTICLE', 'TITLE_SECTION', 'DEATH_NOTICE', 'SUPPLEMENT', 'TABLE', 'ADVERTISEMENT', 'CHART_DIAGRAM', 'ILLUSTRATION', 'ISSUE') - 'extent': The number of words in the text field - 'ispartof: The complete title of the source document e.g. “Luxemburger Wort”. - 'pub_date': The publishing date of the document e.g “1848-12-15” - 'publisher':The publisher of the document e.g. “Verl. der St-Paulus-Druckerei”. - 'source': Describes the source of the document. For example <dc:source>newspaper/luxwort/1848-12-15</dc:source> means that this article comes from the newspaper “luxwort” (ID for Luxemburger Wort) issued on 15.12.1848. - 'text': The full text of the entire article, section, advertisement etc. It includes any titles and subtitles as well. The content does not contain layout information, such as headings, paragraphs or lines. - 'title': The main title of the article, section, advertisement, etc. - 'url': The link to the BnLViewer on eluxemburgensia.lu to view the resource online. - 'language': The language of the text, possible values ('ar', 'da', 'de', 'fi', 'fr', 'lb', 'nl', 'pt') ### Data Splits This dataset contains a single split `train`. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` @misc{bnl_newspapers, title={Historical Newspapers}, url={https://data.bnl.lu/data/historical-newspapers/}, author={ Bibliothèque nationale du Luxembourg}, ``` ### Contributions Thanks to [@davanstrien](https://github.com/davanstrien) for adding this dataset.
6,839
[ [ -0.0283050537109375, -0.04974365234375, 0.03228759765625, 0.0231475830078125, -0.0306549072265625, -0.022125244140625, -0.0197906494140625, -0.037506103515625, 0.040985107421875, 0.040924072265625, -0.0357666015625, -0.06988525390625, -0.03863525390625, 0.03...
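The BnL card documents its features in full, including the `article_type` class label and the single `processed` configuration. A minimal sketch of reading those labels back as strings, assuming the dataset still loads under the id shown above:

```python
from datasets import load_dataset

# Sketch only: the "processed" config and the feature names are taken from
# the card's dataset_info block above.
ds = load_dataset("bnl_newspapers", "processed", split="train")

article_type = ds.features["article_type"]          # ClassLabel with 18 names
first = ds[0]
print(article_type.int2str(first["article_type"]))  # e.g. "SECTION"
print(first["pub_date"], first["language"], first["title"])
```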
diplomacy_detection
2023-01-25T14:29:25.000Z
[ "task_categories:text-classification", "task_ids:intent-classification", "annotations_creators:found", "language_creators:found", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:en", "license:unknown", "region:us" ]
null
null
@inproceedings{peskov-etal-2020-takes, title = "It Takes Two to Lie: One to Lie, and One to Listen", author = "Peskov, Denis and Cheng, Benny and Elgohary, Ahmed and Barrow, Joe and Danescu-Niculescu-Mizil, Cristian and Boyd-Graber, Jordan", booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", month = jul, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.acl-main.353", doi = "10.18653/v1/2020.acl-main.353", pages = "3811--3854", abstract = "Trust is implicit in many online text conversations{---}striking up new friendships, or asking for tech support. But trust can be betrayed through deception. We study the language and dynamics of deception in the negotiation-based game Diplomacy, where seven players compete for world domination by forging and breaking alliances with each other. Our study with players from the Diplomacy community gathers 17,289 messages annotated by the sender for their intended truthfulness and by the receiver for their perceived truthfulness. Unlike existing datasets, this captures deception in long-lasting relationships, where the interlocutors strategically combine truth with lies to advance objectives. A model that uses power dynamics and conversational contexts can predict when a lie occurs nearly as well as human players.", }
0
99
2022-03-02T23:29:22
--- annotations_creators: - found language_creators: - found language: - en license: - unknown multilinguality: - monolingual size_categories: - n<1K source_datasets: - original task_categories: - text-classification task_ids: - intent-classification pretty_name: HateOffensive dataset_info: features: - name: messages sequence: string - name: sender_labels sequence: class_label: names: '0': 'false' '1': 'true' - name: receiver_labels sequence: class_label: names: '0': 'false' '1': 'true' '2': noannotation - name: speakers sequence: class_label: names: '0': italy '1': turkey '2': russia '3': england '4': austria '5': germany '6': france - name: receivers sequence: class_label: names: '0': italy '1': turkey '2': russia '3': england '4': austria '5': germany '6': france - name: absolute_message_index sequence: int64 - name: relative_message_index sequence: int64 - name: seasons sequence: class_label: names: '0': spring '1': fall '2': winter '3': Spring '4': Fall '5': Winter - name: years sequence: class_label: names: '0': '1901' '1': '1902' '2': '1903' '3': '1904' '4': '1905' '5': '1906' '6': '1907' '7': '1908' '8': '1909' '9': '1910' '10': '1911' '11': '1912' '12': '1913' '13': '1914' '14': '1915' '15': '1916' '16': '1917' '17': '1918' - name: game_score sequence: class_label: names: '0': '0' '1': '1' '2': '2' '3': '3' '4': '4' '5': '5' '6': '6' '7': '7' '8': '8' '9': '9' '10': '10' '11': '11' '12': '12' '13': '13' '14': '14' '15': '15' '16': '16' '17': '17' '18': '18' - name: game_score_delta sequence: class_label: names: '0': '0' '1': '1' '2': '2' '3': '3' '4': '4' '5': '5' '6': '6' '7': '7' '8': '8' '9': '9' '10': '10' '11': '11' '12': '12' '13': '13' '14': '14' '15': '15' '16': '16' '17': '17' '18': '18' '19': '-1' '20': '-2' '21': '-3' '22': '-4' '23': '-5' '24': '-6' '25': '-7' '26': '-8' '27': '-9' '28': '-10' '29': '-11' '30': '-12' '31': '-13' '32': '-14' '33': '-15' '34': '-16' '35': '-17' '36': '-18' - name: players sequence: class_label: names: '0': italy '1': turkey '2': russia '3': england '4': austria '5': germany '6': france - name: game_id dtype: int64 splits: - name: validation num_bytes: 254344 num_examples: 21 - name: train num_bytes: 2539778 num_examples: 189 - name: test num_bytes: 506191 num_examples: 42 download_size: 3208706 dataset_size: 3300313 --- # Dataset Card for HateOffensive ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage** : https://sites.google.com/view/qanta/projects/diplomacy - **Repository** : https://github.com/DenisPeskov/2020_acl_diplomacy - **Paper** : 
http://users.umiacs.umd.edu/~jbg/docs/2020_acl_diplomacy.pdf - **Leaderboard** : - **Point of Contact** : ### Dataset Summary This dataset contains pairwise conversations annotated by the sender and the receiver for deception (and conversely truthfulness). The 17,289 messages are gathered from 12 games. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages English ## Dataset Structure ### Data Instances ``` { "messages": ["Greetings Sultan!\n\nAs your neighbor I would like to propose an alliance! What are your views on the board so far?", "I think an alliance would be great! Perhaps a dmz in the Black Sea would be a good idea to solidify this alliance?\n\nAs for my views on the board, my first moves will be Western into the Balkans and Mediterranean Sea.", "Sounds good lets call a dmz in the black sea", "What's our move this year?", "I've been away from the game for a while", "Not sure yet, what are your thoughts?", "Well I'm pretty worried about Germany attacking me (and Austria to a lesser extent) so im headed west. It looks like Italy's landing a army in Syr this fall unless you can stop it", "That sounds good to me. I'll move to defend against Italy while you move west. If it's not too much too ask, I'd like to request that you withdraw your fleet from bla.", "Oh sorry missed the msg to move out of bl sea ill do that this turn. I did bring my army down into Armenia, To help you expel the Italian. It looks like Austria and Italy are working together. If we have a chance in the region you should probably use smy to protect con. We can't afford to lose con.", "I'll defend con from both ank and smy.", "Hey sorry for stabbing you earlier, it was an especially hard choice since Turkey is usually my country of choice. It's cool we got to do this study huh?"], "sender_labels": [false, true, false, true, true, true, true, true, true, true, true], "receiver_labels": [true, true, true, true, true, true, true, true, true, true, "NOANNOTATION"], "speakers": ["russia", "turkey", "russia", "russia", "russia", "turkey", "russia", "turkey", "russia", "turkey", "russia"], "receivers": ["turkey", "russia", "turkey", "turkey", "turkey", "russia", "turkey", "russia", "turkey", "russia", "turkey"], "absolute_message_index": [78, 107, 145, 370, 371, 374, 415, 420, 495, 497, 717], "relative_message_index": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], "seasons": ["Spring", "Spring", "Spring", "Spring", "Spring", "Spring", "Fall", "Fall", "Spring", "Spring", "Fall"], "years": ["1901", "1901", "1901", "1902", "1902", "1902", "1902", "1902", "1903", "1903", "1905"], "game_score": ["4", "3", "4", "5", "5", "4", "5", "4", "5", "3", "7"], "game_score_delta": ["1", "-1", "1", "1", "1", "-1", "1", "-1", "2", "-2", "7"], "players": ["russia", "turkey"], "game_id": 10 } ``` ### Data Fields - speakers: the sender of the message (string format. Seven possible values: russia, turkey, england, austria, germany, france, italy) - receivers: the receiver of the message (string format. Seven possible values: russia, turkey, england, austria, germany, france, italy) - messages: the raw message string (string format. ranges in length from one word to paragraphs in length) - sender_labels: indicates if the sender of the message selected that the message is truthful, true, or deceptive, false. 
This is used for our ACTUAL_LIE calculation (true/false which can be bool or string format) - receiver_labels: indicates if the receiver of the message selected that the message is perceived as truthful, true, or deceptive, false. In <10% of the cases, no annotation was received. This is used for our SUSPECTED_LIE calculation (string format. true/false/"NOANNOTATION" ) - game_score: the current game score---supply centers---of the sender (string format that ranges can range from 0 to 18) - game_score_delta: the current game score---supply centers---of the sender minus the game score of the recipient (string format that ranges from -18 to 18) - absolute_message_index: the index the message is in the entire game, across all dialogs (int format) - relative_message_index: the index of the message in the current dialog (int format) - seasons: the season in Diplomacy, associated with the year (string format. Spring, Fall, Winter) - years: the year in Diplomacy, associated with the season (string format. 1901 through 1918) - game_id: which of the 12 games the dialog comes from (int format ranging from 1 to 12) ### Data Splits Train, Test and Validation splits ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information Unknown ### Citation Information @inproceedings{Peskov:Cheng:Elgohary:Barrow:Danescu-Niculescu-Mizil:Boyd-Graber-2020, Title = {It Takes Two to Lie: One to Lie and One to Listen}, Author = {Denis Peskov and Benny Cheng and Ahmed Elgohary and Joe Barrow and Cristian Danescu-Niculescu-Mizil and Jordan Boyd-Graber}, Booktitle = {Association for Computational Linguistics}, Year = {2020}, Location = {Seattle}, } ### Contributions Thanks to [@MisbahKhan789](https://github.com/MisbahKhan789) for adding this dataset.
10,653
[ [ -0.05279541015625, -0.047119140625, 0.0205841064453125, 0.022491455078125, -0.0177764892578125, -0.01378631591796875, -0.0096893310546875, -0.03973388671875, 0.038177490234375, 0.058441162109375, -0.061920166015625, -0.06903076171875, -0.041595458984375, 0.0...
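Since the card encodes `sender_labels` as a per-message class label ('false' = deceptive, 'true' = truthful), counting sender-annotated lies in the train split is a short loop. A sketch, assuming the dataset id above still loads:

```python
from datasets import load_dataset

# Illustrative sketch: feature names follow the card; class index 0 maps to
# the 'false' (deceptive) entry in the sender_labels sequence.
ds = load_dataset("diplomacy_detection", split="train")

n_messages = sum(len(dialog["sender_labels"]) for dialog in ds)
n_lies = sum(labels.count(0) for labels in ds["sender_labels"])
print(f"{n_lies}/{n_messages} messages marked deceptive by their senders")
```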
oclar
2022-11-03T16:15:26.000Z
[ "task_categories:text-classification", "task_ids:text-scoring", "task_ids:sentiment-classification", "task_ids:sentiment-scoring", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language...
null
The OCLAR researchers (Al Omari et al., 2019) gathered Arabic customer reviews from Google Reviews and the Zomato website (https://www.zomato.com/lebanon) across a wide range of domains, including restaurants, hotels, hospitals, and local shops. The corpus contains 3916 reviews on a 5-point rating scale. For this research, the positive class covers ratings of 3 to 5 (3465 reviews) and the negative class covers ratings of 1 and 2 (about 451 texts).
@misc{Dua:2019 , author = "Dua, Dheeru and Graff, Casey", year = "2017", title = "{UCI} Machine Learning Repository", url = "http://archive.ics.uci.edu/ml", institution = "University of California, Irvine, School of Information and Computer Sciences" } @InProceedings{AlOmari2019oclar, title = {Sentiment Classifier: Logistic Regression for Arabic Services Reviews in Lebanon}, authors={Al Omari, M., Al-Hajj, M., Hammami, N., & Sabra, A.}, year={2019} }
1
99
2022-03-02T23:29:22
--- annotations_creators: - crowdsourced language_creators: - crowdsourced language: - ar license: - unknown multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - text-scoring - sentiment-classification - sentiment-scoring paperswithcode_id: null pretty_name: OCLAR dataset_info: features: - name: pagename dtype: string - name: review dtype: string - name: rating dtype: int8 splits: - name: train num_bytes: 398204 num_examples: 3916 download_size: 382976 dataset_size: 398204 --- # Dataset Card for OCLAR ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [OCLAR homepage](http://archive.ics.uci.edu/ml/datasets/Opinion+Corpus+for+Lebanese+Arabic+Reviews+%28OCLAR%29#) - **Paper:** [paper link](https://www.semanticscholar.org/paper/Sentiment-Classifier%3A-Logistic-Regression-for-in-Omari-Al-Hajj/9319f4d9e8b3b7bfd0d214314911c071ba7ce1a0) - **Point of Contact:** [Marwan Al Omari](marwanalomari@yahoo.com) ### Dataset Summary The researchers of OCLAR Marwan et al. (2019), they gathered Arabic costumer reviews [Zomato website](https://www.zomato.com/lebanon) on wide scope of domain, including restaurants, hotels, hospitals, local shops, etc. The corpus finally contains 3916 reviews in 5-rating scale. For this research purpose, the positive class considers rating stars from 5 to 3 of 3465 reviews, and the negative class is represented from values of 1 and 2 of about 451 texts. ### Supported Tasks and Leaderboards Opinion Corpus for Lebanese Arabic Reviews (OCLAR) corpus is utilizable for Arabic sentiment classification on services reviews, including hotels, restaurants, shops, and others. ### Languages The text in the dataset is in Arabic, mainly in Lebanese (LB). The associated BCP-47 code is `ar-LB`. ## Dataset Structure ### Data Instances A typical data point comprises a `pagename` which is the name of service / location being reviewed, a `review` which is the review left by the user / client , and a `rating` which is a score between 1 and 5. The authors consider a review to be positive if the score is greater or equal than `3`, else it is considered negative. An example from the OCLAR data set looks as follows: ``` "pagename": 'Ramlet Al Baida Beirut Lebanon', "review": 'مكان يطير العقل ويساعد على الاسترخاء', "rating": 5, ``` ### Data Fields - `pagename`: string name of the service / location being reviewed - `review`: string review left by the user / costumer - `rating`: number of stars left by the reviewer. 
It ranges from 1 to 5. ### Data Splits The data set comes in a single csv file of a total `3916` reviews : - `3465` are considered positive (a rating of 3 to 5) - `451` are considered negative (a rating of 1 or 2) ## Dataset Creation ### Curation Rationale This dataset was created for Arabic sentiment classification on services’ reviews in Lebanon country. Reviews are about public services, including hotels, restaurants, shops, and others. ### Source Data #### Initial Data Collection and Normalization The data was collected from Google Reviews and [Zomato website](https://www.zomato.com/lebanon) #### Who are the source language producers? The source language producers are people who posted their reviews on Google Reviews or [Zomato website](https://www.zomato.com/lebanon). They're mainly Arabic speaking Lebanese people. ### Annotations #### Annotation process The dataset does not contain any additional annotations #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset The author's research has tackled a highly important task of sentiment analysis for Arabic language in the Lebanese context on 3916 reviews’ services from Google and Zomato. Experiments show three main findings: 1) The classifier is confident when used to predict positive reviews, 2) while it is biased on predicting reviews with negative sentiment, and finally 3) the low percentage of negative reviews in the corpus contributes to the diffidence of LR. ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators This dataset was curated by Marwan Al Omari, Moustafa Al-Hajj from Centre for Language Sciences and Communication, Lebanese University, Beirut, Lebanon; Nacereddine Hammami from college of Computer and Information Sciences, Jouf University, Aljouf, KSA; and Amani Sabra from Centre for Language Sciences and Communication, Lebanese University, Beirut, Lebanon. ### Licensing Information [More Information Needed] ### Citation Information - Marwan Al Omari, Centre for Language Sciences and Communication, Lebanese University, Beirut, Lebanon, marwanalomari '@' yahoo.com - Moustafa Al-Hajj, Centre for Language Sciences and Communication, Lebanese University, Beirut, Lebanon, moustafa.alhajj '@' ul.edu.lb - Nacereddine Hammami, college of Computer and Information Sciences, Jouf University, Aljouf, KSA, n.hammami '@' ju.edu.sa - Amani Sabra, Centre for Language Sciences and Communication, Lebanese University, Beirut, Lebanon, amani.sabra '@' ul.edu.lb ``` @misc{Dua:2019 , author = "Dua, Dheeru and Graff, Casey", year = "2017", title = "{UCI} Machine Learning Repository", url = "http://archive.ics.uci.edu/ml", institution = "University of California, Irvine, School of Information and Computer Sciences" } @InProceedings{AlOmari2019oclar, title = {Sentiment Classifier: Logistic Regression for Arabic Services Reviews in Lebanon}, authors={Al Omari, M., Al-Hajj, M., Hammami, N., & Sabra, A.}, year={2019} } ``` ### Contributions Thanks to [@alaameloh](https://github.com/alaameloh) for adding this dataset.
6,903
[ [ -0.053131103515625, -0.035980224609375, 0.00862884521484375, 0.01531219482421875, -0.030609130859375, -0.0029926300048828125, -0.0221099853515625, -0.03472900390625, 0.0148468017578125, 0.0433349609375, -0.0231781005859375, -0.0899658203125, -0.045074462890625, ...
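The OCLAR card gives a three-field schema (`pagename`, `review`, `rating`) and treats a rating of 3 or above as positive. A sketch of binarizing the ratings accordingly, assuming the Hub id above still resolves:

```python
from datasets import load_dataset

# Sketch only: field names and the >= 3 positivity threshold come from the card.
ds = load_dataset("oclar", split="train")

def binarize(example):
    example["label"] = int(example["rating"] >= 3)  # 1 = positive, 0 = negative
    return example

ds = ds.map(binarize)
print(ds.filter(lambda x: x["label"] == 0).num_rows, "negative reviews")
```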
taskmaster3
2022-11-03T16:30:39.000Z
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:dialogue-modeling", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:cc-by-4.0", "arxiv...
null
Taskmaster is a dataset for goal-oriented conversations. The Taskmaster-3 dataset consists of 23,757 movie ticketing dialogs. By "movie ticketing" we mean conversations where the customer's goal is to purchase tickets after deciding on theater, time, movie name, number of tickets, and date, or to opt out of the transaction. This collection was created using the "self-dialog" method, in which a single crowdsourced worker is paid to write a conversation, producing turns for both speakers, i.e. the customer and the ticketing agent.
@inproceedings{48484, title = {Taskmaster-1: Toward a Realistic and Diverse Dialog Dataset}, author = {Bill Byrne and Karthik Krishnamoorthi and Chinnadhurai Sankar and Arvind Neelakantan and Daniel Duckworth and Semih Yavuz and Ben Goodrich and Amit Dubey and Kyu-Young Kim and Andy Cedilnik}, year = {2019} }
0
99
2022-03-02T23:29:22
--- annotations_creators: - crowdsourced language_creators: - crowdsourced language: - en license: - cc-by-4.0 multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - original task_categories: - text-generation - fill-mask task_ids: - dialogue-modeling paperswithcode_id: null pretty_name: taskmaster3 dataset_info: features: - name: conversation_id dtype: string - name: vertical dtype: string - name: instructions dtype: string - name: scenario dtype: string - name: utterances list: - name: index dtype: int32 - name: speaker dtype: string - name: text dtype: string - name: apis list: - name: name dtype: string - name: index dtype: int32 - name: args list: - name: arg_name dtype: string - name: arg_value dtype: string - name: response list: - name: response_name dtype: string - name: response_value dtype: string - name: segments list: - name: start_index dtype: int32 - name: end_index dtype: int32 - name: text dtype: string - name: annotations list: - name: name dtype: string splits: - name: train num_bytes: 143609327 num_examples: 23757 download_size: 313402141 dataset_size: 143609327 --- # Dataset Card for taskmaster3 ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Taskmaster](https://research.google/tools/datasets/taskmaster-1/) - **Repository:** [GitHub](https://github.com/google-research-datasets/Taskmaster/tree/master/TM-3-2020) - **Paper:** [Taskmaster-1: Toward a Realistic and Diverse Dialog Dataset](https://arxiv.org/abs/1909.05358) - **Leaderboard:** N/A - **Point of Contact:** [Taskmaster Googlegroup](taskmaster-datasets@googlegroups.com) ### Dataset Summary Taskmaster is dataset for goal oriented conversations. The Taskmaster-3 dataset consists of 23,757 movie ticketing dialogs. By "movie ticketing" we mean conversations where the customer's goal is to purchase tickets after deciding on theater, time, movie name, number of tickets, and date, or opt out of the transaction. This collection was created using the "self-dialog" method. This means a single, crowd-sourced worker is paid to create a conversation writing turns for both speakers, i.e. the customer and the ticketing agent. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages The dataset is in English language. ## Dataset Structure ### Data Instances A typical example looks like this ``` { "conversation_id": "dlg-ddee80da-9ffa-4773-9ce7-f73f727cb79c", "instructions": "SCENARIO: Pretend you’re *using a digital assistant to purchase tickets for a movie currently showing in theaters*. 
...", "scenario": "4 exchanges with 1 error and predefined variables", "utterances": [ { "apis": [], "index": 0, "segments": [ { "annotations": [ { "name": "num.tickets" } ], "end_index": 21, "start_index": 20, "text": "2" }, { "annotations": [ { "name": "name.movie" } ], "end_index": 42, "start_index": 37, "text": "Mulan" } ], "speaker": "user", "text": "I would like to buy 2 tickets to see Mulan." }, { "index": 6, "segments": [], "speaker": "user", "text": "Yes.", "apis": [ { "args": [ { "arg_name": "name.movie", "arg_value": "Mulan" }, { "arg_name": "name.theater", "arg_value": "Mountain AMC 16" } ], "index": 6, "name": "book_tickets", "response": [ { "response_name": "status", "response_value": "success" } ] } ] } ], "vertical": "Movie Tickets" } ``` ### Data Fields Each conversation in the data file has the following structure: - `conversation_id`: A universally unique identifier with the prefix 'dlg-'. The ID has no meaning. - `utterances`: A list of utterances that make up the conversation. - `instructions`: Instructions for the crowdsourced worker used in creating the conversation. - `vertical`: In this dataset the vertical for all dialogs is "Movie Tickets". - `scenario`: This is the title of the instructions for each dialog. Each utterance has the following fields: - `index`: A 0-based index indicating the order of the utterances in the conversation. - `speaker`: Either USER or ASSISTANT, indicating which role generated this utterance. - `text`: The raw text of the utterance. In case of self dialogs (one_person_dialogs), this is written by the crowdsourced worker. In case of the WOz dialogs, 'ASSISTANT' turns are written and 'USER' turns are transcribed from the spoken recordings of crowdsourced workers. - `segments`: A list of various text spans with semantic annotations. - `apis`: An array of API invocations made during the utterance. Each API has the following structure: - `name`: The name of the API invoked (e.g. find_movies). - `index`: The index of the parent utterance. - `args`: A `list` of `dict` with keys `arg_name` and `arg_value` which represent the name of the argument and the value for the argument respectively. - `response`: A `list` of `dict`s with keys `response_name` and `response_value` which represent the name of the response and the value for the response respectively. Each segment has the following fields: - `start_index`: The position of the start of the annotation in the utterance text. - `end_index`: The position of the end of the annotation in the utterance text. - `text`: The raw text that has been annotated. - `annotations`: A list of annotation details for this segment. Each annotation has a single field: - `name`: The annotation name. ### Data Splits There are no deafults splits for all the config. The below table lists the number of examples in each config. | | Train | |-------------------|--------| | n_instances | 23757 | ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data [More Information Needed] #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations [More Information Needed] #### Annotation process [More Information Needed] #### Who are the annotators? 
[More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information The dataset is licensed under `Creative Commons Attribution 4.0 License` ### Citation Information [More Information Needed] ``` @inproceedings{48484, title = {Taskmaster-1: Toward a Realistic and Diverse Dialog Dataset}, author = {Bill Byrne and Karthik Krishnamoorthi and Chinnadhurai Sankar and Arvind Neelakantan and Daniel Duckworth and Semih Yavuz and Ben Goodrich and Amit Dubey and Kyu-Young Kim and Andy Cedilnik}, year = {2019} } ``` ### Contributions Thanks to [@patil-suraj](https://github.com/patil-suraj) for adding this dataset.
9,272
[ [ -0.032379150390625, -0.0577392578125, 0.01532745361328125, 0.005466461181640625, -0.006526947021484375, 0.007152557373046875, -0.02392578125, -0.0274658203125, 0.0242156982421875, 0.048980712890625, -0.0823974609375, -0.06109619140625, -0.036468505859375, 0....
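Because the taskmaster3 card nests annotated `segments` inside each utterance, extracting the labeled spans takes one extra level of iteration. A sketch under the card's schema (the id and field layout are taken from the record above, not re-verified):

```python
from datasets import load_dataset

# Sketch: each example holds a list of utterances; an utterance may carry
# "segments" (annotated text spans) per the card's feature description.
ds = load_dataset("taskmaster3", split="train")

dialog = ds[0]
for utt in dialog["utterances"]:
    for seg in utt["segments"]:
        names = [a["name"] for a in seg["annotations"]]
        print(utt["index"], repr(seg["text"]), names)
```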
wiki_qa_ar
2023-01-25T15:02:18.000Z
[ "task_categories:question-answering", "task_ids:open-domain-qa", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:ar", "license:unknown", "region:us" ]
null
Arabic version of WikiQA, produced by automatic machine translation; the best translation was selected through crowdsourcing and incorporated into the corpus.
@InProceedings{YangYihMeek:EMNLP2015:WikiQA, author = {{Yi}, Yang and {Wen-tau}, Yih and {Christopher} Meek}, title = "{WikiQA: A Challenge Dataset for Open-Domain Question Answering}", journal = {Association for Computational Linguistics}, year = 2015, doi = {10.18653/v1/D15-1237}, pages = {2013–2018}, }
2
99
2022-03-02T23:29:22
--- annotations_creators: - crowdsourced language_creators: - crowdsourced language: - ar license: - unknown multilinguality: - monolingual size_categories: - 100K<n<1M source_datasets: - original task_categories: - question-answering task_ids: - open-domain-qa paperswithcode_id: wikiqaar pretty_name: English-Arabic Wikipedia Question-Answering dataset_info: features: - name: question_id dtype: string - name: question dtype: string - name: document_id dtype: string - name: answer_id dtype: string - name: answer dtype: string - name: label dtype: class_label: names: '0': '0' '1': '1' config_name: plain_text splits: - name: test num_bytes: 7563127 num_examples: 20632 - name: validation num_bytes: 3740721 num_examples: 10387 - name: train num_bytes: 26009979 num_examples: 70264 download_size: 35226436 dataset_size: 37313827 --- # Dataset Card for WikiQAar ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [WikiQaAr](https://github.com/qcri/WikiQAar) - **Repository:** [WikiQaAr](https://github.com/qcri/WikiQAar) - **Paper:** - **Point of Contact:** [Ines Abbes ](abbes.ines@yahoo.com) ### Dataset Summary Arabic Version of WikiQA by automatic automatic machine translators and crowdsourced the selection of the best one to be incorporated into the corpus ### Supported Tasks and Leaderboards [More Information Needed] ### Languages The dataset is based on Arabic. ## Dataset Structure ### Data Instances Each data point contains the question and whether the answer is a valid or not. ### Data Fields - `question_id`: the question id. - `question`: the question text. - `document_id`: the wikipedia document id. - `answer_id` : the answer id. - `answer` : a candidate answer to the question. - `label` : 1 if the `answer` is correct or 0 otherwise. ### Data Splits The dataset is not split. | | train | validation | test | |------------|-------:|-----------:|-------:| | Data split | 70,264 | 20,632 | 10,387 | ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data [More Information Needed] #### Initial Data Collection and Normalization Translation of WikiQA. #### Who are the source language producers? Translation of WikiQA. ### Annotations The dataset does not contain any additional annotations. #### Annotation process [More Information Needed] #### Who are the annotators? 
[More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` @InProceedings{YangYihMeek:EMNLP2015:WikiQA, author = {{Yi}, Yang and {Wen-tau}, Yih and {Christopher} Meek}, title = "{WikiQA: A Challenge Dataset for Open-Domain Question Answering}", journal = {Association for Computational Linguistics}, year = 2015, doi = {10.18653/v1/D15-1237}, pages = {2013–2018}, } ``` ### Contributions Thanks to [@zaidalyafeai](https://github.com/zaidalyafeai) for adding this dataset.
4,496
[ [ -0.043487548828125, -0.03564453125, 0.0009512901306152344, 0.003414154052734375, -0.00481414794921875, -0.0034122467041015625, -0.01351165771484375, -0.007312774658203125, 0.026641845703125, 0.0307769775390625, -0.052215576171875, -0.06256103515625, -0.041503906...
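The wiki_qa_ar card lists a binary `label` marking whether a candidate answer is correct. A sketch of pulling only the correct question-answer pairs, assuming the `plain_text` config named in the card:

```python
from datasets import load_dataset

# Sketch only: config, split, and field names are taken from the card above.
ds = load_dataset("wiki_qa_ar", "plain_text", split="validation")

correct = ds.filter(lambda x: x["label"] == 1)  # class index 1 == label '1'
print(correct[0]["question"], "->", correct[0]["answer"])
```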
HHousen/quora
2021-11-21T02:11:20.000Z
[ "region:us" ]
HHousen
null
null
1
99
2022-03-02T23:29:22
Entry not found
15
[ [ -0.0213775634765625, -0.01497650146484375, 0.05718994140625, 0.02880859375, -0.0350341796875, 0.046478271484375, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.0170135498046875, -0.052093505859375, -0.01497650146484375, -0.0604248046875, 0.0379028...
Tevatron/wikipedia-nq-corpus
2021-10-13T22:18:40.000Z
[ "region:us" ]
Tevatron
null
@inproceedings{karpukhin-etal-2020-dense, title = "Dense Passage Retrieval for Open-Domain Question Answering", author = "Karpukhin, Vladimir and Oguz, Barlas and Min, Sewon and Lewis, Patrick and Wu, Ledell and Edunov, Sergey and Chen, Danqi and Yih, Wen-tau", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.550", doi = "10.18653/v1/2020.emnlp-main.550", pages = "6769--6781", }
0
99
2022-03-02T23:29:22
Entry not found
15
[ [ -0.0213775634765625, -0.01497650146484375, 0.05718994140625, 0.02880859375, -0.0350341796875, 0.046478271484375, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.0170135498046875, -0.052093505859375, -0.01497650146484375, -0.0604248046875, 0.0379028...
snoop2head/commoncrawl_sampled_gpt2-xl
2022-08-04T12:28:33.000Z
[ "region:us" ]
snoop2head
null
null
0
99
2022-08-03T04:46:04
Entry not found
15
[ [ -0.0213775634765625, -0.01497650146484375, 0.05718994140625, 0.02880859375, -0.0350341796875, 0.046478271484375, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.0170135498046875, -0.052093505859375, -0.01497650146484375, -0.0604248046875, 0.0379028...
HuggingFaceH4/self-instruct-seed
2023-01-31T22:37:02.000Z
[ "task_categories:conversational", "size_categories:n<1K", "language:en", "license:apache-2.0", "arxiv:2212.10560", "region:us" ]
HuggingFaceH4
null
null
14
99
2023-01-31T22:33:52
--- license: apache-2.0 task_categories: - conversational language: - en size_categories: - n<1K --- Manually created seed dataset used in bootstrapping in the Self-instruct paper https://arxiv.org/abs/2212.10560. This is part of the instruction fine-tuning datasets.
268
[ [ -0.0192108154296875, 0.0045013427734375, 0.01165771484375, 0.02374267578125, 0.011077880859375, 0.00264739990234375, 0.0305633544921875, -0.0028247833251953125, 0.01435089111328125, 0.04254150390625, -0.06744384765625, -0.01316070556640625, -0.007106781005859375...
Dahoas/cot_gsm8k
2023-05-31T13:01:00.000Z
[ "region:us" ]
Dahoas
null
null
4
99
2023-05-31T13:00:55
--- dataset_info: features: - name: question dtype: string - name: answer dtype: string - name: prompt dtype: string - name: response dtype: string splits: - name: train num_bytes: 7710945 num_examples: 7217 - name: val num_bytes: 267770 num_examples: 256 - name: test num_bytes: 1436697 num_examples: 1319 download_size: 5472201 dataset_size: 9415412 --- # Dataset Card for "cot_gsm8k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
581
[ [ -0.049346923828125, 0.0026397705078125, 0.02410888671875, 0.017822265625, -0.0302581787109375, 0.00911712646484375, 0.018218994140625, -0.00513458251953125, 0.044097900390625, 0.043426513671875, -0.05010986328125, -0.07305908203125, -0.0517578125, -0.0099411...
ahmed-masry/unichart-pretrain-data
2023-07-30T01:39:51.000Z
[ "region:us" ]
ahmed-masry
null
null
1
99
2023-07-30T01:39:33
--- dataset_info: features: - name: imgname dtype: string - name: query dtype: string - name: label dtype: string splits: - name: train num_bytes: 1198892722 num_examples: 6898333 download_size: 346172299 dataset_size: 1198892722 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "unichart-pretrain-data" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
532
[ [ -0.038360595703125, 0.005779266357421875, 0.0163421630859375, 0.006748199462890625, -0.02874755859375, 0.007343292236328125, 0.01229095458984375, 0.0005402565002441406, 0.05181884765625, 0.02545166015625, -0.0672607421875, -0.054595947265625, -0.03765869140625, ...
maastrichtlawtech/lleqa
2023-10-25T10:07:40.000Z
[ "task_categories:question-answering", "task_categories:text-retrieval", "task_categories:text-classification", "task_ids:closed-domain-qa", "task_ids:document-question-answering", "task_ids:document-retrieval", "task_ids:topic-classification", "annotations_creators:expert-generated", "language_creat...
maastrichtlawtech
null
null
1
99
2023-09-27T13:31:22
--- annotations_creators: - expert-generated language_creators: - found language: - fr license: - cc-by-nc-sa-4.0 multilinguality: - monolingual pretty_name: LLeQA size_categories: - 1K<n<10K source_datasets: - original task_categories: - question-answering - text-retrieval - text-classification task_ids: - closed-domain-qa - document-question-answering - document-retrieval - topic-classification paperswithcode_id: lleqa tags: - legal configs: - config_name: corpus data_files: - split: corpus path: "articles.json" - config_name: questions data_files: - split: train path: "questions_train.json" - split: validation path: "questions_dev.json" - split: test path: "questions_test.json" - config_name: negatives data_files: - split: bm25 path: "negatives/negatives_bm25.json" - split: me5 path: "negatives/negatives_me5_large.json" extra_gated_fields: Name: text Email: text Affiliation: text Job Title: text Country: text I agree to use this dataset for non-commerical use ONLY: checkbox --- # Dataset Card for LLeQA ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Repository:** [maastrichtlawtech/lleqa](https://github.com/maastrichtlawtech/lleqa) - **Paper:** [Interpretable Long-Form Legal Question Answering with Retrieval-Augmented Large Language Models](https://arxiv.org/abs/2309.17050) - **Point of Contact:** [Maastricht Law & Tech Lab](law-techlab@maastrichtuniversity.nl) ### Dataset Summary The Long-form Legal Question Answering (LLeQA) dataset is a French-native expert-annotated dataset for studying legal question answering. LLeQA builds upon [BSARD](https://huggingface.co/datasets/maastrichtlawtech/bsard), an information retrieval dataset comprising 1,108 legal questions labeled with relevant provisions from a corpus of 22,633 Belgian law articles, and enhance it in two ways: 1. We introduce 760 new legal questions (+69\%) and 5,308 additional statutory articles (+23\%). 2. We supplement the data with new types of annotations, including an exhaustive taxonomy for the question, the jurisdictions concerned, the exact paragraph-level references within the relevant articles, and a comprehensive answer written by seasoned legal professionals. Owing to the rich variety of its annotations, LLeQA serves as a multifaceted resource that extends its utility beyond legal question answering and has the potential to catalyze significant progress in various legal tasks, such as legal inquiry classification, legal topic modeling, and legal information retrieval. 
### Supported Tasks and Leaderboards - `qestion-answering`: The dataset can be used to train a model for long-form question-answering (LFQA) in the legal domain, which consists in comprehensively answering a short legal question in a free-form based on a given context of one or several statutory articles. Success on this task is typically measured by achieving high [ROUGE](https://huggingface.co/spaces/evaluate-metric/rouge) or [METEOR](https://huggingface.co/spaces/evaluate-metric/meteor) scores, even though these metrics are not always correlated with human judgment. - `text-retrieval`: The dataset can be used to train a model for information retrieval (IR) in the legal domain, which consists in retrieving relevant statutory articles based on a given legal question. Success on this task is typically measured by achieving high [recall](https://huggingface.co/spaces/evaluate-metric/recall) and [precision](https://huggingface.co/spaces/evaluate-metric/precision) scores at various cut-offs. - `text-classification`: The dataset can be used to train a model for text classification in the legal domain, which consists in classifying a legal question into a predefined set of topics. Success on this task is typically measured by achieving high [accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) scores. ### Languages The text in the dataset is in French, as spoken in Wallonia and Brussels-Capital region. The associated BCP-47 code is `fr-BE`. ## Dataset Structure ### Data Instances A `question` sample typically comprises a unique identifier (*int*), the question itself (*str*), the regions concerned (*List[str]*), related topics (*List[str]*), the IDs of the relevant articles from the knowledge corpus (*List[int]*), the exact paragraphs within those articles that are relevant to the question (*List[str]*), and a comprehensive expert-written answer (*str*). Below is an example of such sample from the LLeQA test set: ```json { "id":696, "question":"Je souhaite divorcer pour cause de désunion irrémédiable. Puis-je fixer une limite dans le temps pour la pension alimentaire ?", "regions":["Région wallonne", "Région de Bruxelles-Capitale", "Région flamande"], "topics":["Famille, Obligations alimentaires, Les pensions alimentaires (entre époux/ex-époux), Pensions alimentaires dans le cadre d'une procédure de divorce, Procédure de divorce pour cause de désunion irrémédiable"], "article_ids":[3604], "paragraph_ids":["3604§4", "3604§10"], "answer":"Oui, c'est le juge qui fixe cette limite dans le jugement de divorce. En principe, la durée de la pension alimentaire après divorce est limitée au maximum à la durée du mariage. Mais le juge peut la fixer pour une durée plus courte. Il décide toujours en fonction de la situation concrète des ex-conjoints. A l’expiration de ce délai, le juge peut prolonger le paiement de la pension alimentaire. Celui qui reçoit la pension alimentaire doit prouver qu'à cause de circonstances exceptionnelles et pour des raisons indépendantes de sa volonté, il est toujours dans un état de besoin. L'obligation de payer la pension alimentaire prend également fin si : celui qui reçoit la pension alimentaire se remarie ou fait une déclaration de cohabitation légale. Dans ce cas, il perd automatiquement son droit à la pension alimentaire après divorce, sauf si le jugement de divorce prévoit autre chose ; celui qui reçoit la pension alimentaire vit maritalement avec une autre personne. 
Dans ce cas, le juge peut décider de mettre fin à la pension alimentaire ; celui qui reçoit la pension alimentaire décède. Dans ce cas, le paiement de la pension alimentaire prend automatiquement fin.", } ``` An `article` sample typically contains a unique identifier (*int*), a legislative reference (*str*), the authority that issued the article (*str*), a description resulting from the concatenated headings of the sections the article belong to (*str*), the individual headings of these sections (*str*), the article number in the statute (*str*), the full content of the article (*str*), and the content of its individual paragraphs (*Dict[str]*). Below is an example of such sample from the knwoledge corpus: ```json { "id":3604, "reference":"Art. 301, Code civil (Livre I, Titre VI, Chapitre IV)", "authority":"federale", "description":"Des personnes, Du divorce, Des effets du divorce", "article_no":"301", "code":"Code civil", "book":"Des personnes", "part":null, "act":"Du divorce", "chapter":"Des effets du divorce", "section":null, "subsection":null, "article":"§ 1er. Les époux peuvent convenir à tout moment de la pension alimentaire éventuelle, du montant de celle-ci et des modalités selon lesquelles le montant convenu pourrait être revu.§ 2. A défaut de la convention visée au § 1er, le tribunal de la famillepeut, dans le jugement prononçant le divorce ou lors d'une décision ultérieure, accorder, à la demande de l'époux dans le besoin, une pension alimentaire à charge de l'autre époux.Le tribunal peut refuser de faire droit à la demande de pension si le défendeur prouve que le demandeur a commis une faute grave ayant rendu impossible la poursuite de la vie commune.En aucun cas, la pension alimentaire n'est accordée au conjoint reconnu coupable d'un fait visé aux articles 375, 398 à 400, 402, 403 ou 405 du Code pénal, commis contre la personne du défendeur, ou d'une tentative de commettre un fait visé aux articles 375, 393, 394 ou 397 du même Code contre cette même personne.Par dérogation à l'article 4 du titre préliminaire du Code de procédure pénale, le juge peut, en attendant que la décision sur l'action publique soit coulée en force de chose jugée, allouer au demandeur une pension provisionnelle, en tenant compte de toutes les circonstances de la cause. Il peut subordonner l'octroi de cette pension provisionnelle à la constitution d'une garantie qu'il détermine et dont il fixe les modalités.§ 3. Le tribunal fixe le montant de la pension alimentaire qui doit couvrir au moins l'état de besoin du bénéficiaire.Il tient compte des revenus et possibilités des conjoints et de la dégradation significative de la situation économique du bénéficiaire. Pour apprécier cette dégradation, le juge se fonde notamment sur la durée du mariage, l'âge des parties, leur comportement durant le mariage quant à l'organisation de leurs besoins, la charge des enfants pendant la vie commune ou après celle-ci. Le juge peut décider le cas échéant que la pension sera dégressive et déterminer dans quelle mesure elle le sera.La pension alimentaire ne peut excéder le tiers des revenus du conjoint débiteur.§ 4. La durée de la pension ne peut être supérieure à celle du mariage.En cas de circonstances exceptionnelles, si le bénéficiaire démontre qu'à l'expiration du délai visé à l'alinéa 1er, il reste, pour des raisons indépendantes de sa volonté, dans un état de besoin, le tribunal peut prolonger le délai. 
Dans ce cas, le montant de la pension correspond au montant nécessaire pour couvrir l'état de besoin du bénéficiaire.§ 5. Si le défendeur prouve que l'état de besoin du demandeur résulte d'une décision prise unilatéralement par celui-ci, et sans que les besoins de la famille aient justifié ce choix, il peut être dispensé de payer la pension ou n'être tenu que de payer une pension réduite.§ 6. Le tribunal qui accorde la pension constate que celle-ci est adaptée de plein droit aux fluctuations de l'indice des prix à la consommation.Le montant de base de la pension correspond à l'indice des prix à la consommation du mois au cours duquel le jugement ou l'arrêt prononçant le divorce est coulé en force de chose jugée, à moins que le tribunal n'en décide autrement. Tous les douze mois, le montant de la pension est adapté en fonction de la hausse ou de la baisse de l'indice des prix à la consommation du mois correspondant.Ces modifications sont appliquées à la pension dès l'échéance qui suit la publication au Moniteur belge de l'indice nouveau à prendre en considération.Le tribunal peut, dans certains cas, appliquer un autre système d'adaptation de la pension au coût de la vie.§ 7. Sauf si les parties ont convenu expressément le contraire, le tribunal peut, ultérieurement, à la demande d'une des parties, augmenter, réduire ou supprimer la pension, si, à la suite de circonstances nouvelles et indépendantes de la volonté des parties, son montant n'est plus adapté.De même, si à la suite de la dissolution du mariage, la liquidation-partage du patrimoine commun ou de l'indivision ayant existé entre les époux entraîne une modification de leur situation financière qui justifie une adaptation de la pension alimentaire ayant fait l'objet d'un jugement ou d'une convention intervenus avant l'établissement de comptes de la liquidation, le tribunal peut adapter la pension, 2.§ 8. La pension peut à tout moment être remplacée, de l'accord des parties, par un capital homologué par le tribunal. A la demande du débiteur de la pension, le tribunal peut également accorder à tout moment la capitalisation.§ 9. Les époux ne peuvent pas renoncer aux droits à la pension alimentaire avant la dissolution du mariage.Ils peuvent néanmoins transiger, en cours de procédure, sur le montant de cette pension 5.§ 10. La pension n'est plus due au décès du débiteur, mais le bénéficiaire peut demander des aliments à charge de la succession aux conditions prévues à l'article 205bis, § 1er et §§ 3 à 6 .La pension prend, en toute hypothèse, définitivement fin en cas de remariage du bénéficiaire de la pension ou au moment où ce dernier fait une déclaration de cohabitation légale, sauf convention contraire des parties.Le juge peut mettre fin à la pension lorsque le bénéficiaire vit maritalement avec une autre personne.§ 11. Le tribunal peut décider qu'en cas de défaut d'exécution par le débiteur de son obligation de paiement, le bénéficiaire de la pension sera autorisé à percevoir les revenus de celui-ci ou ceux des biens qu'il administre en vertu de leur régime matrimonial, ainsi que toutes autres sommes qui lui sont dues par des tiers.Cette décision est opposable à tout tiers débiteur, actuel ou futur, sur la notification qui leur en est faite par le greffier à la requête du demandeur.§ 12. 1.", "paragraphs":{ "1":"§ 1er. Les époux peuvent convenir à tout moment de la pension alimentaire éventuelle, du montant de celle-ci et des modalités selon lesquelles le montant convenu pourrait être revu", "2":"§ 2. 
A défaut de la convention visée au § 1er, le tribunal de la famillepeut, dans le jugement prononçant le divorce ou lors d'une décision ultérieure, accorder, à la demande de l'époux dans le besoin, une pension alimentaire à charge de l'autre époux.Le tribunal peut refuser de faire droit à la demande de pension si le défendeur prouve que le demandeur a commis une faute grave ayant rendu impossible la poursuite de la vie commune.En aucun cas, la pension alimentaire n'est accordée au conjoint reconnu coupable d'un fait visé aux articles 375, 398 à 400, 402, 403 ou 405 du Code pénal, commis contre la personne du défendeur, ou d'une tentative de commettre un fait visé aux articles 375, 393, 394 ou 397 du même Code contre cette même personne.Par dérogation à l'article 4 du titre préliminaire du Code de procédure pénale, le juge peut, en attendant que la décision sur l'action publique soit coulée en force de chose jugée, allouer au demandeur une pension provisionnelle, en tenant compte de toutes les circonstances de la cause. Il peut subordonner l'octroi de cette pension provisionnelle à la constitution d'une garantie qu'il détermine et dont il fixe les modalités", "3":"§ 3. Le tribunal fixe le montant de la pension alimentaire qui doit couvrir au moins l'état de besoin du bénéficiaire.Il tient compte des revenus et possibilités des conjoints et de la dégradation significative de la situation économique du bénéficiaire. Pour apprécier cette dégradation, le juge se fonde notamment sur la durée du mariage, l'âge des parties, leur comportement durant le mariage quant à l'organisation de leurs besoins, la charge des enfants pendant la vie commune ou après celle-ci. Le juge peut décider le cas échéant que la pension sera dégressive et déterminer dans quelle mesure elle le sera.La pension alimentaire ne peut excéder le tiers des revenus du conjoint débiteur", "4":"§ 4. La durée de la pension ne peut être supérieure à celle du mariage.En cas de circonstances exceptionnelles, si le bénéficiaire démontre qu'à l'expiration du délai visé à l'alinéa 1er, il reste, pour des raisons indépendantes de sa volonté, dans un état de besoin, le tribunal peut prolonger le délai. Dans ce cas, le montant de la pension correspond au montant nécessaire pour couvrir l'état de besoin du bénéficiaire", "5":"§ 5. Si le défendeur prouve que l'état de besoin du demandeur résulte d'une décision prise unilatéralement par celui-ci, et sans que les besoins de la famille aient justifié ce choix, il peut être dispensé de payer la pension ou n'être tenu que de payer une pension réduite", "6":"§ 6. Le tribunal qui accorde la pension constate que celle-ci est adaptée de plein droit aux fluctuations de l'indice des prix à la consommation.Le montant de base de la pension correspond à l'indice des prix à la consommation du mois au cours duquel le jugement ou l'arrêt prononçant le divorce est coulé en force de chose jugée, à moins que le tribunal n'en décide autrement. Tous les douze mois, le montant de la pension est adapté en fonction de la hausse ou de la baisse de l'indice des prix à la consommation du mois correspondant.Ces modifications sont appliquées à la pension dès l'échéance qui suit la publication au Moniteur belge de l'indice nouveau à prendre en considération.Le tribunal peut, dans certains cas, appliquer un autre système d'adaptation de la pension au coût de la vie", "7":"§ 7. 
Sauf si les parties ont convenu expressément le contraire, le tribunal peut, ultérieurement, à la demande d'une des parties, augmenter, réduire ou supprimer la pension, si, à la suite de circonstances nouvelles et indépendantes de la volonté des parties, son montant n'est plus adapté.De même, si à la suite de la dissolution du mariage, la liquidation-partage du patrimoine commun ou de l'indivision ayant existé entre les époux entraîne une modification de leur situation financière qui justifie une adaptation de la pension alimentaire ayant fait l'objet d'un jugement ou d'une convention intervenus avant l'établissement de comptes de la liquidation, le tribunal peut adapter la pension, 2", "8":"§ 8. La pension peut à tout moment être remplacée, de l'accord des parties, par un capital homologué par le tribunal. A la demande du débiteur de la pension, le tribunal peut également accorder à tout moment la capitalisation", "9":"§ 9. Les époux ne peuvent pas renoncer aux droits à la pension alimentaire avant la dissolution du mariage.Ils peuvent néanmoins transiger, en cours de procédure, sur le montant de cette pension 5", "10":"§ 10. La pension n'est plus due au décès du débiteur, mais le bénéficiaire peut demander des aliments à charge de la succession aux conditions prévues à l'article 205bis, § 1er et §§ 3 à 6 .La pension prend, en toute hypothèse, définitivement fin en cas de remariage du bénéficiaire de la pension ou au moment où ce dernier fait une déclaration de cohabitation légale, sauf convention contraire des parties.Le juge peut mettre fin à la pension lorsque le bénéficiaire vit maritalement avec une autre personne", "11":"§ 11. Le tribunal peut décider qu'en cas de défaut d'exécution par le débiteur de son obligation de paiement, le bénéficiaire de la pension sera autorisé à percevoir les revenus de celui-ci ou ceux des biens qu'il administre en vertu de leur régime matrimonial, ainsi que toutes autres sommes qui lui sont dues par des tiers.Cette décision est opposable à tout tiers débiteur, actuel ou futur, sur la notification qui leur en est faite par le greffier à la requête du demandeur" } } `````` ### Data Fields - The `question`samples have the following fields: - `id`: an *int32* feature corresponding to a unique ID number for the question. - `question`: a *string* feature corresponding to the question. - `regions`: a *list of strings* feature of regions concerned by the question. - `topics`: a *list of strings* feature of topics related to the question. - `article_ids`: a *list of ints* feature of article IDs from the knowledge corpus relevant to the question. - `paragraph_ids`: a *list of strings* feature of the exact paragraph IDs within the articles that are relevant to the question. - `answer`: a *string* feature corresponding to the comprehensive answer to the question. - The `article` samples have the following fields: - `id`: an *int32* feature corresponding to a unique ID number for the article. - `reference`: a *string* feature corresponding to the legislative reference of the article. - `authority`: a *string* feature corresponding to the authority that issued the article (either *"regional"* or *"federal"*). - `description`: a *string* feature corresponding to the concatenated headings of the article. - `article_no`: a *string* feature corresponding to the article number in the statute. - `code`: a *string* feature corresponding to the law code to which the article belongs. - `book`: a *string* feature corresponding to the book to which the article belongs. 
- `part`: a *string* feature corresponding to the part to which the article belongs. - `act`: a *string* feature corresponding to the act to which the article belongs. - `chapter`: a *string* feature corresponding to the chapter to which the article belongs. - `section`: a *string* feature corresponding to the section to which the article belongs. - `subsection`: a *string* feature corresponding to the subsection to which the article belongs. - `article`: a *string* feature corresponding to the full content of the article. - `paragraphs`: a *dict of strings* feature corresponding to the content of the individual paragraphs of the article. ### Data Splits The LLeQA dataset is split into train, dev, and test sets with a roughly 80/10/10 ratio. The number of `question` samples in each set is given below: | | Train | Dev | Test | | ----- | ------ | ---- | ----- | | LLeQA | 1472 | 201 | 195 | ## Dataset Creation ### Curation Rationale The dataset is intended to be used by researchers to build and evaluate IR and QA models in the legal domain. It should not be regarded as a reliable source of legal information at this point in time, as both the questions and articles correspond to an outdated version of the Belgian law from May 2023 (time of dataset collection). For up-to-date legal information, the user is advised to consult daily updated official legal resources (e.g., the Belgian Official Gazette). ### Source Data #### Initial Data Collection and Normalization The collection process of LLeQA involves three main stages. First, we gather and refine annotated legal questions. Then, we build an expansive corpus of supportive statutory articles drawn from Belgian legislation. Finally, we enrich the question annotations by generating paragraph-level references within relevant articles. We elaborate upon each of these steps below. Please refer to the paper for more details. #### Who are the source language producers? Speakers were not directly approached for inclusion in this dataset and thus could not be asked for demographic information. Questions were collected, anonymized, and reformulated by Belgian jurists from [Droits Quotidiens](https://www.droitsquotidiens.be/fr/equipe). Therefore, no direct information about the speakers’ age and gender distribution, or socioeconomic status is available. However, it is expected that most, but not all, of the speakers are adults (18+ years), speak French as a native language, and live in Wallonia or the Brussels-Capital region. ### Annotations #### Annotation process We partner with [Droits Quotidiens](https://www.droitsquotidiens.be/fr/equipe), a Belgian non-profit organization that endeavors to make the law comprehensible and accessible to the most vulnerable. To this end, the organization maintains a rich website featuring thousands of legal questions commonly posed by Belgian citizens. Each question comes with its own individual page, encompassing one or more categorizations, references to relevant legislative statutes, and a detailed answer written in layman's terms by experienced jurists. Practically, their legal clarification process consists of four steps. First, they select a common legal issue based on the numerous support requests they receive every day. Then, they define a new anonymized "model" question on that issue expressed in simple terms, as close as possible to how a layperson would have asked it. 
Finally, the jurists search the Belgian law for articles that help answer the model question, reference them, and write a comprehensive answer in a language that is understandable by the general public. #### Who are the annotators? A total of six Belgian jurists from [Droits Quotidiens](https://www.droitsquotidiens.be/fr/equipe) contributed to annotating the questions. All have a law degree from a Belgian university and years of experience in providing legal advice and clarifications of the law. They range in age from 30 to 60 years, including one man and five women, gave their ethnicity as white European, speak French as a native language, and represent the upper middle class based on income levels. ### Personal and Sensitive Information The questions represent informal, asynchronous, edited, written language that has an average length of 15 words. None of them contained hateful, aggressive, or inappropriate language as they were all reviewed and reworded by Droits Quotidiens to be neutral, anonymous, and comprehensive. The legal articles represent strong, formal, written language that has a median length of 84 words (yet 1500+ articles exceed 500 words). ## Considerations for Using the Data ### Social Impact of Dataset We believe LLeQA can serve as a robust foundation for advancements in interpretable, long-form legal question answering, thereby contributing to the democratization of legal access. ### Discussion of Biases [More Information Needed] ### Other Known Limitations - It is essential to note that not all legal questions can be answered with statutes alone. For instance, the question “Can I evict my tenants if they make too much noise?” might not have a detailed answer within the statutory law that quantifies a specific noise threshold at which eviction is allowed. Instead, the landlord should probably rely more on case law and find precedents similar to their current situation (e.g., the tenant throws two parties a week that last until 2 am). Hence, some questions are better suited than others to the statutory article retrieval task, and the domain of the less suitable ones remains to be determined. ## Additional Information ### Dataset Curators The dataset was created by Antoine Louis during work done at the Law & Tech lab of Maastricht University, with the help of jurists from [Droits Quotidiens](https://www.droitsquotidiens.be/fr/equipe). ### Licensing Information LLeQA is distributed under gated access for research purposes only and is licensed under the [CC BY-NC-SA 4.0 license](https://creativecommons.org/licenses/by-nc-sa/4.0/). ### Citation Information ```latex @article{louis2023interpretable, author = {Louis, Antoine and van Dijck, Gijs and Spanakis, Gerasimos}, title = {Interpretable Long-Form Legal Question Answering with Retrieval-Augmented Large Language Models}, journal = {CoRR}, volume = {abs/2309.17050}, year = {2023}, url = {https://arxiv.org/abs/2309.17050}, eprinttype = {arXiv}, eprint = {2309.17050}, } ``` ### Contributions Thanks to [@antoiloui](https://github.com/antoiloui) for adding this dataset.
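### Usage Example

Below is a minimal loading sketch with 🤗 Datasets, based on the three configurations declared in the metadata above (`corpus`, `questions`, `negatives`). The Hub repository id and the need to be logged in with an account that has been granted gated access are assumptions for illustration, not official instructions.

```python
from datasets import load_dataset

repo = "maastrichtlawtech/lleqa"  # assumed Hub id, mirroring the GitHub repository linked above

# Knowledge corpus of Belgian statutory articles.
articles = load_dataset(repo, "corpus", split="corpus")

# Expert-annotated questions (train / validation / test).
questions = load_dataset(repo, "questions")

# Hard negatives mined with BM25 (an "me5" split is also declared above).
negatives = load_dataset(repo, "negatives", split="bm25")

sample = questions["train"][0]
print(sample["question"])
print(sample["article_ids"], sample["paragraph_ids"])
print(sample["answer"][:200])
```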
27,824
[ [ -0.033447265625, -0.043060302734375, 0.040924072265625, 0.02410888671875, -0.0288238525390625, -0.0216827392578125, 0.0025882720947265625, -0.01177215576171875, 0.0246124267578125, 0.03826904296875, -0.03143310546875, -0.039764404296875, -0.03814697265625, 0...
vlsp-2023-vllm/mmlu
2023-09-30T03:37:34.000Z
[ "region:us" ]
vlsp-2023-vllm
null
null
0
99
2023-09-29T19:08:22
--- configs: - config_name: default data_files: - split: validation path: data/validation-* - split: dev path: data/dev-* - split: test path: data/test-* dataset_info: features: - name: id dtype: string - name: answer dtype: int64 - name: question dtype: string - name: choices sequence: string splits: - name: validation num_bytes: 890402 num_examples: 1456 - name: dev num_bytes: 140819 num_examples: 271 - name: test num_bytes: 7615124 num_examples: 13062 download_size: 4415183 dataset_size: 8646345 --- References: https://huggingface.co/datasets/cais/mmlu # MMLU (Vietnamese translation version) ## Install To install `lm-eval` from the github repository main branch, run: ```bash git clone https://github.com/hieunguyen1053/lm-evaluation-harness cd lm-evaluation-harness pip install -e . ``` ## Basic Usage > **Note**: When reporting results from eval harness, please include the task versions (shown in `results["versions"]`) for reproducibility. This allows bug fixes to tasks while also ensuring that previously reported scores are reproducible. See the [Task Versioning](#task-versioning) section for more info. ### Hugging Face `transformers` To evaluate a model hosted on the [HuggingFace Hub](https://huggingface.co/models) (e.g. vlsp-2023-vllm/hoa-1b4) on `mmlu` you can use the following command: ```bash python main.py \ --model hf-causal \ --model_args pretrained=vlsp-2023-vllm/hoa-1b4 \ --tasks mmlu_vi \ --device cuda:0 ``` Additional arguments can be provided to the model constructor using the `--model_args` flag. Most notably, this supports the common practice of using the `revisions` feature on the Hub to store partially trained checkpoints, or to specify the datatype for running a model: ```bash python main.py \ --model hf-causal \ --model_args pretrained=vlsp-2023-vllm/hoa-1b4,revision=step100000,dtype="float" \ --tasks mmlu_vi \ --device cuda:0 ``` To evaluate models that are loaded via `AutoSeq2SeqLM` in Huggingface, you instead use `hf-seq2seq`. *To evaluate (causal) models across multiple GPUs, use `--model hf-causal-experimental`* > **Warning**: Choosing the wrong model may result in erroneous outputs despite not erroring.
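### Loading the dataset directly

Outside of the evaluation harness, the dataset can also be inspected with 🤗 Datasets. The sketch below relies only on the features and splits declared in the metadata above.

```python
from datasets import load_dataset

mmlu_vi = load_dataset("vlsp-2023-vllm/mmlu")  # splits: validation, dev, test

sample = mmlu_vi["dev"][0]
print(sample["question"])
for i, choice in enumerate(sample["choices"]):
    print(f"  {i}. {choice}")
print("gold answer index:", sample["answer"])
```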
2,295
[ [ -0.0265350341796875, -0.059173583984375, 0.039215087890625, 0.032745361328125, -0.002361297607421875, -0.0163726806640625, 0.0020847320556640625, -0.01355743408203125, 0.01708984375, 0.0243988037109375, -0.04974365234375, -0.0199127197265625, -0.04327392578125, ...
hackaprompt/hackaprompt-dataset
2023-10-22T13:41:01.000Z
[ "size_categories:100K<n<1M", "language:en", "code", "region:us" ]
hackaprompt
null
null
2
99
2023-10-19T03:01:52
--- language: - en tags: - code pretty_name: HackAPrompt Dataset size_categories: - 100K<n<1M --- # Dataset Card for HackAPrompt 💻🔍 This dataset contains submissions from a prompt hacking competition. An in-depth analysis of the dataset has been accepted at the EMNLP 2023 conference. 📊👾 Submissions were sourced from two environments: a playground for experimentation and an official submissions platform. The playground itself can be accessed [here](https://huggingface.co/spaces/hackaprompt/playground) 🎮 More details about the competition itself [here](https://www.hackaprompt.com/) 🏆 ## Dataset Details 📋 ### Dataset Description 📄 We conducted a prompt hacking competition where users were competing to "hack" different large language models (LLMs). Different levels were proposed, with varying degrees of difficulty, and for each level, 3 LLMs were evaluated: GPT-3 (`text-davinci-003`), FlanT5-XXL (`philschmid/flan-t5-xxl-sharded-fp16`), and ChatGPT (`gpt-3.5-turbo`). We anonymously collected user submissions throughout the competition and also had users submit their best attempts via an online platform for a chance to win the competition. Users submitted their prompts, and our servers automatically evaluated their attempts. To delineate between ties, token counts were used where lower counts gave better scores. This dataset releases all submissions sent to both our playground and submission servers. 📤📥 ### Columns Description 🧾 - **level**: A numerical value indicating the difficulty or complexity of the prompt. - **user_input**: The input provided by the user or participant in response to the given challenge. - **prompt**: The full prompt that was used to query the model, this includes the user's input. - **completion**: The output or completion generated by the model based on the user's input. - **model**: The type or version of the model that generated the completion. For example, "gpt-3.5-turbo" or "FlanT5-XXL". - **expected_completion**: The expected or ideal output that should have been generated by the model for the given user input. - **token_count**: The number of tokens present in the user's input. This serves as a measure of the input's length. - **correct**: A boolean value indicating whether the model's completion was correct or not, based on the expected output. - **error**: A boolean value indicating if there was an error during the model's processing of the user input. Note: we did not include submissions that triggered errors in this dataset. - **score**: A numerical value representing the score assigned to the model's completion based on its accuracy, correctness, and other evaluation metrics. (Only available for prompts on the submissions platform) - **dataset**: A categorical variable indicating the source of the submission. The two categories are "playground_data" (for submissions from the playground environment) and "submission_data" (for official submissions). - **timestamp**: The date and time when the submission was made. (Only available for playground dataset) <!-- - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] --> ## Uses 🧑‍🔬 This dataset is meant to be used in a research context to better understand the different types of attacks "in the wild" on LLMs. 📚🔬 <!-- Address questions around how the dataset is intended to be used. 
--> #### Personal and Sensitive Information 🔒 We did not directly release any personal or sensitive information. On the playground, users could submit anonymously, and we did not collect information about the users directly. For the submissions data, teams did submit in their names, but that information has not been made available in this version of the dataset to preserve participants' privacy. <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> ## Bias, Risks, and Limitations ⚠️ The data was submitted via a public portal hosted on Hugging Face. We did not curate the data before publishing it. The data may contain offensive material. Please use at your own risk. ### Recommendations 🚀 Users should be made aware of the risks, biases, and limitations of the dataset, and should use it at their own risk. ## Citation [optional] 📝 <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** TODO: Add link to publication when available. 📚🔗
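## Example Usage 🧪

A minimal sketch of loading and slicing the dataset with 🤗 Datasets; the `train` split name and the filtering choices below are assumptions made for illustration only.

```python
from datasets import load_dataset

ds = load_dataset("hackaprompt/hackaprompt-dataset", split="train")  # split name assumed

# Successful injections against ChatGPT, shortest user inputs first
# (lower token counts broke ties in the competition).
successful = ds.filter(lambda row: row["correct"] and row["model"] == "gpt-3.5-turbo")
shortest = sorted(successful, key=lambda row: row["token_count"])[:5]
for row in shortest:
    print(row["level"], row["token_count"], repr(row["user_input"][:80]))
```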
4,937
[ [ -0.022735595703125, -0.060272216796875, 0.0223236083984375, 0.0298614501953125, -0.0067596435546875, 0.01337432861328125, -0.00348663330078125, -0.0416259765625, 0.03271484375, 0.0276641845703125, -0.057342529296875, -0.0614013671875, -0.04193115234375, 0.02...
naman1011/spider
2023-10-26T05:37:37.000Z
[ "region:us" ]
naman1011
null
null
0
99
2023-10-26T05:06:17
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
has_part
2022-11-03T16:15:21.000Z
[ "task_categories:text-classification", "task_ids:text-scoring", "annotations_creators:machine-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:extended|other-Generics-KB", "language:en", "license:unknown", "Meronym-Prediction", ...
null
This dataset is a new knowledge-base (KB) of hasPart relationships, extracted from a large corpus of generic statements. Complementary to other resources available, it is the first which is all three of: accurate (90% precision), salient (covers relationships a person may mention), and has high coverage of common terms (approximated as within a 10 year old’s vocabulary), as well as having several times more hasPart entries than in the popular ontologies ConceptNet and WordNet. In addition, it contains information about quantifiers, argument modifiers, and links the entities to appropriate concepts in Wikipedia and WordNet.
@misc{bhakthavatsalam2020dogs, title={Do Dogs have Whiskers? A New Knowledge Base of hasPart Relations}, author={Sumithra Bhakthavatsalam and Kyle Richardson and Niket Tandon and Peter Clark}, year={2020}, eprint={2006.07510}, archivePrefix={arXiv}, primaryClass={cs.CL} }
0
98
2022-03-02T23:29:22
--- annotations_creators: - machine-generated language_creators: - found language: - en license: - unknown multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - extended|other-Generics-KB task_categories: - text-classification task_ids: - text-scoring paperswithcode_id: haspart-kb pretty_name: hasPart KB tags: - Meronym-Prediction dataset_info: features: - name: arg1 dtype: string - name: arg2 dtype: string - name: score dtype: float64 - name: wikipedia_primary_page sequence: string - name: synset sequence: string splits: - name: train num_bytes: 4363417 num_examples: 49848 download_size: 7437382 dataset_size: 4363417 --- # Dataset Card for [HasPart] ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://allenai.org/data/haspartkb - **Repository:** - **Paper:** https://arxiv.org/abs/2006.07510 - **Leaderboard:** - **Point of Contact:** Peter Clark <peterc@allenai.org> ### Dataset Summary This dataset is a new knowledge-base (KB) of hasPart relationships, extracted from a large corpus of generic statements. Complementary to other resources available, it is the first which is all three of: accurate (90% precision), salient (covers relationships a person may mention), and has high coverage of common terms (approximated as within a 10 year old’s vocabulary), as well as having several times more hasPart entries than in the popular ontologies ConceptNet and WordNet. In addition, it contains information about quantifiers, argument modifiers, and links the entities to appropriate concepts in Wikipedia and WordNet. ### Supported Tasks and Leaderboards Text Classification / Scoring - meronyms (e.g., `plant` has part `stem`) ### Languages English ## Dataset Structure ### Data Instances [More Information Needed] ``` {'arg1': 'plant', 'arg2': 'stem', 'score': 0.9991798414303377, 'synset': ['wn.plant.n.02', 'wn.stalk.n.02'], 'wikipedia_primary_page': ['Plant']} ``` ### Data Fields - `arg1`, `arg2`: These are the entities of the meronym, i.e., `arg1` _has\_part_ `arg2` - `score`: Meronymic score per the procedure described below - `synset`: Ontological classification from WordNet for the two entities - `wikipedia_primary_page`: Wikipedia page of the entities **Note**: some examples contain synset / wikipedia info for only one of the entities. ### Data Splits Single training file ## Dataset Creation Our approach to hasPart extraction has five steps: 1. Collect generic sentences from a large corpus 2. Train and apply a RoBERTa model to identify hasPart relations in those sentences 3. 
Normalize the entity names 4. Aggregate and filter the entries 5. Link the hasPart arguments to Wikipedia pages and WordNet senses Rather than extract knowledge from arbitrary text, we extract hasPart relations from generic sentences, e.g., “Dogs have tails.”, in order to bias the process towards extractions that are general (apply to most members of a category) and salient (notable enough to write down). As a source of generic sentences, we use **GenericsKB**, a large repository of 3.4M standalone generics previously harvested from a Webcrawl of 1.7B sentences. ### Annotations #### Annotation process For each sentence _S_ in GenericsKB, we identify all noun chunks in the sentence using a noun chunker (spaCy's `Doc.noun_chunks`). Each chunk is a candidate whole or part. Then, for each possible pair, we use a RoBERTa model to classify whether a hasPart relationship exists between them. The input sentence is presented to RoBERTa as a sequence of wordpiece tokens, with the start and end of the candidate hasPart arguments identified using special tokens, e.g.: > `[CLS] [ARG1-B]Some pond snails[ARG1-E] have [ARG2-B]gills[ARG2-E] to breathe in water.` where `[ARG1/2-B/E]` are special tokens denoting the argument boundaries. The `[CLS]` token is projected to two class labels (hasPart/notHasPart), and a softmax layer is then applied, resulting in output probabilities for the class labels. We train with cross-entropy loss. We use RoBERTa-large (24 layers, each with a hidden size of 1024 and 16 attention heads, for a total of 355M parameters). We use the pre-trained weights available with the model and further fine-tune the model parameters by training on our labeled data for 15 epochs. To train the model, we use a hand-annotated set of ∼2k examples. #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` @misc{bhakthavatsalam2020dogs, title={Do Dogs have Whiskers? A New Knowledge Base of hasPart Relations}, author={Sumithra Bhakthavatsalam and Kyle Richardson and Niket Tandon and Peter Clark}, year={2020}, eprint={2006.07510}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ### Contributions Thanks to [@jeromeku](https://github.com/jeromeku) for adding this dataset.
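### Usage Example

A minimal sketch of loading the KB with 🤗 Datasets and keeping only high-confidence entries; the 0.99 threshold is an illustrative choice, not one prescribed by the authors.

```python
from datasets import load_dataset

kb = load_dataset("has_part", split="train")  # single training split, as noted above

# Keep only the highest-scoring meronym pairs.
high_confidence = kb.filter(lambda row: row["score"] > 0.99)

row = high_confidence[0]
print(f"{row['arg1']} has part {row['arg2']} (score={row['score']:.3f})")
print(row["synset"], row["wikipedia_primary_page"])
```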
6,321
[ [ -0.0296783447265625, -0.046478271484375, 0.040557861328125, 0.0006432533264160156, -0.026947021484375, -0.00917816162109375, -0.0237884521484375, -0.024444580078125, 0.038787841796875, 0.034881591796875, -0.054290771484375, -0.05426025390625, -0.05517578125, ...
hippocorpus
2022-11-03T16:15:25.000Z
[ "task_categories:text-classification", "task_ids:text-scoring", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "license:other", "narrative-flow", "region:us" ]
null
To examine the cognitive processes of remembering and imagining and their traces in language, we introduce Hippocorpus, a dataset of 6,854 English diary-like short stories about recalled and imagined events. Using a crowdsourcing framework, we first collect recalled stories and summaries from workers, then provide these summaries to other workers who write imagined stories. Finally, months later, we collect a retold version of the recalled stories from a subset of recalled authors. Our dataset comes paired with author demographics (age, gender, race), their openness to experience, as well as some variables regarding the author's relationship to the event (e.g., how personal the event is, how often they tell its story, etc.).
@inproceedings{sap-etal-2020-recollection, title = "Recollection versus Imagination: Exploring Human Memory and Cognition via Neural Language Models", author = "Sap, Maarten and Horvitz, Eric and Choi, Yejin and Smith, Noah A. and Pennebaker, James", booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", month = jul, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.acl-main.178", doi = "10.18653/v1/2020.acl-main.178", pages = "1970--1978", abstract = "We investigate the use of NLP as a measure of the cognitive processes involved in storytelling, contrasting imagination and recollection of events. To facilitate this, we collect and release Hippocorpus, a dataset of 7,000 stories about imagined and recalled events. We introduce a measure of narrative flow and use this to examine the narratives for imagined and recalled events. Additionally, we measure the differential recruitment of knowledge attributed to semantic memory versus episodic memory (Tulving, 1972) for imagined and recalled storytelling by comparing the frequency of descriptions of general commonsense events with more specific realis events. Our analyses show that imagined stories have a substantially more linear narrative flow, compared to recalled stories in which adjacent sentences are more disconnected. In addition, while recalled stories rely more on autobiographical events based on episodic memory, imagined stories express more commonsense knowledge based on semantic memory. Finally, our measures reveal the effect of narrativization of memories in stories (e.g., stories about frequently recalled memories flow more linearly; Bartlett, 1932). Our findings highlight the potential of using NLP tools to study the traces of human cognition in language.", }
3
98
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language_creators: - expert-generated language: - en license: - other multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - text-scoring paperswithcode_id: null pretty_name: hippocorpus tags: - narrative-flow dataset_info: features: - name: AssignmentId dtype: string - name: WorkTimeInSeconds dtype: string - name: WorkerId dtype: string - name: annotatorAge dtype: float32 - name: annotatorGender dtype: string - name: annotatorRace dtype: string - name: distracted dtype: float32 - name: draining dtype: float32 - name: frequency dtype: float32 - name: importance dtype: float32 - name: logTimeSinceEvent dtype: string - name: mainEvent dtype: string - name: memType dtype: string - name: mostSurprising dtype: string - name: openness dtype: string - name: recAgnPairId dtype: string - name: recImgPairId dtype: string - name: similarity dtype: string - name: similarityReason dtype: string - name: story dtype: string - name: stressful dtype: string - name: summary dtype: string - name: timeSinceEvent dtype: string splits: - name: train num_bytes: 7229795 num_examples: 6854 download_size: 0 dataset_size: 7229795 --- # Dataset Card for [Dataset Name] ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Hippocorpus](https://msropendata.com/datasets/0a83fb6f-a759-4a17-aaa2-fbac84577318) - **Repository:** [Hippocorpus](https://msropendata.com/datasets/0a83fb6f-a759-4a17-aaa2-fbac84577318) - **Paper:** [Recollection versus Imagination: Exploring Human Memory and Cognition via Neural Language Models](http://erichorvitz.com/cognitive_studies_narrative.pdf) - **Point of Contact:** [Eric Horvitz](mailto:horvitz@microsoft.com) ### Dataset Summary To examine the cognitive processes of remembering and imagining and their traces in language, we introduce Hippocorpus, a dataset of 6,854 English diary-like short stories about recalled and imagined events. Using a crowdsourcing framework, we first collect recalled stories and summaries from workers, then provide these summaries to other workers who write imagined stories. Finally, months later, we collect a retold version of the recalled stories from a subset of recalled authors. Our dataset comes paired with author demographics (age, gender, race), their openness to experience, as well as some variables regarding the author's relationship to the event (e.g., how personal the event is, how often they tell its story, etc.). 
### Supported Tasks and Leaderboards [More Information Needed] ### Languages The dataset is in English. ## Dataset Structure [More Information Needed] ### Data Instances [More Information Needed] ### Data Fields This CSV file contains all the stories in Hippocorpus v2 (6,854 stories). These are the columns in the file: - `AssignmentId`: Unique ID of this story - `WorkTimeInSeconds`: Time in seconds that it took the worker to do the entire HIT (reading instructions, storywriting, questions) - `WorkerId`: Unique ID of the worker (random string, not MTurk worker ID) - `annotatorAge`: Lower limit of the age bucket of the worker. Buckets are: 18-24, 25-29, 30-34, 35-39, 40-44, 45-49, 50-54, 55+ - `annotatorGender`: Gender of the worker - `annotatorRace`: Race/ethnicity of the worker - `distracted`: How distracted were you while writing your story? (5-point Likert) - `draining`: How taxing/draining was writing for you emotionally? (5-point Likert) - `frequency`: How often do you think about or talk about this event? (5-point Likert) - `importance`: How impactful, important, or personal is this story/this event to you? (5-point Likert) - `logTimeSinceEvent`: Log of time (days) since the recalled event happened - `mainEvent`: Short phrase describing the main event described - `memType`: Type of story (recalled, imagined, retold) - `mostSurprising`: Short phrase describing what the most surprising aspect of the story was - `openness`: Continuous variable representing the openness to experience of the worker - `recAgnPairId`: ID of the recalled story that corresponds to this retold story (null for imagined stories). Group on this variable to get the recalled-retold pairs. - `recImgPairId`: ID of the recalled story that corresponds to this imagined story (null for retold stories). Group on this variable to get the recalled-imagined pairs. - `similarity`: How similar to your life does this event/story feel to you? (5-point Likert) - `similarityReason`: Free text annotation of similarity - `story`: Story about the imagined or recalled event (15-25 sentences) - `stressful`: How stressful was this writing task? (5-point Likert) - `summary`: Summary of the events in the story (1-3 sentences) - `timeSinceEvent`: Time (num. days) since the recalled event happened ### Data Splits [More Information Needed] ## Dataset Creation [More Information Needed] ### Curation Rationale [More Information Needed] ### Source Data [More Information Needed] #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations [More Information Needed] #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data [More Information Needed] ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information [More Information Needed] ### Dataset Curators The dataset was initially created by Maarten Sap, Eric Horvitz, Yejin Choi, Noah A. Smith, and James W. Pennebaker during work done at Microsoft Research. ### Licensing Information Hippocorpus is distributed under the [Open Use of Data Agreement v1.0](https://msropendata-web-api.azurewebsites.net/licenses/f1f352a6-243f-4905-8e00-389edbca9e83/view). 
### Citation Information ``` @inproceedings{sap-etal-2020-recollection, title = "Recollection versus Imagination: Exploring Human Memory and Cognition via Neural Language Models", author = "Sap, Maarten and Horvitz, Eric and Choi, Yejin and Smith, Noah A. and Pennebaker, James", booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", month = jul, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.acl-main.178", doi = "10.18653/v1/2020.acl-main.178", pages = "1970--1978", abstract = "We investigate the use of NLP as a measure of the cognitive processes involved in storytelling, contrasting imagination and recollection of events. To facilitate this, we collect and release Hippocorpus, a dataset of 7,000 stories about imagined and recalled events. We introduce a measure of narrative flow and use this to examine the narratives for imagined and recalled events. Additionally, we measure the differential recruitment of knowledge attributed to semantic memory versus episodic memory (Tulving, 1972) for imagined and recalled storytelling by comparing the frequency of descriptions of general commonsense events with more specific realis events. Our analyses show that imagined stories have a substantially more linear narrative flow, compared to recalled stories in which adjacent sentences are more disconnected. In addition, while recalled stories rely more on autobiographical events based on episodic memory, imagined stories express more commonsense knowledge based on semantic memory. Finally, our measures reveal the effect of narrativization of memories in stories (e.g., stories about frequently recalled memories flow more linearly; Bartlett, 1932). Our findings highlight the potential of using NLP tools to study the traces of human cognition in language.", } ``` ### Contributions Thanks to [@manandey](https://github.com/manandey) for adding this dataset.
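### Usage Example

A minimal sketch of working with the CSV described in the Data Fields section; the file name is an assumption, and the CSV must first be obtained from the MSR Open Data page listed above.

```python
import pandas as pd

# File path is an assumption; download the CSV from the MSR Open Data page first.
df = pd.read_csv("hippoCorpusV2.csv")

# As described in the Data Fields section, recalled-imagined pairs can be
# recovered by grouping on `recImgPairId` (null for retold stories).
pairs = df[df["recImgPairId"].notna()].groupby("recImgPairId")
for pair_id, group in list(pairs)[:3]:
    print(pair_id)
    print(group[["memType", "summary"]].to_string(index=False))
```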
9,318
[ [ -0.0188446044921875, -0.052459716796875, 0.035552978515625, 0.0189361572265625, -0.0171051025390625, 0.006809234619140625, -0.01763916015625, -0.051788330078125, 0.044647216796875, 0.03472900390625, -0.05364990234375, -0.048980712890625, -0.03143310546875, 0...
kan_hope
2023-01-25T14:33:30.000Z
[ "task_categories:text-classification", "task_ids:multi-label-classification", "annotations_creators:expert-generated", "language_creators:crowdsourced", "multilinguality:multilingual", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "language:kn", "license:cc-by-4.0", "hop...
null
Numerous methods have been developed to monitor the spread of negativity in modern years by eliminating vulgar, offensive, and fierce comments from social media platforms. However, there are relatively lesser amounts of study that converges on embracing positivity, reinforcing supportive and reassuring content in online forums. Consequently, we propose creating an English Kannada Hope speech dataset, KanHope and comparing several experiments to benchmark the dataset. The dataset consists of 6,176 user generated comments in code mixed Kannada scraped from YouTube and manually annotated as bearing hope speech or Not-hope speech. This dataset was prepared for hope-speech text classification benchmark on code-mixed Kannada, an under-resourced language.
@misc{hande2021hope, title={Hope Speech detection in under-resourced Kannada language}, author={Adeep Hande and Ruba Priyadharshini and Anbukkarasi Sampath and Kingston Pal Thamburaj and Prabakaran Chandran and Bharathi Raja Chakravarthi}, year={2021}, eprint={2108.04616}, archivePrefix={arXiv}, primaryClass={cs.CL} }
1
98
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language_creators: - crowdsourced language: - en - kn license: - cc-by-4.0 multilinguality: - multilingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - multi-label-classification pretty_name: KanHope language_bcp47: - en-IN - kn-IN tags: - hope-speech-detection dataset_info: features: - name: text dtype: string - name: label dtype: class_label: names: '0': Not-Hope '1': Hope splits: - name: train num_bytes: 494898 num_examples: 4940 - name: test num_bytes: 65722 num_examples: 618 download_size: 568972 dataset_size: 560620 --- # Dataset Card for KanHope ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://zenodo.org/record/4904729 - **Repository:** [KanHope](https://github.com/adeepH/KanHope) - **Paper:** [Hope speech detection in Under-resourced Kannada langauge](https://arxiv.org/abs/2108.04616) - **Leaderboard:** [N/A] - **Point of Contact:** [Adeep Hande](adeeph18c@iiitt.ac.in) ### Dataset Summary KanHope dataset is a code-mixed Kannada-English dataset for hope speech detection. All texts are scraped from the comments section of YouTube. The dataset consists of 6,176 user-generated comments in code mixed Kannada scraped from YouTube and manually annotated as bearing hope speech or Not-hope speech. ### Supported Tasks and Leaderboards This task aims to detect Hope speech content of the code-mixed dataset of comments/posts in Dravidian Languages ( Kannada-English) collected from social media. The comment/post may contain more than one sentence, but the average sentence length of the corpora is 1. Each comment/post is annotated at the comment/post level. This dataset also has class imbalance problems depicting real-world scenarios. ### Languages Code-mixed text in Dravidian languages (Kannada-English). ## Dataset Structure ### Data Instances An example from the Kannada dataset looks as follows: | text | label | | :------ | :----- | | ��������� ��ͭ� heartly heltidini... plz avrigella namma nimmellara supprt beku | 0 (Non_hope speech) | | Next song gu kuda alru andre evaga yar comment madidera alla alrru like madi share madi nam industry na next level ge togond hogaona. | 1 (Hope Speech) | ### Data Fields Kannada - `text`: Kannada-English code mixed comment. 
- `label`: integer from either of 0 or 1 that corresponds to these values: "Non_hope Speech", "Hope Speech" ### Data Splits | | train | validation | test | |---------|------:|-----------:|-----:| | Kannada | 4941 | 618 | 617 | ## Dataset Creation ### Curation Rationale Numerous methods have been developed to monitor the spread of negativity in modern years by eliminating vulgar, offensive, and fierce comments from social media platforms. However, there are relatively lesser amounts of study that converges on embracing positivity, reinforcing supportive and reassuring content in online forums. ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source language producers? Youtube users ### Annotations #### Annotation process [Needs More Information] #### Who are the annotators? [Needs More Information] ### Personal and Sensitive Information [Needs More Information] ## Considerations for Using the Data ### Social Impact of Dataset [Needs More Information] ### Discussion of Biases [Needs More Information] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators [Needs More Information] ### Licensing Information [Needs More Information] ### Citation Information ``` @misc{hande2021hope, title={Hope Speech detection in under-resourced Kannada language}, author={Adeep Hande and Ruba Priyadharshini and Anbukkarasi Sampath and Kingston Pal Thamburaj and Prabakaran Chandran and Bharathi Raja Chakravarthi}, year={2021}, eprint={2108.04616}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ### Contributions Thanks to [@adeepH](https://github.com/adeepH) for adding this dataset.
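### Usage Example

A minimal loading sketch with 🤗 Datasets, using the label mapping declared in the metadata above (0 = Not-Hope, 1 = Hope).

```python
from collections import Counter
from datasets import load_dataset

kan_hope = load_dataset("kan_hope")  # splits: train, test

example = kan_hope["train"][0]
print(example["text"], "->", example["label"])

# Rough view of the class imbalance mentioned above.
print(Counter(kan_hope["train"]["label"]))
```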
5,189
[ [ -0.0259246826171875, -0.039337158203125, 0.00800323486328125, 0.0216217041015625, -0.0419921875, 0.0196533203125, -0.014678955078125, -0.0245361328125, 0.056243896484375, 0.022979736328125, -0.052886962890625, -0.06689453125, -0.051849365234375, 0.0088195800...
kor_qpair
2023-01-25T14:34:00.000Z
[ "task_categories:text-classification", "task_ids:semantic-similarity-classification", "annotations_creators:expert-generated", "language_creators:other", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:ko", "license:mit", "region:us" ]
null
This is a Korean paired question dataset containing labels indicating whether two questions in a given pair are semantically identical. This dataset was used to evaluate the performance of [KoGPT2](https://github.com/SKT-AI/KoGPT2#subtask-evaluations) on a phrase detection downstream task.
@misc{Song:2018, title = "Paired Question v.2", authors = "Youngsook Song", publisher = "GitHub", year = "2018" }
2
98
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language_creators: - other language: - ko license: - mit multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - semantic-similarity-classification pretty_name: KorQpair dataset_info: features: - name: question1 dtype: string - name: question2 dtype: string - name: is_duplicate dtype: class_label: names: '0': '0' '1': '1' splits: - name: train num_bytes: 515365 num_examples: 6136 - name: test num_bytes: 63466 num_examples: 758 - name: validation num_bytes: 57242 num_examples: 682 download_size: 545236 dataset_size: 636073 --- # Dataset Card for [Dataset Name] ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Github](https://github.com/songys/Question_pair) - **Repository:** [Github](https://github.com/songys/Question_pair) - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields Each row in the dataset contains two questions and a `is_duplicate` label. - `question1`: The first question - `question2`: The second question - `is_duplicate`: 0 if `question1` and `question2` are semantically similar; 1 otherwise ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@jaketae](https://github.com/jaketae) for adding this dataset.
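### Usage Example

A minimal loading sketch with 🤗 Datasets, based on the fields and splits declared in the metadata above.

```python
from datasets import load_dataset

qpair = load_dataset("kor_qpair")  # splits: train, validation, test

row = qpair["train"][0]
print(row["question1"])
print(row["question2"])
print("is_duplicate:", row["is_duplicate"])
```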
3,480
[ [ -0.03643798828125, -0.04571533203125, 0.01200103759765625, 0.021728515625, -0.0098724365234375, 0.01192474365234375, -0.0211029052734375, -0.02069091796875, 0.0462646484375, 0.047760009765625, -0.0660400390625, -0.0782470703125, -0.0426025390625, 0.010742187...
kor_sae
2023-01-25T14:34:03.000Z
[ "task_categories:text-classification", "task_ids:intent-classification", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:ko", "license:cc-by-sa-4.0", "arxiv:1912.00342"...
null
This new dataset is designed to extract intent from non-canonical directives, which will help dialog managers extract intent from user dialog that may have no clear objective or that appears in paraphrased forms of utterances.
@article{cho2019machines, title={Machines Getting with the Program: Understanding Intent Arguments of Non-Canonical Directives}, author={Cho, Won Ik and Moon, Young Ki and Moon, Sangwhan and Kim, Seok Min and Kim, Nam Soo}, journal={arXiv preprint arXiv:1912.00342}, year={2019} }
3
98
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language_creators: - expert-generated language: - ko license: - cc-by-sa-4.0 multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - original task_categories: - text-classification task_ids: - intent-classification pretty_name: Structured Argument Extraction for Korean dataset_info: features: - name: intent_pair1 dtype: string - name: intent_pair2 dtype: string - name: label dtype: class_label: names: '0': yes/no '1': alternative '2': wh- questions '3': prohibitions '4': requirements '5': strong requirements splits: - name: train num_bytes: 2885167 num_examples: 30837 download_size: 2545926 dataset_size: 2885167 --- # Dataset Card for Structured Argument Extraction for Korean ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Structured Argument Extraction for Korean](https://github.com/warnikchow/sae4k) - **Repository:** [Structured Argument Extraction for Korean](https://github.com/warnikchow/sae4k) - **Paper:** [Machines Getting with the Program: Understanding Intent Arguments of Non-Canonical Directives](https://arxiv.org/abs/1912.00342) - **Point of Contact:** [Won Ik Cho](wicho@hi.snu.ac.kr) ### Dataset Summary The Structured Argument Extraction for Korean dataset is a set of question-argument and command-argument pairs with their respective question type label and negativeness label. Oftentimes, agents like Alexa or Siri encounter conversations without a clear objective from the user. The goal of this dataset is to extract the intent argument of a given utterance pair without a clear directive. This may yield a more robust agent capable of parsing more non-canonical forms of speech. ### Supported Tasks and Leaderboards * `intent_classification`: The dataset can be trained with a Transformer like [BERT](https://huggingface.co/bert-base-uncased) to classify the intent argument of a question/command pair in Korean, and its performance can be measured by its BERTScore. ### Languages The text in the dataset is in Korean and the associated BCP-47 code is `ko-KR`.
## Dataset Structure ### Data Instances An example data instance contains a question or command pair and its label: ``` { "intent_pair1": "내일 오후 다섯시 조별과제 일정 추가해줘" "intent_pair2": "내일 오후 다섯시 조별과제 일정 추가하기" "label": 4 } ``` ### Data Fields * `intent_pair1`: a question/command pair * `intent_pair2`: a corresponding question/command pair * `label`: determines the intent argument of the pair and can be one of `yes/no` (0), `alternative` (1), `wh- questions` (2), `prohibitions` (3), `requirements` (4) and `strong requirements` (5) ### Data Splits The corpus contains 30,837 examples. ## Dataset Creation ### Curation Rationale The Structured Argument Extraction for Korean dataset was curated to help train models extract intent arguments from utterances without a clear objective or when the user uses non-canonical forms of speech. This is especially helpful in Korean because in English, the `Who, what, where, when and why` usually comes in the beginning, but this isn't necessarily the case in the Korean language. So for low-resource languages, this lack of data can be a bottleneck for comprehension performance. ### Source Data #### Initial Data Collection and Normalization The corpus was taken from the one constructed by [Cho et al.](https://arxiv.org/abs/1811.04231), a Korean single utterance corpus for identifying directives/non-directives that contains a wide variety of non-canonical directives. #### Who are the source language producers? Korean speakers are the source language producers. ### Annotations #### Annotation process Utterances were categorized as question or command arguments and then further classified according to their intent argument. #### Who are the annotators? The annotation was done by three Korean natives with a background in computational linguistics. ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators The dataset is curated by Won Ik Cho, Young Ki Moon, Sangwhan Moon, Seok Min Kim and Nam Soo Kim. ### Licensing Information The dataset is licensed under the CC BY-SA-4.0. ### Citation Information ``` @article{cho2019machines, title={Machines Getting with the Program: Understanding Intent Arguments of Non-Canonical Directives}, author={Cho, Won Ik and Moon, Young Ki and Moon, Sangwhan and Kim, Seok Min and Kim, Nam Soo}, journal={arXiv preprint arXiv:1912.00342}, year={2019} } ``` ### Contributions Thanks to [@stevhliu](https://github.com/stevhliu) for adding this dataset.
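A short usage sketch for the fields and labels described above (assuming the corpus is loadable from the Hub under the id `kor_sae`):

```python
from datasets import load_dataset

# "kor_sae" is an assumed Hub id for this corpus.
dataset = load_dataset("kor_sae")

example = dataset["train"][0]
label_names = dataset["train"].features["label"].names  # ["yes/no", ..., "strong requirements"]
print(example["intent_pair1"])
print(example["intent_pair2"])
print(label_names[example["label"]])  # e.g. "requirements" for label 4
```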
5,975
[ [ -0.0241851806640625, -0.05908203125, 0.040374755859375, 0.00875091552734375, -0.03167724609375, -0.0179290771484375, -0.0303955078125, 0.0043487548828125, 0.00795745849609375, 0.046661376953125, -0.05291748046875, -0.06890869140625, -0.033203125, 0.013236999...
m_lama
2022-11-03T16:15:15.000Z
[ "task_categories:question-answering", "task_categories:text-classification", "task_ids:open-domain-qa", "task_ids:text-scoring", "annotations_creators:crowdsourced", "annotations_creators:expert-generated", "annotations_creators:machine-generated", "language_creators:crowdsourced", "language_creator...
null
mLAMA: a multilingual version of the LAMA benchmark (T-REx and GoogleRE) covering 53 languages.
@article{kassner2021multilingual, author = {Nora Kassner and Philipp Dufter and Hinrich Sch{\"{u}}tze}, title = {Multilingual {LAMA:} Investigating Knowledge in Multilingual Pretrained Language Models}, journal = {CoRR}, volume = {abs/2102.00894}, year = {2021}, url = {https://arxiv.org/abs/2102.00894}, archivePrefix = {arXiv}, eprint = {2102.00894}, timestamp = {Tue, 09 Feb 2021 13:35:56 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2102-00894.bib}, bibsource = {dblp computer science bibliography, https://dblp.org}, note = {to appear in EACL2021} }
4
98
2022-03-02T23:29:22
--- annotations_creators: - crowdsourced - expert-generated - machine-generated language_creators: - crowdsourced - expert-generated - machine-generated language: - af - ar - az - be - bg - bn - ca - ceb - cs - cy - da - de - el - en - es - et - eu - fa - fi - fr - ga - gl - he - hi - hr - hu - hy - id - it - ja - ka - ko - la - lt - lv - ms - nl - pl - pt - ro - ru - sk - sl - sq - sr - sv - ta - th - tr - uk - ur - vi - zh license: - cc-by-nc-sa-4.0 multilinguality: - translation size_categories: - 100K<n<1M source_datasets: - extended|lama task_categories: - question-answering - text-classification task_ids: - open-domain-qa - text-scoring paperswithcode_id: null pretty_name: MLama tags: - probing dataset_info: features: - name: uuid dtype: string - name: lineid dtype: uint32 - name: obj_uri dtype: string - name: obj_label dtype: string - name: sub_uri dtype: string - name: sub_label dtype: string - name: template dtype: string - name: language dtype: string - name: predicate_id dtype: string config_name: all splits: - name: test num_bytes: 125919995 num_examples: 843143 download_size: 40772287 dataset_size: 125919995 --- # Dataset Card for [Dataset Name] ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Multilingual LAMA](http://cistern.cis.lmu.de/mlama/) - **Repository:** [Github](https://github.com/norakassner/mlama) - **Paper:** [Arxiv](https://arxiv.org/abs/2102.00894) - **Point of Contact:** [Contact section](http://cistern.cis.lmu.de/mlama/) ### Dataset Summary This dataset provides the data for mLAMA, a multilingual version of LAMA. Regarding LAMA see https://github.com/facebookresearch/LAMA. For mLAMA the TREx and GoogleRE part of LAMA was considered and machine translated using Google Translate, and the Wikidata and Google Knowledge Graph API. The machine translated templates were checked for validity, i.e., whether they contain exactly one '[X]' and one '[Y]'. This data can be used for creating fill-in-the-blank queries like "Paris is the capital of [MASK]" across 53 languages. For more details see the website http://cistern.cis.lmu.de/mlama/ or the github repo https://github.com/norakassner/mlama. ### Supported Tasks and Leaderboards Language model knowledge probing. 
### Languages This dataset contains data in 53 languages: af,ar,az,be,bg,bn,ca,ceb,cs,cy,da,de,el,en,es,et,eu,fa,fi,fr,ga,gl,he,hi,hr,hu,hy,id,it,ja,ka,ko,la,lt,lv,ms,nl,pl,pt,ro,ru,sk,sl,sq,sr,sv,ta,th,tr,uk,ur,vi,zh ## Dataset Structure For each of the 53 languages and each of the 43 relations/predicates there is a set of triples. ### Data Instances For each language and relation there are triples, each consisting of an object, a predicate and a subject. For each predicate there is a template available. An example for `dataset["test"][0]` is given here: ```python { 'language': 'af', 'lineid': 0, 'obj_label': 'Frankryk', 'obj_uri': 'Q142', 'predicate_id': 'P1001', 'sub_label': 'President van Frankryk', 'sub_uri': 'Q191954', 'template': "[X] is 'n wettige term in [Y].", 'uuid': '3fe3d4da-9df9-45ba-8109-784ce5fba38a' } ``` ### Data Fields Each instance has the following fields: * "uuid": a unique identifier * "lineid": an identifier unique to mLAMA * "obj_uri": knowledge graph id of the object * "obj_label": surface form of the object * "sub_uri": knowledge graph id of the subject * "sub_label": surface form of the subject * "template": template * "language": language code * "predicate_id": relation id ### Data Splits There is only one partition that is labelled as 'test data'. ## Dataset Creation ### Curation Rationale The dataset was translated into 53 languages to investigate knowledge in pretrained language models multilingually. ### Source Data #### Initial Data Collection and Normalization The data has several sources: LAMA (https://github.com/facebookresearch/LAMA) licensed under Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) T-REx (https://hadyelsahar.github.io/t-rex/) licensed under Creative Commons Attribution-ShareAlike 4.0 International License Google-RE (https://github.com/google-research-datasets/relation-extraction-corpus) Wikidata (https://www.wikidata.org/) licensed under Creative Commons CC0 License and Creative Commons Attribution-ShareAlike License #### Who are the source language producers? See links above. ### Annotations #### Annotation process Crowdsourced (wikidata) and machine translated. #### Who are the annotators? Unknown. ### Personal and Sensitive Information Names of (most likely) famous people who have entries in Google Knowledge Graph or Wikidata. ## Considerations for Using the Data Data was created through machine translation and automatic processes. ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations Not all triples are available in all languages. ## Additional Information ### Dataset Curators The authors of the mLAMA paper and the authors of the original datasets. ### Licensing Information The Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0).
https://creativecommons.org/licenses/by-nc-sa/4.0/ ### Citation Information ``` @article{kassner2021multilingual, author = {Nora Kassner and Philipp Dufter and Hinrich Sch{\"{u}}tze}, title = {Multilingual {LAMA:} Investigating Knowledge in Multilingual Pretrained Language Models}, journal = {CoRR}, volume = {abs/2102.00894}, year = {2021}, url = {https://arxiv.org/abs/2102.00894}, archivePrefix = {arXiv}, eprint = {2102.00894}, timestamp = {Tue, 09 Feb 2021 13:35:56 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2102-00894.bib}, bibsource = {dblp computer science bibliography, https://dblp.org}, note = {to appear in EACL2021} } ``` ### Contributions Thanks to [@pdufter](https://github.com/pdufter) for adding this dataset.
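To illustrate how the `[X]`/`[Y]` templates described above turn into fill-in-the-blank queries, a small sketch (the Hub id `m_lama` and the `all` configuration follow the metadata above; the mask token depends on the model being probed):

```python
from datasets import load_dataset

# "m_lama"/"all" are taken from the metadata above; the mask token is model-specific.
dataset = load_dataset("m_lama", "all", split="test")

def to_cloze(example, mask_token="[MASK]"):
    # Fill the subject slot ([X]) and mask the object slot ([Y]) of the template.
    query = example["template"].replace("[X]", example["sub_label"]).replace("[Y]", mask_token)
    return {"query": query, "answer": example["obj_label"]}

print(to_cloze(dataset[0]))
```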
7,108
[ [ -0.0302276611328125, -0.0430908203125, 0.0185546875, 0.0302886962890625, -0.009857177734375, -0.0038280487060546875, -0.0286712646484375, -0.022552490234375, 0.0262908935546875, 0.029052734375, -0.040679931640625, -0.07476806640625, -0.039459228515625, 0.017...
newsph_nli
2023-01-25T14:41:24.000Z
[ "task_categories:text-classification", "task_ids:natural-language-inference", "annotations_creators:machine-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:tl", "license:unknown", "arxiv:2010.11574", "region...
null
First benchmark dataset for sentence entailment in the low-resource Filipino language. Constructed through exploiting the structure of news articles. Contains 600,000 premise-hypothesis pairs, in 70-15-15 split for training, validation, and testing.
@article{cruz2020investigating, title={Investigating the True Performance of Transformers in Low-Resource Languages: A Case Study in Automatic Corpus Creation}, author={Jan Christian Blaise Cruz and Jose Kristian Resabal and James Lin and Dan John Velasco and Charibeth Cheng}, journal={arXiv preprint arXiv:2010.11574}, year={2020} }
0
98
2022-03-02T23:29:22
--- annotations_creators: - machine-generated language_creators: - found language: - tl license: - unknown multilinguality: - monolingual size_categories: - 100K<n<1M source_datasets: - original task_categories: - text-classification task_ids: - natural-language-inference paperswithcode_id: newsph-nli pretty_name: NewsPH NLI dataset_info: features: - name: premise dtype: string - name: hypothesis dtype: string - name: label dtype: class_label: names: '0': '0' '1': '1' splits: - name: train num_bytes: 154510599 num_examples: 420000 - name: test num_bytes: 3283665 num_examples: 9000 - name: validation num_bytes: 33015530 num_examples: 90000 download_size: 76565287 dataset_size: 190809794 --- # Dataset Card for NewsPH NLI ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [NewsPH NLI homepage](https://github.com/jcblaisecruz02/Filipino-Text-Benchmarks) - **Repository:** [NewsPH NLI repository](https://github.com/jcblaisecruz02/Filipino-Text-Benchmarks) - **Paper:** [Arxiv paper](https://arxiv.org/pdf/2010.11574.pdf) - **Leaderboard:** - **Point of Contact:** [Jan Christian Cruz](mailto:jan_christian_cruz@dlsu.edu.ph) ### Dataset Summary First benchmark dataset for sentence entailment in the low-resource Filipino language. Constructed through exploiting the structure of news articles. Contains 600,000 premise-hypothesis pairs, in 70-15-15 split for training, validation, and testing. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages The dataset contains news articles in Filipino (Tagalog) scraped from all major Philippine news sites online. ## Dataset Structure ### Data Instances Sample data: { "premise": "Alam ba ninyo ang ginawa ni Erap na noon ay lasing na lasing na rin?", "hypothesis": "Ininom niya ang alak na pinagpulbusan!", "label": "0" } ### Data Fields [More Information Needed] ### Data Splits Contains 600,000 premise-hypothesis pairs, in 70-15-15 split for training, validation, and testing. ## Dataset Creation ### Curation Rationale We propose the use of news articles for automatically creating benchmark datasets for NLI for two reasons. First, news articles commonly use single-sentence paragraphing, meaning every paragraph in a news article is limited to a single sentence. Second, straight news articles follow the “inverted pyramid” structure, where every succeeding paragraph builds upon the premise of those that came before it, with the most important information on top and the least important towards the end.
### Source Data #### Initial Data Collection and Normalization To create the dataset, we scrape news articles from all major Philippine news sites online. We collect a total of 229,571 straight news articles, which we then lightly preprocess to remove extraneous unicode characters and correct minimal misspellings. No further preprocessing is done to preserve information in the data. #### Who are the source language producers? The dataset was created by Jan Christian Blaise Cruz, Jose Kristian Resabal, James Lin, Dan John Velasco, and Charibeth Cheng from De La Salle University and the University of the Philippines. ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? Jan Christian Blaise Cruz, Jose Kristian Resabal, James Lin, Dan John Velasco and Charibeth Cheng ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [Jan Christian Blaise Cruz](mailto:jan_christian_cruz@dlsu.edu.ph) ### Licensing Information [More Information Needed] ### Citation Information @article{cruz2020investigating, title={Investigating the True Performance of Transformers in Low-Resource Languages: A Case Study in Automatic Corpus Creation}, author={Jan Christian Blaise Cruz and Jose Kristian Resabal and James Lin and Dan John Velasco and Charibeth Cheng}, journal={arXiv preprint arXiv:2010.11574}, year={2020} } ### Contributions Thanks to [@anaerobeth](https://github.com/anaerobeth) for adding this dataset.
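A short loading sketch consistent with the splits and fields described above (the Hub id `newsph_nli` is assumed):

```python
from datasets import load_dataset

# "newsph_nli" is an assumed Hub id for this corpus.
dataset = load_dataset("newsph_nli")

print({split: len(ds) for split, ds in dataset.items()})  # 70-15-15 split
example = dataset["train"][0]
print(example["premise"], "|", example["hypothesis"], "| label:", example["label"])
```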
5,397
[ [ -0.02508544921875, -0.043426513671875, 0.006252288818359375, 0.05108642578125, -0.0256805419921875, -0.00986480712890625, -0.020599365234375, -0.0265960693359375, 0.0377197265625, 0.035552978515625, -0.041412353515625, -0.050262451171875, -0.041595458984375, ...
urdu_fake_news
2023-01-25T15:01:58.000Z
[ "task_categories:text-classification", "task_ids:fact-checking", "task_ids:intent-classification", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:ur", "license:unknown", "...
null
Urdu fake news datasets that contain news of 5 different news domains. These domains are Sports, Health, Technology, Entertainment, and Business. The real news articles were collected by combining manual approaches.
@article{MaazUrdufake2020, author = {Amjad, Maaz and Sidorov, Grigori and Zhila, Alisa and G’{o}mez-Adorno, Helena and Voronkov, Ilia and Gelbukh, Alexander}, title = {Bend the Truth: A Benchmark Dataset for Fake News Detection in Urdu and Its Evaluation}, journal={Journal of Intelligent & Fuzzy Systems}, volume={39}, number={2}, pages={2457-2469}, doi = {10.3233/JIFS-179905}, year={2020}, publisher={IOS Press} }
0
98
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language_creators: - expert-generated language: - ur license: - unknown multilinguality: - monolingual size_categories: - n<1K source_datasets: - original task_categories: - text-classification task_ids: - fact-checking - intent-classification pretty_name: Bend the Truth (Urdu Fake News) dataset_info: features: - name: news dtype: string - name: label dtype: class_label: names: '0': Fake '1': Real - name: category dtype: class_label: names: '0': bus '1': hlth '2': sp '3': tch '4': sbz splits: - name: train num_bytes: 1762905 num_examples: 638 - name: test num_bytes: 799587 num_examples: 262 download_size: 1042653 dataset_size: 2562492 --- # Dataset Card for Bend the Truth (Urdu Fake News) ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Github](https://github.com/MaazAmjad/Datasets-for-Urdu-news/) - **Repository:** [Github](https://github.com/MaazAmjad/Datasets-for-Urdu-news/) - **Paper:** - **Leaderboard:** - **Point of Contact:** [Maaz Amjad](https://github.com/MaazAmjad) ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields - news: a string in Urdu - label: the label indicating whether the provided news is real or fake. - category: The intent of the news being presented. The 5 available classes are Sports, Health, Technology, Entertainment, and Business. ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@chaitnayabasava](https://github.com/chaitnayabasava) for adding this dataset.
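A minimal sketch for decoding the `label` and `category` class labels listed in the metadata above (the Hub id `urdu_fake_news` is assumed):

```python
from datasets import load_dataset

# "urdu_fake_news" is an assumed Hub id for this corpus.
dataset = load_dataset("urdu_fake_news")

features = dataset["train"].features
example = dataset["train"][0]
print(features["label"].int2str(example["label"]))        # "Fake" or "Real"
print(features["category"].int2str(example["category"]))  # e.g. "sp" for sports
```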
3,671
[ [ -0.026611328125, -0.044921875, 0.0157012939453125, 0.0175323486328125, -0.0240631103515625, 0.019378662109375, -0.0150299072265625, -0.01146697998046875, 0.040008544921875, 0.045806884765625, -0.0614013671875, -0.07000732421875, -0.052734375, 0.0074462890625...
ARTeLab/mlsum-it
2022-11-17T02:51:00.000Z
[ "task_categories:summarization", "multilinguality:monolingual", "size_categories:10K<n<100k", "language:it", "region:us" ]
ARTeLab
null
null
1
98
2022-03-02T23:29:22
--- language: - it multilinguality: - monolingual size_categories: - 10K<n<100k task_categories: - summarization --- # Dataset Card for mlsum-it ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** [https://huggingface.co/datasets/mlsum] - **Repository:** [Needs More Information] - **Paper:** [Needs More Information] - **Leaderboard:** [Needs More Information] - **Point of Contact:** [Needs More Information] ### Dataset Summary The MLSum-it dataset is the translated version (Helsinki-NLP/opus-mt-es-it) of the Spanish portion of MLSum, containing news articles taken from BBC/mundo. More information is available on the official dataset page: [HuggingFace page](https://huggingface.co/datasets/mlsum). There are two features: - source: Input news article. - target: Summary of the article. ### Supported Tasks and Leaderboards - `abstractive-summarization`, `summarization` ### Languages The text in the dataset is in Italian. ## Dataset Structure ### Data Instances [Needs More Information] ### Data Fields [Needs More Information] ### Data Splits [Needs More Information] ## Dataset Creation ### Curation Rationale [Needs More Information] ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source language producers? [Needs More Information] ### Annotations #### Annotation process [Needs More Information] #### Who are the annotators? [Needs More Information] ### Personal and Sensitive Information [Needs More Information] ## Considerations for Using the Data ### Social Impact of Dataset [Needs More Information] ### Discussion of Biases [Needs More Information] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators [Needs More Information] ### Licensing Information [Needs More Information] ### Citation Information More details and results in [published work](https://www.mdpi.com/2078-2489/13/5/228) ``` @Article{info13050228, AUTHOR = {Landro, Nicola and Gallo, Ignazio and La Grassa, Riccardo and Federici, Edoardo}, TITLE = {Two New Datasets for Italian-Language Abstractive Text Summarization}, JOURNAL = {Information}, VOLUME = {13}, YEAR = {2022}, NUMBER = {5}, ARTICLE-NUMBER = {228}, URL = {https://www.mdpi.com/2078-2489/13/5/228}, ISSN = {2078-2489}, ABSTRACT = {Text summarization aims to produce a short summary containing relevant parts from a given text.
Due to the lack of data for abstractive summarization on low-resource languages such as Italian, we propose two new original datasets collected from two Italian news websites with multi-sentence summaries and corresponding articles, and from a dataset obtained by machine translation of a Spanish summarization dataset. These two datasets are currently the only two available in Italian for this task. To evaluate the quality of these two datasets, we used them to train a T5-base model and an mBART model, obtaining good results with both. To better evaluate the results obtained, we also compared the same models trained on automatically translated datasets, and the resulting summaries in the same training language, with the automatically translated summaries, which demonstrated the superiority of the models obtained from the proposed datasets.}, DOI = {10.3390/info13050228} } ```
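A minimal loading sketch for the two features (`source`, `target`) described above; the split name `train` is an assumption:

```python
from datasets import load_dataset

# The repository id is taken from this entry; the "train" split name is an assumption.
dataset = load_dataset("ARTeLab/mlsum-it")

example = dataset["train"][0]
print(example["source"][:200])  # input news article (truncated for display)
print(example["target"])        # reference summary
```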
4,341
[ [ -0.043243408203125, -0.032318115234375, 0.0027313232421875, 0.0237884521484375, -0.0306854248046875, 0.007080078125, -0.0256500244140625, -0.0374755859375, 0.0467529296875, 0.0257720947265625, -0.06353759765625, -0.07611083984375, -0.049652099609375, 0.02207...
DELith/github-issues
2021-11-21T15:58:45.000Z
[ "region:us" ]
DELith
null
null
0
98
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
DanL/scientific-challenges-and-directions-dataset
2022-10-25T08:56:00.000Z
[ "task_categories:text-classification", "task_ids:multi-label-classification", "annotations_creators:expert-generated", "multilinguality:monolingual", "source_datasets:CORD-19", "language:en", "arxiv:2108.13751", "arxiv:2004.10706", "region:us" ]
DanL
null
null
2
98
2022-03-02T23:29:22
--- YAML tags: annotations_creators: - expert-generated language_creators: [] language: - en license: [] multilinguality: - monolingual pretty_name: DanL/scientific-challenges-and-directions-dataset source_datasets: - CORD-19 task_categories: - text-classification task_ids: - multi-label-classification --- # Dataset Card for scientific-challenges-and-directions ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Repository: [repo](https://github.com/Dan-La/scientific-challenges-and-directions)** - **Paper: [A Search Engine for Discovery of Scientific Challenges and Directions](https://arxiv.org/abs/2108.13751)** - **Point of Contact: lahav@mail.tau.ac.il,tomh@allenai.org** ### Dataset Summary The scientific challenges and directions dataset is a collection of 2894 sentences and their surrounding contexts, from 1786 full-text papers in the [CORD-19](https://arxiv.org/abs/2004.10706) corpus, labeled for classification of _challenges_ and _directions_ by expert annotators with biomedical and bioNLP backgrounds. At a high level, our labels are defined as follows: * **Challenge**: A sentence mentioning a problem, difficulty, flaw, limitation, failure, lack of clarity, or knowledge gap. * **Research direction**: A sentence mentioning suggestions or needs for further research, hypotheses, speculations, indications or hints that an issue is worthy of exploration. The dataset was developed to help scientists and medical professionals discover challenges and potential directions across scientific literature. ### Languages The language in the dataset is English as written by authors of the scientific papers in the CORD-19 corpus. ## Dataset Structure ### Data Instances For each instance, there is a unique id, a string for the text sentence, a string for the previous sentence, a string for the next sentence, and a list for the challenge and direction labels. 
``` {'id': 'PMC7152165_152', 'label': [0.0, 0.0], 'next_sent': 'The railways brought a new technology and vast engineering and architectural structures into Britain’s rural and urban landscapes.', 'prev_sent': 'In Britain, improvements in coaching technologies and roads helped to increase stage coach speeds in the late eighteenth and early nineteenth centuries, while the railway construction boom of the 1830s and 1840s led to a massive reduction in journey times, and the emergence of distinctly new experiences and geographies.', 'text': 'Britain’s railway companies were among the nation’s largest employers in the nineteenth century, and they facilitated the mobility of passengers and important commodities.'} ``` ### Data Fields * id: A string as a unique id for the instance. The id is composed of the unique PMC id of the paper, an underscore, and the index of the sentence within the paper. * next_sent: A string of the sentence that follows the _text_ of the instance. If the text is the last in its paragraph, the string is saved as '|'. * prev_sent: A string of the sentence that precedes the _text_ of the instance. If the text is the first in its paragraph, the string is saved as '|'. * text: A string of the sentence we seek to classify. * label: A list of 2 values - the first is the label for _challenge_ and the second for _direction_. Each value may be either 0, indicating that the _text_ is **not** _challenge_ or _direction_, or 1, indicating that the _text_ is _challenge_ or _direction_. Each instance can be a _challenge_, a _direction_, both, or neither. ### Data Splits The scientific-challenges-and-directions dataset has 3 splits: _train_, _dev_, and _test_. Each instance shows up in only one split. The splits are stratified with no overlap in papers. | Labels | Train | Dev | Test | All | |:----------------------------:|:------:|:-----:|:----:|:----:| | Not Challenge, Not Direction | 602 | 146 | 745 | 1493 | | Not Challenge, Direction | 106 | 25 | 122 | 253 | | Challenge, Not Direction | 288 | 73 | 382 | 743 | | Challenge, Direction | 155 | 40 | 210 | 405 | ## Dataset Creation ### Curation Rationale The resource was developed to help scientists and medical professionals discover challenges and potential directions across scientific literature, focusing on a broad corpus pertaining to the COVID-19 pandemic and related historical research. ### Source Data #### Initial Data Collection and Normalization See section 3.1 in our [paper](https://arxiv.org/abs/2108.13751). #### Who are the source language producers? The authors of the subset of full-text papers in the [CORD-19 dataset](https://arxiv.org/abs/2004.10706), which at the time of creating our dataset included roughly 180K documents. ### Annotations #### Annotation process See section 3.1 in our [paper](https://arxiv.org/abs/2108.13751). #### Who are the annotators? Four expert annotators with biomedical and bioNLP backgrounds. For more details see section 3.1 in our [paper](https://arxiv.org/abs/2108.13751). ### Personal and Sensitive Information The dataset does not contain any personal information about the authors or annotators. ## Considerations for Using the Data ### Social Impact of Dataset As mentioned, the dataset was developed to help scientists and medical professionals discover challenges and potential directions across scientific literature, focusing on a broad corpus pertaining to the COVID-19 pandemic and related historical research.
Studies were conducted to evaluate the utility of the dataset for researchers and medical professionals, in which a prototype based on the dataset was found to outperform other biomedical search tools. For more details see section 4 in our [paper](https://arxiv.org/abs/2108.13751). This dataset was also developed for evaluating representational systems for scientific text classification and can be used as such. ### Discussion of Biases The source of the dataset is the full-text papers in the [CORD-19 dataset](https://arxiv.org/abs/2004.10706), so biases in CORD-19 may be replicated to our dataset. ### Other Known Limitations N/A ## Additional Information ### Dataset Curators The dataset was developed by Dan Lahav, Jon Saad Falcon, Bailey Kuehl, Sophie Johnson, Sravanthi Parasa, Noam Shomron, Duen Horng Chau, Diyi Yang, Eric Horvitz, Daniel S. Weld and Tom Hope as part of _Tel Aviv University_, the _Allen Institute for AI_, _University of Washington_, _Georgia Institute of Technology_, _Microsoft_ and _Swedish Medical Group_. It was supported by the Edmond J. Safra Center for Bioinformatics at Tel-Aviv University, ONR grant N00014-18-1-2193, NSF RAPID grant 2040196, the WR-F/Cable Professorship, and AI2. ### Licensing Information [More Information Needed] ### Citation Information If using our dataset and models, please cite: ``` @misc{lahav2021search, title={A Search Engine for Discovery of Scientific Challenges and Directions}, author={Dan Lahav and Jon Saad Falcon and Bailey Kuehl and Sophie Johnson and Sravanthi Parasa and Noam Shomron and Duen Horng Chau and Diyi Yang and Eric Horvitz and Daniel S. Weld and Tom Hope}, year={2021}, eprint={2108.13751}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ### Contributions Thanks to [@Dan-La](https://github.com/Dan-La) and [@tomhoper](https://github.com/tomhoper) for adding this dataset.
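A small sketch for unpacking the two-element `label` field described above into the _challenge_ and _direction_ flags (the repository id is taken from this card; the `train` split name is an assumption):

```python
from datasets import load_dataset

# Repository id as listed in this card; availability of a "train" split is assumed.
dataset = load_dataset("DanL/scientific-challenges-and-directions-dataset")

example = dataset["train"][0]
challenge, direction = example["label"]  # two 0.0/1.0 values, as described above
print(example["text"])
print("challenge:", bool(challenge), "| direction:", bool(direction))
```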
8,366
[ [ -0.01444244384765625, -0.03533935546875, 0.033203125, 0.01421356201171875, -0.01447296142578125, 0.007312774658203125, -0.01042938232421875, -0.024749755859375, 0.04071044921875, 0.01189422607421875, -0.043731689453125, -0.0677490234375, -0.042877197265625, ...
bigscience-catalogue-data-dev/lm_code_github-eval_subset
2022-02-16T10:42:10.000Z
[ "region:us" ]
bigscience-catalogue-data-dev
null
null
0
98
2022-03-02T23:29:22
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
emre/Open_SLR108_Turkish_10_hours
2022-12-06T21:00:45.000Z
[ "license:cc-by-4.0", "robust-speech-event", "arxiv:2103.16193", "region:us" ]
emre
null
null
3
98
2022-03-02T23:29:22
--- license: cc-by-4.0 tags: - robust-speech-event datasets: - MediaSpeech --- MediaSpeech Identifier: SLR108 Summary: French, Arabic, Turkish and Spanish media speech datasets Category: Speech License: The dataset is distributed under the Creative Commons Attribution 4.0 International License. About this resource: MediaSpeech is a dataset of French, Arabic, Turkish and Spanish media speech built with the purpose of testing the performance of Automated Speech Recognition (ASR) systems. The dataset contains 10 hours of speech for each language provided. The dataset consists of short speech segments automatically extracted from media videos available on YouTube and manually transcribed, with some pre- and post-processing. Baseline models and a wav version of the dataset can be found in the following git repository: https://github.com/NTRLab/MediaSpeech @misc{mediaspeech2021, title={MediaSpeech: Multilanguage ASR Benchmark and Dataset}, author={Rostislav Kolobov and Olga Okhapkina and Olga Omelchishina and Andrey Platunov and Roman Bedyakin and Vyacheslav Moshkin and Dmitry Menshikov and Nikolay Mikhaylovskiy}, year={2021}, eprint={2103.16193}, archivePrefix={arXiv}, primaryClass={eess.AS} }
1,239
[ [ -0.047637939453125, -0.018585205078125, -0.005519866943359375, 0.0264129638671875, -0.0145416259765625, 0.0084381103515625, -0.0229949951171875, -0.005420684814453125, 0.0237274169921875, 0.030364990234375, -0.062469482421875, -0.035675048828125, -0.04443359375,...
rungalileo/medical_transcription_4
2022-08-04T04:58:36.000Z
[ "region:us" ]
rungalileo
null
null
3
98
2022-08-04T04:58:25
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
ashraq/hotel-reviews
2022-10-27T17:24:29.000Z
[ "region:us" ]
ashraq
null
null
1
98
2022-10-27T17:22:07
--- dataset_info: features: - name: review_date dtype: string - name: hotel_name dtype: string - name: review dtype: string splits: - name: train num_bytes: 15043294 num_examples: 93757 download_size: 6100544 dataset_size: 15043294 --- # Dataset Card for "hotel-reviews" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) Data was obtained from [here](https://www.kaggle.com/datasets/jiashenliu/515k-hotel-reviews-data-in-europe)
548
[ [ -0.047088623046875, -0.0285797119140625, 0.028533935546875, 0.005336761474609375, -0.0160064697265625, -0.01458740234375, 0.00124359130859375, -0.028289794921875, 0.059112548828125, 0.045654296875, -0.058441162109375, -0.0628662109375, -0.018524169921875, 0....
SirNeural/flan_v2
2023-02-24T19:05:00.000Z
[ "license:apache-2.0", "flan", "flan 2022", "flan v2", "arxiv:2301.13688", "region:us" ]
SirNeural
null
null
148
98
2023-02-13T23:02:33
--- license: apache-2.0 tags: - flan - flan 2022 - flan v2 pretty_name: Flan v2 --- # Dataset Card for Flan V2 ## Dataset Description - **Homepage:** https://ai.googleblog.com/2023/02/the-flan-collection-advancing-open.html - **Repository:** https://github.com/google-research/FLAN/tree/main/flan/v2 - **Paper:** https://arxiv.org/abs/2301.13688 - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This is a processed version of the Flan V2 dataset. I'm not affiliated with the creators, I'm just releasing the files in an easier-to-access format after processing. The authors of the Flan Collection recommend experimenting with different mixing ratios of tasks to get optimal results downstream. ## Setup Instructions Here are the steps I followed to get everything working: ### Build AESLC and WinoGrande datasets manually The repos for these datasets were updated recently and checksums need to be recomputed in TFDS: - `tfds build --dataset aeslc --register_checksums` - `tfds build --dataset winogrande --register_checksums` ### Fix dataset versions I've opened a PR [here](https://github.com/google-research/FLAN/pull/20) to get these updated in the upstream FLAN repo; until that gets merged in, run these locally to fix any dataset version errors. - `sed -i 's/glue\/cola:1.0.0/glue\/cola:2.0.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/gem\/common_gen:1.0.0/gem\/common_gen:1.1.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/gem\/dart:1.0.0/gem\/dart:1.1.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/gem\/e2e_nlg:1.0.0/gem\/e2e_nlg:1.1.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/gem\/web_nlg_en:1.0.0/gem\/web_nlg_en:1.1.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/gem\/common_gen:1.0.0/gem\/common_gen:1.1.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/paws_wiki:1.0.0/paws_wiki:1.1.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/glue\/mrpc:1.0.0/glue\/mrpc:2.0.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/glue\/qqp:1.0.0/glue\/qqp:2.0.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/glue\/sst2:1.0.0/glue\/sst2:2.0.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/glue\/mnli:1.0.0/glue\/mnli:2.0.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/glue\/qnli:1.0.0/glue\/qnli:2.0.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/glue\/wnli:1.0.0/glue\/wnli:2.0.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/glue\/stsb:1.0.0/glue\/stsb:2.0.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/hellaswag:0.0.1/hellaswag:1.1.0/g' flan/v2/task_configs_v1.py` - `sed -i 's/xsum:1.0.0/huggingface:xsum/g' flan/v2/task_configs_v1.py` ### Download and install manual steps Save these to `~/tensorflow_datasets/downloads/manual`.
- [CzEng (deduped ignoring sections)](https://ufal.mff.cuni.cz/czeng/czeng16pre) - [Newsroom (extract)](https://lil.nlp.cornell.edu/newsroom/download/index.html) - [Yandex 1M Corpus](https://translate.yandex.ru/corpus?lang=en) - [Story Cloze (extract and rename to cloze_test_test__spring2016.csv and cloze_test_val__spring2016.csv)](https://cs.rochester.edu/nlp/) ### Finally, export tasks ```python import tensorflow as tf tf.config.set_visible_devices([], 'GPU') from flan.v2 import constants from flan.v2 import constants_t0 from flan.v2 import mixtures_utils from flan.v2 import mixtures from flan.v2 import tasks import json import t5 import seqio import itertools from multiprocessing import Pool seqio.add_global_cache_dirs(constants.CACHE_DIRS) seqio.set_global_cache_dirs(constants.CACHE_DIRS) vocab = t5.data.get_default_vocabulary() def prepare_task(split, shots, opt, task): dataset = seqio.get_mixture_or_task(f'palmflan_{task}_{shots}_{opt}').get_dataset( split=split, num_epochs=1, sequence_length={'inputs':4096,'targets':4096} ) print("starting", task, shots, opt, split) with open(f'./data/{task}_{shots}_{opt}_{split}.jsonl', 'w') as f: for ex in dataset.as_numpy_iterator(): f.write( json.dumps({ "inputs": vocab.decode(ex["inputs"]), "targets": vocab.decode(ex["targets"]), "task": task, })) f.write("\n") print("done with", task, shots, opt, split) # prepare_task("train", "zs", "noopt", "dialog") # use this to export a single task tasks = itertools.product(["train"], ["zs", "fs"], ["opt", "noopt"], ["dialog", "t0", "niv2", "flan", "cot"]) with Pool(5) as p: p.starmap(prepare_task, [(task[0], task[1], task[2], task[3]) for task in tasks]) ``` ## Dataset Structure ### Data Instances Flan 2021 (flan), P3 (t0), Super-Natural Instructions (niv2), Chain-of-thought (cot), and Dialog (dialog) ### Data Fields Instruction data comes in a few formats: - Few Shot (fs) - Zero Shot (zs) - Options Provided in context (i.e. multiple choice pick one) (opt) - No Options Provided (noopt) Each combination of the above tasks + formats is saved as a JSONL with the following schema: `{"inputs": ..., "targets": ..., "task": ...}` ### Data Splits Everything is saved as a train split. Note: FLAN-fs-opt-train is too big to be uploaded even when gzipped, so it is split into 45 GB chunks. To combine and recover, run `cat flan_fs_opt_train_*.gz | gunzip -c > flan_fs_opt_train.jsonl`
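Once exported (or recombined from the gzipped chunks), the JSONL files can be read back with the generic JSON loader in `datasets`; a sketch under the assumption that the files live under `./data/` as produced by the export script above:

```python
from datasets import load_dataset

# File name follows the "<task>_<shots>_<opt>_<split>.jsonl" pattern used by the export
# script above; the specific path is an assumption.
flan_subset = load_dataset("json", data_files={"train": "data/cot_zs_noopt_train.jsonl"})

row = flan_subset["train"][0]
print(row["task"])
print(row["inputs"][:200])
print(row["targets"][:200])
```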
5,203
[ [ -0.04058837890625, -0.0472412109375, 0.023162841796875, 0.01265716552734375, 0.003299713134765625, -0.0051422119140625, -0.0228729248046875, -0.022491455078125, 0.029296875, 0.03466796875, -0.0511474609375, -0.032684326171875, -0.040283203125, 0.010780334472...
jonathan-roberts1/Brazilian_Coffee_Scenes
2023-03-31T15:27:06.000Z
[ "task_categories:image-classification", "license:other", "region:us" ]
jonathan-roberts1
null
null
0
98
2023-02-14T18:27:36
--- dataset_info: features: - name: image dtype: image - name: label dtype: class_label: names: '0': coffee '1': no coffee splits: - name: train num_bytes: 4256968.464 num_examples: 2876 download_size: 2830232 dataset_size: 4256968.464 license: other task_categories: - image-classification --- # Dataset Card for "Brazilian_Coffee_Scenes" ## Dataset Description - **Paper** [Do deep features generalize from everyday objects to remote sensing and aerial scenes domains?](https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W13/papers/Penatti_Do_Deep_Features_2015_CVPR_paper.pdf) ### Licensing Information [CC BY-NC] ## Citation Information [Do deep features generalize from everyday objects to remote sensing and aerial scenes domains?](https://www.cv-foundation.org/openaccess/content_cvpr_workshops_2015/W13/papers/Penatti_Do_Deep_Features_2015_CVPR_paper.pdf) ``` @inproceedings{penatti2015deep, title = {Do deep features generalize from everyday objects to remote sensing and aerial scenes domains?}, author = {Penatti, Ot{\'a}vio AB and Nogueira, Keiller and Dos Santos, Jefersson A}, year = 2015, booktitle = {Proceedings of the IEEE conference on computer vision and pattern recognition workshops}, pages = {44--51} } ```
1,359
[ [ -0.0237884521484375, -0.03436279296875, 0.0162811279296875, 0.00820159912109375, -0.0306243896484375, -0.0168304443359375, -0.01702880859375, -0.043975830078125, -0.0113677978515625, 0.0350341796875, -0.03289794921875, -0.06768798828125, -0.034088134765625, ...
pythainlp/final_training_set_v1_enth
2023-04-29T07:05:42.000Z
[ "task_categories:text-generation", "task_categories:conversational", "language:th", "language:en", "region:us" ]
pythainlp
null
null
1
98
2023-04-22T08:56:14
--- dataset_info: features: - name: text dtype: string - name: nb_token dtype: int64 - name: metadata dtype: string splits: - name: train num_bytes: 665379914.0331497 num_examples: 379520 - name: test num_bytes: 899398.9668502472 num_examples: 513 download_size: 258632318 dataset_size: 666279313 task_categories: - text-generation - conversational language: - th - en --- # Dataset Card for "final_training_set_v1_en_th" Finetuning datasets for [WangChanGLM](https://github.com/pythainlp/wangchanglm) sourced from [LAION OIG chip2 and infill_dbpedia](https://huggingface.co/datasets/laion/OIG) ([Apache-2.0](https://github.com/pythainlp/wangchanglm/blob/main/LICENSE)), [DataBricks Dolly v2](https://github.com/databrickslabs/dolly) ([Apache-2.0](https://github.com/pythainlp/wangchanglm/blob/main/LICENSE)), [OpenAI TL;DR](https://github.com/openai/summarize-from-feedback) ([MIT](https://opensource.org/license/mit/)), and [Hello-SimpleAI HC3](https://huggingface.co/datasets/Hello-SimpleAI/HC3) ([CC-BY SA](https://creativecommons.org/licenses/by-sa/4.0/)). The dataset is translated using Google Translate API by [Thu Ya Kyaw](https://github.com/iamthuya).
1,209
[ [ -0.01486968994140625, -0.0177154541015625, -0.005443572998046875, 0.0161590576171875, -0.023712158203125, -0.01352691650390625, -0.005462646484375, -0.022247314453125, 0.0013685226440429688, 0.03375244140625, -0.04437255859375, -0.04034423828125, -0.022720336914...
fujiki/llm-japanese-dataset_wikinews
2023-07-24T08:13:28.000Z
[ "license:cc-by-2.5", "region:us" ]
fujiki
null
null
2
98
2023-07-24T07:42:30
--- license: cc-by-2.5 dataset_info: features: - name: instruction dtype: string - name: input dtype: string - name: output dtype: string splits: - name: train num_bytes: 6934579 num_examples: 4265 download_size: 3599861 dataset_size: 6934579 --- - This dataset is a subset of [izumi-lab/llm-japanese-dataset](https://huggingface.co/datasets/izumi-lab/llm-japanese-dataset) that includes only the news-title generation tasks from `Wikinews`. - Please also refer to the original dataset: [izumi-lab/llm-japanese-dataset](https://huggingface.co/datasets/izumi-lab/llm-japanese-dataset)
611
[ [ -0.01007080078125, -0.035186767578125, 0.0290985107421875, 0.013092041015625, -0.023162841796875, 0.0204010009765625, 0.0023899078369140625, -0.0068359375, 0.042449951171875, 0.06982421875, -0.11004638671875, -0.048309326171875, -0.039520263671875, 0.0193328...
izumi-lab/wikipedia-ja-20230720
2023-07-29T03:05:36.000Z
[ "language:ja", "license:cc-by-sa-3.0", "region:us" ]
izumi-lab
null
null
2
98
2023-07-28T02:11:33
--- dataset_info: features: - name: curid dtype: string - name: title dtype: string - name: text dtype: string splits: - name: train num_bytes: 3653518687 num_examples: 1362415 download_size: 2130533065 dataset_size: 3653518687 license: cc-by-sa-3.0 language: - ja --- # Dataset Card for "wikipedia-ja-20230720" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
480
[ [ -0.06463623046875, -0.0165557861328125, 0.0169830322265625, 0.01094818115234375, -0.01415252685546875, -0.0189208984375, 0.00986480712890625, -0.015899658203125, 0.06890869140625, 0.0310211181640625, -0.0638427734375, -0.043701171875, -0.032745361328125, -0....
loremipsum3658/emb
2023-08-24T21:20:50.000Z
[ "region:us" ]
loremipsum3658
null
null
0
98
2023-08-24T21:20:44
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: validation path: data/validation-* dataset_info: features: - name: input_text dtype: string - name: target_text dtype: string - name: __index_level_0__ dtype: int64 splits: - name: train num_bytes: 101360 num_examples: 114 - name: test num_bytes: 22158 num_examples: 25 - name: validation num_bytes: 21371 num_examples: 25 download_size: 93794 dataset_size: 144889 --- # Dataset Card for "emb" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
725
[ [ -0.05499267578125, -0.041778564453125, 0.02587890625, 0.021209716796875, -0.0108795166015625, -0.00012993812561035156, 0.02020263671875, -0.007843017578125, 0.07318115234375, 0.033203125, -0.062103271484375, -0.060455322265625, -0.03594970703125, -0.01163482...
loremipsum3658/sen
2023-08-24T21:25:11.000Z
[ "region:us" ]
loremipsum3658
null
null
0
98
2023-08-24T21:25:05
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: validation path: data/validation-* dataset_info: features: - name: input dtype: string - name: output dtype: string - name: __index_level_0__ dtype: int64 splits: - name: train num_bytes: 157759 num_examples: 75 - name: test num_bytes: 42689 num_examples: 17 - name: validation num_bytes: 41047 num_examples: 16 download_size: 175628 dataset_size: 241495 --- # Dataset Card for "sen" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
715
[ [ -0.039947509765625, -0.01111602783203125, 0.0131683349609375, -0.007663726806640625, -0.01422119140625, -0.011749267578125, 0.0024242401123046875, -0.02008056640625, 0.04833984375, 0.0247039794921875, -0.062408447265625, -0.04833984375, -0.032806396484375, -...
loremipsum3658/and
2023-08-24T21:29:56.000Z
[ "region:us" ]
loremipsum3658
null
null
0
98
2023-08-24T21:29:46
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: validation path: data/validation-* dataset_info: features: - name: nup dtype: string - name: data dtype: string - name: titulo dtype: string - name: andamento dtype: string - name: classificacao_andamento sequence: string - name: __index_level_0__ dtype: int64 splits: - name: train num_bytes: 13722868 num_examples: 19924 - name: test num_bytes: 3071574 num_examples: 4270 - name: validation num_bytes: 2943882 num_examples: 4269 download_size: 10133342 dataset_size: 19738324 --- # Dataset Card for "and" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
856
[ [ -0.03668212890625, -0.028045654296875, 0.0266265869140625, 0.01125335693359375, -0.004543304443359375, -0.00106048583984375, 0.0230560302734375, -0.031982421875, 0.053558349609375, 0.0391845703125, -0.05291748046875, -0.0416259765625, -0.042266845703125, -0....
ContextualAI/tiny-wiki100-chunks
2023-09-22T17:47:30.000Z
[ "region:us" ]
ContextualAI
null
null
0
98
2023-09-22T17:47:26
--- dataset_info: features: - name: doc_id dtype: string - name: text dtype: string - name: title dtype: string splits: - name: train num_bytes: 63619 num_examples: 100 download_size: 43300 dataset_size: 63619 --- # Dataset Card for "tiny-wiki100-chunks" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
423
[ [ -0.0517578125, -0.027679443359375, 0.0201873779296875, 0.0089263916015625, -0.01238250732421875, -0.0101318359375, 0.00209808349609375, -0.0029296875, 0.076171875, 0.027923583984375, -0.0596923828125, -0.0254669189453125, -0.0296783447265625, -0.009246826171...
counter
2023-01-25T14:28:41.000Z
[ "task_categories:text-classification", "task_ids:text-scoring", "task_ids:semantic-similarity-scoring", "task_ids:topic-classification", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", ...
null
The COrpus of Urdu News TExt Reuse (COUNTER) corpus contains 1200 documents with real examples of text reuse from the field of journalism. It has been manually annotated at document level with three levels of reuse: wholly derived, partially derived and non derived.
@Article{Sharjeel2016, author="Sharjeel, Muhammad and Nawab, Rao Muhammad Adeel and Rayson, Paul", title="COUNTER: corpus of Urdu news text reuse", journal="Language Resources and Evaluation", year="2016", pages="1--27", issn="1574-0218", doi="10.1007/s10579-016-9367-2", url="http://dx.doi.org/10.1007/s10579-016-9367-2" }
0
97
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language_creators: - expert-generated language: - ur license: - cc-by-nc-sa-4.0 multilinguality: - monolingual size_categories: - n<1K source_datasets: - original task_categories: - text-classification task_ids: - text-scoring - semantic-similarity-scoring - topic-classification paperswithcode_id: counter pretty_name: COUNTER dataset_info: features: - name: source struct: - name: filename dtype: string - name: headline dtype: string - name: body dtype: string - name: total_number_of_words dtype: int64 - name: total_number_of_sentences dtype: int64 - name: number_of_words_with_swr dtype: int64 - name: newspaper dtype: string - name: newsdate dtype: string - name: domain dtype: class_label: names: '0': business '1': sports '2': national '3': foreign '4': showbiz - name: classification dtype: class_label: names: '0': wholly_derived '1': partially_derived '2': not_derived - name: derived struct: - name: filename dtype: string - name: headline dtype: string - name: body dtype: string - name: total_number_of_words dtype: int64 - name: total_number_of_sentences dtype: int64 - name: number_of_words_with_swr dtype: int64 - name: newspaper dtype: string - name: newsdate dtype: string - name: domain dtype: class_label: names: '0': business '1': sports '2': national '3': foreign '4': showbiz - name: classification dtype: class_label: names: '0': wholly_derived '1': partially_derived '2': not_derived splits: - name: train num_bytes: 2598872 num_examples: 600 download_size: 1356306 dataset_size: 2598872 --- # Dataset Card for COUNTER ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** http://ucrel.lancs.ac.uk/textreuse/counter.php - **Repository:** [More Information Needed] - **Paper:** https://link.springer.com/article/10.1007%2Fs10579-016-9367-2 - **Leaderboard:** [More Information Needed] - **Point of Contact:** [UCREL](ucrel@lancaster.ac.uk) ### Dataset Summary The COrpus ofUrdu News TExt Reuse (COUNTER) corpus contains 1200 documents with realexamples of text reuse from the field of journalism. 
It has been manually annotatedat document level with three levels of reuse: wholly derived, partially derived andnon derived ### Supported Tasks and Leaderboards other:text-reuse ### Languages ur ## Dataset Structure ### Data Instances Here is one example from the dataset: ``` {"derived": { "body" :"میر پور(وقت نیوز) بنگلہ دیش نے 5 میچوں کی سیریز کےآ خری میچ میں بھی فتح حاصل کر کے سیریز میں وائٹ واش کر دیا،زمبابوے ایک میچ بھی نہ جیت سکا۔آخری میچ میں زمبابوے کے 129 رنز کا ہدف بنگال ٹائیگرز نے 24.3 اوورز میں 5 وکٹوں کے نقصان پر حاصل کر لیا۔بنگلہ دیش کے شیر بنگلہ سٹیڈیم میر پور میں کھیلے گئے آخری ایک روزہ میچ میں زمبابوے کے کپتان چکمبورا نے ٹاس جیت کے بینٹگ کا فیصلہ کیا جو ان کی ٹیم کیلئے ڈراؤنا خواب ثابت ہوا اور پوری ٹیم 30 اوورز میں 128 رنز بنا کر پویلین لوٹ گئی زمبابوے کی پہلی وکٹ 16 رنز پر گری جب سکندر رضا صرف 9 رنز بنا کر مشرقی مرتضی کی بال پر آؤٹ ہوئے اس کے بعد مساکد ازااور سباندا کی پارٹنرشپنے ٹیم کا سکور95 رنز تک پہنچا دیا ۔مساکدازا 52 رنز بنا کر جبیر الحسن کا شکار بنے جبکہ سباندا نے 37 رنز کی اننگز کھیلی اس کے بعد کئی بھی زمبابوے کا کھلاڑی جم کر نہ کھیل سکا۔بنگال ٹائیگرز کی جانب سے عمدہ باؤلنگ کے نتیجے میں کپتان چکمبورا سمیت 8 کھلاڑی ڈبل فیگر کراس نہ کر سکے ۔بنگلہ دیش کی جانب سے ایک روزہ میچوں میں ڈیبیو کرنے والے تیج السلام نے اپنے پہلے ہی میچ میں ہیٹرک کی اسلام نے 7 اوورز میں صرف 14 رنز دئے اور چار کھلاڑیوں کع آؤٹ کیا جبکہ شکیب الحسن نے 30 رنز دیکر 3 اور جبیر الحسن نے41 رنز دیکر2 کھلاڑیوں کو پویلین کی راہ دکھائی ۔ 128 رنز کے جواب میں بنگال ٹائیگرز نے بیٹنگ شروع کی مشکلات کا سامنا رہا ان کے بھی ابتدائی 3 کھلاڑی 47 رنز پر پویلین لوٹ گئے۔ تمیم اقبال 10، انعام الحق8 رنز بنا کر آؤٹ ہوئے،آل راؤنڈر شکیب الحسن بغیر کوئی رنز بنائیپویلین لوٹ گئے وکٹ کیپر مشفق الرحیم صرف 11 رنز بنا کر چتارہ کا شکار بن گئے۔محمد اللہ نے51 رنز کی میچ وننگ اننگز کھیلی جبکہ صابر رحمٰن13 رنز بنا کر ناٹ آؤٹ رہے۔ زمبابوے کی جانب سے چتارہ نے 3 اور پنیا نگارا نے 2 کھلاڑیوں کو آؤٹ کیا ۔فتح کے ساتھ بنگلہ دیش نے سیریز میں وائٹ واش کر دیا۔زمبابوے کی ٹیم کوئی میچ نہ جیت سکی،تیج السلام کو میچ کا بہترین ایوارڈ دیا گیا جبکہ سیریز کا بہترین کھلاڑی مشفق الرحیم کو قرار دیا گیا۔", "classification": 1, # partially_derived "domain": 1, # sports "filename": "0001p.xml", "headline": "بنگلہ دیش کا زمبابوے کا ون ڈے سیریز میں 5-0 سے وائٹ واش", "newsdate": "02.12.14", "newspaper": "daily_waqt", "number_of_words_with_swr": 265, "total_number_of_sentences": 13, "total_number_of_words": 393}, "source": { "body": "ڈھاکہ ۔ یکم دسمبر (اے پی پی) بنگلہ دیش نے زمبابوے کو ٹیسٹ کے بعد ون ڈے سیریز میں بھی وائٹ واش کر دیا۔ سیریز کے پانچویں اور آخری ون ڈے میچ میں بنگال ٹائیگرز نے زمبابوے کو 5 وکٹوں سے شکست دے دی، مہمان ٹیم پہلے بیٹنگ کرتے ہوئے 128 رنز پر ڈھیر ہوگئی۔ تیج الاسلام نے کیریئر کے پہلے ون ڈے میچ میں ہیٹ ٹرک کرکے نئی تاریخ رقم کر دی، انہوں نے 4 کھلاڑیوں کو آؤٹ کیا۔ جواب میں بنگلہ دیش نے ہدف 24.3 اوورز میں 5 وکٹوں کے نقصان پر حاصل کر لیا۔ محمد اللہ نے 51 رنز کی ناقابل شکست اننگز کھیلی۔ تفصیلات کے مطابق پیر کو شیر بنگلہ نیشنل سٹیڈیم، میرپور میں پانچویں اور آخری ون ڈے میچ میں زمبابوے کے کپتان ایلٹن چگمبورا نے ٹاس جیت کر پہلے بیٹنگ کا فیصلہ کیا جو غلط ثابت ہوا۔ زمبابوے کی پوری ٹیم ڈیبیو ون ڈے کھیلنے والے نوجوان لیفٹ آرم سپنر تیج الاسلام اور شکیب الحسن کی تباہ کن باؤلنگ کے باعث 30 اوورز میں 128 رنز پر ڈھیر ہوگئی۔ ہیملٹن ماساکڈزا 52 اور ووسی سبانڈا 37 رنز کے ساتھ نمایاں رہے، ان کے علاوہ کوئی بھی بلے باز دوہرا ہندسہ عبور نہ کر سکا۔ اپنا پہلا ون ڈے کھیلنے والے تیج الاسلام نے 11 رنز کے عوض 4 وکٹیں حاصل کیں جس میں شاندار ہیٹ ٹرک بھی شامل ہے، اس طرح وہ ڈیبیو میں ہیٹ ٹرک کرنے والے دنیا کے پہلے باؤلر بن گئے ہیں۔ شکیب الحسن نے تین اور زبیر حسین نے دو وکٹیں 
حاصل کیں۔ جواب میں بنگلہ دیش نے ہدف 24.3 اوورز میں 5 وکٹوں کے نقصان پر حاصل کر لیا۔ محمد اللہ نے 51 رنز کی ناقابل شکست اننگز کھیل کر ٹیم کی فتح میں اہم کردار ادا کیا۔ زمبابوے کی جانب سے ٹینڈائی چتارا نے تین اور تناشے پینگارا نے دو وکٹیں حاصل کیں۔", "classification": 1, # partially_derived "domain": 1, # sports "filename": "0001.xml", "headline": "بنگال ٹائیگرز نے کمزور زمبابوے کو ٹیسٹ کے بعد ون ڈے سیریز میں بھی وائٹ واش کر دیا، پانچویں اور آخری ون ڈے میچ میں بنگلہ دیش 5 وکٹوں سے فتح یاب، تیج الاسلام نے ڈیبیو ون ڈے میں ہیٹ ٹرک کرکے نئی تاریخ رقم کر دی" "newsdate": "01.12.14", "newspaper": "APP", "number_of_words_with_swr": 245, "total_number_of_sentences": 15, "total_number_of_words": 352}} ``` ### Data Fields ```source```: The source document ```derived```: The derived document For each pair of source and derived documents. we have the following fields: ```filename (str)```: Name of the file in dataset ```headline(str)```: Headline of the news item ```body(str)```: Main text of the news item ```total_number_of_words(int)```: Number of words in document ```total_number_of_sentences(int)```: Number of sentences in document ```number_of_words_with_swr(int)```: Number of words after stop word removal ```newspaper(str)```: The newspaper in which the news item was published ```newsdate(str)```: The date on which the news item is published DD.MM.YY ```domain(int)```: The category of news item from this list: "business", "sports", "national", "foreign", "showbiz". ```classification (int)```: Three classes of reuse from this list: Wholly Derived (WD), Partially Derived (PD) and Non Derived (ND) ### Data Splits One split train with 600 pairs of documents. The corpus is composed of two main document types: (1) source documents and (2) derived documents. There are total 1200 documents in the corpus: 600 are newsagency articles (source documents) and 600 are newspapers stories (derived documents). The corpus contains in total 275,387 words (tokens8), 21,426 unique words and 10,841 sentences. The average length of a source document is 227 words while for derived documents it is 254 words. ## Dataset Creation ### Curation Rationale Our main intention was to develop a standard benchmark resource for the evaluation of existing systems available for text reuse detection in general and specifically for Urdu language. To generate a corpus with realistic examples, we opted for the field of journalism. In journalism, the same news story is published in different newspapers in different forms. It is a standard practice followed by all the newspapers (reporters and editors) to reuse (verbatim or modified) a news story released by the news agency. ### Source Data #### Initial Data Collection and Normalization The COUNTER corpus consists of news articles (source documents) released by five news agencies in Pakistan i.e. Associated Press of Pakistan (APP), InternationalNews Network (INN), Independent News Pakistan (INP), News Network International (NNI) and South Asian News Agency (SANA). The corresponding news stories (derived documents) were extracted from nine daily published and large circulation national news papers of the All Pakistan Newspapers Society (APNS), who are subscribed to these news agencies. These include Nawa-e-Waqt, Daily Dunya, Express, Jang, Daily Waqt, Daily Insaf, Daily Aaj, Daily Islam and DailyPakistan. 
All of them are part of the mainstream national press, long established dailies with total circulation figures of over four million. News agency texts (source documents) were provided (in electronic form) by the news agencies on a daily basis when they released the news. Newspaper stories (derived documents) were collected by three volunteers over a period of six months (from July to December 2014). National, Foreign, Business, Sports and Showbiz were the domains targeted for data collection. #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process The corpus has been annotated at the document level with three classes of reuse, i.e. Wholly Derived (WD), Partially Derived (PD) and Non Derived (ND). The derived collection contains documents with various degrees of text reuse. Some of the newspaper stories (derived documents) are rewritten (either verbatim or paraphrased) from the news agency's text (source document) while others have been written by the journalists independently on their own. In the former case, source-derived document pairs are tagged as either Wholly Derived (WD) or Partially Derived (PD), depending on the volume of text reused from the news agency's text for creating the newspaper article; in the latter case they are tagged as Non Derived (ND), as the journalists have not reused anything from the news agency's text but developed and documented the story based on their own observations and findings. The annotations were carried out in three phases: (1) training, (2) annotation, (3) conflict resolution. During the training phase, annotators A and B manually annotated 60 document pairs, following a preliminary version of the annotation guidelines. A detailed meeting was carried out afterwards, discussing the problems and disagreements. It was observed that the highest number of disagreements were between PD and ND cases, as both annotators found it difficult to distinguish between these two classes; the difficulty lies in setting the threshold at which a text is so heavily paraphrased, or has so much new information added, that it becomes independently written (ND). Following the discussion, the annotation guidelines were slightly revised, and the first 60 annotation results were saved. In the annotation phase, the remaining 540 document pairs were manually examined by the two annotators (A and B). Both were asked to judge and classify (at document level) whether a document (newspaper story), depending on the volume of text rewritten from the source (news agency article), falls into one of the following categories: Wholly Derived (WD): the news agency text is the only source for the reused newspaper text, which means it is a verbatim copy of the source. In this case, most of the reused text is a word-to-word copy of the source text. Partially Derived (PD): the newspaper text has been derived from more than one news agency, or most of the text has been paraphrased by the editor when rewriting from the news agency source. In this case, most parts of the derived document contain paraphrased text or new facts and figures added from the journalist's own findings. 
Non Derived (ND): the news agency text has not been used in the production of the newspaper text (though words may still co-occur in both documents); it has completely different facts and figures or is heavily paraphrased from the news agency's copy. In this case, the derived document is independently written and contains a lot more new text. #### Who are the annotators? The annotations were performed by three annotators (A, B and C), who were native Urdu speakers and experts in paraphrase mechanisms. All three were graduates, experienced in text annotation, and had an advanced level of Urdu. ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations Dataset provided for research purposes only. Please check dataset license for additional information. ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information This dataset is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License [(CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/). ### Citation Information ``` @Article{Sharjeel2016, author="Sharjeel, Muhammad and Nawab, Rao Muhammad Adeel and Rayson, Paul", title="COUNTER: corpus of Urdu news text reuse", journal="Language Resources and Evaluation", year="2016", pages="1--27", issn="1574-0218", doi="10.1007/s10579-016-9367-2", url="http://dx.doi.org/10.1007/s10579-016-9367-2" } ``` ### Contributions Thanks to [@arkhalid](https://github.com/arkhalid) for adding this dataset.
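A minimal loading sketch for the source/derived structure described in this card. The Hub id `counter` and the single `train` split are taken from the card itself; depending on the `datasets` version, script-based datasets like this one may also need `trust_remote_code=True`. The overlap score at the end is purely illustrative and is not the ranking model from the paper.

```python
from datasets import load_dataset

# Load the single "train" split described in the card.
ds = load_dataset("counter", split="train")

pair = ds[0]
source, derived = pair["source"], pair["derived"]

# `classification` is a ClassLabel: 0 = wholly_derived, 1 = partially_derived, 2 = not_derived.
label_names = ds.features["source"]["classification"].names
print("Reuse class:", label_names[source["classification"]])

# Crude whitespace-token overlap between the two bodies (illustrative only).
src_tokens = set(source["body"].split())
der_tokens = set(derived["body"].split())
overlap = len(src_tokens & der_tokens) / max(len(der_tokens), 1)
print(f"Share of derived tokens also found in the source: {overlap:.2f}")
```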
15,716
[ [ -0.049224853515625, -0.0379638671875, 0.0163726806640625, 0.0341796875, -0.039886474609375, 0.00029087066650390625, 0.01082611083984375, -0.039520263671875, 0.050567626953125, 0.0214691162109375, -0.040771484375, -0.044403076171875, -0.0570068359375, 0.03036...
curiosity_dialogs
2023-01-25T14:28:58.000Z
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:dialogue-modeling", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:cc-by-nc-4.0", "co...
null
This dataset contains 14K dialogs (181K utterances) where users and assistants converse about geographic topics like geopolitical entities and locations. This dataset is annotated with pre-existing user knowledge, message-level dialog acts, grounding to Wikipedia, and user reactions to messages.
@inproceedings{rodriguez2020curiosity, title = {Information Seeking in the Spirit of Learning: a Dataset for Conversational Curiosity}, author = {Pedro Rodriguez and Paul Crook and Seungwhan Moon and Zhiguang Wang}, year = 2020, booktitle = {Empirical Methods in Natural Language Processing} }
6
97
2022-03-02T23:29:22
--- annotations_creators: - crowdsourced language_creators: - crowdsourced language: - en license: - cc-by-nc-4.0 multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - original task_categories: - text-generation - fill-mask task_ids: - dialogue-modeling paperswithcode_id: curiosity pretty_name: Curiosity Dataset tags: - conversational-curiosity dataset_info: features: - name: messages sequence: - name: message dtype: string - name: liked dtype: class_label: names: '0': 'False' '1': 'True' - name: sender dtype: class_label: names: '0': user '1': assistant - name: facts sequence: - name: fid dtype: int32 - name: used dtype: class_label: names: '0': 'False' '1': 'True' - name: source dtype: class_label: names: '0': section '1': known '2': random - name: message_id dtype: string - name: dialog_acts sequence: string - name: known_entities sequence: string - name: focus_entity dtype: string - name: dialog_id dtype: int32 - name: inferred_steps dtype: class_label: names: '0': 'False' '1': 'True' - name: created_time dtype: int64 - name: aspects sequence: string - name: first_aspect dtype: string - name: second_aspect dtype: string - name: shuffle_facts dtype: class_label: names: '0': 'False' '1': 'True' - name: related_entities sequence: string - name: tag dtype: string - name: user_id dtype: int32 - name: assistant_id dtype: int32 - name: is_annotated dtype: class_label: names: '0': 'False' '1': 'True' - name: user_dialog_rating dtype: int32 - name: user_other_agent_rating dtype: int32 - name: assistant_dialog_rating dtype: int32 - name: assistant_other_agent_rating dtype: int32 - name: reported dtype: class_label: names: '0': 'False' '1': 'True' - name: annotated dtype: class_label: names: '0': 'False' '1': 'True' config_name: curiosity_dialogs splits: - name: train num_bytes: 37198297 num_examples: 10287 - name: val num_bytes: 4914487 num_examples: 1287 - name: test num_bytes: 4915613 num_examples: 1287 - name: test_zero num_bytes: 4333191 num_examples: 1187 download_size: 92169165 dataset_size: 51361588 --- # Dataset Card for Curiosity Dataset ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Curiosity Dataset Homepage](https://www.pedro.ai/curiosity) - **Repository:** [Curiosity Dataset Repository](https://github.com/facebookresearch/curiosity) - **Paper:** [ACL Anthology](https://www.aclweb.org/anthology/2020.emnlp-main.655/) - **Point of Contact:** [Pedro Rodriguez](https://mailhide.io/e/wbfjM) ### Dataset Summary Curiosity dataset consists of 14K English dialogs (181K 
utterances) where users and assistants converse about geographic topics like geopolitical entities and locations. This dataset is annotated with pre-existing user knowledge, message-level dialog acts, grounding to Wikipedia, and user reactions to messages. ### Supported Tasks and Leaderboards * `text-generation-other-conversational-curiosity`: The dataset can be used to train a model for Conversational Curiosity, which consists in the testing of the hypothesis that engagement increases when users are presented with facts related to what they know. Success on this task is typically measured by achieving a *high* [Accuracy](https://huggingface.co/metrics/accuracy) and [F1 Score](https://huggingface.co/metrics/f1). ### Languages The text in the dataset is in English collected by crowd-souring. The associated BCP-47 code is `en`. ## Dataset Structure ### Data Instances A typical data point consists of dialogs between an user and an assistant, which is followed by the different attributes of the particular dialog. An example from the Curiosity Dataset train set looks as follows: ``` {'annotated': 1, 'aspects': ['Media', 'Politics and government'], 'assistant_dialog_rating': 5, 'assistant_id': 341, 'assistant_other_agent_rating': 5, 'created_time': 1571783665, 'dialog_id': 21922, 'first_aspect': 'Media', 'focus_entity': 'Namibia', 'inferred_steps': 1, 'is_annotated': 0, 'known_entities': ['South Africa', 'United Kingdom', 'Portugal'], 'messages': {'dialog_acts': [['request_topic'], ['inform_response'], ['request_aspect'], ['inform_response'], ['request_followup'], ['inform_response'], ['request_aspect', 'feedback_positive'], ['inform_response'], ['request_followup'], ['inform_response'], [], []], 'facts': [{'fid': [], 'source': [], 'used': []}, {'fid': [77870, 77676, 77816, 77814, 77775, 77659, 77877, 77785, 77867], 'source': [0, 1, 2, 2, 0, 2, 0, 1, 1], 'used': [0, 0, 0, 0, 0, 0, 0, 0, 0]}, {'fid': [], 'source': [], 'used': []}, {'fid': [77725, 77870, 77676, 77863, 77814, 77775, 77659, 77877, 77867], 'source': [2, 0, 1, 1, 2, 0, 2, 0, 1], 'used': [0, 0, 0, 0, 0, 0, 0, 0, 0]}, {'fid': [], 'source': [], 'used': []}, {'fid': [77694, 77661, 77863, 77780, 77671, 77704, 77869, 77693, 77877], 'source': [1, 2, 1, 0, 2, 2, 0, 1, 0], 'used': [0, 0, 0, 0, 0, 0, 0, 0, 1]}, {'fid': [], 'source': [], 'used': []}, {'fid': [77816, 77814, 77864, 77659, 77877, 77803, 77738, 77784, 77789], 'source': [2, 2, 0, 2, 0, 1, 1, 0, 1], 'used': [0, 0, 0, 0, 0, 0, 0, 0, 0]}, {'fid': [], 'source': [], 'used': []}, {'fid': [77694, 77776, 77780, 77696, 77707, 77693, 77778, 77702, 77743], 'source': [1, 0, 0, 2, 1, 1, 0, 2, 2], 'used': [0, 0, 0, 0, 0, 0, 0, 0, 0]}, {'fid': [], 'source': [], 'used': []}, {'fid': [77662, 77779, 77742, 77734, 77663, 77777, 77702, 77731, 77778], 'source': [1, 0, 2, 1, 2, 0, 2, 1, 0], 'used': [0, 0, 0, 0, 0, 0, 0, 0, 1]}], 'liked': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'message': ['Hi. I want information about Namibia.', 'Nmbia is a country in southern Africa.', 'Do you have information about the media there?', 'A mentional amount of foriegn', 'What about it?', "Media and journalists in Namibia are represented by the Namibia chapter of the Media Institute of 'southern Africa and the Editors Forum of Namibia.", 'Interesting! 
What can you tell me about the politics and government?', 'Namibia formed the Namibian Defence Force, comprising former enemies in a 23-year bush war.', 'Do you have more information about it?', "With a small army and a fragile economy , the Namibian government's principal foreign policy concern is developing strengthened ties within the Southern African region.", "That's all I wanted to know. Thank you!", 'My pleasure!'], 'message_id': ['617343895', '2842515356', '4240816985', '520711081', '1292358002', '3677078227', '1563061125', '1089028270', '1607063839', '113037558', '1197873991', '1399017322'], 'sender': [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]}, 'related_entities': ['Western Roman Empire', 'United Kingdom', 'Portuguese language', 'Southern African Development Community', 'South Africa', 'Kalahari Desert', 'Namib Desert', 'League of Nations', 'Afrikaans', 'Sub-Saharan Africa', 'Portugal', 'South-West Africa', 'Warmbad, Namibia', 'German language', 'NBC'], 'reported': 0, 'second_aspect': 'Politics and government', 'shuffle_facts': 1, 'tag': 'round_2', 'user_dialog_rating': 5, 'user_id': 207, 'user_other_agent_rating': 5} ``` ### Data Fields * `messages`: List of dialogs between the user and the assistant and their associated attributes * `dialog_acts`: List of actions performed in the dialogs * `facts`: List of facts returned by the assistant * `fid`: Fact ID * `source`: Source for the fact * `used`: Whether facts were used before in the same dialog * `liked`: List of values indicating whether each dialog was liked * `message`: List of dialogs (messages) between the user and the assistant * `message_id`: Message ID * `sender`: Message author ID (numeric) * `known_entities`: Rooted facts about entities the user knows * `focus_entity` : Entity in focus in the dialogs * `dialog_id `: Dialog ID * `inferred_steps`: Number of inferred steps * `created_time`: Time of creation of the dialog * `aspects`: List of two aspects which the dialog is about * `first_aspect`: First aspect * `second_aspect`: Second aspect * `shuffle_facts`: Whether facts were shuffled * `related_entities` : List of fifteen related entities to the focus entity * `tag`: Conversation tag * `user_id`: User ID * `assistant_id`: Assistant ID * `is_annotated`: 0 or 1 (More Information Needed) * `user_dialog_rating`: 1 - 5 (More Information Needed) * `user_other_agent_rating`: 1 - 5 (More Information Needed) * `assistant_dialog_rating`: 1 - 5 (More Information Needed) * `assistant_other_agent_rating`: 1 - 5 (More Information Needed) * `reported`: Whether the dialog was reported inappropriate * `annotated`: 0 or 1 (More Information Needed) ### Data Splits The data is split into a training, validation, test and test_zero set as per the original dataset split. | | train | validation | test | test_zero | |-----------------------|------:|-----------:|-----:|----------:| | Input dialog examples | 10287 | 1287 | 1287 | 1187 | ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? 
[More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [Attribution-NonCommercial 4.0 International](https://creativecommons.org/licenses/by-nc/4.0/legalcode) ### Citation Information ``` @inproceedings{rodriguez2020curiosity, title = {Information Seeking in the Spirit of Learning: a Dataset for Conversational Curiosity}, author = {Pedro Rodriguez and Paul Crook and Seungwhan Moon and Zhiguang Wang}, year = 2020, booktitle = {Empirical Methods in Natural Language Processing} } ``` ### Contributions Thanks to [@vineeths96](https://github.com/vineeths96) for adding this dataset.
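A minimal sketch of how the nested `messages` fields described in this card can be read back as a dialog. The Hub id `curiosity_dialogs` and the split names are taken from the card; the formatting of the printed turns is illustrative.

```python
from datasets import load_dataset

ds = load_dataset("curiosity_dialogs", split="train")

dialog = ds[0]
msgs = dialog["messages"]  # sequence of structs -> dict of aligned lists

# `sender` is a ClassLabel (0 = user, 1 = assistant); `liked` is a boolean-like ClassLabel.
senders = ["user", "assistant"]
for text, sender, liked in zip(msgs["message"], msgs["sender"], msgs["liked"]):
    marker = " (liked)" if liked == 1 else ""
    print(f"{senders[sender]:>9}: {text}{marker}")

print("Focus entity:", dialog["focus_entity"])
```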
12,044
[ [ -0.042327880859375, -0.04962158203125, 0.0289306640625, 0.00772857666015625, -0.01224517822265625, -0.0030078887939453125, -0.000946044921875, -0.00550079345703125, 0.0482177734375, 0.04193115234375, -0.06170654296875, -0.0595703125, -0.0361328125, 0.0093917...
kor_sarcasm
2023-03-21T14:49:40.000Z
[ "task_categories:text-classification", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:ko", "license:mit", "sarcasm-detection", "region:us" ]
null
This dataset is designed to detect sarcasm in Korean, which distorts the literal meaning of a sentence and is closely related to sentiment classification.
null
2
97
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language_creators: - found language: - ko license: - mit multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: [] pretty_name: Korean Sarcasm Detection tags: - sarcasm-detection dataset_info: features: - name: tokens dtype: string - name: label dtype: class_label: names: '0': no_sarcasm '1': sarcasm splits: - name: train num_bytes: 1012030 num_examples: 9000 - name: test num_bytes: 32480 num_examples: 301 download_size: 1008955 dataset_size: 1044510 --- # Dataset Card for Korean Sarcasm Detection ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Korean Sarcasm Detection](https://github.com/SpellOnYou/korean-sarcasm) - **Repository:** [Korean Sarcasm Detection](https://github.com/SpellOnYou/korean-sarcasm) - **Point of Contact:** [Dionne Kim](jiwon.kim.096@gmail.com) ### Dataset Summary The Korean Sarcasm Dataset was created to detect sarcasm in text, which can significantly alter the original meaning of a sentence. 9319 tweets were collected from Twitter and labeled for `sarcasm` or `not_sarcasm`. These tweets were gathered by querying for: `역설, 아무말, 운수좋은날, 笑, 뭐래 아닙니다, 그럴리없다, 어그로, irony sarcastic, and sarcasm`. The dataset was pre-processed by removing the keyword hashtag, urls and mentions of the user to maintain anonymity. ### Supported Tasks and Leaderboards * `sarcasm_detection`: The dataset can be used to train a model to detect sarcastic tweets. A [BERT](https://huggingface.co/bert-base-uncased) model can be presented with a tweet in Korean and be asked to determine whether it is sarcastic or not. ### Languages The text in the dataset is in Korean and the associated is BCP-47 code is `ko-KR`. ## Dataset Structure ### Data Instances An example data instance contains a Korean tweet and a label whether it is sarcastic or not. `1` maps to sarcasm and `0` maps to no sarcasm. ``` { "tokens": "[ 수도권 노선 아이템 ] 17 . 신분당선의 #딸기 : 그의 이미지 컬러 혹은 머리 색에서 유래한 아이템이다 . #메트로라이프" "label": 0 } ``` ### Data Fields * `tokens`: contains the text of the tweet * `label`: determines whether the text is sarcastic (`1`: sarcasm, `0`: no sarcasm) ### Data Splits The data is split into a training set comrpised of 9018 tweets and a test set of 301 tweets. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization The dataset was created by gathering HTML data from Twitter. 
Queries for hashtags that include sarcasm and variants of it were used to return tweets. It was preprocessed by removing the keyword hashtag, urls and mentions of the user to preserve anonymity. #### Who are the source language producers? The source language producers are Korean Twitter users. ### Annotations #### Annotation process Tweets were labeled `1` for sarcasm and `0` for no sarcasm. #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information Mentions of the user in a tweet were removed to keep them anonymous. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators This dataset was curated by Dionne Kim. ### Licensing Information This dataset is licensed under the MIT License. ### Citation Information ``` @misc{kim2019kocasm, author = {Kim, Jiwon and Cho, Won Ik}, title = {Kocasm: Korean Automatic Sarcasm Detection}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\url{https://github.com/SpellOnYou/korean-sarcasm}} } ``` ### Contributions Thanks to [@stevhliu](https://github.com/stevhliu) for adding this dataset.
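A minimal sketch showing how the label mapping described in this card (`0` = no_sarcasm, `1` = sarcasm) can be used once the data is loaded. The Hub id `kor_sarcasm` and the train/test split names are taken from the card.

```python
from collections import Counter
from datasets import load_dataset

ds = load_dataset("kor_sarcasm")

# ClassLabel names as given in the card: ['no_sarcasm', 'sarcasm'].
label_names = ds["train"].features["label"].names
print({label_names[k]: v for k, v in Counter(ds["train"]["label"]).items()})

sample = ds["test"][0]
print(sample["tokens"], "->", label_names[sample["label"]])
```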
4,964
[ [ -0.0059967041015625, -0.04400634765625, 0.015655517578125, 0.04010009765625, -0.0238494873046875, 0.0107421875, -0.0237579345703125, -0.0163116455078125, 0.0299835205078125, 0.0231781005859375, -0.0511474609375, -0.07122802734375, -0.0352783203125, 0.0187530...
refresd
2023-01-25T14:43:11.000Z
[ "task_categories:text-classification", "task_categories:translation", "task_ids:semantic-similarity-classification", "task_ids:semantic-similarity-scoring", "task_ids:text-scoring", "annotations_creators:crowdsourced", "annotations_creators:machine-generated", "language_creators:crowdsourced", "lang...
null
The Rationalized English-French Semantic Divergences (REFreSD) dataset consists of 1,039 English-French sentence-pairs annotated with sentence-level divergence judgments and token-level rationales. For any questions, write to ebriakou@cs.umd.edu.
@inproceedings{briakou-carpuat-2020-detecting, title = "Detecting Fine-Grained Cross-Lingual Semantic Divergences without Supervision by Learning to Rank", author = "Briakou, Eleftheria and Carpuat, Marine", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.121", pages = "1563--1580", }
0
97
2022-03-02T23:29:22
--- annotations_creators: - crowdsourced - machine-generated language_creators: - crowdsourced - machine-generated language: - en - fr license: - mit multilinguality: - translation size_categories: - 1K<n<10K source_datasets: - extended|other-wikimatrix task_categories: - text-classification - translation task_ids: - semantic-similarity-classification - semantic-similarity-scoring - text-scoring paperswithcode_id: refresd pretty_name: Rationalized English-French Semantic Divergences dataset_info: features: - name: sentence_en dtype: string - name: sentence_fr dtype: string - name: label dtype: class_label: names: '0': divergent '1': equivalent - name: all_labels dtype: class_label: names: '0': unrelated '1': some_meaning_difference '2': no_meaning_difference - name: rationale_en dtype: string - name: rationale_fr dtype: string splits: - name: train num_bytes: 501562 num_examples: 1039 download_size: 503977 dataset_size: 501562 --- # Dataset Card for REFreSD Dataset ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Github](https://github.com/Elbria/xling-SemDiv/tree/master/REFreSD) - **Repository:** [Github](https://github.com/Elbria/xling-SemDiv/) - **Paper:** [Detecting Fine-Grained Cross-Lingual Semantic Divergences without Supervision by Learning to Rank](https://www.aclweb.org/anthology/2020.emnlp-main.121) - **Leaderboard:** - **Point of Contact:** [Eleftheria Briakou](mailto:ebriakou@cs.umd.edu) - **Additional Documentation:** [Annotation workflow, data statement, DataSheet, and IRB documentation](https://elbria.github.io/post/refresd/) ### Dataset Summary The Rationalized English-French Semantic Divergences (REFreSD) dataset consists of 1,039 English-French sentence-pairs annotated with sentence-level divergence judgments and token-level rationales. The project under which REFreSD was collected aims to advance our fundamental understanding of computational representations and methods for comparing and contrasting text meaning across languages. ### Supported Tasks and Leaderboards `semantic-similarity-classification` and `semantic-similarity-scoring`: This dataset can by used to assess the ability of computational methods to detect meaning mismatches between languages. The model performance is measured in terms of accuracy by comparing the model predictions with the human judgments in REFreSD. Details about the results of a BERT-based model, Divergent mBERT, over this dataset can be found in the [paper](https://www.aclweb.org/anthology/2020.emnlp-main.121). 
### Languages The text is in English and French as found on Wikipedia. The associated BCP-47 codes are `en` and `fr`. ## Dataset Structure ### Data Instances Each data point looks like this: ```python { 'sentence_pair': {'en': 'The invention of farming some 10,000 years ago led to the development of agrarian societies , whether nomadic or peasant , the latter in particular almost always dominated by a strong sense of traditionalism .', 'fr': "En quelques décennies , l' activité économique de la vallée est passée d' une mono-activité agricole essentiellement vivrière , à une quasi mono-activité touristique , si l' on excepte un artisanat du bâtiment traditionnel important , en partie saisonnier ."} 'label': 0, 'all_labels': 0, 'rationale_en': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'rationale_fr': [2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3], } ``` ### Data Fields - `sentence_pair`: Dictionary of sentences containing the following field. - `en`: The English sentence. - `fr`: The corresponding (or not) French sentence. - `label`: Binary. Whether both sentences correspond. `{0:divergent, 1:equivalent}` - `all_labels`: 3-class label `{0: "unrelated", 1: "some_meaning_difference", 2:"no_meaning_difference"}`. The first two are sub-classes of the `divergent` label. - `rationale_en`: A list of integers from 0-3 indicating the number of annotators who highlighted the token of the text in the English sentence during annotation. Word-aligned rationale for the divergent/equivalent label, from English. - `rationale_fr`: A list of integers from 0-3 indicating the number of annotators who highlighted the token of the text in the French sentence during annotation. Word-aligned rationale for the divergent/equivalent label, from French. ### Data Splits The dataset contains 1039 sentence pairs in a single `"train"` split. Of these pairs, 64% are annotated as divergent, and 40% contain fine-grained meaning divergences. | Label | Number of Instances | | ----------------------- | ------------------- | | Unrelated | 252 | | Some meaning difference | 418 | | No meaning different | 369 | ## Dataset Creation ### Curation Rationale The curators chose the English-French section of the WikiMatrix corpus because (1) it is likely to contain diverse, interesting divergence types since it consists of mined parallel sentences of diverse topics which are not necessarily generated by (human) translations, and (2) Wikipedia and WikiMatrix are widely used resources to train semantic representations and perform cross-lingual transfer in NLP. ### Source Data #### Initial Data Collection and Normalization The source for this corpus is the English and French portion of the [WikiMatrix corpus](https://arxiv.org/abs/1907.05791), which itself was extracted from Wikipedia articles. The curators excluded noisy samples by filtering out sentence pairs that a) were too short or too long, b) consisted mostly of numbers, or c) had a small token-level edit difference. #### Who are the source language producers? Some content of Wikipedia articles has been (human) translated from existing articles in another language while others have been written or edited independently in each language. Therefore, information on how the original text is created is not available. ### Annotations #### Annotation process The annotations were collected over the span of three weeks in April 2020. 
Annotators were presented with an English sentence and a French sentence. First, they highlighted spans and labeled them as 'added', 'changed', or 'other', where added spans contain information not contained in the other sentence, changed spans contain some information that is in the other sentence but whose meaning is not the same, and other spans have some different meaning not covered in the previous two cases, such as idioms. They then assessed the relation between the two sentences as either 'unrelated', 'some meaning differences', or 'no meaning difference'. See the [annotation guidelines](https://elbria.github.io/post/refresd/files/REFreSD_Annotation_Guidelines.pdf) for more information about the task and the annotation interface, and see the [DataSheet](https://elbria.github.io/post/refresd/files/REFreSD_Datasheet.pdf) for information about the annotator compensation. The following table contains Inter-Annotator Agreement metrics for the dataset: | Granularity | Method | IAA | | ----------- | --------------- | ------------ | | Sentence | Krippendorf's α | 0.60 | | Span | macro F1 | 45.56 ± 7.60 | | Token | macro F1 | 33.94 ± 8.24 | #### Who are the annotators? This dataset includes annotations from 6 participants recruited from the University of Maryland, College Park (UMD) educational institution. Participants ranged in age from 20–25 years, including one man and five women. For each participant, the curators ensured they were proficient in both languages of interest: three of them self-reported as English native speakers, one as a French native speaker, and two as bilingual English-French speakers. ### Personal and Sensitive Information The dataset contains discussions of people as they appear in Wikipedia articles. It does not contain confidential information, nor does it contain identifying information about the source language producers or the annotators. ## Considerations for Using the Data ### Social Impact of Dataset Models that are successful in the supported task require sophisticated semantic representations at the sentence level beyond the combined representations of the individual tokens in isolation. Such models could be used to curate parallel corpora for tasks like machine translation, cross-lingual transfer learning, or semantic modeling. The statements in the dataset, however, are not necessarily representative of the world and may overrepresent one worldview if one language is primarily translated to, rather than an equal distribution of translations between the languages. ### Discussion of Biases The English Wikipedia is known to have significantly more [contributors](https://en.wikipedia.org/wiki/Wikipedia:Who_writes_Wikipedia%3F) who identify as male than any other gender and who reside in either North America or Europe. This leads to an overrepresentation of male perspectives from these locations in the corpus in terms of both the topics covered and the language used to talk about those topics. It's not clear to what degree this holds true for the French Wikipedia. The REFreSD dataset itself has not yet been examined for the degree to which it contains the gender and other biases seen in the larger Wikipedia datasets. ### Other Known Limitations It is unknown how many of the sentences in the dataset were written independently, and how many were written as [translations](https://en.wikipedia.org/wiki/Wikipedia:Translation) by either humans or machines from some other language to the languages of interest in this dataset. 
## Additional Information ### Dataset Curators The dataset curators are Eleftheria Briakou and Marine Carpuat, who are both affiliated with the University of Maryland, College Park's Department of Computer Science. ### Licensing Information The project is licensed under the [MIT License](https://github.com/Elbria/xling-SemDiv/blob/master/LICENSE). ### Citation Information ```BibTeX @inproceedings{briakou-carpuat-2020-detecting, title = "Detecting Fine-Grained Cross-Lingual Semantic Divergences without Supervision by Learning to Rank", author = "Briakou, Eleftheria and Carpuat, Marine", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.121", pages = "1563--1580", } ``` ### Contributions Thanks to [@mpariente](https://github.com/mpariente) and [@mcmillanmajora](https://github.com/mcmillanmajora) for adding this dataset.
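A minimal sketch that loads REFreSD and tallies the coarse and fine-grained labels described in this card. The Hub id `refresd` and the single `train` split are taken from the card; since the YAML header (flat `sentence_en`/`sentence_fr`) and the example instance (nested `sentence_pair`) disagree on the sentence columns, only the label columns are used here.

```python
from collections import Counter
from datasets import load_dataset

ds = load_dataset("refresd", split="train")

binary_names = ds.features["label"].names     # ['divergent', 'equivalent']
fine_names = ds.features["all_labels"].names  # ['unrelated', 'some_meaning_difference', 'no_meaning_difference']

print(Counter(binary_names[x] for x in ds["label"]))
print(Counter(fine_names[x] for x in ds["all_labels"]))
```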
12,055
[ [ -0.055694580078125, -0.03997802734375, 0.0081939697265625, 0.0240631103515625, -0.0113983154296875, -0.01654052734375, -0.025970458984375, -0.048553466796875, 0.03497314453125, 0.03228759765625, -0.050262451171875, -0.051055908203125, -0.053314208984375, 0.0...
saudinewsnet
2023-07-17T08:18:44.000Z
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:language-modeling", "task_ids:masked-language-modeling", "annotations_creators:no-annotation", "language_creators:found", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:ar"...
null
The dataset contains a set of 31,030 Arabic newspaper articles along with metadata, extracted from various online Saudi newspapers and written in MSA.
@misc{hagrima2015, author = "M. Alhagri", title = "Saudi Newspapers Arabic Corpus (SaudiNewsNet)", year = 2015, url = "http://github.com/ParallelMazen/SaudiNewsNet" }
1
97
2022-03-02T23:29:22
--- annotations_creators: - no-annotation language_creators: - found language: - ar license: - unknown multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - original task_categories: - text-generation - fill-mask task_ids: - language-modeling - masked-language-modeling paperswithcode_id: null pretty_name: saudinewsnet dataset_info: features: - name: source dtype: string - name: url dtype: string - name: date_extracted dtype: string - name: title dtype: string - name: author dtype: string - name: content dtype: string splits: - name: train num_bytes: 103654105 num_examples: 31030 download_size: 29014166 dataset_size: 103654105 --- # Dataset Card for "saudinewsnet" ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [SaudiNewsNet](https://github.com/parallelfold/SaudiNewsNet) - **Repository:** [Website](https://github.com/parallelfold/SaudiNewsNet) - **Paper:** [More Information Needed] - **Point of Contact:** [Mazen Abdulaziz](mailto:mazen.abdulaziz@gmail.com) - **Size of downloaded dataset files:** 29.01 MB - **Size of the generated dataset:** 103.65 MB - **Total amount of disk used:** 132.67 MB ### Dataset Summary The dataset contains a set of 31,030 Arabic newspaper articles alongwith metadata, extracted from various online Saudi newspapers and written in MSA. The dataset currently contains **31,030** Arabic articles (with a total number of **8,758,976 words**). 
The articles were extracted from the following Saudi newspapers (sorted by number of articles): - [Al-Riyadh](http://www.alriyadh.com/) (4,852 articles) - [Al-Jazirah](http://al-jazirah.com/) (3,690 articles) - [Al-Yaum](http://alyaum.com/) (3,065 articles) - [Al-Eqtisadiya](http://aleqt.com/) (2,964 articles) - [Al-Sharq Al-Awsat](http://aawsat.com/) (2,947 articles) - [Okaz](http://www.okaz.com.sa/) (2,846 articles) - [Al-Watan](http://alwatan.com.sa/) (2,279 articles) - [Al-Madina](http://www.al-madina.com/) (2,252 articles) - [Al-Weeam](http://alweeam.com.sa/) (2,090 articles) - [Ain Alyoum](http://3alyoum.com/) (2,080 articles) - [Sabq](http://sabq.org/) (1,411 articles) - [Saudi Press Agency](http://www.spa.gov.sa) (369 articles) - [Arreyadi](http://www.arreyadi.com.sa/) (133 articles) - [Arreyadiyah](http://www.arreyadiyah.com/) (52 articles) ### Supported Tasks and Leaderboards [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Languages [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Dataset Structure ### Data Instances #### default - **Size of downloaded dataset files:** 29.01 MB - **Size of the generated dataset:** 103.65 MB - **Total amount of disk used:** 132.67 MB An example of 'train' looks as follows. ``` This example was too long and was cropped: { "author": "الرياض: محمد الحميدي", "content": "\"في وقت تتهيأ فيه السعودية لإطلاق الإصدار الثاني من العملات المعدنية، لا تزال التداول بمبالغ النقود المصنوعة من المعدن مستقرة عن...", "date_extracted": "2015-07-22 01:18:37", "source": "aawsat", "title": "\"«العملة المعدنية» السعودية تسجل انحسارًا تاريخيًا وسط تهيؤ لإطلاق الإصدار الثاني\"...", "url": "\"http://aawsat.com/home/article/411671/«العملة-المعدنية»-السعودية-تسجل-انحسارًا-تاريخيًا-وسط-تهيؤ-لإطلاق-الإصدار-الثاني\"..." } ``` ### Data Fields The data fields are the same among all splits. - **`source`** (str): The source newspaper. - **`url`** (str): The full URL from which the article was extracted. - **`date_extracted`** (str): The timestamp of the date on which the article was extracted. It has the format `YYYY-MM-DD hh:mm:ss`. Notice that this field does not necessarily represent the date on which the article was authored (or made available online), however for articles stamped with a date of extraction after August 1, 2015, this field most probably represents the date of authoring. - **`title`** (str): The title of the article. Contains missing values that were replaced with an empty string. - **`author`** (str): The author of the article. Contains missing values that were replaced with an empty string. - **`content`** (str): The content of the article. 
### Data Splits | name |train| |-------|----:| |default|31030| ## Dataset Creation ### Curation Rationale [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Source Data | String Identifier | Newspaper | | ------------------ | --------- | | aawsat | [Al-Sharq Al-Awsat](http://aawsat.com/) | | aleqtisadiya | [Al-Eqtisadiya](http://aleqt.com/) | | aljazirah | [Al-Jazirah](http://al-jazirah.com/) | | almadina | [Al-Madina](http://www.al-madina.com/) | | alriyadh | [Al-Riyadh](http://www.alriyadh.com/) | | alwatan | [Al-Watan](http://alwatan.com.sa/) | | alweeam | [Al-Weeam](http://alweeam.com.sa/) | | alyaum | [Al-Yaum](http://alyaum.com/) | | arreyadi | [Arreyadi](http://www.arreyadi.com.sa/) | | arreyadiyah | [Arreyadi](http://www.arreyadiyah.com/) | | okaz | [Okaz](http://www.okaz.com.sa/) | | sabq | [Sabq](http://sabq.org/) | | was | [Saudi Press Agency](http://www.spa.gov.sa/) | | 3alyoum | [Ain Alyoum](http://3alyoum.com/) | #### Initial Data Collection and Normalization The Modern Standard Arabic texts crawled from the Internet. #### Who are the source language producers? Newspaper Websites. ### Annotations The dataset does not contain any additional annotations. ### Personal and Sensitive Information [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Discussion of Biases [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Other Known Limitations [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Additional Information ### Dataset Curators [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Licensing Information Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License ### Citation Information ``` @misc{hagrima2015, author = "M. Alhagri", title = "Saudi Newspapers Arabic Corpus (SaudiNewsNet)", year = 2015, url = "http://github.com/ParallelMazen/SaudiNewsNet" } ``` ### Contributions Thanks to [@abdulelahsm](https://github.com/abdulelahsm) for adding this dataset.
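A minimal sketch for the metadata fields listed in this card: counting articles per source newspaper and parsing the extraction timestamp. The Hub id `saudinewsnet` and the single `train` split are taken from the card.

```python
from collections import Counter
from datetime import datetime
from datasets import load_dataset

ds = load_dataset("saudinewsnet", split="train")

# Articles per source newspaper (string identifiers as listed in the card).
for source, n in Counter(ds["source"]).most_common(5):
    print(f"{source:>12}: {n}")

# `date_extracted` follows the "YYYY-MM-DD hh:mm:ss" format described in the card.
first = datetime.strptime(ds[0]["date_extracted"], "%Y-%m-%d %H:%M:%S")
print(first.date())
```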
7,993
[ [ -0.04345703125, -0.033905029296875, 0.0244140625, 0.0162506103515625, -0.024871826171875, -0.006763458251953125, -0.00887298583984375, -0.03680419921875, 0.04296875, 0.0306854248046875, -0.042999267578125, -0.07574462890625, -0.05328369140625, 0.017456054687...
Atsushi/fungi_indexed_mycological_papers_japanese
2023-10-08T21:33:33.000Z
[ "annotations_creators:other", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:ja", "license:cc-by-4.0", "region:us" ]
Atsushi
null
null
0
97
2022-03-02T23:29:22
--- annotations_creators: - other language: - ja license: - cc-by-4.0 multilinguality: - monolingual source_datasets: - original size_categories: - 1K<n<10K --- fungi_indexed_mycological_papers_japanese Daikinrin (大菌輪) "3-line paper summaries" dataset. Last updated: 2023/10/9 (through R3-11041) ==== ### Languages Japanese. This dataset is available in Japanese only. # Overview [Daikinrin (大菌輪)](http://mycoscouter.coolblog.jp/daikinrin/), a website run personally by Atsushi Nakajima (中島淳志), provides summaries and indexing of several thousand mycological taxonomy papers in the form of "3-line paper summaries" (論文3行まとめ). This dataset compiles, for each paper covered by the "3-line paper summaries" content, the 3-line abstract, the tags (index terms), the list of species reported, and the list of species compared. The "3-line paper summaries" themselves are updated daily, but this dataset is planned to be updated roughly once a month. A web app visualizing this dataset is also [published on Observable](https://tinyurl.com/2tvryz8u). ## Related datasets "Summary of diagnostic characters" (識別形質まとめ): [Atsushi/fungi_diagnostic_chars_comparison_japanese](https://huggingface.co/datasets/Atsushi/fungi_diagnostic_chars_comparison_japanese) "Trait Circus dataset" (controlled traits): [Atsushi/fungi_trait_circus_database](https://huggingface.co/datasets/Atsushi/fungi_trait_circus_database) ## Column descriptions * R3ID … ID of the Daikinrin "3-line paper summary". * ja_title_provisional_translate (provisional Japanese title) … Title translated by the dataset author; where a Japanese original title exists, it is used as-is. * original_title * published_year * journal_title * source (literature link) … URL of the source publication for each entry. * daikinrin_url … URL of the Daikinrin "3-line paper summary" page. * tags … Index terms assigned by the author after reading the full paper, separated by a comma plus a half-width space. They broadly cover morphological characters, hosts/substrates, laboratory equipment/methods/reagents, geographic distribution, physiology/biochemistry, and more. * R3summary_1 … First line of the 3-line abstract. * R3summary_2 … Second line of the 3-line abstract. * R3summary_3 … Third line of the 3-line abstract. * species_reported … List of species reported in the paper, separated by "half-width space + slash + half-width space". The symbols mean: ★ = new species (including new subspecies, forms, and varieties), ■ = newly recorded species, ▲ = new combination, ◆ = new scientific name, ● = new rank, (no mark) = other. * species_compared … List of species compared in some way with any of the reported species in the paper, separated by "half-width space + slash + half-width space". See the "Summary of diagnostic characters" dataset ([Atsushi/fungi_diagnostic_chars_comparison_japanese](https://huggingface.co/datasets/Atsushi/fungi_diagnostic_chars_comparison_japanese)) for details. * taxon_reported … Higher taxa corresponding to the reported species, separated by a comma plus a half-width space. Assigned on the basis of MycoBank information and therefore possibly not fully up to date.
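A small parsing sketch for the delimited columns described above (assumptions: the repository's tabular files can be read directly with `datasets`, and the split is named `train`; neither is stated in the card):
```python
from datasets import load_dataset

# Assumption: the repository's data files are picked up automatically by `datasets`.
ds = load_dataset("Atsushi/fungi_indexed_mycological_papers_japanese", split="train")

row = ds[0]
tags = row["tags"].split(", ") if row["tags"] else []                                # comma + half-width space separated
reported = row["species_reported"].split(" / ") if row["species_reported"] else []   # " / " separated
# Strip the status symbols (★ ■ ▲ ◆ ●) to get bare species names.
names = [entry.lstrip("★■▲◆● ") for entry in reported]
print(tags[:5])
print(names[:5])
```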
1,976
[ [ -0.0389404296875, -0.049957275390625, 0.045013427734375, 0.0276947021484375, -0.0504150390625, -0.014495849609375, 0.00030875205993652344, -0.042999267578125, 0.077392578125, 0.03778076171875, -0.0294342041015625, -0.06488037109375, -0.037506103515625, 0.055...
HenryAI/KerasAPIReference.txt
2021-12-15T15:55:07.000Z
[ "region:us" ]
HenryAI
null
null
0
97
2022-03-02T23:29:22
Keras API reference from https://keras.io/api/, formatted into a .txt file for input to https://huggingface.co/blog/how-to-train
122
[ [ -0.0190277099609375, -0.057525634765625, 0.042755126953125, 0.0172576904296875, -0.01218414306640625, -0.005855560302734375, -0.0013647079467773438, -0.0287017822265625, 0.048614501953125, 0.01983642578125, -0.06488037109375, -0.03558349609375, -0.03811645507812...
laion/laion_100m_vqgan_f8
2021-12-25T05:27:42.000Z
[ "region:us" ]
laion
null
null
2
97
2022-03-02T23:29:22
# VQGAN (f8, 8192) embeddings for LAION-100M This dataset contains __VQGAN (f8, 8192)__ embeddings for the images from the first ~100 million image-text pairs of the [LAION-400M dataset](https://laion.ai/laion-400-open-dataset/). VQGAN was introduced in the paper ["Taming Transformers for High-Resolution Image Synthesis"](https://github.com/CompVis/taming-transformers) and adopted for training [DALLE-mini](https://github.com/borisdayma/dalle-mini). **Warning**: This large-scale dataset is non-curated. It was built for research purposes to enable testing model training on larger scale for broad researcher and other interested communities, and **is not meant for any real-world production or application.** [VQGAN (f8, 8192)](https://github.com/CompVis/taming-transformers#overview-of-pretrained-models) is a pretrained model with downsampling factor `f=8`, 8192 codebook entries, and Gumbel quantization. We did not perform any fine-tuning and used the VQGAN wrapper from the [DALLE-pytorch](https://github.com/lucidrains/DALLE-pytorch) repository for inference. Since LAION-400M contains 256x256 images, the model produces 1024 codes for each image. The data is provided as `*.parquet` files with the embeddings and meta information: - The embeddings (`code` column) are represented as binary data that can be decoded using `np.frombuffer(data, np.int16).reshape(32, 32)`. - The meta information (`caption`, `url`, and other columns) is the same as in the `*.parquet` files from LAION-400M (see description [here](https://laion.ai/laion-400-open-dataset/)). - This dataset does not contain the original images. The data corresponds to the shards `00000`, `00001`, ..., `09999` of LAION-400M. 0.07% of the shards were excluded since they were corrupted in the original dataset. The LAION-400M dataset is distributed under the [CC-BY 4.0 license](https://creativecommons.org/licenses/by/4.0/). The VQGAN models are distributed under the [MIT license](https://github.com/CompVis/taming-transformers/blob/master/License.txt).
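A decoding sketch following the `np.frombuffer` instruction above (the parquet file name is a placeholder for any shard downloaded from this repository):
```python
import numpy as np
import pandas as pd

# Placeholder file name -- substitute any shard downloaded from this repository.
df = pd.read_parquet("part-00000.parquet")

row = df.iloc[0]
codes = np.frombuffer(row["code"], dtype=np.int16).reshape(32, 32)
print(codes.shape)                 # (32, 32): 1024 VQGAN codebook indices for one 256x256 image
print(row["caption"], row["url"])  # meta information carried over from LAION-400M
```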
2,041
[ [ -0.029571533203125, -0.031951904296875, 0.027191162109375, -0.004180908203125, -0.0282745361328125, -0.02374267578125, -0.0003452301025390625, -0.001049041748046875, 0.00553131103515625, 0.06732177734375, -0.0247802734375, -0.051055908203125, -0.037841796875, ...
jason9693/APEACH
2022-07-05T04:18:07.000Z
[ "task_categories:text-classification", "annotations_creators:crowdsourced", "annotations_creators:crowd-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:ko", "license:cc-by-sa-4.0", "arxiv:2202.12459", "region...
jason9693
null
null
3
97
2022-04-14T14:27:43
--- annotations_creators: - crowdsourced - crowd-generated language_creators: - found language: - ko license: - cc-by-sa-4.0 multilinguality: - monolingual paperswithcode_id: apeach pretty_name: 'APEACH' size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - binary-classification --- # Dataset for project: kor_hate_eval(APEACH) ![](https://github.com/jason9693/APEACH/raw/master/resource/dist_topics.png) ## Sample Code <a href="https://colab.research.google.com/drive/1djd0fuoMYIaf7VCHaLQIziJi4_yBJruP#scrollTo=VPR24ysr5Q7k"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="base"/></a> ## Dataset Description Korean Hate Speech Evaluation Datasets: trained with [BEEP!](https://huggingface.co/datasets/kor_hate) and evaluated with [APEACH](https://github.com/jason9693/APEACH) - **Repository: [Korean HateSpeech Evaluation Dataset](https://github.com/jason9693/APEACH)** - **Paper: [APEACH: Attacking Pejorative Expressions with Analysis on Crowd-Generated Hate Speech Evaluation Datasets](https://arxiv.org/abs/2202.12459)** - **Point of Contact: [Kichang Yang](mailto:ykcha9@gmail.com)** ### Languages ko-KR ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json {'text': ['(현재 호텔주인 심정) 아18 난 마른하늘에 날벼락맞고 호텔망하게생겼는데 누군 계속 추모받네....', '....한국적인 미인의 대표적인 분...너무나 곱고아름다운모습...그모습뒤의 슬픔을 미처 알지못했네요ㅠ'], 'class': ['Spoiled', 'Default']} ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "text": "Value(dtype='string', id=None)", "class": "ClassLabel(num_classes=2, names=['Default', 'Spoiled'], id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follows: | Split name | Num samples | | ------------ | ------------------- | | train (binarized BEEP!) | 7896 | | valid (APEACH) | 3770 | ## Citation ``` @article{yang2022apeach, title={APEACH: Attacking Pejorative Expressions with Analysis on Crowd-Generated Hate Speech Evaluation Datasets}, author={Yang, Kichang and Jang, Wonjun and Cho, Won Ik}, journal={arXiv preprint arXiv:2202.12459}, year={2022} } ```
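A minimal loading sketch for the splits above (assumptions: the split names match the table and `class` decodes to integer indices of the `ClassLabel` feature; adjust if the Hub repository uses different split names):
```python
from datasets import load_dataset

# Split names ("train", "validation") follow the card's table and are an assumption.
ds = load_dataset("jason9693/APEACH")

label_names = ds["validation"].features["class"].names   # ['Default', 'Spoiled']
example = ds["validation"][0]
print(example["text"], "->", label_names[example["class"]])
```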
2,226
[ [ -0.03948974609375, -0.049896240234375, 0.008087158203125, 0.0202178955078125, -0.0099334716796875, 0.01485443115234375, -0.020263671875, -0.01812744140625, 0.0223236083984375, 0.0153656005859375, -0.016937255859375, -0.06219482421875, -0.050537109375, 0.0088...
bigscience/xP3mt
2023-05-30T15:50:57.000Z
[ "task_categories:other", "annotations_creators:expert-generated", "annotations_creators:crowdsourced", "multilinguality:multilingual", "size_categories:100M<n<1B", "language:ak", "language:ar", "language:as", "language:bm", "language:bn", "language:ca", "language:code", "language:en", "lan...
bigscience
xP3 (Crosslingual Public Pool of Prompts) is a collection of prompts & datasets across 46 languages & 16 NLP tasks. It is used for the training of BLOOMZ and mT0, multilingual language models capable of following human instructions in dozens of languages zero-shot.
@misc{muennighoff2022crosslingual, title={Crosslingual Generalization through Multitask Finetuning}, author={Niklas Muennighoff and Thomas Wang and Lintang Sutawika and Adam Roberts and Stella Biderman and Teven Le Scao and M Saiful Bari and Sheng Shen and Zheng-Xin Yong and Hailey Schoelkopf and Xiangru Tang and Dragomir Radev and Alham Fikri Aji and Khalid Almubarak and Samuel Albanie and Zaid Alyafeai and Albert Webson and Edward Raff and Colin Raffel}, year={2022}, eprint={2211.01786}, archivePrefix={arXiv}, primaryClass={cs.CL} }
18
97
2022-09-28T12:36:00
--- annotations_creators: - expert-generated - crowdsourced language: - ak - ar - as - bm - bn - ca - code - en - es - eu - fon - fr - gu - hi - id - ig - ki - kn - lg - ln - ml - mr - ne - nso - ny - or - pa - pt - rn - rw - sn - st - sw - ta - te - tn - ts - tum - tw - ur - vi - wo - xh - yo - zh - zu programming_language: - C - C++ - C# - Go - Java - JavaScript - Lua - PHP - Python - Ruby - Rust - Scala - TypeScript license: - apache-2.0 multilinguality: - multilingual pretty_name: xP3 size_categories: - 100M<n<1B task_categories: - other --- # Dataset Card for xP3 ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Repository:** https://github.com/bigscience-workshop/xmtf - **Paper:** [Crosslingual Generalization through Multitask Finetuning](https://arxiv.org/abs/2211.01786) - **Point of Contact:** [Niklas Muennighoff](mailto:niklas@hf.co) ### Dataset Summary > xP3 (Crosslingual Public Pool of Prompts) is a collection of prompts & datasets across 46 of languages & 16 NLP tasks. It is used for the training of BLOOMZ and mT0, multilingual language models capable of following human instructions in dozens of languages zero-shot. - **Creation:** The dataset can be recreated using instructions available [here](https://github.com/bigscience-workshop/xmtf#create-xp3). We provide this version to save processing time and ease reproducibility. 
- **Languages:** 46 (Can be extended by [recreating with more splits](https://github.com/bigscience-workshop/xmtf#create-xp3)) - **xP3 Dataset Family:** <table> <tr> <th>Name</th> <th>Explanation</th> <th>Example models</th> </tr> <tr> <td><a href=https://huggingface.co/datasets/Muennighoff/xP3x>xP3x</a></t> <td>Mixture of 17 tasks in 277 languages with English prompts</td> <td>WIP - Join us at Project Aya @<a href=https://cohere.for.ai/>C4AI</a> to help!</td> </tr> <tr> <td><a href=https://huggingface.co/datasets/bigscience/xP3>xP3</a></t> <td>Mixture of 13 training tasks in 46 languages with English prompts</td> <td><a href=https://huggingface.co/bigscience/bloomz>bloomz</a> & <a href=https://huggingface.co/bigscience/mt0-xxl>mt0-xxl</a></td> </tr> <tr> <td><a href=https://huggingface.co/datasets/bigscience/xP3mt>xP3mt</a></t> <td>Mixture of 13 training tasks in 46 languages with prompts in 20 languages (machine-translated from English)</td> <td><a href=https://huggingface.co/bigscience/bloomz-mt>bloomz-mt</a> & <a href=https://huggingface.co/bigscience/mt0-xxl-mt>mt0-xxl-mt</a></td> </tr> <tr> <td><a href=https://huggingface.co/datasets/bigscience/xP3all>xP3all</a></t> <td>xP3 + evaluation datasets adding an additional 3 tasks for a total of 16 tasks in 46 languages with English prompts</td> <td></td> </tr> <tr> <td><a href=https://huggingface.co/datasets/bigscience/xP3megds>xP3megds</a></t> <td><a href=https://github.com/bigscience-workshop/Megatron-DeepSpeed>Megatron-DeepSpeed</a> processed version of xP3</td> <td><a href=https://huggingface.co/bigscience/bloomz>bloomz</a></td> </tr> <tr> <td><a href=https://huggingface.co/datasets/Muennighoff/P3>P3</a></t> <td>Repreprocessed version of the English-only <a href=https://huggingface.co/datasets/bigscience/P3>P3</a> with 8 training tasks</td> <td><a href=https://huggingface.co/bigscience/bloomz-p3>bloomz-p3</a> & <a href=https://huggingface.co/bigscience/mt0-xxl-p3>mt0-xxl-p3</a></td> </tr> </table> ## Dataset Structure ### Data Instances An example of "train" looks as follows: ```json { "inputs": "Oración 1: Fue académico en literatura metafísica, teología y ciencias clásicas.\Oración 2: Fue académico en literatura metafísica, teología y ciencia clásica.\nPregunta: ¿La oración 1 parafrasea la oración 2? ¿Si o no?", "targets": "Sí" } ``` ### Data Fields The data fields are the same among all splits: - `inputs`: the natural language input fed to the model - `targets`: the natural language target that the model has to generate ### Data Splits The below table summarizes sizes per language (computed from the `merged_{lang}.jsonl` files). Due to languages like `tw` only being single sentence translation samples from Flores, their byte percentage is significantly lower than their sample percentage. We machine-translated prompts for monolingual datasets, thus languages with only crosslingual datasets (e.g. Translation) do not have non-English prompts. Languages without non-English prompts are equivalent to [xP3](https://huggingface.co/datasets/bigscience/xP3). 
|Language|Kilobytes|%|Samples|%|Non-English prompts| |--------|------:|-:|---:|-:|-:| |tw|106288|0.11|265071|0.33| | |bm|107056|0.11|265180|0.33| | |ak|108096|0.11|265071|0.33| | |ca|110608|0.11|271191|0.34| | |eu|113008|0.12|281199|0.35| | |fon|113072|0.12|265063|0.33| | |st|114080|0.12|265063|0.33| | |ki|115040|0.12|265180|0.33| | |tum|116032|0.12|265063|0.33| | |wo|122560|0.13|365063|0.46| | |ln|126304|0.13|365060|0.46| | |as|156256|0.16|265063|0.33| | |or|161472|0.17|265063|0.33| | |kn|165456|0.17|265063|0.33| | |ml|175040|0.18|265864|0.33| | |rn|192992|0.2|318189|0.4| | |nso|229712|0.24|915051|1.14| | |tn|235536|0.24|915054|1.14| | |lg|235936|0.24|915021|1.14| | |rw|249360|0.26|915043|1.14| | |ts|250256|0.26|915044|1.14| | |sn|252496|0.26|865056|1.08| | |xh|254672|0.26|915058|1.14| | |zu|263712|0.27|915061|1.14| | |ny|272128|0.28|915063|1.14| | |ig|325440|0.33|950097|1.19|✅| |yo|339664|0.35|913021|1.14|✅| |ne|398144|0.41|315754|0.39|✅| |pa|529632|0.55|339210|0.42|✅| |sw|561392|0.58|1114439|1.39|✅| |gu|566576|0.58|347499|0.43|✅| |mr|674000|0.69|417269|0.52|✅| |bn|854864|0.88|428725|0.54|✅| |ta|943440|0.97|410633|0.51|✅| |te|1384016|1.42|573354|0.72|✅| |ur|1944416|2.0|855756|1.07|✅| |vi|3113184|3.2|1667306|2.08|✅| |code|4330752|4.46|2707724|3.38| | |hi|4469712|4.6|1543441|1.93|✅| |id|4538768|4.67|2582272|3.22|✅| |zh|4604112|4.74|3571636|4.46|✅| |ar|4703968|4.84|2148970|2.68|✅| |fr|5558912|5.72|5055942|6.31|✅| |pt|6130016|6.31|3562772|4.45|✅| |es|7579424|7.8|5151349|6.43|✅| |en|39252528|40.4|32740750|40.87| | |total|97150128|100.0|80100816|100.0|✅| ## Dataset Creation ### Source Data #### Training datasets - Code Miscellaneous - [CodeComplex](https://huggingface.co/datasets/codeparrot/codecomplex) - [Docstring Corpus](https://huggingface.co/datasets/teven/code_docstring_corpus) - [GreatCode](https://huggingface.co/datasets/great_code) - [State Changes](https://huggingface.co/datasets/Fraser/python-state-changes) - Closed-book QA - [Hotpot QA](https://huggingface.co/datasets/hotpot_qa) - [Trivia QA](https://huggingface.co/datasets/trivia_qa) - [Web Questions](https://huggingface.co/datasets/web_questions) - [Wiki QA](https://huggingface.co/datasets/wiki_qa) - Extractive QA - [Adversarial QA](https://huggingface.co/datasets/adversarial_qa) - [CMRC2018](https://huggingface.co/datasets/cmrc2018) - [DRCD](https://huggingface.co/datasets/clue) - [DuoRC](https://huggingface.co/datasets/duorc) - [MLQA](https://huggingface.co/datasets/mlqa) - [Quoref](https://huggingface.co/datasets/quoref) - [ReCoRD](https://huggingface.co/datasets/super_glue) - [ROPES](https://huggingface.co/datasets/ropes) - [SQuAD v2](https://huggingface.co/datasets/squad_v2) - [xQuAD](https://huggingface.co/datasets/xquad) - TyDI QA - [Primary](https://huggingface.co/datasets/khalidalt/tydiqa-primary) - [Goldp](https://huggingface.co/datasets/khalidalt/tydiqa-goldp) - Multiple-Choice QA - [ARC](https://huggingface.co/datasets/ai2_arc) - [C3](https://huggingface.co/datasets/c3) - [CoS-E](https://huggingface.co/datasets/cos_e) - [Cosmos](https://huggingface.co/datasets/cosmos) - [DREAM](https://huggingface.co/datasets/dream) - [MultiRC](https://huggingface.co/datasets/super_glue) - [OpenBookQA](https://huggingface.co/datasets/openbookqa) - [PiQA](https://huggingface.co/datasets/piqa) - [QUAIL](https://huggingface.co/datasets/quail) - [QuaRel](https://huggingface.co/datasets/quarel) - [QuaRTz](https://huggingface.co/datasets/quartz) - [QASC](https://huggingface.co/datasets/qasc) - [RACE](https://huggingface.co/datasets/race) - 
[SciQ](https://huggingface.co/datasets/sciq) - [Social IQA](https://huggingface.co/datasets/social_i_qa) - [Wiki Hop](https://huggingface.co/datasets/wiki_hop) - [WiQA](https://huggingface.co/datasets/wiqa) - Paraphrase Identification - [MRPC](https://huggingface.co/datasets/super_glue) - [PAWS](https://huggingface.co/datasets/paws) - [PAWS-X](https://huggingface.co/datasets/paws-x) - [QQP](https://huggingface.co/datasets/qqp) - Program Synthesis - [APPS](https://huggingface.co/datasets/codeparrot/apps) - [CodeContests](https://huggingface.co/datasets/teven/code_contests) - [JupyterCodePairs](https://huggingface.co/datasets/codeparrot/github-jupyter-text-code-pairs) - [MBPP](https://huggingface.co/datasets/Muennighoff/mbpp) - [NeuralCodeSearch](https://huggingface.co/datasets/neural_code_search) - [XLCoST](https://huggingface.co/datasets/codeparrot/xlcost-text-to-code) - Structure-to-text - [Common Gen](https://huggingface.co/datasets/common_gen) - [Wiki Bio](https://huggingface.co/datasets/wiki_bio) - Sentiment - [Amazon](https://huggingface.co/datasets/amazon_polarity) - [App Reviews](https://huggingface.co/datasets/app_reviews) - [IMDB](https://huggingface.co/datasets/imdb) - [Rotten Tomatoes](https://huggingface.co/datasets/rotten_tomatoes) - [Yelp](https://huggingface.co/datasets/yelp_review_full) - Simplification - [BiSECT](https://huggingface.co/datasets/GEM/BiSECT) - Summarization - [CNN Daily Mail](https://huggingface.co/datasets/cnn_dailymail) - [Gigaword](https://huggingface.co/datasets/gigaword) - [MultiNews](https://huggingface.co/datasets/multi_news) - [SamSum](https://huggingface.co/datasets/samsum) - [Wiki-Lingua](https://huggingface.co/datasets/GEM/wiki_lingua) - [XLSum](https://huggingface.co/datasets/GEM/xlsum) - [XSum](https://huggingface.co/datasets/xsum) - Topic Classification - [AG News](https://huggingface.co/datasets/ag_news) - [DBPedia](https://huggingface.co/datasets/dbpedia_14) - [TNEWS](https://huggingface.co/datasets/clue) - [TREC](https://huggingface.co/datasets/trec) - [CSL](https://huggingface.co/datasets/clue) - Translation - [Flores-200](https://huggingface.co/datasets/Muennighoff/flores200) - [Tatoeba](https://huggingface.co/datasets/Helsinki-NLP/tatoeba_mt) - Word Sense disambiguation - [WiC](https://huggingface.co/datasets/super_glue) - [XL-WiC](https://huggingface.co/datasets/pasinit/xlwic) #### Evaluation datasets (included in [xP3all](https://huggingface.co/datasets/bigscience/xP3all) except for NLI & HumanEval) - Natural Language Inference (NLI) - [ANLI](https://huggingface.co/datasets/anli) - [CB](https://huggingface.co/datasets/super_glue) - [RTE](https://huggingface.co/datasets/super_glue) - [XNLI](https://huggingface.co/datasets/xnli) - Coreference Resolution - [Winogrande](https://huggingface.co/datasets/winogrande) - [XWinograd](https://huggingface.co/datasets/Muennighoff/xwinograd) - Program Synthesis - [HumanEval](https://huggingface.co/datasets/openai_humaneval) - Sentence Completion - [COPA](https://huggingface.co/datasets/super_glue) - [Story Cloze](https://huggingface.co/datasets/story_cloze) - [XCOPA](https://huggingface.co/datasets/xcopa) - [XStoryCloze](https://huggingface.co/datasets/Muennighoff/xstory_cloze) ## Additional Information ### Licensing Information The dataset is released under Apache 2.0. 
### Citation Information ```bibtex @misc{muennighoff2022crosslingual, title={Crosslingual Generalization through Multitask Finetuning}, author={Niklas Muennighoff and Thomas Wang and Lintang Sutawika and Adam Roberts and Stella Biderman and Teven Le Scao and M Saiful Bari and Sheng Shen and Zheng-Xin Yong and Hailey Schoelkopf and Xiangru Tang and Dragomir Radev and Alham Fikri Aji and Khalid Almubarak and Samuel Albanie and Zaid Alyafeai and Albert Webson and Edward Raff and Colin Raffel}, year={2022}, eprint={2211.01786}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ### Contributions Thanks to the contributors of [promptsource](https://github.com/bigscience-workshop/promptsource/graphs/contributors) for adding many prompts used in this dataset.
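A streaming sketch for the `inputs`/`targets` fields described above (assumptions: language codes such as `"es"` are valid config names for this repository and streaming is supported):
```python
from datasets import load_dataset

# "es" as a config name is an assumption based on the per-language organization above.
ds = load_dataset("bigscience/xP3mt", "es", split="train", streaming=True)

for sample in ds.take(2):
    print(sample["inputs"][:200])   # natural-language input fed to the model
    print("->", sample["targets"])  # natural-language target the model has to generate
```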
13,046
[ [ -0.039520263671875, -0.031524658203125, 0.0206298828125, 0.01303863525390625, 0.00916290283203125, 0.01036834716796875, -0.0216827392578125, -0.024658203125, 0.032318115234375, 0.00876617431640625, -0.05755615234375, -0.056640625, -0.035888671875, 0.02508544...
yhavinga/squad_v2_dutch
2023-01-21T13:53:27.000Z
[ "task_categories:question-answering", "task_ids:open-domain-qa", "task_ids:extractive-qa", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:nl", "license:cc-by-sa-4.0", "arxiv:...
yhavinga
null
null
1
97
2022-12-17T22:50:45
--- pretty_name: SQuAD2.0 Dutch annotations_creators: - crowdsourced language_creators: - crowdsourced language: - nl license: - cc-by-sa-4.0 multilinguality: - monolingual size_categories: - 100K<n<1M source_datasets: - original task_categories: - question-answering task_ids: - open-domain-qa - extractive-qa paperswithcode_id: squad_v2_dutch dataset_info: features: - name: id dtype: string - name: title dtype: string - name: title_en dtype: string - name: context dtype: string - name: question dtype: string - name: answers sequence: - name: text dtype: string - name: text_en dtype: string - name: answer_start_en dtype: int32 --- # Dataset Card for "squad_v2_dutch" ## Dataset Description - **Homepage:** [https://rajpurkar.github.io/SQuAD-explorer/](https://rajpurkar.github.io/SQuAD-explorer/) ## Dataset Summary The squad_v2_dutch dataset is a machine-translated version of the SQuAD v2 dataset from English to Dutch. The SQuAD v2 dataset combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. ## Challenges and Solutions One of the main challenges in translating the SQuAD v2 dataset to Dutch was accurately translating the answers, which are often short phrases or single words. Translating the answers individually would result in obvious mistakes. Examples are * Destiny's Child -> Het kind van Destiny * Dangerously in Love -> Gevaarlijk in de liefde * Imagine -> Stel je voor * Men in Black -> Mannen in zwart * Hottest Female Singer of All Time -> De heetste vrouwelijke zanger aller tijden The correct translation of these phrases often depends on the context in which they are used. To address this, the title, question, answers, and context were concatenated as a single sequence, separated by the newline character. When the translated version had the correct number of newlines and did not contain any apparent mixups of the answers with the question and title, it was used. Otherwise, the one-by-one context-less translation was used as a fallback. Most examples where translated with the context-rich translation: ~95%. * train split: context: 123898, no context: 6406 * validation split: context: 10196, no context: 1644 ### Data Fields The data fields are the same among all splits. #### squad_v2 - `id`: a `string` feature. - `title`: a `string` feature. - `title_en`: a `string` feature. - `context`: a `string` feature. - `question`: a `string` feature. - `answers`: a dictionary feature containing: - `text`: a list of `string` feature. - `text_en`: a list of `string` feature. - `answer_start_en`: a `int32` feature. ### Citation Information ``` @article{2016arXiv160605250R, author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev}, Konstantin and {Liang}, Percy}, title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}", journal = {arXiv e-prints}, year = 2016, eid = {arXiv:1606.05250}, pages = {arXiv:1606.05250}, archivePrefix = {arXiv}, eprint = {1606.05250}, } ``` ### Contributions Thanks to [@lewtun](https://github.com/lewtun), [@albertvillanova](https://github.com/albertvillanova), [@patrickvonplaten](https://github.com/patrickvonplaten), [@thomwolf](https://github.com/thomwolf) for adding the https://huggingface.co/datasets/squad_v2 dataset. 
This project would not have been possible without compute generously provided by Google through the [TPU Research Cloud](https://sites.research.google/trc/). Created by [Yeb Havinga](https://www.linkedin.com/in/yeb-havinga-86530825/)
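An illustrative sketch of the context-rich translation strategy described above; `translate` stands in for whichever MT system is used and is not part of this dataset:
```python
# Sketch of the concatenate-translate-split strategy described in the card.
# `translate` is a hypothetical callable mapping an English string to Dutch.
def translate_example(example, translate):
    parts = [example["title"], example["question"], *example["answers"]["text"], example["context"]]
    joined = "\n".join(parts)
    translated = translate(joined)
    pieces = translated.split("\n")
    if len(pieces) == len(parts):          # context-rich translation kept the structure
        return pieces
    return [translate(p) for p in parts]   # fallback: one-by-one, context-less translation
```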
3,885
[ [ -0.041015625, -0.059539794921875, 0.01580810546875, 0.03179931640625, -0.0180511474609375, 0.0029315948486328125, -0.015869140625, -0.035247802734375, 0.0250396728515625, 0.0281524658203125, -0.0672607421875, -0.03369140625, -0.02996826171875, 0.031112670898...
Dr-BERT/QUAERO
2023-06-12T20:53:41.000Z
[ "task_categories:token-classification", "multilinguality:monolingual", "size_categories:1K<n<10K", "language:fr", "license:other", "medical", "region:us" ]
Dr-BERT
The QUAERO French Medical Corpus has been initially developed as a resource for named entity recognition and normalization [1]. It was then improved with the purpose of creating a gold standard set of normalized entities for French biomedical text, that was used in the CLEF eHealth evaluation lab [2][3]. A selection of MEDLINE titles and EMEA documents were manually annotated. The annotation process was guided by concepts in the Unified Medical Language System (UMLS): 1. Ten types of clinical entities, as defined by the following UMLS Semantic Groups (Bodenreider and McCray 2003) were annotated: Anatomy, Chemical and Drugs, Devices, Disorders, Geographic Areas, Living Beings, Objects, Phenomena, Physiology, Procedures. 2. The annotations were made in a comprehensive fashion, so that nested entities were marked, and entities could be mapped to more than one UMLS concept. In particular: (a) If a mention can refer to more than one Semantic Group, all the relevant Semantic Groups should be annotated. For instance, the mention “récidive” (recurrence) in the phrase “prévention des récidives” (recurrence prevention) should be annotated with the category “DISORDER” (CUI C2825055) and the category “PHENOMENON” (CUI C0034897); (b) If a mention can refer to more than one UMLS concept within the same Semantic Group, all the relevant concepts should be annotated. For instance, the mention “maniaques” (obsessive) in the phrase “patients maniaques” (obsessive patients) should be annotated with CUIs C0564408 and C0338831 (category “DISORDER”); (c) Entities which span overlaps with that of another entity should still be annotated. For instance, in the phrase “infarctus du myocarde” (myocardial infarction), the mention “myocarde” (myocardium) should be annotated with category “ANATOMY” (CUI C0027061) and the mention “infarctus du myocarde” should be annotated with category “DISORDER” (CUI C0027051) The QUAERO French Medical Corpus BioC release comprises a subset of the QUAERO French Medical corpus, as follows: Training data (BRAT version used in CLEF eHealth 2015 task 1b as training data): - MEDLINE_train_bioc file: 833 MEDLINE titles, annotated with normalized entities in the BioC format - EMEA_train_bioc file: 3 EMEA documents, segmented into 11 sub-documents, annotated with normalized entities in the BioC format Development data (BRAT version used in CLEF eHealth 2015 task 1b as test data and in CLEF eHealth 2016 task 2 as development data): - MEDLINE_dev_bioc file: 832 MEDLINE titles, annotated with normalized entities in the BioC format - EMEA_dev_bioc file: 3 EMEA documents, segmented into 12 sub-documents, annotated with normalized entities in the BioC format Test data (BRAT version used in CLEF eHealth 2016 task 2 as test data): - MEDLINE_test_bioc folder: 833 MEDLINE titles, annotated with normalized entities in the BioC format - EMEA folder_test_bioc: 4 EMEA documents, segmented into 15 sub-documents, annotated with normalized entities in the BioC format This release of the QUAERO French medical corpus, BioC version, comes in the BioC format, through automatic conversion from the original BRAT format obtained with the Brat2BioC tool https://bitbucket.org/nicta_biomed/brat2bioc developped by Jimeno Yepes et al. 
Antonio Jimeno Yepes, Mariana Neves, Karin Verspoor Brat2BioC: conversion tool between brat and BioC BioCreative IV track 1 - BioC: The BioCreative Interoperability Initiative, 2013 Please note that the original version of the QUAERO corpus distributed in the CLEF eHealth challenge 2015 and 2016 came in the BRAT stand alone format. It was distributed with the CLEF eHealth evaluation tool. This original distribution of the QUAERO French Medical corpus is available separately from https://quaerofrenchmed.limsi.fr All questions regarding the task or data should be addressed to aurelie.neveol@limsi.fr
@InProceedings{neveol14quaero, author = {Névéol, Aurélie and Grouin, Cyril and Leixa, Jeremy and Rosset, Sophie and Zweigenbaum, Pierre}, title = {The {QUAERO} {French} Medical Corpus: A Ressource for Medical Entity Recognition and Normalization}, OPTbooktitle = {Proceedings of the Fourth Workshop on Building and Evaluating Ressources for Health and Biomedical Text Processing}, booktitle = {Proc of BioTextMining Work}, OPTseries = {BioTxtM 2014}, year = {2014}, pages = {24--30}, }
3
97
2023-04-25T22:01:52
--- language: - fr license: other multilinguality: monolingual pretty_name: QUAERO homepage: https://quaerofrenchmed.limsi.fr/ task_categories: - token-classification tags: - medical size_categories: - 1K<n<10K --- # Dataset Card for QUAERO ## Dataset Description - **Homepage:** https://quaerofrenchmed.limsi.fr/ - **Pubmed:** True - **Public:** True - **Tasks:** Named-Entity Recognition (NER) The QUAERO French Medical Corpus has been initially developed as a resource for named entity recognition and normalization [1]. It was then improved with the purpose of creating a gold standard set of normalized entities for French biomedical text, that was used in the CLEF eHealth evaluation lab [2][3]. A selection of MEDLINE titles and EMEA documents were manually annotated. The annotation process was guided by concepts in the Unified Medical Language System (UMLS): 1. Ten types of clinical entities, as defined by the following UMLS Semantic Groups (Bodenreider and McCray 2003) were annotated: Anatomy, Chemical and Drugs, Devices, Disorders, Geographic Areas, Living Beings, Objects, Phenomena, Physiology, Procedures. 2. The annotations were made in a comprehensive fashion, so that nested entities were marked, and entities could be mapped to more than one UMLS concept. In particular: (a) If a mention can refer to more than one Semantic Group, all the relevant Semantic Groups should be annotated. For instance, the mention “récidive” (recurrence) in the phrase “prévention des récidives” (recurrence prevention) should be annotated with the category “DISORDER” (CUI C2825055) and the category “PHENOMENON” (CUI C0034897); (b) If a mention can refer to more than one UMLS concept within the same Semantic Group, all the relevant concepts should be annotated. For instance, the mention “maniaques” (obsessive) in the phrase “patients maniaques” (obsessive patients) should be annotated with CUIs C0564408 and C0338831 (category “DISORDER”); (c) Entities which span overlaps with that of another entity should still be annotated. 
For instance, in the phrase “infarctus du myocarde” (myocardial infarction), the mention “myocarde” (myocardium) should be annotated with category “ANATOMY” (CUI C0027061) and the mention “infarctus du myocarde” should be annotated with category “DISORDER” (CUI C0027051) The QUAERO French Medical Corpus BioC release comprises a subset of the QUAERO French Medical corpus, as follows: Training data (BRAT version used in CLEF eHealth 2015 task 1b as training data): - MEDLINE_train_bioc file: 833 MEDLINE titles, annotated with normalized entities in the BioC format - EMEA_train_bioc file: 3 EMEA documents, segmented into 11 sub-documents, annotated with normalized entities in the BioC format Development data (BRAT version used in CLEF eHealth 2015 task 1b as test data and in CLEF eHealth 2016 task 2 as development data): - MEDLINE_dev_bioc file: 832 MEDLINE titles, annotated with normalized entities in the BioC format - EMEA_dev_bioc file: 3 EMEA documents, segmented into 12 sub-documents, annotated with normalized entities in the BioC format Test data (BRAT version used in CLEF eHealth 2016 task 2 as test data): - MEDLINE_test_bioc folder: 833 MEDLINE titles, annotated with normalized entities in the BioC format - EMEA folder_test_bioc: 4 EMEA documents, segmented into 15 sub-documents, annotated with normalized entities in the BioC format This release of the QUAERO French medical corpus, BioC version, comes in the BioC format, through automatic conversion from the original BRAT format obtained with the Brat2BioC tool https://bitbucket.org/nicta_biomed/brat2bioc developped by Jimeno Yepes et al. Antonio Jimeno Yepes, Mariana Neves, Karin Verspoor Brat2BioC: conversion tool between brat and BioC BioCreative IV track 1 - BioC: The BioCreative Interoperability Initiative, 2013 Please note that the original version of the QUAERO corpus distributed in the CLEF eHealth challenge 2015 and 2016 came in the BRAT stand alone format. It was distributed with the CLEF eHealth evaluation tool. This original distribution of the QUAERO French Medical corpus is available separately from https://quaerofrenchmed.limsi.fr All questions regarding the task or data should be addressed to aurelie.neveol@limsi.fr ## Citation Information ``` @InProceedings{neveol14quaero, author = {Névéol, Aurélie and Grouin, Cyril and Leixa, Jeremy and Rosset, Sophie and Zweigenbaum, Pierre}, title = {The {QUAERO} {French} Medical Corpus: A Ressource for Medical Entity Recognition and Normalization}, OPTbooktitle = {Proceedings of the Fourth Workshop on Building and Evaluating Ressources for Health and Biomedical Text Processing}, booktitle = {Proc of BioTextMining Work}, OPTseries = {BioTxtM 2014}, year = {2014}, pages = {24--30}, } ```
4,891
[ [ -0.0316162109375, -0.0169219970703125, 0.043792724609375, 0.0111083984375, -0.0089569091796875, -0.00270843505859375, -0.00617218017578125, -0.055267333984375, 0.034881591796875, 0.04168701171875, -0.017974853515625, -0.06072998046875, -0.049560546875, 0.041...
bigcode/ta-prompt
2023-05-04T12:20:22.000Z
[ "language:code", "license:apache-2.0", "region:us" ]
bigcode
null
null
155
97
2023-05-03T14:04:39
--- license: apache-2.0 language: - code programming_language: - Java - JavaScript - Python --- # Dataset summary This repository is dedicated to prompts used to perform in-context learning with [starcoder](https://huggingface.co/bigcode/starcoder). The model is an autoregressive language model trained on both code and natural language text. It can be turned into an AI-powered technical assistant by prepending conversations to its 8192-token context window. # Format The prompt is a .txt file which contains multiple conversations between a human and the assistant. Here is the format:
```
-----
Human: <instruction>
Assistant: <answer>
-----
Human: <instruction>
Assistant: <answer>
Human: <instruction>
Assistant: <answer>
.
.
.
-----
```
# Use cases We want the technical assistant to cover a diverse set of use cases: - **Code-to-text**: - `What is the purpose of the following code?<code>` - `What is the bug in the following code?<code>` - **Text-to-code**: - `Write/Design/Implement a function to <task>` - **Code-to-code**: - `Translate this <code> from <programming language> to <programming language>.` - **Text-to-text**: - `What is <technical concept>` - **General-purpose Q&A** - `What are you?` - `What is your purpose?` # Scope of the work Since the model is designed for coding tasks, the user should not expect it to output relevant answers when prompted with a general-purpose question. For coding requests, the output of the model should be post-processed before testing it.
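A usage sketch for prepending the conversations to a new request (the `.txt` file name below is an assumption; use whichever prompt file is present in the repository):
```python
from huggingface_hub import hf_hub_download

# The file name "TA_prompt_v1.txt" is an assumption -- check the repository contents.
path = hf_hub_download(repo_id="bigcode/ta-prompt", filename="TA_prompt_v1.txt", repo_type="dataset")
with open(path, encoding="utf-8") as f:
    ta_prompt = f.read()

# Append a new turn in the same "----- / Human: / Assistant:" format described above.
question = "Write a function that checks whether a string is a palindrome."
prompt = f"{ta_prompt}\n-----\nHuman: {question}\n\nAssistant:"
# `prompt` can now be fed to starcoder; truncate from the left if it exceeds
# the 8192-token context window.
```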
1,574
[ [ -0.0310516357421875, -0.061370849609375, 0.037567138671875, -0.0046539306640625, 0.00110626220703125, -0.005756378173828125, -0.0232696533203125, -0.017333984375, 0.001697540283203125, 0.05157470703125, -0.0621337890625, -0.046661376953125, -0.0292510986328125, ...
tasksource/icl-symbol-tuning-instruct
2023-07-26T07:20:41.000Z
[ "task_categories:text2text-generation", "task_categories:text-classification", "task_categories:text-generation", "size_categories:100K<n<1M", "language:en", "license:apache-2.0", "in-context-learning", "symbol-tuning", "icl", "meta-icl", "meta-learning", "flan", "long-input", "instruction...
tasksource
null
null
11
97
2023-06-15T14:44:19
--- license: apache-2.0 task_categories: - text2text-generation - text-classification - text-generation language: - en tags: - in-context-learning - symbol-tuning - icl - meta-icl - meta-learning - flan - long-input - instruction-tuning - instruct - metaicl dataset_info: features: - name: task dtype: string - name: inputs dtype: string - name: targets dtype: string - name: symbols sequence: string splits: - name: validation num_bytes: 42218685.0 num_examples: 14970 - name: test num_bytes: 43453364.0 num_examples: 16204 - name: train num_bytes: 1303015298.0 num_examples: 452367 download_size: 727062369 dataset_size: 1388687347.0 size_categories: - 100K<n<1M --- # Description Few-shot prompting demonstrates that language models can learn in context even though they were not trained to do so. However, explicitly learning to learn in context ([meta-ICL](https://arxiv.org/abs/2110.15943)) leads to better results. With symbol tuning, labels are replaced with arbitrary symbols (e.g. foo/bar), which makes learning in context a key condition for learning the instructions. We implement *symbol tuning*, as presented in the [Symbol tuning improves in-context learning](https://arxiv.org/pdf/2305.08298.pdf) paper, with tasksource classification datasets. An input is a shuffled sequence of 4 positive and 4 negative examples showing a particular label (replaced with a symbol - a random word), followed by an example to label. This is the largest symbol-tuning dataset to date, with 279 datasets. Symbol tuning improves in-context learning, which tends to be degraded by instruction tuning. # Usage We limit input size to 50_000 characters. This is more than enough to challenge long-range modeling. Be careful to remove examples that are too long or to truncate from the left; otherwise some examples might become unsolvable, as the "question" is at the end of each example.
```python
from datasets import load_dataset

dataset = load_dataset('tasksource/icl-symbol-tuning-instruct')
# assuming 4 characters per token and 1000 tokens
dataset = dataset.filter(lambda x: len(x['inputs']) < 1000 * 4)
```
## References: Code: https://github.com/sileod/tasksource ``` @article{sileo2023tasksource, title={tasksource: Structured Dataset Preprocessing Annotations for Frictionless Extreme Multi-Task Learning and Evaluation}, author={Sileo, Damien}, url= {https://arxiv.org/abs/2301.05948}, journal={arXiv preprint arXiv:2301.05948}, year={2023} } @article{wei2023symbol, title={Symbol tuning improves in-context learning in language models}, author={Wei, Jerry and Hou, Le and Lampinen, Andrew and Chen, Xiangning and Huang, Da and Tay, Yi and Chen, Xinyun and Lu, Yifeng and Zhou, Denny and Ma, Tengyu and others}, journal={arXiv preprint arXiv:2305.08298}, year={2023} } ```
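A left-truncation sketch complementing the filter above, so the example to label at the end of `inputs` is never cut off (the tokenizer checkpoint is only an example, not part of this dataset):
```python
from transformers import AutoTokenizer

# "gpt2" is only an example checkpoint; use the tokenizer of the model being tuned.
tok = AutoTokenizer.from_pretrained("gpt2")
tok.truncation_side = "left"   # keep the end of the sequence, where the question sits

sample = {"inputs": "very long sequence of symbol-tuned demonstrations ...", "targets": "foo"}
enc = tok(sample["inputs"], truncation=True, max_length=1024)
print(len(enc["input_ids"]))
```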
2,809
[ [ -0.027130126953125, -0.053009033203125, 0.02862548828125, -0.0005536079406738281, -0.0322265625, -0.0276641845703125, -0.041961669921875, -0.041412353515625, -0.0269012451171875, 0.0224609375, -0.061676025390625, -0.03961181640625, -0.04498291015625, 0.02685...
tingchih/multi-class
2023-09-12T04:21:02.000Z
[ "region:us" ]
tingchih
null
null
0
97
2023-09-12T00:25:48
--- dataset_info: features: - name: text dtype: string - name: label dtype: int64 splits: - name: train num_bytes: 98926083 num_examples: 570999 - name: test num_bytes: 42106324 num_examples: 245116 download_size: 77717077 dataset_size: 141032407 --- # Dataset Card for "multi-class" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
456
[ [ -0.0517578125, -0.0191802978515625, 0.006580352783203125, 0.01306915283203125, -0.0002818107604980469, 0.0178680419921875, 0.01242828369140625, -0.0217437744140625, 0.052764892578125, 0.0228424072265625, -0.046112060546875, -0.04296875, -0.040069580078125, -...
loremipsum3658/adj_extension
2023-09-28T17:03:46.000Z
[ "region:us" ]
loremipsum3658
null
null
0
97
2023-09-28T17:02:18
--- dataset_info: features: - name: data dtype: string - name: titulo dtype: string - name: andamento dtype: string - name: nup dtype: 'null' - name: classificacao_andamento sequence: string - name: __index_level_0__ dtype: int64 splits: - name: train num_bytes: 71124 num_examples: 135 download_size: 23610 dataset_size: 71124 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "adj_extension" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
639
[ [ -0.054046630859375, -0.03875732421875, 0.0058441162109375, -0.0186767578125, -0.0033626556396484375, -0.003246307373046875, 0.00051116943359375, -0.01398468017578125, 0.0711669921875, 0.025360107421875, -0.0556640625, -0.053558349609375, -0.0435791015625, -0...
alzoubi36/title_generation
2023-10-01T12:43:11.000Z
[ "region:us" ]
alzoubi36
null
null
0
97
2023-10-01T12:43:03
--- dataset_info: features: - name: text dtype: string - name: summary dtype: string - name: id dtype: int64 splits: - name: validation num_bytes: 1753243 num_examples: 2000 - name: test num_bytes: 1682435 num_examples: 2000 - name: train num_bytes: 17556737 num_examples: 20000 download_size: 10393931 dataset_size: 20992415 --- # Dataset Card for "title_generation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
556
[ [ -0.03369140625, -0.01385498046875, 0.01468658447265625, 0.006114959716796875, -0.01483154296875, 0.01235198974609375, 0.0173492431640625, -0.00023496150970458984, 0.04180908203125, 0.03985595703125, -0.0640869140625, -0.05084228515625, -0.037872314453125, -0...
sheepy928/rt_merged
2023-10-23T22:13:12.000Z
[ "region:us" ]
sheepy928
null
null
0
97
2023-10-23T22:12:30
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* dataset_info: features: - name: sentence dtype: string - name: label dtype: int64 splits: - name: train num_bytes: 25082040.23509904 num_examples: 170188 - name: test num_bytes: 4426363.76490096 num_examples: 30034 download_size: 18535178 dataset_size: 29508404.0 --- # Dataset Card for "cs490_reddit_twitter_merged" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
617
[ [ -0.0443115234375, -0.00543212890625, 0.0147247314453125, 0.0304412841796875, -0.015777587890625, 0.026458740234375, -0.00042748451232910156, -0.0105438232421875, 0.0614013671875, 0.0273590087890625, -0.06524658203125, -0.043731689453125, -0.044891357421875, ...
capes
2022-11-03T16:15:53.000Z
[ "task_categories:translation", "annotations_creators:found", "language_creators:found", "multilinguality:multilingual", "size_categories:1M<n<10M", "source_datasets:original", "language:en", "language:pt", "license:unknown", "dissertation-abstracts-translation", "theses-translation", "region:u...
null
A parallel corpus of theses and dissertations abstracts in English and Portuguese was collected from the CAPES website (Coordenação de Aperfeiçoamento de Pessoal de Nível Superior) - Brazil. The corpus is sentence-aligned for all language pairs. Approximately 240,000 documents were collected and aligned using the Hunalign algorithm.
@inproceedings{soares2018parallel, title={A Parallel Corpus of Theses and Dissertations Abstracts}, author={Soares, Felipe and Yamashita, Gabrielli Harumi and Anzanello, Michel Jose}, booktitle={International Conference on Computational Processing of the Portuguese Language}, pages={345--352}, year={2018}, organization={Springer} }
2
96
2022-03-02T23:29:22
--- annotations_creators: - found language_creators: - found language: - en - pt license: - unknown multilinguality: - multilingual size_categories: - 1M<n<10M source_datasets: - original task_categories: - translation task_ids: [] paperswithcode_id: capes pretty_name: CAPES tags: - dissertation-abstracts-translation - theses-translation dataset_info: features: - name: translation dtype: translation: languages: - en - pt config_name: en-pt splits: - name: train num_bytes: 472484364 num_examples: 1157610 download_size: 162229298 dataset_size: 472484364 --- # Dataset Card for CAPES ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:**[Parallel corpus of theses and dissertation abstracts in Portuguese and English from CAPES](https://sites.google.com/view/felipe-soares/datasets#h.p_kxOR6EhHm2a6) - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary A parallel corpus of theses and dissertations abstracts in English and Portuguese were collected from the CAPES website (Coordenação de Aperfeiçoamento de Pessoal de Nível Superior) - Brazil. The corpus is sentence aligned for all language pairs. Approximately 240,000 documents were collected and aligned using the Hunalign algorithm. ### Supported Tasks and Leaderboards The underlying task is machine translation. ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? 
[More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` @inproceedings{soares2018parallel, title={A Parallel Corpus of Theses and Dissertations Abstracts}, author={Soares, Felipe and Yamashita, Gabrielli Harumi and Anzanello, Michel Jose}, booktitle={International Conference on Computational Processing of the Portuguese Language}, pages={345--352}, year={2018}, organization={Springer} } ``` ### Contributions Thanks to [@patil-suraj](https://github.com/patil-suraj) for adding this dataset.
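A loading sketch for the `en-pt` configuration declared in the card metadata (the single `train` split follows the metadata; recent `datasets` versions may additionally require `trust_remote_code=True` for script-based datasets):
```python
from datasets import load_dataset

# "en-pt" and the single "train" split come from the card metadata above.
ds = load_dataset("capes", "en-pt", split="train")

pair = ds[0]["translation"]
print(pair["en"])   # English abstract sentence
print(pair["pt"])   # aligned Portuguese sentence
```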
3,857
[ [ -0.040313720703125, -0.0240478515625, 0.007793426513671875, 0.0276336669921875, -0.01092529296875, 0.01306915283203125, -0.031768798828125, -0.03466796875, 0.041229248046875, 0.036865234375, -0.039794921875, -0.0687255859375, -0.059661865234375, 0.0308990478...
msr_text_compression
2022-11-18T21:30:29.000Z
[ "task_categories:summarization", "annotations_creators:crowdsourced", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:extended|other-Open-American-National-Corpus-(OANC1)", "language:en", "license:other", "region:us" ]
null
This dataset contains sentences and short paragraphs with corresponding shorter (compressed) versions. There are up to five compressions for each input text, together with quality judgements of their meaning preservation and grammaticality. The dataset is derived using source texts from the Open American National Corpus (www.anc.org) and crowd-sourcing.
@inproceedings{Toutanova2016ADA, title={A Dataset and Evaluation Metrics for Abstractive Compression of Sentences and Short Paragraphs}, author={Kristina Toutanova and Chris Brockett and Ke M. Tran and Saleema Amershi}, booktitle={EMNLP}, year={2016} }
3
96
2022-03-02T23:29:22
--- annotations_creators: - crowdsourced language_creators: - found language: - en license: - other license_details: Microsoft Research Data License Agreement multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - extended|other-Open-American-National-Corpus-(OANC1) task_categories: - summarization task_ids: [] pretty_name: MsrTextCompression dataset_info: features: - name: source_id dtype: string - name: domain dtype: string - name: source_text dtype: string - name: targets sequence: - name: compressed_text dtype: string - name: judge_id dtype: string - name: num_ratings dtype: int64 - name: ratings sequence: int64 splits: - name: train num_bytes: 5001312 num_examples: 4936 - name: validation num_bytes: 449691 num_examples: 447 - name: test num_bytes: 804536 num_examples: 785 download_size: 0 dataset_size: 6255539 --- # Dataset Card for [Dataset Name] ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://msropendata.com/datasets/f8ce2ec9-7fbd-48f7-a8bb-2d2279373563 - **Repository:** - **Paper:** https://www.microsoft.com/en-us/research/wp-content/uploads/2016/09/Sentence_Compression_final-1.pdf - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset contains sentences and short paragraphs with corresponding shorter (compressed) versions. There are up to five compressions for each input text, together with quality judgements of their meaning preservation and grammaticality. The dataset is derived using source texts from the Open American National Corpus (ww.anc.org) and crowd-sourcing. ### Supported Tasks and Leaderboards Text Summarization ### Languages English ## Dataset Structure ### Data Instances It contains approximately 6,000 source texts with multiple compressions (about 26,000 pairs of source and compressed texts), representing business letters, newswire, journals, and technical documents sampled from the Open American National Corpus (OANC1). - Each source text is accompanied by up to five crowd-sourced rewrites constrained to a preset compression ratio and annotated with quality judgments. Multiple rewrites permit study of the impact of operations on human compression quality and facilitate automatic evaluation. - This dataset is the first to provide compressions at the multi-sentence (two-sentence paragraph) level, which may present a stepping stone to whole document summarization. 
- Many of these two-sentence paragraphs are compressed both as paragraphs and separately sentence-bysentence, offering data that may yield insights into the impact of multi-sentence operations on human compression quality. | Description | Source | Target | Average CPS | Meaning Quality | Grammar Quality | | :------------- | :----------: | -----------: | -----------: | -----------: | -----------: | | 1-Sentence | 3764 | 15523 | 4.12 | 2.78 | 2.81 | | 2-Sentence | 2405 | 10900 | 4.53 | 2.78 | 2.83 | **Note**: Average CPS = Average Compressions per Source Text ### Data Fields ``` {'domain': 'Newswire', 'source_id': '106', 'source_text': '" Except for this small vocal minority, we have just not gotten a lot of groundswell against this from members, " says APA president Philip G. Zimbardo of Stanford University.', 'targets': {'compressed_text': ['"Except for this small vocal minority, we have not gotten a lot of groundswell against this," says APA president Zimbardo.', '"Except for a vocal minority, we haven\'t gotten much groundswell from members, " says Philip G. Zimbardo of Stanford University.', 'APA president of Stanford has stated that except for a vocal minority they have not gotten a lot of pushback from members.', 'APA president Philip G. Zimbardo of Stanford says they have not had much opposition against this.'], 'judge_id': ['2', '22', '10', '0'], 'num_ratings': [3, 3, 3, 3], 'ratings': [[6, 6, 6], [11, 6, 6], [6, 11, 6], [6, 11, 11]]}} ``` - source_id: index of article per original dataset - source_text: uncompressed original text - domain: source of the article - targets: - compressed_text: compressed version of `source_text` - judge_id: anonymized ids of crowdworkers who proposed compression - num_ratings: number of ratings available for each proposed compression - ratings: see table below Ratings system (excerpted from authors' README): - 6 = Most important meaning Flawless language (3 on meaning and 3 on grammar as per the paper's terminology) - 7 = Most important meaning Minor errors (3 on meaning and 2 on grammar) - 9 = Most important meaning Disfluent or incomprehensible (3 on meaning and 1 on grammar) - 11 = Much meaning Flawless language (2 on meaning and 3 on grammar) - 12 = Much meaning Minor errors (2 on meaning and 2 on grammar) - 14 = Much meaning Disfluent or incomprehensible (2 on meaning and 1 on grammar) - 21 = Little or none meaning Flawless language (1 on meaning and 3 on grammar) - 22 = Little or none meaning Minor errors (1 on meaning and 2 on grammar) - 24 = Little or none meaning Disfluent or incomprehensible (1 on meaning and 1 on grammar) See **README.txt** from data archive for additional details. ### Data Splits There are 4,936 source texts in the training, 448 in the development, and 785 in the test set. ## Dataset Creation ### Annotations #### Annotation process Compressions were created using UHRS, an inhouse crowd-sourcing system similar to Amazon’s Mechanical Turk, in two annotation rounds, one for shortening and a second to rate compression quality: 1. In the first round, five workers were tasked with abridging each source text by at least 25%, while remaining grammatical and fluent, and retaining the meaning of the original. 2. In the second round, 3-5 judges (raters) were asked to evaluate the grammaticality of each compression on a scale from 1 (major errors, disfluent) through 3 (fluent), and again analogously for meaning preservation on a scale from 1 (orthogonal) through 3 (most important meaning-preserving). 
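A small helper sketch mapping the composite rating codes listed above back to separate meaning/grammar scores on the 1-3 scales used by the judges:
```python
# Composite rating codes -> (meaning, grammar) scores, as listed in the card.
RATING_TO_SCORES = {
    6: (3, 3), 7: (3, 2), 9: (3, 1),
    11: (2, 3), 12: (2, 2), 14: (2, 1),
    21: (1, 3), 22: (1, 2), 24: (1, 1),
}

def average_scores(ratings):
    """ratings: list of composite codes for one compression, e.g. [6, 11, 6]."""
    pairs = [RATING_TO_SCORES[r] for r in ratings]
    meaning = sum(m for m, _ in pairs) / len(pairs)
    grammar = sum(g for _, g in pairs) / len(pairs)
    return meaning, grammar

print(average_scores([6, 11, 6]))   # from the example above -> approximately (2.67, 3.0)
```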
## Additional Information ### Licensing Information Microsoft Research Data License Agreement ### Citation Information @inproceedings{Toutanova2016ADA, title={A Dataset and Evaluation Metrics for Abstractive Compression of Sentences and Short Paragraphs}, author={Kristina Toutanova and Chris Brockett and Ke M. Tran and Saleema Amershi}, booktitle={EMNLP}, year={2016} } ### Contributions Thanks to [@jeromeku](https://github.com/jeromeku) for adding this dataset.
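The composite rating codes listed above can be decoded back into separate meaning and grammar scores. Below is a minimal sketch of such a decoder, assuming exactly the code-to-score mapping and the `targets` layout documented in this card; the helper names are illustrative and not part of the dataset.

```python
# Illustrative decoder for the composite rating codes documented above.
# Mapping: code -> (meaning score, grammar score), both on a 1-3 scale.
RATING_TO_SCORES = {
    6: (3, 3), 7: (3, 2), 9: (3, 1),
    11: (2, 3), 12: (2, 2), 14: (2, 1),
    21: (1, 3), 22: (1, 2), 24: (1, 1),
}

def decode_ratings(ratings):
    """Map a list of composite codes to (meaning, grammar) tuples."""
    return [RATING_TO_SCORES[r] for r in ratings]

def mean_scores(example):
    """Average meaning/grammar score per compression for one example."""
    out = []
    targets = example["targets"]
    for text, ratings in zip(targets["compressed_text"], targets["ratings"]):
        pairs = decode_ratings(ratings)
        meaning = sum(m for m, _ in pairs) / len(pairs)
        grammar = sum(g for _, g in pairs) / len(pairs)
        out.append((text, meaning, grammar))
    return out

# With the sample instance above, decode_ratings([6, 11, 6]) -> [(3, 3), (2, 3), (3, 3)],
# i.e. a mean meaning score of ~2.67 and a mean grammar score of 3.0.
```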
7,664
[ [ -0.035247802734375, -0.028656005859375, 0.0144805908203125, 0.033355712890625, -0.024169921875, -0.00485992431640625, -0.03143310546875, -0.0280609130859375, 0.035369873046875, 0.01194000244140625, -0.043304443359375, -0.042388916015625, -0.06549072265625, 0...
vctk
2022-11-03T16:16:04.000Z
[ "task_categories:automatic-speech-recognition", "annotations_creators:expert-generated", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:cc-by-4.0", "region:us" ]
null
The CSTR VCTK Corpus includes speech data uttered by 110 English speakers with various accents.
@inproceedings{Veaux2017CSTRVC, title = {CSTR VCTK Corpus: English Multi-speaker Corpus for CSTR Voice Cloning Toolkit}, author = {Christophe Veaux and Junichi Yamagishi and Kirsten MacDonald}, year = 2017 }
8
96
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language_creators: - crowdsourced language: - en license: - cc-by-4.0 multilinguality: - monolingual pretty_name: VCTK size_categories: - 10K<n<100K source_datasets: - original task_categories: - automatic-speech-recognition task_ids: [] paperswithcode_id: vctk train-eval-index: - config: main task: automatic-speech-recognition task_id: speech_recognition splits: train_split: train col_mapping: file: path text: text metrics: - type: wer name: WER - type: cer name: CER dataset_info: features: - name: speaker_id dtype: string - name: audio dtype: audio: sampling_rate: 48000 - name: file dtype: string - name: text dtype: string - name: text_id dtype: string - name: age dtype: string - name: gender dtype: string - name: accent dtype: string - name: region dtype: string - name: comment dtype: string config_name: main splits: - name: train num_bytes: 40103111 num_examples: 88156 download_size: 11747302977 dataset_size: 40103111 --- # Dataset Card for VCTK ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Edinburg DataShare](https://doi.org/10.7488/ds/2645) - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This CSTR VCTK Corpus includes speech data uttered by 110 English speakers with various accents. Each speaker reads out about 400 sentences, which were selected from a newspaper, the rainbow passage and an elicitation paragraph used for the speech accent archive. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances A data point comprises the path to the audio file, called `file` and its transcription, called `text`. ``` { 'speaker_id': 'p225', 'text_id': '001', 'text': 'Please call Stella.', 'age': '23', 'gender': 'F', 'accent': 'English', 'region': 'Southern England', 'file': '/datasets/downloads/extracted/8ed7dad05dfffdb552a3699777442af8e8ed11e656feb277f35bf9aea448f49e/wav48_silence_trimmed/p225/p225_001_mic1.flac', 'audio': { 'path': '/datasets/downloads/extracted/8ed7dad05dfffdb552a3699777442af8e8ed11e656feb277f35bf9aea448f49e/wav48_silence_trimmed/p225/p225_001_mic1.flac', 'array': array([0.00485229, 0.00689697, 0.00619507, ..., 0.00811768, 0.00836182, 0.00854492], dtype=float32), 'sampling_rate': 48000 }, 'comment': '' } ``` Each audio file is a single-channel FLAC with a sample rate of 48000 Hz. 
### Data Fields Each row consists of the following fields: - `speaker_id`: Speaker ID - `audio`: Audio recording - `file`: Path to audio file - `text`: Text transcription of corresponding audio - `text_id`: Text ID - `age`: Speaker's age - `gender`: Speaker's gender - `accent`: Speaker's accent - `region`: Speaker's region, if annotation exists - `comment`: Miscellaneous comments, if any ### Data Splits The dataset has no predefined splits. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information The dataset consists of people who have donated their voice online. You agree to not attempt to determine the identity of speakers in this dataset. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information Public Domain, Creative Commons Attribution 4.0 International Public License ([CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/legalcode)) ### Citation Information ```bibtex @inproceedings{Veaux2017CSTRVC, title = {CSTR VCTK Corpus: English Multi-speaker Corpus for CSTR Voice Cloning Toolkit}, author = {Christophe Veaux and Junichi Yamagishi and Kirsten MacDonald}, year = 2017 } ``` ### Contributions Thanks to [@jaketae](https://github.com/jaketae) for adding this dataset.
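As a quick illustration of the fields above, the sketch below loads the corpus with the `datasets` library and inspects one utterance. It assumes the `main` config from the YAML header; note that the download is large (roughly 11 GB).

```python
from datasets import load_dataset

# Load the "main" config described in this card (large download).
vctk = load_dataset("vctk", "main", split="train")

sample = vctk[0]
audio = sample["audio"]  # dict with "array", "path" and "sampling_rate"

print(sample["speaker_id"], sample["accent"], "-", sample["text"])
print("sampling rate:", audio["sampling_rate"])            # 48000 per the card
print("duration (s):", len(audio["array"]) / audio["sampling_rate"])
```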
5,477
[ [ -0.034332275390625, -0.044921875, 0.0132904052734375, 0.0259552001953125, -0.01358795166015625, 0.006038665771484375, -0.03631591796875, -0.0182342529296875, 0.036346435546875, 0.0477294921875, -0.048919677734375, -0.07598876953125, -0.039337158203125, 0.012...
yoruba_text_c3
2023-06-16T15:06:58.000Z
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:language-modeling", "task_ids:masked-language-modeling", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:y...
null
Yoruba Text C3 is the largest Yoruba texts collected and used to train FastText embeddings in the YorubaTwi Embedding paper: https://www.aclweb.org/anthology/2020.lrec-1.335/
@inproceedings{alabi-etal-2020-massive, title = "Massive vs. Curated Embeddings for Low-Resourced Languages: the Case of Yoruba and {T}wi", author = "Alabi, Jesujoba and Amponsah-Kaakyire, Kwabena and Adelani, David and Espa{\\~n}a-Bonet, Cristina", booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference", month = may, year = "2020", address = "Marseille, France", publisher = "European Language Resources Association", url = "https://www.aclweb.org/anthology/2020.lrec-1.335", pages = "2754--2762", language = "English", ISBN = "979-10-95546-34-4", }
1
96
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language_creators: - found language: - yo license: - cc-by-nc-4.0 multilinguality: - monolingual size_categories: - 100K<n<1M source_datasets: - original task_categories: - text-generation - fill-mask task_ids: - language-modeling - masked-language-modeling paperswithcode_id: null pretty_name: Yorùbá Text C3 dataset_info: - config_name: plain_text features: - name: text dtype: string splits: - name: train num_bytes: 77094396 num_examples: 562238 download_size: 75407454 dataset_size: 77094396 - config_name: yoruba_text_c3 features: - name: text dtype: string splits: - name: train num_bytes: 77094396 num_examples: 562238 download_size: 75407454 dataset_size: 77094396 --- # Dataset Card for Yorùbá Text C3 ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Repository:** https://github.com/ajesujoba/YorubaTwi-Embedding/ - **Paper:** https://aclanthology.org/2020.lrec-1.335/ - **Leaderboard:** - **Point of Contact:** [Jesujoba Alabi](mailto:alabijesujoba@gmail.com) ### Dataset Summary Yorùbá Text C3 was collected from various sources from the web (Bible, JW300, books, news articles, wikipedia, etc) to compare pre-trained word embeddings (Fasttext and BERT) and embeddings and embeddings trained on curated Yorùbá Texts. The dataset consists of clean texts (i.e texts with proper Yorùbá diacritics) like the Bible & JW300 and noisy texts ( with incorrect or absent diacritics) from other online sources like Wikipedia, BBC Yorùbá, and VON Yorùbá ### Supported Tasks and Leaderboards For training word embeddings and language models on Yoruba texts. ### Languages The language supported is Yorùbá. ## Dataset Structure ### Data Instances A data point is a sentence in each line. { 'text': 'lílo àkàbà — ǹjẹ́ o máa ń ṣe àyẹ̀wò wọ̀nyí tó lè dáàbò bò ẹ́' } ### Data Fields - `text`: a `string` feature. a sentence text per line ### Data Splits Contains only the training split. ## Dataset Creation ### Curation Rationale The data was created to help introduce resources to new language - Yorùbá. ### Source Data #### Initial Data Collection and Normalization The dataset comes from various sources of the web like Bible, JW300, books, news articles, wikipedia, etc. See Table 1 in the [paper](https://www.aclweb.org/anthology/2020.lrec-1.335/) for the summary of the dataset and statistics #### Who are the source language producers? 
[Jehovah Witness](https://www.jw.org/yo/) (JW300) [Yorùbá Bible](http://www.bible.com/) [Yorùbá Wikipedia](dumps.wikimedia.org/yowiki) [BBC Yorùbá](bbc.com/yoruba) [VON Yorùbá](https://von.gov.ng/) [Global Voices Yorùbá]( yo.globalvoices.org) And other sources, see https://www.aclweb.org/anthology/2020.lrec-1.335/ ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases The dataset is biased to the religion domain (Christianity) because of the inclusion of JW300 and the Bible. ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators The data sets were curated by Jesujoba Alabi and David Adelani, students of Saarland University, Saarbrücken, Germany . ### Licensing Information The data is under the [Creative Commons Attribution-NonCommercial 4.0 ](https://creativecommons.org/licenses/by-nc/4.0/legalcode) ### Citation Information ``` @inproceedings{alabi-etal-2020-massive, title = "Massive vs. Curated Embeddings for Low-Resourced Languages: the Case of {Y}or{\`u}b{\'a} and {T}wi", author = "Alabi, Jesujoba and Amponsah-Kaakyire, Kwabena and Adelani, David and Espa{\~n}a-Bonet, Cristina", booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference", month = may, year = "2020", address = "Marseille, France", publisher = "European Language Resources Association", url = "https://www.aclweb.org/anthology/2020.lrec-1.335", pages = "2754--2762", abstract = "The success of several architectures to learn semantic representations from unannotated text and the availability of these kind of texts in online multilingual resources such as Wikipedia has facilitated the massive and automatic creation of resources for multiple languages. The evaluation of such resources is usually done for the high-resourced languages, where one has a smorgasbord of tasks and test sets to evaluate on. For low-resourced languages, the evaluation is more difficult and normally ignored, with the hope that the impressive capability of deep learning architectures to learn (multilingual) representations in the high-resourced setting holds in the low-resourced setting too. In this paper we focus on two African languages, Yor{\`u}b{\'a} and Twi, and compare the word embeddings obtained in this way, with word embeddings obtained from curated corpora and a language-dependent processing. We analyse the noise in the publicly available corpora, collect high quality and noisy data for the two languages and quantify the improvements that depend not only on the amount of data but on the quality too. We also use different architectures that learn word representations both from surface forms and characters to further exploit all the available information which showed to be important for these languages. For the evaluation, we manually translate the wordsim-353 word pairs dataset from English into Yor{\`u}b{\'a} and Twi. We extend the analysis to contextual word embeddings and evaluate multilingual BERT on a named entity recognition task. For this, we annotate with named entities the Global Voices corpus for Yor{\`u}b{\'a}. 
As output of the work, we provide corpora, embeddings and the test suits for both languages.", language = "English", ISBN = "979-10-95546-34-4", } ``` ### Contributions Thanks to [@dadelani](https://github.com/dadelani) for adding this dataset.
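For orientation, here is a small sketch of loading the corpus and applying the kind of clean/noisy distinction described above (texts with or without diacritics). The config name comes from the YAML header; the combining-mark heuristic is purely illustrative and not part of the dataset.

```python
import unicodedata
from datasets import load_dataset

# Load the plain_text config listed in the YAML header (train split only).
ds = load_dataset("yoruba_text_c3", "plain_text", split="train")

def has_diacritics(text: str) -> bool:
    """Rough check: does the line contain any combining marks (e.g. tone marks)?"""
    return any(unicodedata.combining(ch) for ch in unicodedata.normalize("NFD", text))

# Count diacritized lines in a small sample of the corpus.
sample = ds.select(range(1000))
n_marked = sum(has_diacritics(row["text"]) for row in sample)
print(f"{n_marked}/1000 sampled lines carry diacritics")
```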
7,163
[ [ -0.040985107421875, -0.07025146484375, 0.006374359130859375, 0.012847900390625, -0.02117919921875, -0.001129150390625, -0.04901123046875, -0.0435791015625, 0.038604736328125, 0.0240020751953125, -0.03668212890625, -0.0400390625, -0.055938720703125, 0.0143127...
KETI-AIR/aihub
2021-09-21T17:40:36.000Z
[ "region:us" ]
KETI-AIR
0
96
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
gpt3mix/rt20
2021-05-18T09:04:24.000Z
[ "region:us" ]
gpt3mix
null
null
0
96
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.014984130859375, 0.057220458984375, 0.0288238525390625, -0.03509521484375, 0.04656982421875, 0.052520751953125, 0.00506591796875, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060455322265625, 0.03793334...
vblagoje/lfqa
2021-10-17T13:44:46.000Z
[ "region:us" ]
vblagoje
null
null
13
96
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.014984130859375, 0.057220458984375, 0.0288238525390625, -0.03509521484375, 0.04656982421875, 0.052520751953125, 0.00506591796875, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060455322265625, 0.03793334...
ipipan/polqa
2023-09-09T13:37:44.000Z
[ "task_categories:question-answering", "task_categories:text-retrieval", "task_categories:text2text-generation", "task_ids:open-domain-qa", "task_ids:document-retrieval", "task_ids:abstractive-qa", "annotations_creators:expert-generated", "size_categories:10K<n<100K", "language:pl", "license:cc-by-...
ipipan
PolQA is the first Polish dataset for OpenQA. It consists of 7,000 questions, 87,525 manually labeled evidence passages, and a corpus of over 7 million candidate passages.
@misc{rybak2022improving, title={Improving Question Answering Performance through Manual Annotation: Costs, Benefits and Strategies}, author={Piotr Rybak and Piotr Przybyła and Maciej Ogrodniczuk}, year={2022}, eprint={2212.08897}, archivePrefix={arXiv}, primaryClass={cs.CL} }
3
96
2022-12-17T15:03:58
--- task_categories: - question-answering - text-retrieval - text2text-generation task_ids: - open-domain-qa - document-retrieval - abstractive-qa language: - pl pretty_name: PolQA size_categories: - 10K<n<100K annotations_creators: - expert-generated license: cc-by-sa-4.0 --- # Dataset Card for PolQA Dataset ## Dataset Description - **Paper:** [Improving Question Answering Performance through Manual Annotation: Costs, Benefits and Strategies](https://arxiv.org/abs/2212.08897) - **Point of Contact:** [Piotr Rybak](mailto:piotr.cezary.rybak@gmail.com) ### Dataset Summary PolQA is the first Polish dataset for open-domain question answering. It consists of 7,000 questions, 87,525 manually labeled evidence passages, and a corpus of over 7 million candidate passages. The dataset can be used to train both a passage retriever and an abstractive reader. ### Supported Tasks and Leaderboards - `open-domain-qa`: The dataset can be used to train a model for open-domain question answering. Success on this task is typically measured using [metric defined during PolEval 2021](https://2021.poleval.pl/tasks/task4). - `document-retrieval`: The dataset can be used to train a model for document retrieval. Success on this task is typically measured by [top-k retrieval accuracy](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.top_k_accuracy_score.html) or [NDCG](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.ndcg_score.html). - `abstractive-qa`: The dataset can be used to train a model for abstractive question answering. Success on this task is typically measured using [metric defined during PolEval 2021](https://2021.poleval.pl/tasks/task4). ### Languages The text is in Polish, as spoken by the host of the [Jeden z Dziesięciu](https://pl.wikipedia.org/wiki/Jeden_z_dziesi%C4%99ciu) TV show (questions) and [Polish Wikipedia](https://pl.wikipedia.org/) editors (passages). The BCP-47 code for Polish is pl-PL. ## Dataset Structure ### Data Instances The main part of the dataset consists of manually annotated question-passage pairs. For each instance, there is a `question`, a passage (`passage_id`, `passage_title`, `passage_text`), and a boolean indicator if the passage is `relevant` for the given question (i.e. does it contain the answers). For each `question` there is a list of possible `answers` formulated in a natural language, in a way a Polish speaker would answer the questions. It means that the answers might contain prepositions, be inflected, and contain punctuation. In some cases, the answer might have multiple correct variants, e.g. numbers are written as numerals and words, synonyms, abbreviations and their expansions. Additionally, we provide a classification of each question-answer pair based on the `question_formulation`, the `question_type`, and the `entity_type/entity_subtype`, according to the taxonomy proposed by [Maciej Ogrodniczuk and Piotr Przybyła (2021)](http://nlp.ipipan.waw.pl/Bib/ogr:prz:21:poleval.pdf). ``` { 'question_id': 6, 'passage_title': 'Mumbaj', 'passage_text': 'Mumbaj lub Bombaj (marathi मुंबई, trb.: Mumbaj; ang. Mumbai; do 1995 Bombay) – stolica indyjskiego stanu Maharasztra, położona na wyspie Salsette, na Morzu Arabskim.', 'passage_wiki': 'Mumbaj lub Bombaj (mr. मुंबई, trb.: "Mumbaj"; ang. Mumbai; do 1995 Bombay) – stolica indyjskiego stanu Maharasztra, położona na wyspie Salsette, na Morzu Arabskim. Wraz z miastami satelitarnymi tworzy najludniejszą po Delhi aglomerację liczącą 23 miliony mieszkańców. 
Dzięki naturalnemu położeniu jest to największy port morski kraju. Znajdują się tutaj także najsilniejsze giełdy Azji Południowej: National Stock Exchange of India i Bombay Stock Exchange.', 'passage_id': '42609-0', 'duplicate': False, 'question': 'W którym państwie leży Bombaj?', 'relevant': True, 'annotated_by': 'Igor', 'answers': "['w Indiach', 'Indie']", 'question_formulation': 'QUESTION', 'question_type': 'SINGLE ENTITY', 'entity_type': 'NAMED', 'entity_subtype': 'COUNTRY', 'split': 'train', 'passage_source': 'human' } ``` The second part of the dataset is a corpus of Polish Wikipedia (March 2022 snapshot) passages. The raw Wikipedia snapshot was parsed using [WikiExtractor](https://github.com/attardi/wikiextractor) and split into passages at the ends of the paragraphs or if the passage was longer than 500 characters. ``` { 'id': '42609-0', 'title': 'Mumbaj', 'text': 'Mumbaj lub Bombaj (mr. मुंबई, trb.: "Mumbaj"; ang. Mumbai; do 1995 Bombay) – stolica indyjskiego stanu Maharasztra, położona na wyspie Salsette, na Morzu Arabskim. Wraz z miastami satelitarnymi tworzy najludniejszą po Delhi aglomerację liczącą 23 miliony mieszkańców. Dzięki naturalnemu położeniu jest to największy port morski kraju. Znajdują się tutaj także najsilniejsze giełdy Azji Południowej: National Stock Exchange of India i Bombay Stock Exchange.' } ``` ### Data Fields Question-passage pairs: - `question_id`: an integer id of the question - `passage_title`: a string containing the title of the Wikipedia article - `passage_text`: a string containing the passage text as extracted by the human annotator - `passage_wiki`: a string containing the passage text as it can be found in the provided Wikipedia corpus. Empty if the passage doesn't exist in the corpus. - `passage_id`: a string containing the id of the passage from the provided Wikipedia corpus. Empty if the passage doesn't exist in the corpus. - `duplicate`: a boolean flag representing whether a question-passage pair is duplicated in the dataset. This occurs when the same passage was found in multiple passage sources. - `question`: a string containing the question - `relevant`: a boolean flag representing whether a passage is relevant to the question (i.e. does it contain the answers) - `annotated_by`: a string containing the name of the annotator who verified the relevance of the pair - `answers`: a string containing a list of possible short answers to the question - `question_formulation`: a string containing a kind of expression used to request information. One of the following: - `QUESTION`, e.g. *What is the name of the first letter of the Greek alphabet?* - `COMMAND`, e.g. *Expand the abbreviation ’CIA’.* - `COMPOUND`, e.g. *This French writer, born in the 19th century, is considered a pioneer of sci-fi literature. What is his name?* - `question_type`: a string indicating what type of information is sought by the question. One of the following: - `SINGLE ENTITY`, e.g. *Who is the hero in the Tomb Rider video game series?* - `MULTIPLE ENTITIES`, e.g. *Which two seas are linked by the Corinth Canal?* - `ENTITY CHOICE`, e.g. *Is "Sombrero" a type of dance, a hat, or a dish?* - `YES/NO`, e.g. *When the term of office of the Polish Sejm is terminated, does it apply to the Senate as well?* - `OTHER NAME`, e.g. *What was the nickname of Louis I, the King of the Franks?* - `GAP FILLING`, e.g. *Finish the proverb: "If you fly with the crows... ".* - `entity_type`: a string containing a type of the sought entity. 
One of the following: `NAMED`, `UNNAMED`, or `YES/NO`. - `entity_subtype`: a string containing a subtype of the sought entity. Can take one of the 34 different values. - `split`: a string containing the split of the dataset. One of the following: `train`, `valid`, or `test`. - `passage_source`: a string containing the source of the passage. One of the following: - `human`: the passage was proposed by a human annotator using any internal (i.e. Wikipedia search) or external (e.g. Google) search engines and any keywords or queries they considered useful - `hard-negatives`: the passage was proposed using a neural retriever trained on the passages found by the human annotators - `zero-shot`: the passage was proposed by the BM25 retriever and re-ranked using [multilingual cross-encoder](https://huggingface.co/unicamp-dl/mMiniLM-L6-v2-mmarco-v2) Corpus of passages: - `id`: a string representing the Wikipedia article id and the index of extracted passage. Matches the `passage_id` from the main part of the dataset. - `title`: a string containing the title of the Wikipedia article. Matches the `passage_title` from the main part of the dataset. - `text`: a string containing the passage text. Matches the `passage_wiki` from the main part of the dataset. ### Data Splits The questions are assigned into one of three splits: `train`, `validation`, and `test`. The `validation` and `test` questions are randomly sampled from the `test-B` dataset from the [PolEval 2021](https://2021.poleval.pl/tasks/task4) competition. | | # questions | # positive passages | # negative passages | |------------|------------:|--------------------:|--------------------:| | train | 5,000 | 27,131 | 34,904 | | validation | 1,000 | 5,839 | 6,927 | | test | 1,000 | 5,938 | 6,786 | ## Dataset Creation ### Curation Rationale The PolQA dataset was created to support and promote the research in the open-domain question answering for Polish. It also serves as a benchmark to evaluate OpenQA systems. ### Source Data #### Initial Data Collection and Normalization The majority of questions come from two existing resources, the 6,000 questions from the [PolEval 2021 shared task on QA](https://2021.poleval.pl/tasks/task4) and additional 1,000 questions gathered by one of the shared task [participants](http://poleval.pl/files/poleval2021.pdf#page=151). Originally, the questions come from collections associated with TV shows, both officially published and gathered online by their fans, as well as questions used in actual quiz competitions, on TV or online. The evidence passages come from the Polish Wikipedia (March 2022 snapshot). The raw Wikipedia snapshot was parsed using [WikiExtractor](https://github.com/attardi/wikiextractor) and split into passages at the ends of the paragraphs or if the passage was longer than 500 characters. #### Who are the source language producers? The questions come from various sources and their authors are unknown but are mostly analogous (or even identical) to questions asked during the [Jeden z Dziesięciu](https://pl.wikipedia.org/wiki/Jeden_z_dziesi%C4%99ciu) TV show. The passages were written by the editors of the Polish Wikipedia. ### Annotations #### Annotation process Two approaches were used to annotate the question-passage pairs. Each of them consists of two phases: the retrieval of candidate passages and the manual verification of their relevance. In the first approach, we asked annotators to use internal (i.e. Wikipedia search) or external (e.g. 
Google) search engines to find up to five relevant passages using any keywords or queries they consider useful (`passage_source="human"`). Based on those passages, we trained the neural retriever to extend the number of relevant passages, as well as to retrieve the hard negatives (`passage_source="hard-negatives"`). In the second approach, the passage candidates were proposed by the BM25 retriever and re-ranked using [multilingual cross-encoder](https://huggingface.co/unicamp-dl/mMiniLM-L6-v2-mmarco-v2) (`passage_source="zero-shot"`). In both cases, all proposed question-passage pairs were manually verified by the annotators. #### Who are the annotators? The annotation team consisted of 16 annotators, all native Polish speakers, most of them having linguistic backgrounds and previous experience as an annotator. ### Personal and Sensitive Information The dataset does not contain any personal or sensitive information. ## Considerations for Using the Data ### Social Impact of Dataset This dataset was created to promote the research in the open-domain question answering for Polish and allow developing question answering systems. ### Discussion of Biases The passages proposed by the `hard-negative` and `zero-shot` methods are bound to be easier to retrieve by retrievers since they were proposed by such. To mitigate this bias, we include the passages found by the human annotators in an unconstrained way (`passage_source="human"`). We hypothesize that it will result in more unbiased and diverse examples. Moreover, we asked the annotators to find not one but up to five passages, preferably from different articles to even further increase passage diversity. ### Other Known Limitations The PolQA dataset focuses on trivia questions which might limit its usefulness in real-world applications since neural retrievers generalize poorly to other domains. ## Additional Information ### Dataset Curators The PolQA dataset was developed by Piotr Rybak, Piotr Przybyła, and Maciej Ogrodniczuk from the [Institute of Computer Science, Polish Academy of Sciences](http://zil.ipipan.waw.pl/). This work was supported by the European Regional Development Fund as a part of 2014–2020 Smart Growth Operational Programme, CLARIN — Common Language Resources and Technology Infrastructure, project no. POIR.04.02.00-00C002/19. ### Licensing Information CC BY-SA 4.0 ### Citation Information ``` @misc{rybak2022improving, title={Improving Question Answering Performance through Manual Annotation: Costs, Benefits and Strategies}, author={Piotr Rybak and Piotr Przybyła and Maciej Ogrodniczuk}, year={2022}, eprint={2212.08897}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
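Two practical details follow from the field descriptions above: the `answers` column is stored as a string that contains a Python-style list, and retrieval quality is reported as top-k accuracy. The sketch below shows one way to handle both; it is only an illustration under those assumptions, the loading call may need adjusting to the actual repo layout, and the ranking would come from whatever retriever is being evaluated, not from the dataset itself.

```python
import ast
from collections import defaultdict
from datasets import load_dataset

# Loading call is an assumption; adjust config/split names to the actual repo layout.
polqa = load_dataset("ipipan/polqa")
rows = polqa["train"] if "train" in polqa else next(iter(polqa.values()))

# 1) Parse the `answers` string into an actual list of acceptable answers.
answers = ast.literal_eval(rows[0]["answers"])   # e.g. ['w Indiach', 'Indie']

# 2) Collect the gold relevant passage ids per question from the labeled pairs.
relevant_ids = defaultdict(set)
for row in rows:
    if row["relevant"] and row["passage_id"]:
        relevant_ids[row["question_id"]].add(row["passage_id"])

def top_k_accuracy(ranked_ids_per_question, relevant_ids_per_question, k=10):
    """Fraction of questions with at least one relevant passage in the top k."""
    hits = 0
    for qid, ranked in ranked_ids_per_question.items():
        if relevant_ids_per_question.get(qid, set()) & set(ranked[:k]):
            hits += 1
    return hits / max(len(ranked_ids_per_question), 1)
```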
13,467
[ [ -0.052490234375, -0.07305908203125, 0.033905029296875, 0.006771087646484375, -0.0287933349609375, -0.00724029541015625, -0.0172271728515625, -0.025390625, 0.037750244140625, 0.038818359375, -0.05194091796875, -0.04803466796875, -0.027099609375, 0.03973388671...
c-s-ale/alpaca-gpt4-data
2023-04-07T19:27:51.000Z
[ "task_categories:text-generation", "size_categories:10K<n<100K", "language:en", "license:cc-by-4.0", "gpt", "alpaca", "fine-tune", "instruct-tune", "instruction", "arxiv:2304.03277", "region:us" ]
c-s-ale
null
null
17
96
2023-04-07T18:20:58
--- dataset_info: features: - name: instruction dtype: string - name: input dtype: string - name: output dtype: string splits: - name: train num_bytes: 40178951 num_examples: 52002 download_size: 24027484 dataset_size: 40178951 license: cc-by-4.0 language: - en pretty_name: Instruction Tuning with GPT-4 size_categories: - 10K<n<100K task_categories: - text-generation tags: - gpt - alpaca - fine-tune - instruct-tune - instruction --- # Dataset Description - **Project Page:** https://instruction-tuning-with-gpt-4.github.io - **Repo:** https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM - **Paper:** https://arxiv.org/abs/2304.03277 # Dataset Card for "alpaca-gpt4-data" All of the work is done by [this team](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM). # Usage and License Notices The data is intended and licensed for research use only. The dataset is CC BY NC 4.0 (allowing only non-commercial use) and models trained using the dataset should not be used outside of research purposes. # Chinese Dataset [Found here](https://huggingface.co/datasets/c-s-ale/alpaca-gpt4-data-zh) # Citation ``` @article{peng2023gpt4llm, title={Instruction Tuning with GPT-4}, author={Baolin Peng, Chunyuan Li, Pengcheng He, Michel Galley, Jianfeng Gao}, journal={arXiv preprint arXiv:2304.03277}, year={2023} } ```
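For readers who want to use the three columns for supervised fine-tuning, here is a minimal sketch that renders them into prompts. The template wording follows the commonly used Alpaca-style prompt and is an assumption, not something this card prescribes.

```python
from datasets import load_dataset

ds = load_dataset("c-s-ale/alpaca-gpt4-data", split="train")

# Alpaca-style templates (an assumption; adapt to your own fine-tuning setup).
WITH_INPUT = (
    "Below is an instruction that describes a task, paired with an input that provides "
    "further context. Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
)
NO_INPUT = (
    "Below is an instruction that describes a task. Write a response that appropriately "
    "completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:\n"
)

def to_prompt(example):
    # Pick the template based on whether the optional `input` column is empty.
    template = WITH_INPUT if example["input"].strip() else NO_INPUT
    return {"prompt": template.format(**example), "completion": example["output"]}

ds = ds.map(to_prompt)
print(ds[0]["prompt"][:200])
```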
1,389
[ [ -0.0192108154296875, -0.048004150390625, 0.028839111328125, 0.021484375, -0.0430908203125, -0.027313232421875, -0.01200103759765625, -0.0302581787109375, -0.0006012916564941406, 0.0238494873046875, -0.053802490234375, -0.06170654296875, -0.041595458984375, 0...
cdminix/libritts-r-aligned
2023-07-02T15:13:39.000Z
[ "task_categories:automatic-speech-recognition", "task_categories:text-to-speech", "annotations_creators:crowdsourced", "language:en", "license:cc-by-4.0", "speech", "audio", "automatic-speech-recognition", "text-to-speech", "arxiv:1904.02882", "arxiv:2211.16049", "region:us" ]
cdminix
Dataset used for loading TTS spectrograms and waveform audio with alignments and a number of configurable "measures", which are extracted from the raw audio.
@article{koizumi2023libritts, title={LibriTTS-R: A Restored Multi-Speaker Text-to-Speech Corpus}, author={Koizumi, Yuma and Zen, Heiga and Karita, Shigeki and Ding, Yifan and Yatabe, Kohei and Morioka, Nobuyuki and Bacchiani, Michiel and Zhang, Yu and Han, Wei and Bapna, Ankur}, journal={arXiv preprint arXiv:2305.18802}, year={2023} } @article{zen2019libritts, title={LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech}, author={Zen, Heiga and Dang, Viet and Clark, Rob and Zhang, Yu and Weiss, Ron J and Jia, Ye and Chen, Zhifeng and Wu, Yonghui}, journal={Interspeech}, year={2019} } @article{https://doi.org/10.48550/arxiv.2211.16049, author = {Minixhofer, Christoph and Klejch, Ondřej and Bell, Peter}, title = {Evaluating and reducing the distance between synthetic and real speech distributions}, year = {2022} }
5
96
2023-06-07T08:35:07
--- pretty_name: LibriTTS Corpus with Forced Alignments annotations_creators: - crowdsourced language: en tags: - speech - audio - automatic-speech-recognition - text-to-speech license: - cc-by-4.0 task_categories: - automatic-speech-recognition - text-to-speech extra_gated_prompt: "When using this dataset to download LibriTTS, you agree to the terms on https://www.openslr.org" --- > This dataset is identical to **[cdminix/libritts-aligned](https://huggingface.co/datasets/cdminix/libritts-aligned)** except it uses the newly released LibriTTS-R corpus. Please cite **[Y. Koizumi, et al., "LibriTTS-R: Restoration of a Large-Scale Multi-Speaker TTS Corpus", Interspeech 2023](https://google.github.io/df-conformer/librittsr/)** *When using this dataset to download LibriTTS-R, make sure you agree to the terms on https://www.openslr.org* # Dataset Card for LibriTTS-R with Forced Alignments (and Measures) This dataset downloads LibriTTS-R and preprocesses it on your machine to create alignments using [montreal forced aligner](https://montreal-forced-aligner.readthedocs.io/en/latest/). You need to run ``pip install alignments phones`` before using this dataset. When running this the first time, it can take an hour or two, but subsequent runs will be lightning fast. ## Requirements - ``pip install alignments phones`` **(required)** - ``pip install speech-collator`` (optional) *Note: version >=0.0.15 of alignments is required for this corpus* ## Example Item ```json { 'id': '100_122655_000073_000002.wav', 'speaker': '100', 'text': 'the day after, diana and mary quitted it for distant b.', 'start': 0.0, 'end': 3.6500000953674316, 'phones': ['[SILENCE]', 'ð', 'ʌ', '[SILENCE]', 'd', 'eɪ', '[SILENCE]', 'æ', 'f', 't', 'ɜ˞', '[COMMA]', 'd', 'aɪ', 'æ', 'n', 'ʌ', '[SILENCE]', 'æ', 'n', 'd', '[SILENCE]', 'm', 'ɛ', 'ɹ', 'i', '[SILENCE]', 'k', 'w', 'ɪ', 't', 'ɪ', 'd', '[SILENCE]', 'ɪ', 't', '[SILENCE]', 'f', 'ɜ˞', '[SILENCE]', 'd', 'ɪ', 's', 't', 'ʌ', 'n', 't', '[SILENCE]', 'b', 'i', '[FULL STOP]'], 'phone_durations': [5, 2, 4, 0, 5, 13, 0, 16, 7, 5, 20, 2, 6, 9, 15, 4, 2, 0, 11, 3, 5, 0, 3, 8, 9, 8, 0, 13, 3, 5, 3, 6, 4, 0, 8, 5, 0, 9, 5, 0, 7, 5, 6, 7, 4, 5, 10, 0, 3, 35, 9], 'audio': '/dev/shm/metts/train-clean-360-alignments/100/100_122655_000073_000002.wav' } ``` The phones are IPA phones, and the phone durations are in frames (assuming a hop length of 256, sample rate of 22050 and window length of 1024). These attributes can be changed using the ``hop_length``, ``sample_rate`` and ``window_length`` arguments to ``LibriTTSAlign``. ## Data Collator This dataset comes with a data collator which can be used to create batches of data for training. 
It can be installed using ``pip install speech-collator`` ([MiniXC/speech-collator](https://www.github.com/MiniXC/speech-collator)) and can be used as follows: ```python import json from datasets import load_dataset from speech_collator import SpeechCollator from torch.utils.data import DataLoader dataset = load_dataset('cdminix/libritts-aligned', split="train") speaker2idx = json.load(open("speaker2idx.json")) phone2idx = json.load(open("phone2idx.json")) collator = SpeechCollator( speaker2idx=speaker2idx, phone2idx=phone2idx, ) dataloader = DataLoader(dataset, collate_fn=collator.collate_fn, batch_size=8) ``` You can either download the ``speaker2idx.json`` and ``phone2idx.json`` files from [here](https://huggingface.co/datasets/cdminix/libritts-aligned/tree/main/data) or create them yourself using the following code: ```python import json from datasets import load_dataset from speech_collator import SpeechCollator, create_speaker2idx, create_phone2idx dataset = load_dataset("cdminix/libritts-aligned", split="train") # Create speaker2idx and phone2idx speaker2idx = create_speaker2idx(dataset, unk_idx=0) phone2idx = create_phone2idx(dataset, unk_idx=0) # save to json with open("speaker2idx.json", "w") as f: json.dump(speaker2idx, f) with open("phone2idx.json", "w") as f: json.dump(phone2idx, f) ``` ### Measures When using ``speech-collator`` you can also use the ``measures`` argument to specify which measures to use. The following example extracts Pitch and Energy on the fly. ```python import json from torch.utils.data import DataLoader from datasets import load_dataset from speech_collator import SpeechCollator, create_speaker2idx, create_phone2idx from speech_collator.measures import PitchMeasure, EnergyMeasure dataset = load_dataset("cdminix/libritts-aligned", split="train") speaker2idx = json.load(open("data/speaker2idx.json")) phone2idx = json.load(open("data/phone2idx.json")) # Create SpeechCollator speech_collator = SpeechCollator( speaker2idx=speaker2idx, phone2idx=phone2idx, measures=[PitchMeasure(), EnergyMeasure()], return_keys=["measures"] ) # Create DataLoader dataloader = DataLoader( dataset, batch_size=8, collate_fn=speech_collator.collate_fn, ) ``` COMING SOON: Detailed documentation on how to use the measures at [MiniXC/speech-collator](https://www.github.com/MiniXC/speech-collator). ## Splits This dataset has the following splits: - ``train``: All the training data, except one sample per speaker which is used for validation. - ``dev``: The validation data, one sample per speaker. - ``train.clean.100``: Training set derived from the original materials of the train-clean-100 subset of LibriSpeech. - ``train.clean.360``: Training set derived from the original materials of the train-clean-360 subset of LibriSpeech. - ``train.other.500``: Training set derived from the original materials of the train-other-500 subset of LibriSpeech. - ``dev.clean``: Validation set derived from the original materials of the dev-clean subset of LibriSpeech. - ``dev.other``: Validation set derived from the original materials of the dev-other subset of LibriSpeech. - ``test.clean``: Test set derived from the original materials of the test-clean subset of LibriSpeech. - ``test.other``: Test set derived from the original materials of the test-other subset of LibriSpeech. ## Environment Variables There are a few environment variables which can be set. - ``LIBRITTS_VERBOSE``: If set, will print out more information about the dataset creation process. 
- ``LIBRITTS_MAX_WORKERS``: The number of workers to use when creating the alignments. Defaults to ``cpu_count()``. - ``LIBRITTS_PATH``: The path to download LibriTTS to. Defaults to the value of ``HF_DATASETS_CACHE``. # Citation When using LibriTTS-R please cite the following papers: - [LibriTTS-R: Restoration of a Large-Scale Multi-Speaker TTS Corpus](https://google.github.io/df-conformer/librittsr/) - [LibriTTS: A Corpus Derived from LibriSpeech for Text-to-Speech](https://arxiv.org/abs/1904.02882) - [Montreal Forced Aligner: Trainable text-speech alignment using Kaldi](https://www.researchgate.net/publication/319185277_Montreal_Forced_Aligner_Trainable_Text-Speech_Alignment_Using_Kaldi) When using the Measures please cite the following paper (ours): - [Evaluating and reducing the distance between synthetic and real speech distributions](https://arxiv.org/abs/2211.16049)
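Since the card states that ``phone_durations`` are frame counts under a hop length of 256 and a sample rate of 22050, converting them to seconds is a one-line calculation. The helper below is a small illustrative sketch, not part of the dataset API.

```python
# Convert the per-phone frame durations documented above into seconds.
# Assumes the default hop length (256) and sample rate (22050) stated in the card.
HOP_LENGTH = 256
SAMPLE_RATE = 22050

def frames_to_seconds(phone_durations, hop_length=HOP_LENGTH, sample_rate=SAMPLE_RATE):
    return [frames * hop_length / sample_rate for frames in phone_durations]

# From the example item above, the first three phones last 5, 2 and 4 frames:
print(frames_to_seconds([5, 2, 4]))   # ~[0.058, 0.023, 0.046] seconds
```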
7,177
[ [ -0.0211639404296875, -0.0291900634765625, 0.003692626953125, 0.0011777877807617188, -0.006496429443359375, -0.0008664131164550781, -0.0262603759765625, -0.01317596435546875, 0.0232696533203125, 0.0209503173828125, -0.04541015625, -0.038238525390625, -0.014694213...
jitx/Methods2Test_java_unit_test_code
2023-08-30T19:31:25.000Z
[ "task_categories:text-generation", "language:en", "license:mit", "unit test", "java", "code", "arxiv:2203.12776", "region:us" ]
jitx
null
null
3
96
2023-08-30T18:59:03
--- license: mit configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* dataset_info: features: - name: target dtype: string - name: src_fm dtype: string - name: src_fm_fc dtype: string - name: src_fm_fc_co dtype: string - name: src_fm_fc_ms dtype: string - name: src_fm_fc_ms_ff dtype: string splits: - name: train num_bytes: 3399525755 num_examples: 624022 - name: test num_bytes: 907751466 num_examples: 156922 download_size: 558984469 dataset_size: 4307277221 task_categories: - text-generation language: - en tags: - unit test - java - code --- ## Dataset Description Microsoft created this large dataset of Java JUnit test cases with their corresponding focal methods. It contains 780k pairs of JUnit test cases and focal methods which were extracted from a total of 91K Java open source projects hosted on GitHub. The mapping between test cases and focal methods is based on heuristic rules and Java developers' best practices. More information can be found here: - [methods2test Github repo](https://github.com/microsoft/methods2test) - [Methods2Test: A dataset of focal methods mapped to test cases](https://arxiv.org/pdf/2203.12776.pdf) ## Dataset Schema ``` target: <TEST_CASE> src_fm: <FOCAL_METHOD> src_fm_fc: <FOCAL_CLASS_NAME> <FOCAL_METHOD> src_fm_fc_co: <FOCAL_CLASS_NAME> <FOCAL_METHOD> <CONSTRUCTORS> src_fm_fc_ms: <FOCAL_CLASS_NAME> <FOCAL_METHOD> <CONSTRUCTORS> <METHOD_SIGNATURES> src_fm_fc_ms_ff: <FOCAL_CLASS_NAME> <FOCAL_METHOD> <CONSTRUCTORS> <METHOD_SIGNATURES> <FIELDS> ``` ## Focal Context - fm: this representation incorporates exclusively the source code of the focal method. Intuitively, this contains the most important information for generating accurate test cases for the given method. - fm+fc: this representation adds the focal class name, which can provide meaningful semantic information to the model. - fm+fc+c: this representation adds the signatures of the constructor methods of the focal class. The idea behind this augmentation is that the test case may require instantiating an object of the focal class in order to properly test the focal method. - fm+fc+c+m: this representation adds the signatures of the other public methods in the focal class. The rationale which motivated this inclusion is that the test case may need to invoke other auxiliary methods within the class (e.g., getters, setters) to set up or tear down the testing environment. - fm+fc+c+m+f: this representation adds the public fields of the focal class. The motivation is that test cases may need to inspect the status of the public fields to properly test a focal method. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/642382bb6e61cda1b3a20983/PYpNYXweRZyFOp6TbNkyt.png) The different levels of focal contexts are the following: ``` FM: focal method FM_FC: focal method + focal class name FM_FC_CO: focal method + focal class name + constructor signatures FM_FC_MS: focal method + focal class name + constructor signatures + public method signatures FM_FC_MS_FF: focal method + focal class name + constructor signatures + public method signatures + public fields ``` ## Limitations The original authors validated the heuristics by inspecting a statistically significant sample (confidence level of 95% within 10% margin of error) of 97 samples from the training set. Two authors independently evaluated the sample, then met to discuss the disagreements. 
They found that 90.72% of the samples have a correct link between the test case and the corresponding focal method. ## Contributions All thanks go to the original authors.
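As a rough sketch of how the columns might feed a text-generation setup, the snippet below pairs the richest focal context (``src_fm_fc_ms_ff``) with the target test case. The column names follow the schema above; the source/target framing itself is illustrative, not prescribed by the dataset.

```python
from datasets import load_dataset

ds = load_dataset("jitx/Methods2Test_java_unit_test_code", split="train")

def to_pair(example):
    # Use the richest focal context (FM_FC_MS_FF) as the model input and the
    # JUnit test case as the generation target.
    return {
        "source": example["src_fm_fc_ms_ff"],
        "target_test": example["target"],
    }

pairs = ds.map(to_pair, remove_columns=ds.column_names)
print(pairs[0]["source"][:120])
print(pairs[0]["target_test"][:120])
```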
3,704
[ [ -0.05511474609375, -0.035186767578125, 0.0295562744140625, 0.03045654296875, -0.005615234375, -0.0177001953125, -0.0098419189453125, -0.047607421875, -0.023468017578125, 0.01119232177734375, -0.036865234375, -0.031005859375, -0.034332275390625, -0.0018520355...
LeoLM/wikitext-en-de
2023-09-28T14:04:12.000Z
[ "size_categories:1K<n<10K", "language:de", "language:en", "license:cc-by-3.0", "arxiv:1609.07843", "region:us" ]
LeoLM
null
null
1
96
2023-09-28T13:39:48
--- license: cc-by-3.0 configs: - config_name: exzellent_de data_files: wiki_de_exzellent.parquet - config_name: featured_en data_files: wiki_en_featured.parquet - config_name: exzellent_de_small data_files: wiki_de_exzellent_small.parquet - config_name: featured_en_small data_files: wiki_en_featured_small.parquet language: - de - en size_categories: - 1K<n<10K --- # German+English Wikitext Wikitext_en_de is a replication of the `wikitext` dataset following the work by [Merity et al. (2016)](https://arxiv.org/abs/1609.07843). It contains (mostly) all articles that Wikipedia classifies as ["exzellent"](https://de.wikipedia.org/wiki/Wikipedia:Exzellente_Artikel) or ["featured"](https://en.wikipedia.org/wiki/Wikipedia:Featured_articles) and can be used, for example, for perplexity evaluation. This dataset was created by first scraping the names of the articles belonging to these categories from Wikipedia. Afterwards, we take a recent Wikipedia dump ("20230901.de" from [`graelo/wikipedia`](https://huggingface.co/datasets/graelo/wikipedia)) and filter the articles to only include those on either list. | Config Name | Num Documents | |-------------|--------------| | exzellent_de | 2822 | | featured_en | 6356 | | exzellent_de_small | 1024 | | featured_en_small | 1024 | The code for creating the datasets is available in this repository ("wikitext_de.py", "wikitext_en.py"). Be aware that this downloads a whole Wikipedia dump, which might take a while depending on your connection.
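Since the card points at perplexity evaluation as the main use case, here is a rough sketch of that workflow with a causal language model. The model name is a placeholder, the assumption that each document sits in a ``text`` column follows from the upstream `graelo/wikipedia` dump, and the simple truncation-based scoring is only illustrative.

```python
import math
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

# Small German config from the table above; "gpt2" is just a placeholder model.
ds = load_dataset("LeoLM/wikitext-en-de", "exzellent_de_small", split="train")
tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2").eval()

nll, n_tokens = 0.0, 0
with torch.no_grad():
    for doc in ds.select(range(8)):  # a handful of documents, for illustration only
        ids = tok(doc["text"], return_tensors="pt", truncation=True, max_length=1024).input_ids
        loss = model(ids, labels=ids).loss          # mean NLL per predicted token
        nll += loss.item() * (ids.shape[1] - 1)
        n_tokens += ids.shape[1] - 1

print("perplexity:", math.exp(nll / n_tokens))
```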
1,513
[ [ -0.058746337890625, -0.04364013671875, 0.003993988037109375, 0.0093231201171875, -0.022491455078125, -0.008697509765625, 0.0011224746704101562, -0.03936767578125, 0.05572509765625, 0.026885986328125, -0.04931640625, -0.0267486572265625, -0.0291748046875, 0.0...
alexrs/alpaca-cleaned-30-clusters
2023-10-16T14:44:34.000Z
[ "region:us" ]
alexrs
null
null
0
96
2023-10-16T14:44:30
--- dataset_info: features: - name: instruction dtype: string - name: output dtype: string - name: input dtype: string - name: cluster dtype: int32 splits: - name: train num_bytes: 40490946 num_examples: 51760 download_size: 24195677 dataset_size: 40490946 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "alpaca-cleaned-30-clusters" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
569
[ [ -0.060882568359375, -0.0204315185546875, 0.0212554931640625, 0.0214996337890625, -0.0196380615234375, -0.0015268325805664062, 0.0180511474609375, -0.0153961181640625, 0.06903076171875, 0.042724609375, -0.0626220703125, -0.06610107421875, -0.0379638671875, -0...
pkr7098/bookcorpus-wikipedia-full
2023-10-31T01:06:21.000Z
[ "region:us" ]
pkr7098
null
null
0
96
2023-10-30T11:59:38
--- dataset_info: config_name: 20220301.en features: - name: text dtype: string splits: - name: train num_bytes: 24500165181 num_examples: 80462898 download_size: 0 dataset_size: 24500165181 configs: - config_name: 20220301.en data_files: - split: train path: 20220301.en/train-* --- # Dataset Card for "bookcorpus-wikipedia-full" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
497
[ [ -0.043212890625, -0.0102386474609375, -0.00128173828125, 0.01332855224609375, -0.02410888671875, -0.006160736083984375, 0.0026378631591796875, -0.010345458984375, 0.051239013671875, 0.04412841796875, -0.061553955078125, -0.06170654296875, -0.0254669189453125, ...
result-kand2-sdxl-wuerst-karlo/b8542650
2023-10-30T15:00:46.000Z
[ "region:us" ]
result-kand2-sdxl-wuerst-karlo
null
null
0
96
2023-10-30T15:00:45
--- dataset_info: features: - name: result dtype: string - name: id dtype: int64 splits: - name: train num_bytes: 179 num_examples: 10 download_size: 1367 dataset_size: 179 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "b8542650" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
455
[ [ -0.04791259765625, -0.007694244384765625, 0.00939178466796875, 0.00965118408203125, -0.02191162109375, -0.01222991943359375, 0.0257568359375, -0.01239013671875, 0.05810546875, 0.039581298828125, -0.045745849609375, -0.037628173828125, -0.038543701171875, -0....
hebrew_projectbenyehuda
2022-11-03T16:15:45.000Z
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:language-modeling", "task_ids:masked-language-modeling", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:...
null
This repository contains a dump of thousands of public domain works in Hebrew, from Project Ben-Yehuda, in plaintext UTF-8 files, with and without diacritics (nikkud). The metadata (pseudocatalogue.csv) file is a list of titles, authors, genres, and file paths, to help you process the dump. All these works are in the public domain, so you are free to make any use of them, and do not need to ask for permission. There are 10078 files, 3181136 lines
@article{, author = {}, title = {Public domain texts from Project Ben-Yehuda}, journal = {}, url = {https://github.com/projectbenyehuda/public_domain_dump}, year = {2020}, }
2
95
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language_creators: - found language: - he license: - mit multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - original task_categories: - text-generation - fill-mask task_ids: - language-modeling - masked-language-modeling paperswithcode_id: null pretty_name: Hebrew Projectbenyehuda dataset_info: features: - name: id dtype: int32 - name: url dtype: string - name: title dtype: string - name: authors dtype: string - name: translators dtype: string - name: original_language dtype: string - name: genre dtype: string - name: source_edition dtype: string - name: text dtype: string splits: - name: train num_bytes: 318732537 num_examples: 10078 download_size: 317749152 dataset_size: 318732537 --- # Dataset Card for Hebrew Projectbenyehuda ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://github.com/projectbenyehuda/public_domain_dump - **Repository:** https://github.com/projectbenyehuda/public_domain_dump - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This repository contains a dump of thousands of public domain works in Hebrew, from Project Ben-Yehuda, in plaintext UTF-8 files, with and without diacritics (nikkud), and in HTML files. The pseudocatalogue.csv file is a list of titles, authors, genres, and file paths, to help you process the dump. The Releases tab contains a downloadable ZIP archive of the full release. The git repo can be used to track individual file changes, or for incremenetal updates. In the ZIPs, each format (plaintext, plaintext stripped of diacritics, and HTML) has a ZIP file containing one directory per author, with all the author's works under that directory. To request changes or improvements to this dump, file an issue against this repository. All these works are in the public domain, so you are free to make any use of them, and do not need to ask for permission. If you would like to give credit, please credit "Project Ben-Yehuda volunteers", and include a link to the site. We'd also love to hear about the uses you've made of this dump, as it encourages us to keep producing the dump. E-mail us with a brief description (and links, if/as appropriate) of your re-use, at editor@benyehuda.org. 
There are 10078 files, 3181136 lines Data Annotation: ### Supported Tasks and Leaderboards [More Information Needed] ### Languages Hebrew ## Dataset Structure ### Data Instances Sample: ``` { 'id': 10, 'url': 'https://raw.githubusercontent.com/projectbenyehuda/public_domain_dump/master/txt/p23/m10.txt', 'title': 'חצי-נחמה', 'authors': 'אחד העם', 'translators': '', 'original_language': '', 'genre': 'מאמרים ומסות', 'source_edition': '', 'text': '\n\n\n\t\n\tחצי-נחמה\n\t\n\n\n\n1\n\nבין כל הצרות שנתחדשו עלינו בעת האחרונה תעשׂה ביחוד רושם מעציב בלב כל איש ישׂראל התחדשות ‘עלילת־הדם’. העלילה הנתעבה הזאת, בכל יָשנה, היתה ותהיה תמיד בעינינו כחדשה, ומימי הבינים ועד עתה תצטין בפעולתה החזקה על רוח עמנו, לא רק במקום המעשׂה, כי אם גם בארצות רחוקות שהגיעה אליהן השמועה.\n\nאמרתי: ‘על רוח עמנו’, כי אמנם רואה אני מקור החזיון הזה לא בסבּות חיצוניות, כי אם עמוק ברוח העם. בימי הבינים, שהיה כלל ישׂראל במקרים כאלה רגיל לחשוב עצמו כעומד במשפט ביחד עם אותם האומללים שעלה עליהם הגורל להיות כפּרותו, – יש מקום אמנם לראות בזה רק תוצאת הסכנה הגשמית הגדולה להכלל כולו, שהיתה כרוכה אז באמת בעקב כל עלילה כזו. גם לפני חמשים שנה, בימי מנוחה ושלוה, שעוררה עלילת דמשׂק רעש גדול כל־כך בארצות המערב, עדיין יש מקום לאמר, כי היתה בזה, להפך, יד הקנאה הגדולה לכבודם וזכויותיהם ששׂררה אז בלבות אחינו המערביים, אשר זה מעט יצאו מעבדות לחרות. אך בימינו אלה הרי מצד אחד אין הסכנה הגשמית גדולה עוד הרבה, ביחוד לקהלות רחוקות, ומצד אחר כבר הורגלנו לשמוע חרפתנו בקור רוח וקנאת כבודנו לא תאכלנו עוד, ואם בכל זאת גם עתה עודנו מתעוררים ומתנודדים בחזקה לשמע ‘עלילת־דם’, ורגש הכלל יתפרץ החוצה מכל עברים להשליך מעליו את החלאה הזאת, – אות הוא, כי לא הפחד ולא הכבוד החיצוני הם המניעים לזה, כי אם רוח העם הוא המרגיש פה את קלונו והוא זה המתעורר והמעורר; כי אעפ"י שבכל יתר הדברים כבר הביאונו צרותינו לאותו המצב שעליו אמר הנשׂיא החכם בימי קדם: ‘אין בשׂר המת מרגיש באיזמל’, – הנה פה אין ‘האיזמל’ חותך את ‘הבשׂר’ בלבד, כי אם עד הנפש יגע…\n\nאבל – ‘אין רע בלא טוב’, כלומר, בלא לקח טוב. גם הרע הגדול הזה שאנו עסוקים בו אינו ריק מלקח טוב, ואנחנו, אשר לא אדונים אנחנו לגורלנו וגם את הטוב גם את הרע נקבל מן החוץ שלא בטובתנו, ראוי לנו לבקש ברעותינו תמיד את התועלת הלמודית הצפונה בהן, והיתה לנו זאת, לפחות, חצי נחמה.\n\n\n\nאחד הכוחות היותר גדולים בחיי החברה הוא – ‘ההסכמה הכללית’. היו ימים שגם הפלוסופים ראו בהסכמה זו מופת נאמן על הדבר המוסכם ונתנו לה מקום בתוך שאר מופתיהם על מציאות האלהות. עתה אמנם יודעים הפלוסופים , שאין שקר ואין אולת אשר לא תוכל לבוא עליו ‘ההסכמה הכללית’, אם אך תנאי החיים נאותים לזה. אבל רק הפלוסופים יודעים זאת, ובעיני ההמון עוד גם עתה אין אַבטוֹריטט גדול מן ‘ההסכמה’: אם ‘כל העולם’ מאמינים שהדבר כן, בודאי כן הוא; ואם אני איני מבינו, אחרים מבינים; ואם אני רואה כעין סתירה לו, הרי ‘הכל’ רואים גם כן ואעפ"כ מאמינים, וכי חכם אני מכל העולם? – זה הוא בקירוב מהלך הרעיונות של האיש הפשוט, בדעת או בלי דעת ברורה, ומתוך כך הוא מסכים גם מצדו ונעשׂה בעצמו חלק מן ‘ההסכמה’.\n\nוכל־כך גדול כוח ‘ההסכמה’, עד שעל הרוב לא יוכל האדם למַלט נפשו מפעולתה גם כשהוא עצמו הוא ‘הדבר המוסכם’. אם ‘כל העולם’ אומרים על פלוני שגדול הוא בחכמה או ביראה, שיש בו מדה פלונית, טובה או רעה, – סופו להסכים לזה גם בעצמו, אע"פ שמתחלה לא מצא בנפשו אותו היתרון או החסרון שאחרים מיחסים לו. ולא זו בלבד אלא שההסכמה הזאת מצד ‘המוסכם’ עצמו פועלת מעט מעט על תכונת רוחו עד שמקרבתו באמת (או, לפחות, מולידה בו נטיה להתקרב) אל המצב ההוא שרואה בו ‘כל העולם’. על כן יזהירו הפדגוגים בצדק, לבלתי עורר את הילדים על מגרעותיהם המוסריות בראשית התפתחותן, וכל שכּן לבלתי יחס להם מגרעות שאין בהם, כי על ידי זה אפשר שנחזק בלבם את הראשונות ונוליד בם נטיה להאחרונות.\n\nואולם, הדבר מובן, כי ‘כל העולם’ אינו אחד לכל אחד. 
האדם רואה ‘עולמו’ רק באותה החברה שהוא חושב עצמו לחלק ממנה ורואה באישיה אנשים הקרובים לו מאיזה צד; אבל אין אדם חושב למאומה הסכמת אנשים שרוחם זרה לו לגמרי, שאינו מרגיש בנפשו שום יחס פנימי בינו ובינם. ככה אין האוֹרתוֹדוֹכּסים והמשׂכילים שלנו שׂמים לב כלל אלו להסכמתם של אלו, אף בדברים שאינם נוגעים לאמונה ודת, ושׂחקם ולעגם של אלו על אלו אינו עושׂה שום רושם בלבם של שניהם, לפי שכּל אחת משתי הכּתּות רואה את חברתה כאלו אינה. ואולם כשתנאי החיים מכריחים את בני הכתות השונות להמצא במשׂא ומתן תמידי זה עם זה והם מתרגלים לראות זה בזה קודם כל את האדם, – אז יתרחב ‘עולמם’ והשקפותיהם סובלות שנויים רבים על פי הסכמת ‘העולם’ במובנו החדש.\n\n\n\nלפיכך, בדורות שעברו, כשהיו אבותינו מאמינים בפשטו של ‘אתה בחרתנו’, לא היתה החרפּה שחרפום האומות פועלת כלל על טוהר נפשם פנימה. הם ידעו את ערכם ולא התפעלו עד מה מן ‘ההסכמה הכללית’ אשר מחוץ להם, בהיות כל חברת ‘המסכימים’ נחשבת בעיניהם למין מיוחד של בריות זרות להם ושונות מהם שנוי עצמי, בלי כל יחס וכל דמיון בינם ובינן. אז היה היהודי יכול לשמוע במנוחת לב כל המגרעות המוסריות והחטאים המעשׂיים שטפלה עליו הסכמת העמים, מבלי להרגיש בנפשו שום בושה או שפלוּת פנימית. כי מה לו ולמחשבות ‘הנכרים’ עליו ועל ערכּוֹ? לוּ רק יתנו לו לישב בשלוה! – אבל בדור הזה אין הדבר כן, עתה ‘עולמנו’ נתרחב הרבה, וההסכמה האירופּית פועלת עלינו בחזקה בכל ענפי החיים. ולפי שאין אנו מוציאים עוד את ‘הכל’ מן הכלל, לכן נתפעל בעל כרחנו ממה ש’הכל\' מוציאים אותנו מן הכלל, סופר אחד רוסי שאל באלו הימים בתמימוּת: אחר שכל העולם שׂונאים את היהודים, וכי אפשר לאמור, שכל העולם חייבים והיהודים זכאים? – ושאלה כזו מתגנבת עתה גם אל לב רבים מאחינו: וכי אפשר לאמור, שכל אותן התכונות הנשחתות והמעשׂים הרעים שכל העולם מיחס ליהודים אינם אלא ‘בדותא’?\n\nוהספק הזה, מכיון שנתעורר, מוצא לו מחיה בנקל באותם ההיקשים המוטעים ‘מן הפרט אל הכלל’ הרגילים מאד אצל המון בני האדם. הספור הידוע על דבר נוסע אחד, שבא לאחת הערים ונזדמן לאכסניא שהיה בה משרת כבד־פה, וכתב בפנקסו: בעיר פלונית משרתי האכסניות הם כבדי־פה, – הספור הזה מצייר בצורה של התוּל דרכי־ההגיון של ההמון ברוב משפטיו הכלליים. כל החזיונות הנראים באיזה דבר פרטי רגיל ההמון ליחס אל הכלל שהדבר ההוא מתחשב עליו לפי שמו התמידי, מבלי להתבונן, כי ‘פרט’ אחד יוכל להתחשב על ‘כללים’ רבים ביחד, כלומר, להיות שוּתף בתכוּנה אחת עם פרטיו של כלל אחד ובתכונה אחרת עם פרטיו של כלל אחר, בעוד שהשם הנקרא עליו מציין רק את התיחסותו לאחד הכללים באחד מצדדיו, לא בכולם. – על משפטים ממין זה תוכל להשען, וגם תשען באמת, ההסכמה הכללית ביחוסה אלינו: פלוני ופלוני הם יהודים לפי שמם ורמאים לפי תכוּנתם; שמע מינה, שהיהודים הם לפי תכונתם רמאים. ההגיון האמתי ישיב אמנם על זה, כי אף אם היו באמת כל היהודים בדורנו רמאים, אין מזה עוד ראיה, שהיהודים הם רמאים, כלומר, שתכוּנת הרמאוּת הנמצאת בכל יהודי נמצאת בו מצד התיחסותו אל הכלל ‘יהודים’ ולא מצד איזה כלל אחר (למשל, כלל ‘סוחרים’), שגם אליו מתיחס היהודי בתור פרט, ביחד עם אחרים אשר דבר אין להם עם הכלל ‘יהודים’. וכדי לברר הדבר, צריך לבדוֹק תחלה אותם ‘האחרים’ המשתתפים יחד עם היהודים בכללים אחרים. ורק אחר שנמצא על ידי בדיקה זו, שאין תכוּנת הרמאוּת מצויה בשום ‘כלל’ אחר המשותף ליהודים ולאחרים, – רק אז תהיה לנו צדקה לחרוץ משפט, כי היהדות היא אֵם הרמאוּת. – אבל, כאמור, אין דרכם של בני אדם להעמיק בהגיון, ואין אנו יכולים לדרוש כזאת גם מהמון בני עמנו. הם שומעים את המשפט החרוץ של ההסכמה הכללית ורואים עם זה, שרבים בקרבּנוּ כך הם באמת כמו שאומרת ההסכמה, ובזה די להם, והרי הם מתחילים להסכים גם בעצמם. 
וככה עוברות ‘תכוּנות היהודים’ כמטבע כשרה מיד ליד, מן ההסכמה החיצונית של העמים אל ההסכמה הפנימית בקרב עמנו, רק עם ההבדל הזה, שהעמים מונים את תכוּנותינו הרעות אחת לאחת בקול ענוֹת גבוּרה ולעג השאננים, ואנחנו עונים אחריהם מלה במלה בקול דממה דקה והצטדקות חלושה; הם ממשילים אותנו לכלי חרס, שאין לו תקנה אלא שבירה, ואנחנו ממשילים עצמנו לכלי מתכת, שאפשר לו בהגעלה ולבּוּן…\n\nהמצב הזה, אם יאריך ימים, יוכל לגרום לנו נזק מוסרי גדול. אין דבר מסוכּן לגוי ולאדם כהודאה על חטאים שאין בו. מי שחטא באמת, הרי שערי תשובה לא ננעלו, וברצונו הטוב יכול להסיר חלאתו מעליו. אבל מי שאחרים הביאוהו לחשוֹד עצמו במה שאין בו, איך יוכל להטהר בעיני עצמו? מצד אחד מאמין הוא לדברי האומרים לו: טול קורה מבין עיניך, ומצד אחר מרגיש הוא, שאינו יכול לטול את הקורה מבין עיניו, אחר שאינה באמת אלא בדמיון, והרי הוא במצב אותם המונומַנים הידועים, שמאיזו סבּה באו לידי אמונה, כי משׂא כבד תלוי להם בחוטמם מבלי שיוכלו להסירו. ולא עוד אלא שלפעמים תביא אמונה זו את האיש הפרטי להשתתף באותה המדה המגוּנה שלפי אמונתו היא קנין הכלל כולו, אעפ“י שהוא עצמו מצד פרטיותו אינו נוטה כלל לזה. אין ספק, למשל, כי בקרב העם שיצאו מתוכו אנשים כהרמב”ם נמצאים גם עתה בעלי דעה מיושבת ואוהבי סדר ושיטה בכל דבר, והם, בקחתם חלק בעבודת הצבּוּר, היו יכולים לתת בה את רוחם ולפעול גם על יתר העובדים. אבל מה נעשׂה, וכל גזרה ‘ההסכמה’, ששׂנאת הסדרים היא תכוּנה יהודית, וכבר הסכמנו גם אנחנו להסכמה זו (אעפ"י שעוד לא נתברר, אם התכוּנה הזאת, המצויה באמת בחלק גדול מעמנו, מתיחסת אל הכלל ‘יהודים’, או אולי – מה שיותר מתקבל על הלב – אל הכלל ‘חניכי־החדר’). ועל כן תרפינה ידי אוהבי הסדר, בהאמינם, כי אין עצה ואין תבונה נגד תכוּנת העם. ואם פטריוטים הם, יעקרו גם מלבם את האהבה לסדרים, המתנגדת לרוח עמם, ויעשׂו גם הם את מעשׂיהם כראוי ליהודים אמתיים…\n\n\n\nצריך איפוא לבקש איזה אמצעי, איך להוציא את עצמנו מתחת השפעת ‘ההסכמה הכללית’ בנוגע לתכוּנות ישׂראל וערכו המוסרי, כדי שלא נהיה בזויים בעיני עצמנו ולא נחשוב, שבאמת גרועים אנחנו מכל בני האדם תחת השמש, וכדי שלא נבוא עי"ז להיות ברבות הימים בפועל מה שאין אנו עתה אלא בדמיון.\n\nואת האמצעי הזה נותנת לנו ‘ההסכמה הכללית’ עצמה על ידי עלילת־הדם. העלילה הזאת היא היחידה בין כל רעותיה אשר בה לא תוכל ההסכמה להביא גם אותנו לידי ספק, אם באמת ‘כל העולם חייבים ואנחנו זכאים’, בהיותה מיוסדת כולה על שקר מוחלט ואין לה משען באיזה היקש מוטעה ‘מן הפרט על הכלל’. כל איש ישׂראל שנתחנך בתוך עמו יודע בבירור גמור, שאין בתוך כלל ישׂראל אף פרט אחד האוכל דם אדם לשם שמים. ואת הידיעה הברורה הזאת משגיאת ‘ההסכמה הכללית’, המתחדשת בלבנו מזמן לזמן על ידי התחדשות עלילת־הדם, צריכים אנו לשמור תמיד בזכרוננו, והיא תעזור לנו לעקור מלבנו את הנטיה להכּנע מפני האַבטוֹריטט של ‘כל העולם’ גם ביתר הדברים. יאמר כל העולם מה שיאמר על דבר פחיתוּת ערכּנוּ המוסרי, – אנחנו יודעים, כי ‘ההסכמה’ הזאת נשענת רק על הגיון המוני, בלי כל יסוד מדעי אמתּי. כי מי בא בסוד עמקי רוחנו וראה את ‘היהודי’ כמו שהוא מצד עצמו? מי שקל זה לעומת זה יהודים ושאינם יהודים הדומים אלו לאלו בכל יתר ‘הכללים’: סוחרים לעומת סוחרים, נרדפים לעומת נרדפים, רעבים לעומת רעבים וכו\'. – מי שקל כל אלה במאזני החכמה האמתּית ומצא את הכף מַכרעת לאחד הצדדים?\n\n‘וכי אפשר שכּל העולם חייבים והיהודים זכאים?’\n\nאפשר ואפשר, ועלילת־הדם תוכיח. פה הרי היהודים זכאים וטהורים כמלאכי השרת: יהודי ודם! היש שני הפכים גדולים מאלו? – ואף על פי כן…\n\n\n\nה\' תשרי תרנ"ג\n\n\n\n\n\n\nנדפס ב‘המליץ’ י“ד תשרי תרנ”ג. \xa0↩\n\n\n\n\n\n\n\n\n\n\nאת הטקסט לעיל הפיקו מתנדבי פרויקט בן־יהודה באינטרנט. 
הוא זמין תמיד בכתובת הבאה:https://benyehuda.org/read/10' } ``` ### Data Fields - `authors` - `genre` - `id` - `original_language` - `source_edition` - `text` - `title` - `translators` - `url` ### Data Splits | | train | |--------|------:| | corpus | 10078 | ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? Researchers. ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ### Citation Information ``` @article{, author = {}, title = {Public domain texts from Project Ben-Yehuda}, journal = {}, url = {https://github.com/projectbenyehuda/public_domain_dump}, year = {2020}, } ``` ### Contributions Thanks to [@imvladikon](https://github.com/imvladikon) for adding this dataset.
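To make the field list above concrete, here is a minimal usage sketch with the `datasets` library. The Hub id `hebrew_projectbenyehuda` is an assumption (the card itself only links the GitHub dump), so adjust it if the dataset is published under a different name.

```python
from datasets import load_dataset

# Assumed Hub id; the card only links the GitHub dump, so adjust if needed.
ds = load_dataset("hebrew_projectbenyehuda", split="train")

sample = ds[0]
print(sample["title"], "-", sample["authors"], "-", sample["genre"])
print(sample["text"][:300])  # beginning of the plaintext body
```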
15,778
[ [ -0.047149658203125, -0.043670654296875, 0.0273590087890625, 0.05401611328125, -0.037445068359375, -0.03875732421875, 0.0104217529296875, -0.047210693359375, 0.055938720703125, 0.0283660888671875, -0.0270843505859375, -0.02081298828125, -0.04644775390625, -0....
hindi_discourse
2023-01-25T14:32:13.000Z
[ "task_categories:text-classification", "task_ids:multi-label-classification", "annotations_creators:other", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:hi", "license:other", "discourse-analysis", "region:us" ]
null
The Hindi Discourse Analysis dataset is a corpus for analyzing discourse modes present in its sentences. It contains sentences from stories written by 11 famous authors from the 20th Century. 4-5 stories by each author have been selected which were available in the public domain resulting in a collection of 53 stories. Most of these short stories were originally written in Hindi but some of them were written in other Indian languages and later translated to Hindi.
@inproceedings{swapnil2020, title={An Annotated Dataset of Discourse Modes in Hindi Stories}, author={Swapnil Dhanwal, Hritwik Dutta, Hitesh Nankani, Nilay Shrivastava, Yaman Kumar, Junyi Jessy Li, Debanjan Mahata, Rakesh Gosangi, Haimin Zhang, Rajiv Ratn Shah, Amanda Stent}, booktitle={Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)}, volume={12}, pages={1191–1196}, year={2020}
1
95
2022-03-02T23:29:22
--- annotations_creators: - other language_creators: - found language: - hi license: - other multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - multi-label-classification pretty_name: Discourse Analysis dataset tags: - discourse-analysis dataset_info: features: - name: Story_no dtype: int32 - name: Sentence dtype: string - name: Discourse Mode dtype: class_label: names: '0': Argumentative '1': Descriptive '2': Dialogue '3': Informative '4': Narrative '5': Other splits: - name: train num_bytes: 1998930 num_examples: 9968 download_size: 4176677 dataset_size: 1998930 --- # Dataset Card for Discourse Analysis dataset ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** https://github.com/midas-research/hindi-discourse - **Paper:** [An Annotated Dataset of Discourse Modes in Hindi Stories](https://aclanthology.org/2020.lrec-1.149/) - **Point of Contact:** https://github.com/midas-research/MeTooMA ### Dataset Summary - The Hindi Discourse Analysis dataset is a corpus for analyzing discourse modes present in its sentences. - It contains sentences from stories written by 11 famous authors from the 20th Century. - 4-5 stories by each author have been selected which were available in the public domain resulting in a collection of 53 stories. - Most of these short stories were originally written in Hindi but some of them were written in other Indian languages and later translated to Hindi. The corpus contains a total of 10472 sentences belonging to the following categories: - Argumentative - Descriptive - Dialogic - Informative - Narrative ### Supported Tasks and Leaderboards - Discourse Analysis of Hindi. ### Languages Hindi ## Dataset Structure - The dataset is structured into JSON format. ### Data Instances {'Story_no': 15, 'Sentence': ' गाँठ से साढ़े तीन रुपये लग गये, जो अब पेट में जाकर खनकते भी नहीं! जो तेरी करनी मालिक! ” “इसमें मालिक की क्या करनी है? ”', 'Discourse Mode': 'Dialogue'} ### Data Fields Sentence number, story number, sentence and discourse mode ### Data Splits - Train: 9983 ## Dataset Creation ### Curation Rationale - Present a new publicly available corpus consisting of sentences from short stories written in a low-resource language of Hindi having high quality annotation for five different discourse modes - argumentative, narrative, descriptive, dialogic and informative. 
- Perform a detailed analysis of the proposed annotated corpus and characterize the performance of different classification algorithms. ### Source Data - The source of all the data points in this dataset is Hindi stories written by famous authors of Hindi literature. #### Initial Data Collection and Normalization - All the data was collected from various Hindi websites. - We chose against crowd-sourcing the annotation process because we wanted to directly work with the annotators for qualitative feedback and to also ensure high quality annotations. - We employed three native Hindi speakers with college level education for the annotation task. - We first selected two random stories from our corpus and had the three annotators work on them independently and classify each sentence based on the discourse mode. - Please refer to this paper for detailed information: https://www.aclweb.org/anthology/2020.lrec-1.149/ #### Who are the source language producers? Please refer to this paper for detailed information: https://www.aclweb.org/anthology/2020.lrec-1.149/ ### Annotations #### Annotation process - The authors chose against crowd-sourcing for labeling this dataset due to its highly sensitive nature. - The annotators are domain experts having degrees in advanced clinical psychology and gender studies. - They were provided a guidelines document with instructions about each task and its definitions, labels and examples. - They studied the document and worked a few examples to get used to this annotation task. - They also provided feedback for improving the class definitions. - The annotation process is not mutually exclusive, implying that the presence of one label does not mean the absence of the other one. #### Who are the annotators? - The annotators were three native Hindi speakers with college level education. - Please refer to the accompanying paper for a detailed annotation process. ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset - As future work, we would also like to use the presented corpus to see how it could be further used in certain downstream tasks such as emotion analysis, machine translation, textual entailment, and speech synthesis for improving the storytelling experience in the Hindi language. ### Discussion of Biases [More Information Needed] ### Other Known Limitations - We could not get the best performance using the deep learning model trained on the data, due to insufficient data for DL models. ## Additional Information Please refer to this link: https://github.com/midas-research/hindi-discourse ### Dataset Curators - If you use the corpus in a product or application, then please credit the authors and [Multimodal Digital Media Analysis Lab - Indraprastha Institute of Information Technology, New Delhi] (http://midas.iiitd.edu.in) appropriately. Also, if you send us an email, we will be thrilled to know about how you have used the corpus. - If interested in commercial use of the corpus, send an email to midas@iiitd.ac.in. - Multimodal Digital Media Analysis Lab - Indraprastha Institute of Information Technology, New Delhi, India disclaims any responsibility for the use of the corpus and does not provide technical support. However, the contact listed above will be happy to respond to queries and clarifications. - Please feel free to send us an email: - with feedback regarding the corpus. - with information on how you have used the corpus. - if interested in having us analyze your social media data. 
- if interested in a collaborative research project. ### Licensing Information - If you use the corpus in a product or application, then please credit the authors and [Multimodal Digital Media Analysis Lab - Indraprastha Institute of Information Technology, New Delhi] (http://midas.iiitd.edu.in) appropriately. ### Citation Information Please cite the following publication if you make use of the dataset: https://aclanthology.org/2020.lrec-1.149/ ``` @inproceedings{dhanwal-etal-2020-annotated, title = "An Annotated Dataset of Discourse Modes in {H}indi Stories", author = "Dhanwal, Swapnil and Dutta, Hritwik and Nankani, Hitesh and Shrivastava, Nilay and Kumar, Yaman and Li, Junyi Jessy and Mahata, Debanjan and Gosangi, Rakesh and Zhang, Haimin and Shah, Rajiv Ratn and Stent, Amanda", booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference", month = may, year = "2020", address = "Marseille, France", publisher = "European Language Resources Association", url = "https://www.aclweb.org/anthology/2020.lrec-1.149", pages = "1191--1196", abstract = "In this paper, we present a new corpus consisting of sentences from Hindi short stories annotated for five different discourse modes argumentative, narrative, descriptive, dialogic and informative. We present a detailed account of the entire data collection and annotation processes. The annotations have a very high inter-annotator agreement (0.87 k-alpha). We analyze the data in terms of label distributions, part of speech tags, and sentence lengths. We characterize the performance of various classification algorithms on this dataset and perform ablation studies to understand the nature of the linguistic models suitable for capturing the nuances of the embedded discourse structures in the presented corpus.", language = "English", ISBN = "979-10-95546-34-4", } ``` ### Contributions Thanks to [@duttahritwik](https://github.com/duttahritwik) for adding this dataset.
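A minimal loading sketch for the splits and fields described above, assuming the Hub id `hindi_discourse` used by this record and a recent `datasets` release:

```python
from datasets import load_dataset

ds = load_dataset("hindi_discourse", split="train")

# "Discourse Mode" is a ClassLabel feature with six names.
mode = ds.features["Discourse Mode"]
print(mode.names)  # ['Argumentative', 'Descriptive', 'Dialogue', 'Informative', 'Narrative', 'Other']

row = ds[0]
print(row["Story_no"], row["Sentence"])
print("mode:", mode.int2str(row["Discourse Mode"]))
```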
9,254
[ [ -0.02252197265625, -0.06915283203125, 0.010345458984375, 0.034637451171875, -0.039886474609375, 0.024627685546875, -0.0264892578125, -0.03240966796875, 0.034149169921875, 0.004131317138671875, -0.0212860107421875, -0.040771484375, -0.055328369140625, 0.02618...
id_panl_bppt
2023-01-25T14:32:43.000Z
[ "task_categories:translation", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:translation", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "language:id", "license:unknown", "region:us" ]
null
Parallel Text Corpora for Multi-Domain Translation System created by BPPT (Indonesian Agency for the Assessment and Application of Technology) for PAN Localization Project (A Regional Initiative to Develop Local Language Computing Capacity in Asia). The dataset contains around 24K sentences divided into 4 different topics (Economic, International, Science and Technology, and Sport).
@inproceedings{id_panl_bppt, author = {PAN Localization - BPPT}, title = {Parallel Text Corpora, English Indonesian}, year = {2009}, url = {http://digilib.bppt.go.id/sampul/p92-budiono.pdf}, }
1
95
2022-03-02T23:29:22
--- annotations_creators: - expert-generated language_creators: - expert-generated language: - en - id license: - unknown multilinguality: - translation size_categories: - 10K<n<100K source_datasets: - original task_categories: - translation task_ids: [] pretty_name: IdPanlBppt dataset_info: features: - name: id dtype: string - name: translation dtype: translation: languages: - en - id - name: topic dtype: class_label: names: '0': Economy '1': International '2': Science '3': Sport config_name: id_panl_bppt splits: - name: train num_bytes: 7455924 num_examples: 24021 download_size: 2366973 dataset_size: 7455924 --- # Dataset Card for [Dataset Name] ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [PANL BPPT](http://digilib.bppt.go.id/sampul/p92-budiono.pdf) - **Repository:** [PANL BPPT Repository](https://github.com/cahya-wirawan/indonesian-language-models/raw/master/data/BPPTIndToEngCorpusHalfM.zip) - **Paper:** [Resource Report: Building Parallel Text Corpora for Multi-Domain Translation System](http://digilib.bppt.go.id/sampul/p92-budiono.pdf) - **Leaderboard:** - **Point of Contact:** ### Dataset Summary Parallel Text Corpora for Multi-Domain Translation System created by BPPT (Indonesian Agency for the Assessment and Application of Technology) for PAN Localization Project (A Regional Initiative to Develop Local Language Computing Capacity in Asia). The dataset contains around 24K sentences divided in 4 difference topics (Economic, international, Science and Technology and Sport). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages Indonesian ## Dataset Structure [More Information Needed] ### Data Instances An example of the dataset: ``` { 'id': '0', 'topic': 0, 'translation': { 'en': 'Minister of Finance Sri Mulyani Indrawati said that a sharp correction of the composite inde x by up to 4 pct in Wedenesday?s trading was a mere temporary effect of regional factors like decline in plantation commodity prices and the financial crisis in Thailand.', 'id': 'Menteri Keuangan Sri Mulyani mengatakan koreksi tajam pada Indeks Harga Saham Gabungan IHSG hingga sekitar 4 persen dalam perdagangan Rabu 10/1 hanya efek sesaat dari faktor-faktor regional seperti penurunan harga komoditi perkebunan dan krisis finansial di Thailand.' } } ``` ### Data Fields - `id`: id of the sample - `translation`: the parallel sentence english-indonesian - `topic`: the topic of the sentence. 
It could be one of the following: - Economic - International - Science and Technology - Sport ### Data Splits The dataset is splitted in to train, validation and test sets. ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` @inproceedings{id_panl_bppt, author = {PAN Localization - BPPT}, title = {Parallel Text Corpora, English Indonesian}, year = {2009}, url = {http://digilib.bppt.go.id/sampul/p92-budiono.pdf}, } ``` ### Contributions Thanks to [@cahya-wirawan](https://github.com/cahya-wirawan) for adding this dataset.
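A short, hedged example of reading the parallel pairs and topic labels described above, assuming the Hub id `id_panl_bppt` used by this record:

```python
from datasets import load_dataset

ds = load_dataset("id_panl_bppt", split="train")

topic = ds.features["topic"]  # ClassLabel: Economy, International, Science, Sport
row = ds[0]
print("topic:", topic.int2str(row["topic"]))
print("en:", row["translation"]["en"])
print("id:", row["translation"]["id"])
```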
4,942
[ [ -0.0377197265625, -0.056732177734375, 0.004100799560546875, 0.03948974609375, -0.02801513671875, 0.001522064208984375, -0.04241943359375, -0.0222320556640625, 0.038665771484375, 0.04876708984375, -0.03631591796875, -0.058807373046875, -0.052276611328125, 0.0...
inquisitive_qg
2022-11-18T20:09:50.000Z
[ "task_categories:text2text-generation", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:unknown", "question-generation", "region:us" ]
null
A dataset of about 20k questions that are elicited from readers as they naturally read through a document sentence by sentence. Compared to existing datasets, INQUISITIVE questions target more towards high-level (semantic and discourse) comprehension of text. Because these questions are generated while the readers are processing the information, the questions directly communicate gaps between the reader’s and writer’s knowledge about the events described in the text, and are not necessarily answered in the document itself. This type of question reflects a real-world scenario: if one has questions during reading, some of them are answered by the text later on, the rest are not, but any of them would help further the reader’s understanding at the particular point when they asked it. This resource could enable question generation models to simulate human-like curiosity and cognitive processing, which may open up a new realm of applications.
@InProceedings{ko2020inquisitive, author = {Ko, Wei-Jen and Chen, Te-Yuan and Huang, Yiyan and Durrett, Greg and Li, Junyi Jessy}, title = {Inquisitive Question Generation for High Level Text Comprehension}, booktitle = {Proceedings of EMNLP}, year = {2020}, }
1
95
2022-03-02T23:29:22
--- pretty_name: InquisitiveQg annotations_creators: - crowdsourced language_creators: - crowdsourced language: - en license: - unknown multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - original task_categories: - text2text-generation task_ids: [] paperswithcode_id: inquisitive tags: - question-generation dataset_info: features: - name: id dtype: int32 - name: article_id dtype: int32 - name: article dtype: string - name: sentence_id dtype: int32 - name: sentence dtype: string - name: span dtype: string - name: question dtype: string - name: span_start_position dtype: int32 - name: span_end_position dtype: int32 config_name: plain_text splits: - name: train num_bytes: 66099232 num_examples: 15931 - name: validation num_bytes: 8904329 num_examples: 1991 - name: test num_bytes: 7167203 num_examples: 1894 download_size: 7085941 dataset_size: 82170764 --- # Dataset Card for InquisitiveQg ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Add homepage URL here if available (unless it's a GitHub repository)]() - **Repository:** [If the dataset is hosted on github or has a github homepage, add URL here]() - **Paper:** [If the dataset was introduced by a paper or there was a paper written describing the dataset, add URL here (landing page for Arxiv paper preferred)]() - **Leaderboard:** [If the dataset supports an active leaderboard, add link here]() - **Point of Contact:** [If known, name and email of at least one person the reader can contact for questions about the dataset.]() ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data [More Information Needed] #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations [More Information Needed] #### Annotation process [More Information Needed] #### Who are the annotators? 
[More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@patil-suraj](https://github.com/patil-suraj) for adding this dataset.
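Since most card sections are still placeholders, the YAML header above is the main guide to the data. The sketch below assumes the Hub id `inquisitive_qg` with its `plain_text` config and the features listed there:

```python
from datasets import load_dataset

ds = load_dataset("inquisitive_qg", "plain_text")

print({split: len(ds[split]) for split in ds})  # train / validation / test sizes
ex = ds["train"][0]
print("sentence:", ex["sentence"])
print("span:", ex["span"])
print("question:", ex["question"])
```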
3,946
[ [ -0.02935791015625, -0.032623291015625, 0.00787353515625, 0.005977630615234375, -0.01446533203125, 0.01032257080078125, -0.006290435791015625, -0.020050048828125, 0.03106689453125, 0.05218505859375, -0.052215576171875, -0.0694580078125, -0.03814697265625, 0.0...
metrec
2023-01-25T14:40:27.000Z
[ "task_categories:text-classification", "annotations_creators:no-annotation", "language_creators:found", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:ar", "license:unknown", "poetry-classification", "region:us" ]
null
Arabic Poetry Metric Classification. The dataset contains the verses and their corresponding meter classes. Meter classes are represented as numbers from 0 to 13. The dataset can be highly useful for further research in order to improve the field of Arabic poems’ meter classification. The train dataset contains 47,124 records and the test dataset contains 8,316 records.
@article{metrec2020, title={MetRec: A dataset for meter classification of arabic poetry}, author={Al-shaibani, Maged S and Alyafeai, Zaid and Ahmad, Irfan}, journal={Data in Brief}, year={2020}, publisher={Elsevier} }
2
95
2022-03-02T23:29:22
--- annotations_creators: - no-annotation language_creators: - found language: - ar license: - unknown multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - original task_categories: - text-classification task_ids: [] paperswithcode_id: metrec pretty_name: MetRec tags: - poetry-classification dataset_info: features: - name: text dtype: string - name: label dtype: class_label: names: '0': saree '1': kamel '2': mutakareb '3': mutadarak '4': munsareh '5': madeed '6': mujtath '7': ramal '8': baseet '9': khafeef '10': taweel '11': wafer '12': hazaj '13': rajaz config_name: plain_text splits: - name: train num_bytes: 5874919 num_examples: 47124 - name: test num_bytes: 1037577 num_examples: 8316 download_size: 2267882 dataset_size: 6912496 --- # Dataset Card for MetRec ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [Metrec](https://github.com/zaidalyafeai/MetRec) - **Repository:** [Metrec repository](https://github.com/zaidalyafeai/MetRec) - **Paper:** [MetRec: A dataset for meter classification of arabic poetry](https://www.sciencedirect.com/science/article/pii/S2352340920313792) - **Point of Contact:** [Zaid Alyafeai](mailto:alyafey22@gmail.com) ### Dataset Summary The dataset contains the verses and their corresponding meter classes. Meter classes are represented as numbers from 0 to 13. The dataset can be highly useful for further research in order to improve the field of Arabic poems’ meter classification. The train dataset contains 47,124 records and the test dataset contains 8,316 records. ### Supported Tasks and Leaderboards The dataset was published in this [paper](https://www.sciencedirect.com/science/article/pii/S2352340920313792). A benchmark is achieved in this [paper](https://www.sciencedirect.com/science/article/pii/S016786552030204X). ### Languages The dataset is based on Arabic. ## Dataset Structure ### Data Instances A typical data point comprises a verse from a poem and a label, which is one of the 14 meter classes (numbered 0 to 13). ### Data Fields [N/A] ### Data Splits The data is split into training and testing sets. The split is organized as follows | | train | test | |------------|-------:|------:| | data split | 47,124 | 8,316 | ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data [More Information Needed] #### Initial Data Collection and Normalization The dataset was collected from [Aldiwan](https://www.aldiwan.net/). #### Who are the source language producers? The poems are from different poets. 
### Annotations The dataset does not contain any additional annotations. #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ``` @article{metrec2020, title={MetRec: A dataset for meter classification of arabic poetry}, author={Al-shaibani, Maged S and Alyafeai, Zaid and Ahmad, Irfan}, journal={Data in Brief}, year={2020}, publisher={Elsevier} } ``` ### Contributions Thanks to [@zaidalyafeai](https://github.com/zaidalyafeai) for adding this dataset.
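A minimal sketch of loading the verses and mapping the numeric labels back to meter names, assuming the Hub id `metrec` used by this record:

```python
from datasets import load_dataset

ds = load_dataset("metrec")

label = ds["train"].features["label"]  # ClassLabel with the 14 meter names
print(label.names)

verse = ds["train"][0]
print(verse["text"], "->", label.int2str(verse["label"]))
print(len(ds["train"]), "train /", len(ds["test"]), "test examples")
```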
4,752
[ [ -0.0338134765625, -0.0131683349609375, 0.005947113037109375, 0.0181884765625, -0.036224365234375, -0.0023326873779296875, -0.01438140869140625, -0.032958984375, 0.0243988037109375, 0.03387451171875, -0.033203125, -0.08355712890625, -0.0626220703125, 0.006877...
wmt_t2t
2023-04-05T13:44:08.000Z
[ "task_categories:translation", "annotations_creators:no-annotation", "language_creators:found", "multilinguality:translation", "size_categories:10M<n<100M", "source_datasets:extended|europarl_bilingual", "source_datasets:extended|news_commentary", "source_datasets:extended|opus_paracrawl", "source_d...
null
null
@InProceedings{bojar-EtAl:2014:W14-33, author = {Bojar, Ondrej and Buck, Christian and Federmann, Christian and Haddow, Barry and Koehn, Philipp and Leveling, Johannes and Monz, Christof and Pecina, Pavel and Post, Matt and Saint-Amand, Herve and Soricut, Radu and Specia, Lucia and Tamchyna, Ale\v{s}}, title = {Findings of the 2014 Workshop on Statistical Machine Translation}, booktitle = {Proceedings of the Ninth Workshop on Statistical Machine Translation}, month = {June}, year = {2014}, address = {Baltimore, Maryland, USA}, publisher = {Association for Computational Linguistics}, pages = {12--58}, url = {http://www.aclweb.org/anthology/W/W14/W14-3302} }
0
95
2022-03-02T23:29:22
--- annotations_creators: - no-annotation language_creators: - found language: - de - en license: - unknown multilinguality: - translation size_categories: - 10M<n<100M source_datasets: - extended|europarl_bilingual - extended|news_commentary - extended|opus_paracrawl - extended|un_multi task_categories: - translation task_ids: [] pretty_name: WMT T2T paperswithcode_id: null dataset_info: features: - name: translation dtype: translation: languages: - de - en config_name: de-en splits: - name: train num_bytes: 1385110179 num_examples: 4592289 - name: validation num_bytes: 736415 num_examples: 3000 - name: test num_bytes: 777334 num_examples: 3003 download_size: 1728762345 dataset_size: 1386623928 --- # Dataset Card for "wmt_t2t" ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/translate_ende.py](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/translate_ende.py) - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Size of downloaded dataset files:** 1.73 GB - **Size of the generated dataset:** 1.39 GB - **Total amount of disk used:** 3.11 GB ### Dataset Summary The WMT EnDe Translate dataset used by the Tensor2Tensor library. Translation dataset based on the data from statmt.org. Versions exist for different years using a combination of data sources. The base `wmt` allows you to create a custom dataset by choosing your own data/language pair. 
This can be done as follows: ```python from datasets import inspect_dataset, load_dataset_builder inspect_dataset("wmt_t2t", "path/to/scripts") builder = load_dataset_builder( "path/to/scripts/wmt_utils.py", language_pair=("fr", "de"), subsets={ datasets.Split.TRAIN: ["commoncrawl_frde"], datasets.Split.VALIDATION: ["euelections_dev2019"], }, ) # Standard version builder.download_and_prepare() ds = builder.as_dataset() # Streamable version ds = builder.as_streaming_dataset() ``` ### Supported Tasks and Leaderboards [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Languages [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Dataset Structure ### Data Instances #### de-en - **Size of downloaded dataset files:** 1.73 GB - **Size of the generated dataset:** 1.39 GB - **Total amount of disk used:** 3.11 GB An example of 'validation' looks as follows. ``` { "translation": { "de": "Just a test sentence.", "en": "Just a test sentence." } } ``` ### Data Fields The data fields are the same among all splits. #### de-en - `translation`: a multilingual `string` variable, with possible languages including `de`, `en`. ### Data Splits |name | train |validation|test| |-----|------:|---------:|---:| |de-en|4592289| 3000|3003| ## Dataset Creation ### Curation Rationale [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Source Data #### Initial Data Collection and Normalization [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### Who are the source language producers? [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Annotations #### Annotation process [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### Who are the annotators? 
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Personal and Sensitive Information [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Discussion of Biases [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Other Known Limitations [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Additional Information ### Dataset Curators [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Licensing Information [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Citation Information ``` @InProceedings{bojar-EtAl:2014:W14-33, author = {Bojar, Ondrej and Buck, Christian and Federmann, Christian and Haddow, Barry and Koehn, Philipp and Leveling, Johannes and Monz, Christof and Pecina, Pavel and Post, Matt and Saint-Amand, Herve and Soricut, Radu and Specia, Lucia and Tamchyna, Ale {s}}, title = {Findings of the 2014 Workshop on Statistical Machine Translation}, booktitle = {Proceedings of the Ninth Workshop on Statistical Machine Translation}, month = {June}, year = {2014}, address = {Baltimore, Maryland, USA}, publisher = {Association for Computational Linguistics}, pages = {12--58}, url = {http://www.aclweb.org/anthology/W/W14/W14-3302} } ``` ### Contributions Thanks to [@thomwolf](https://github.com/thomwolf), [@patrickvonplaten](https://github.com/patrickvonplaten) for adding this dataset.
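For the prebuilt `de-en` configuration described in this card (as opposed to the custom-pair builder shown earlier), a hedged loading sketch looks like this:

```python
from datasets import load_dataset

# Default config from the YAML header above; downloads ~1.7 GB on first use.
ds = load_dataset("wmt_t2t", "de-en")

pair = ds["validation"][0]["translation"]
print("de:", pair["de"])
print("en:", pair["en"])
```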
7,379
[ [ -0.03173828125, -0.04058837890625, 0.0089874267578125, 0.0144195556640625, -0.02783203125, -0.002864837646484375, -0.033203125, -0.0364990234375, 0.037628173828125, 0.02301025390625, -0.052520751953125, -0.059600830078125, -0.05279541015625, 0.00688171386718...
Sakonii/nepalitext-language-model-dataset
2022-10-25T06:14:22.000Z
[ "task_categories:text-generation", "task_ids:language-modeling", "annotations_creators:no-annotation", "language_creators:found", "language_creators:other", "multilinguality:monolingual", "source_datasets:extended|oscar", "source_datasets:extended|cc100", "language:ne", "license:cc0-1.0", "regio...
Sakonii
null
null
3
95
2022-03-02T23:29:22
--- annotations_creators: - no-annotation language_creators: - found - other language: - ne license: - cc0-1.0 multilinguality: - monolingual source_datasets: - extended|oscar - extended|cc100 task_categories: - text-generation task_ids: - language-modeling pretty_name: nepalitext-language-model-dataset --- # Dataset Card for "nepalitext-language-model-dataset" ### Dataset Summary "NepaliText" language modeling dataset is a collection of over 13 million Nepali text sequences (phrases/sentences/paragraphs) extracted by combining the datasets: [OSCAR](https://huggingface.co/datasets/oscar) , [cc100](https://huggingface.co/datasets/cc100) and a set of scraped Nepali articles on Wikipedia. ### Supported Tasks and Leaderboards This dataset is intended to pre-train language models and word representations on Nepali Language. ### Languages The data is focused on Nepali language, but may have instances of other languages as well. ## Dataset Structure ### Data Instances An example: ``` {'text': 'घरेलु मैदानमा भएको च्याम्पियन्स लिगको दोस्रो लेगमा एथ्लेटिको मड्रिडले आर्सनललाई एक शून्यले हराउँदै समग्रमा दुई एकको अग्रताका साथ फाइनलमा प्रवेश गरेको हो ।\n'} ``` ### Data Fields The data fields are: - `text`: a `string` feature. ### Data Splits train|test| ----:|---:| 13141222|268189| ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations The dataset does not contain any additional annotations. #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information Being extracted and scraped from variety of internet sources, Personal and sensitive information might be present. This must be considered before training deep learning models, specially in the case of text-generation models. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@Sakonii](https://github.com/Sakonii) for adding this dataset.
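A minimal sketch of pulling the text sequences for language-model pre-training, assuming the repository's data files resolve directly through `load_dataset`:

```python
from datasets import load_dataset

ds = load_dataset("Sakonii/nepalitext-language-model-dataset")

print(len(ds["train"]), "train /", len(ds["test"]), "test sequences")
print(ds["train"][0]["text"])  # one Nepali text sequence
```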
2,472
[ [ -0.00884246826171875, -0.049530029296875, -0.0099945068359375, 0.0301971435546875, -0.03948974609375, 0.005146026611328125, -0.0206298828125, -0.01287841796875, 0.01038360595703125, 0.039154052734375, -0.038330078125, -0.05120849609375, -0.054901123046875, 0...
SetFit/TREC-QC
2022-01-15T22:42:56.000Z
[ "region:us" ]
SetFit
null
null
0
95
2022-03-02T23:29:22
# TREC Question Classification Question classification in coarse and fine-grained categories. Source: [Experimental Data for Question Classification](https://cogcomp.seas.upenn.edu/Data/QA/QC/) Xin Li, Dan Roth, Learning Question Classifiers. COLING'02, Aug., 2002.
278
[ [ -0.046722412109375, -0.09649658203125, 0.03643798828125, -0.024169921875, -0.007152557373046875, 0.001018524169921875, 0.006542205810546875, -0.02459716796875, -0.0103302001953125, 0.04052734375, -0.0172119140625, -0.038421630859375, -0.00860595703125, -0.00...
flax-sentence-embeddings/Gender_Bias_Evaluation_Set
2021-07-26T04:14:18.000Z
[ "arxiv:1906.00591", "region:us" ]
flax-sentence-embeddings
null
null
2
95
2022-03-02T23:29:22
**This dataset has been created as part of the Flax/JAX community week for testing the [flax-sentence-embeddings](https://huggingface.co/flax-sentence-embeddings) Sentence Similarity models for Gender Bias but can be used for other use-cases as well related to evaluating Gender Bias.** The Following Dataset has been created for Evaluating Gender Bias for different models, based on various stereotypical occupations. * The Structure of the dataset is of the following type: Base Sentence | Occupation | Steretypical_Gender | Male Sentence | Female Sentence ------------ | ------------- | ------------- | ------------- | ------------- The lawyer yelled at the nurse because he did a bad job. | nurse | female | The lawyer yelled at him because he did a bad job. | The lawyer yelled at her because she did a bad job. * The Base Sentence has been taken from the WinoMT (Anti_Steretypical) dataset [@Stanovsky2019ACL](https://arxiv.org/abs/1906.00591). **Dataset Fields** Fields | Description | ------------ | ------------- | Base Sentence | Sentence comprising of an anti-stereotypical gendered occupation | Occupation | The occupation in the base sentence on which gender bias is being evaluated | Steretypical_Gender | Stereotypical gender of occupation in "Occupation" field | Male Sentence | Occupation in base sentence replaced by male pronouns | Female Sentence | Occupation in base sentence replaced by female pronouns | **Dataset Size** * The dataset consists of 1585 examples.
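One way such rows could be used is sketched below. It assumes the examples have been read into a pandas DataFrame with exactly the column names from the table above (including the dataset's own `Steretypical_Gender` spelling), and `embed` stands in for whatever sentence encoder is being evaluated; neither is prescribed by the dataset itself.

```python
import numpy as np
import pandas as pd

def cosine(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def bias_gap(df: pd.DataFrame, embed) -> float:
    """Mean (male - female) similarity to the base sentence; embed(text) -> np.ndarray."""
    gaps = []
    for _, row in df.iterrows():
        base = embed(row["Base Sentence"])
        male_sim = cosine(base, embed(row["Male Sentence"]))
        female_sim = cosine(base, embed(row["Female Sentence"]))
        gaps.append(male_sim - female_sim)
    return float(np.mean(gaps))
```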
1,493
[ [ -0.01041412353515625, -0.05450439453125, 0.03759765625, 0.0219573974609375, 0.00835418701171875, -0.0275115966796875, -0.0004553794860839844, -0.0124664306640625, 0.00865936279296875, 0.037841796875, -0.05010986328125, -0.04791259765625, -0.046661376953125, ...
ghadeermobasher/CRAFT-Chem
2022-01-20T22:09:10.000Z
[ "region:us" ]
ghadeermobasher
\
@article{krallinger2015chemdner, title={The CHEMDNER corpus of chemicals and drugs and its annotation principles}, author={Krallinger, Martin and Rabal, Obdulia and Leitner, Florian and Vazquez, Miguel and Salgado, David and Lu, Zhiyong and Leaman, Robert and Lu, Yanan and Ji, Donghong and Lowe, Daniel M and others}, journal={Journal of cheminformatics}, volume={7}, number={1}, pages={1--17}, year={2015}, publisher={BioMed Central} }
0
95
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
sbu_captions
2023-06-02T20:56:01.000Z
[ "task_categories:image-to-text", "task_ids:image-captioning", "annotations_creators:found", "language_creators:found", "multilinguality:monolingual", "size_categories:1M<n<10M", "source_datasets:original", "language:en", "license:unknown", "region:us" ]
null
The SBU Captioned Photo Dataset is a collection of over 1 million images with associated text descriptions extracted from Flickr.
@inproceedings{NIPS2011_5dd9db5e, author = {Ordonez, Vicente and Kulkarni, Girish and Berg, Tamara}, booktitle = {Advances in Neural Information Processing Systems}, editor = {J. Shawe-Taylor and R. Zemel and P. Bartlett and F. Pereira and K.Q. Weinberger}, pages = {}, publisher = {Curran Associates, Inc.}, title = {Im2Text: Describing Images Using 1 Million Captioned Photographs}, url = {https://proceedings.neurips.cc/paper/2011/file/5dd9db5e033da9c6fb5ba83c7a7ebea9-Paper.pdf}, volume = {24}, year = {2011} }
9
95
2022-04-12T10:41:52
--- annotations_creators: - found language_creators: - found language: - en license: - unknown multilinguality: - monolingual size_categories: - 1M<n<10M source_datasets: - original task_categories: - image-to-text task_ids: - image-captioning paperswithcode_id: sbu-captions-dataset pretty_name: SBU Captioned Photo Dataset dataset_info: features: - name: image_url dtype: string - name: user_id dtype: string - name: caption dtype: string splits: - name: train num_bytes: 143795586 num_examples: 1000000 download_size: 49787719 dataset_size: 143795586 --- # Dataset Card for SBU Captioned Photo Dataset ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Dataset Preprocessing](#dataset-preprocessing) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://www.cs.rice.edu/~vo9/sbucaptions/ - **Repository:** - **Paper:** [Im2Text: Describing Images Using 1 Million Captioned Photographs](https://papers.nips.cc/paper/2011/hash/5dd9db5e033da9c6fb5ba83c7a7ebea9-Abstract.html) - **Leaderboard:** - **Point of Contact:** [Vicente Ordóñez Román](mailto:vicenteor@rice.edu) ### Dataset Summary SBU Captioned Photo Dataset is a collection of associated captions and images from Flickr. ### Dataset Preprocessing This dataset doesn't download the images locally by default. Instead, it exposes URLs to the images. 
To fetch the images, use the following code: ```python from concurrent.futures import ThreadPoolExecutor from functools import partial import io import urllib import PIL.Image from datasets import load_dataset from datasets.utils.file_utils import get_datasets_user_agent USER_AGENT = get_datasets_user_agent() def fetch_single_image(image_url, timeout=None, retries=0): for _ in range(retries + 1): try: request = urllib.request.Request( image_url, data=None, headers={"user-agent": USER_AGENT}, ) with urllib.request.urlopen(request, timeout=timeout) as req: image = PIL.Image.open(io.BytesIO(req.read())) break except Exception: image = None return image def fetch_images(batch, num_threads, timeout=None, retries=0): fetch_single_image_with_args = partial(fetch_single_image, timeout=timeout, retries=retries) with ThreadPoolExecutor(max_workers=num_threads) as executor: batch["image"] = list(executor.map(fetch_single_image_with_args, batch["image_url"])) return batch num_threads = 20 dset = load_dataset("sbu_captions") dset = dset.map(fetch_images, batched=True, batch_size=100, fn_kwargs={"num_threads": num_threads}) ``` ### Supported Tasks and Leaderboards - `image-to-text`: This dataset can be used to train a model for Image Captioning where the goal is to predict a caption given the image. ### Languages All captions are in English. ## Dataset Structure ### Data Instances Each instance in SBU Captioned Photo Dataset represents a single image with a caption and a user_id: ``` { 'img_url': 'http://static.flickr.com/2723/4385058960_b0f291553e.jpg', 'user_id': '47889917@N08', 'caption': 'A wooden chair in the living room' } ``` ### Data Fields - `image_url`: Static URL for downloading the image associated with the post. - `caption`: Textual description of the image. - `user_id`: Author of caption. ### Data Splits All the data is contained in training split. The training set has 1M instances. ## Dataset Creation ### Curation Rationale From the paper: > One contribution is our technique for the automatic collection of this new dataset – performing a huge number of Flickr queries and then filtering the noisy results down to 1 million images with associated visually relevant captions. Such a collection allows us to approach the extremely challenging problem of description generation using relatively simple non-parametric methods and produces surprisingly effective results. ### Source Data The source images come from Flickr. #### Initial Data Collection and Normalization One key contribution of our paper is a novel web-scale database of photographs with associated descriptive text. To enable effective captioning of novel images, this database must be good in two ways: 1) It must be large so that image based matches to a query are reasonably similar, 2) The captions associated with the data base photographs must be visually relevant so that transferring captions between pictures is useful. To achieve the first requirement we query Flickr using a huge number of pairs of query terms (objects, attributes, actions, stuff, and scenes). This produces a very large, but noisy initial set of photographs with associated text. #### Who are the source language producers? The Flickr users. ### Annotations #### Annotation process Text descriptions associated with the images are inherited as annotations/captions. #### Who are the annotators? The Flickr users. 
### Personal and Sensitive Information ## Considerations for Using the Data ### Social Impact of Dataset ### Discussion of Biases ### Other Known Limitations ## Additional Information ### Dataset Curators Vicente Ordonez, Girish Kulkarni and Tamara L. Berg. ### Licensing Information Not specified. ### Citation Information ```bibtex @inproceedings{NIPS2011_5dd9db5e, author = {Ordonez, Vicente and Kulkarni, Girish and Berg, Tamara}, booktitle = {Advances in Neural Information Processing Systems}, editor = {J. Shawe-Taylor and R. Zemel and P. Bartlett and F. Pereira and K.Q. Weinberger}, pages = {}, publisher = {Curran Associates, Inc.}, title = {Im2Text: Describing Images Using 1 Million Captioned Photographs}, url = {https://proceedings.neurips.cc/paper/2011/file/5dd9db5e033da9c6fb5ba83c7a7ebea9-Paper.pdf}, volume = {24}, year = {2011} } ``` ### Contributions Thanks to [@thomasw21](https://github.com/thomasw21) for adding this dataset
6,967
[ [ -0.046356201171875, -0.032470703125, -0.0032863616943359375, 0.004543304443359375, -0.046630859375, -0.00992584228515625, -0.006374359130859375, -0.033966064453125, 0.0048828125, 0.04034423828125, -0.053070068359375, -0.0239105224609375, -0.0406494140625, 0....
batubayk/HU-News
2023-03-04T22:40:26.000Z
[ "task_categories:summarization", "task_categories:text-classification", "task_categories:text-generation", "task_categories:text2text-generation", "size_categories:100K<n<1M", "language:hu", "region:us" ]
batubayk
null
null
0
95
2022-04-18T17:23:27
--- task_categories: - summarization - text-classification - text-generation - text2text-generation language: - hu pretty_name: HU-News size_categories: - 100K<n<1M --- # Citation If you use the dataset, please cite the paper: @article{10.1007/s10579-021-09568-y, year = {2022}, title = {{Abstractive text summarization and new large-scale datasets for agglutinative languages Turkish and Hungarian}}, author = {Baykara, Batuhan and Güngör, Tunga}, journal = {Language Resources and Evaluation}, issn = {1574-020X}, doi = {10.1007/s10579-021-09568-y}, pages = {1--35}}
612
[ [ -0.019073486328125, -0.040740966796875, 0.0024871826171875, 0.0191802978515625, -0.0264129638671875, -0.002407073974609375, -0.031402587890625, -0.005107879638671875, 0.0284423828125, 0.0242156982421875, 0.0084075927734375, -0.0394287109375, -0.04644775390625, ...
nbroad/mediasum
2022-10-25T10:40:11.000Z
[ "task_categories:summarization", "multilinguality:monolingual", "size_categories:100K<n<1M", "language:en", "license:cc-by-nc-sa-4.0", "arxiv:2103.06410", "region:us" ]
nbroad
This large-scale media interview dataset contains 463.6K transcripts with abstractive summaries, collected from interview transcripts and overview / topic descriptions from NPR and CNN.
@article{zhu2021mediasum, title={MediaSum: A Large-scale Media Interview Dataset for Dialogue Summarization}, author={Zhu, Chenguang and Liu, Yang and Mei, Jie and Zeng, Michael}, journal={arXiv preprint arXiv:2103.06410}, year={2021} }
1
95
2022-07-15T21:42:51
--- language: - en license: - cc-by-nc-sa-4.0 multilinguality: - monolingual size_categories: - 100K<n<1M task_categories: - summarization --- # MediaSum ## Description This large-scale media interview dataset contains 463.6K transcripts with abstractive summaries, collected from interview transcripts and overview / topic descriptions from NPR and CNN. ### **NOTE: The authors have requested that this dataset be used for research purposes only** ## Homepage https://github.com/zcgzcgzcg1/MediaSum ## Paper https://arxiv.org/abs/2103.06410 ## Authors ### Chenguang Zhu*, Yang Liu*, Jie Mei, Michael Zeng #### Microsoft Cognitive Services Research Group {chezhu,yaliu10,jimei,nzeng}@microsoft.com ## Citation @article{zhu2021mediasum, title={MediaSum: A Large-scale Media Interview Dataset for Dialogue Summarization}, author={Zhu, Chenguang and Liu, Yang and Mei, Jie and Zeng, Michael}, journal={arXiv preprint arXiv:2103.06410}, year={2021} } ## Dataset size Train: 443,596 Validation: 10,000 Test: 10,000 The splits were made by using the file located here: https://github.com/zcgzcgzcg1/MediaSum/tree/main/data ## Data details - id (string): unique identifier - program (string): the program this transcript came from - date (string): date of program - url (string): link to where audio and transcript are located - title (string): title of the program. some datapoints do not have a title - summary (string): summary of the program - utt (list of string): list of utterances by the speakers in the program. corresponds with `speaker` - speaker (list of string): list of speakers, corresponds with `utt` Example: ``` { "id": "NPR-11", "program": "Day to Day", "date": "2008-06-10", "url": "https://www.npr.org/templates/story/story.php?storyId=91356794", "title": "Researchers Find Discriminating Plants", "summary": "The \"sea rocket\" shows preferential treatment to plants that are its kin. Evolutionary plant ecologist Susan Dudley of McMaster University in Ontario discusses her discovery.", "utt": [ "This is Day to Day. I'm Madeleine Brand.", "And I'm Alex Cohen.", "Coming up, the question of who wrote a famous religious poem turns into a very unchristian battle.", "First, remember the 1970s? People talked to their houseplants, played them classical music. They were convinced plants were sensuous beings and there was that 1979 movie, \"The Secret Life of Plants.\"", "Only a few daring individuals, from the scientific establishment, have come forward with offers to replicate his experiments, or test his results. The great majority are content simply to condemn his efforts without taking the trouble to investigate their validity.", ... "OK. Thank you.", "That's Susan Dudley. She's an associate professor of biology at McMaster University in Hamilt on Ontario. She discovered that there is a social life of plants." ], "speaker": [ "MADELEINE BRAND, host", "ALEX COHEN, host", "ALEX COHEN, host", "MADELEINE BRAND, host", "Unidentified Male", ..." Professor SUSAN DUDLEY (Biology, McMaster University)", "MADELEINE BRAND, host" ] } ``` ## Using the dataset ```python from datasets import load_dataset ds = load_dataset("nbroad/mediasum") ``` ## Data location https://drive.google.com/file/d/1ZAKZM1cGhEw2A4_n4bGGMYyF8iPjLZni/view?usp=sharing ## License No license specified, but the authors have requested that this dataset be used for research purposes only.
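## Example usage sketch

A minimal sketch of pairing the parallel `speaker` and `utt` lists, using the loading call and field names documented above:

```python
from datasets import load_dataset

# Load the interview transcripts (field names as documented in "Data details").
ds = load_dataset("nbroad/mediasum", split="train")

example = ds[0]
print(example["program"], "-", example["title"])
print("Summary:", example["summary"])

# `speaker` and `utt` are parallel lists; zip them to reconstruct the dialogue.
for speaker, utterance in zip(example["speaker"], example["utt"]):
    print(f"{speaker}: {utterance}")
```

Remember the authors' request that the data be used for research purposes only.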
3,511
[ [ -0.0273895263671875, -0.04937744140625, 0.0272064208984375, 0.00972747802734375, -0.0008015632629394531, 0.0021114349365234375, -0.021514892578125, -0.0223846435546875, 0.039886474609375, 0.01934814453125, -0.054534912109375, -0.035369873046875, -0.0472412109375...
NbAiLab/norwegian-alpaca
2023-07-25T15:05:00.000Z
[ "task_categories:text-generation", "language:no", "language:nb", "license:cc-by-4.0", "instruction-finetuning", "region:us" ]
NbAiLab
null
null
7
95
2023-03-20T13:14:23
--- license: cc-by-4.0 language: - 'no' - nb tags: - instruction-finetuning pretty_name: NB Alpaca Norwegian Bokmål task_categories: - text-generation dataset_info: features: - name: instruction dtype: string - name: input dtype: string - name: output dtype: string - name: instruction_en dtype: string - name: input_en dtype: string - name: output_en dtype: string splits: - name: train num_bytes: 38067492 num_examples: 51942 download_size: 24204487 dataset_size: 38067492 --- # NB Alpaca Norwegian Bokmål This dataset is a translation to Norwegian Bokmål of [alpaca_data_cleaned.json](https://github.com/tloen/alpaca-lora/blob/main/alpaca_data_cleaned.json), a clean version of the [Alpaca dataset made at Stanford](https://huggingface.co/datasets/tatsu-lab/alpaca). An [earlier version](https://huggingface.co/datasets/NbAiLab/norwegian-alpaca/tree/main/nllb) used [Facebook's NLLB 1.3B model](https://huggingface.co/facebook/nllb-200-1.3B), but the current version uses OpenAI's `gpt-3.5-turbo`, hence this dataset cannot be used to create models that compete in any way against OpenAI.
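As a brief usage sketch, the dataset can be loaded with 🤗 Datasets and turned into Alpaca-style training prompts. The prompt template below is purely illustrative and is not prescribed by this dataset.

```python
from datasets import load_dataset

ds = load_dataset("NbAiLab/norwegian-alpaca", split="train")

def to_prompt(example):
    # Illustrative Alpaca-style template; the exact wording is an assumption.
    if example["input"]:
        text = (
            f"### Instruksjon:\n{example['instruction']}\n\n"
            f"### Input:\n{example['input']}\n\n"
            f"### Svar:\n{example['output']}"
        )
    else:
        text = f"### Instruksjon:\n{example['instruction']}\n\n### Svar:\n{example['output']}"
    return {"text": text}

formatted = ds.map(to_prompt)
print(formatted[0]["text"])
```

The English columns (`instruction_en`, `input_en`, `output_en`) can be used the same way if an English-language variant is needed.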
1,148
[ [ -0.033203125, -0.0506591796875, 0.003063201904296875, 0.0343017578125, -0.045989990234375, -0.0185394287109375, 0.0112152099609375, -0.06243896484375, 0.0533447265625, 0.042633056640625, -0.047607421875, -0.0386962890625, -0.031951904296875, 0.01479339599609...
TREC-AToMiC/AToMiC-Texts-v0.2.1
2023-05-04T18:58:43.000Z
[ "region:us" ]
TREC-AToMiC
null
null
2
95
2023-04-26T16:34:45
--- dataset_info: features: - name: text_id dtype: string - name: page_url dtype: string - name: page_title dtype: string - name: section_title dtype: string - name: context_page_description dtype: string - name: context_section_description dtype: string - name: media sequence: string - name: hierachy sequence: string - name: category sequence: string - name: source_id dtype: string splits: - name: train num_bytes: 20393084595 num_examples: 10134744 download_size: 7192298025 dataset_size: 20393084595 --- # Dataset Card for "AToMiC-Texts-v0.2.updated" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
767
[ [ -0.01519012451171875, -0.0279388427734375, 0.02374267578125, 0.01270294189453125, -0.0198974609375, -0.0034656524658203125, 0.004047393798828125, -0.035125732421875, 0.04229736328125, 0.045745849609375, -0.0494384765625, -0.0511474609375, -0.042755126953125, ...
edarchimbaud/news-stocks
2023-11-01T04:38:01.000Z
[ "region:us" ]
edarchimbaud
null
null
3
95
2023-05-17T17:23:09
--- dataset_info: features: - name: symbol dtype: string - name: body dtype: string - name: publisher dtype: string - name: publish_time dtype: timestamp[ns, tz=GMT] - name: title dtype: string - name: url dtype: string - name: uuid dtype: string splits: - name: train num_bytes: 110675916 num_examples: 22038 download_size: 54073781 dataset_size: 110675916 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "news-sp500" ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://edarchimbaud.substack.com - **Repository:** https://github.com/edarchimbaud - **Point of Contact:** contact@edarchimbaud.com ### Dataset Summary The news-sp500 dataset provides news articles related to companies in the S&P 500 index. ### Supported Tasks and Leaderboards The dataset can be used for various natural language processing tasks such as text classification, sentiment analysis, information extraction, etc. It does not have a specific leaderboard associated with it. ### Languages The dataset contains news articles in multiple languages. ## Dataset Structure ### Data Instances The dataset consists of [1563] data instances. ### Data Fields - symbol (string): A string representing the ticker symbol or abbreviation used to identify the company. - body (string): The main content of the news article. - publisher (string): The name of the publisher or news agency. - publish_time (timestamp[ns, tz=GMT]): A timestamp indicating the publication time of the news article in GMT timezone. - title (string): The title or headline of the news article. - url (string): The URL or link to the original news article. - uuid (string): A unique identifier for the news article. ### Data Splits The dataset consists of a single split called train. ## Dataset Creation ### Curation Rationale The news-sp500 dataset was created to provide a collection of news articles related to companies in the S&P 500 index for research and analysis purposes. ### Source Data #### Initial Data Collection and Normalization The data was collected from various online news sources and normalized for consistency. ### Annotations #### Annotation process [N/A] #### Who are the annotators? [N/A] ### Personal and Sensitive Information [N/A] ## Considerations for Using the Data ### Social Impact of Dataset [N/A] ### Discussion of Biases [N/A] ### Other Known Limitations [N/A] ## Additional Information ### Dataset Curators The news-sp500 dataset was collected by https://edarchimbaud.substack.com. 
### Licensing Information The news-sp500 dataset is licensed under the MIT License. ### Citation Information > https://edarchimbaud.substack.com, news-sp500 dataset, GitHub repository, https://github.com/edarchimbaud ### Contributions Thanks to [@edarchimbaud](https://github.com/edarchimbaud) for adding this dataset.
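As a hedged usage sketch (the repository id `edarchimbaud/news-stocks` is taken from this listing, and the ticker value used for filtering is only an example):

```python
from datasets import load_dataset

ds = load_dataset("edarchimbaud/news-stocks", split="train")

# Keep only articles tagged with a single ticker symbol; "AAPL" is an illustrative value.
aapl_news = ds.filter(lambda row: row["symbol"] == "AAPL")

# Print the publication time, title, and link of a few matching articles.
for article in aapl_news.select(range(min(3, len(aapl_news)))):
    print(article["publish_time"], "-", article["title"])
    print(article["url"])
```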
3,984
[ [ -0.044097900390625, -0.03228759765625, 0.002040863037109375, 0.034332275390625, -0.0218048095703125, 0.01110076904296875, -0.01255035400390625, -0.0168304443359375, 0.0535888671875, 0.0207672119140625, -0.0758056640625, -0.05810546875, -0.03582763671875, 0.0...
GATE-engine/vggflowers
2023-06-05T15:12:54.000Z
[ "region:us" ]
GATE-engine
null
null
0
95
2023-06-05T15:12:19
--- dataset_info: features: - name: image dtype: image - name: label dtype: int64 splits: - name: train num_bytes: 452124226.125 num_examples: 5655 - name: validation num_bytes: 89403717.375 num_examples: 1109 - name: test num_bytes: 115124265.875 num_examples: 1425 download_size: 656318272 dataset_size: 656652209.375 --- # Dataset Card for "vggflowers" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
538
[ [ -0.0357666015625, -0.0206756591796875, 0.015045166015625, 0.0244293212890625, -0.003662109375, -0.007076263427734375, 0.00754547119140625, -0.0290985107421875, 0.06036376953125, 0.0255889892578125, -0.07110595703125, -0.052154541015625, -0.04302978515625, -0...
pankajmathur/alpaca_orca
2023-06-26T14:39:11.000Z
[ "task_categories:text-generation", "size_categories:10K<n<100K", "language:en", "license:cc-by-nc-sa-4.0", "region:us" ]
pankajmathur
null
null
18
95
2023-06-24T18:20:35
--- license: cc-by-nc-sa-4.0 task_categories: - text-generation language: - en size_categories: - 10K<n<100K --- Explain-tuned Alpaca dataset (~52K examples) created using approaches from the Orca Research Paper. We leverage all 15 system instructions provided in the Orca Research Paper to generate custom datasets, in contrast to the vanilla instruction-tuning approaches used by the original datasets. This helps student models like [orca_mini_13b](https://huggingface.co/psmathur/orca_mini_13b) learn the thought process from the teacher model, which is ChatGPT (gpt-3.5-turbo-0301 version). Please see how the **System** prompt is added before each **instruction**.
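A rough loading sketch is shown below; note that the column names used (`system`, `instruction`, `input`, `output`) are assumptions, since the card does not list the schema — inspect `column_names` before relying on them.

```python
from datasets import load_dataset

ds = load_dataset("pankajmathur/alpaca_orca", split="train")

# The schema is not documented above, so check the actual columns first.
print(ds.column_names)

example = ds[0]
# Assumed column names, for illustration only; adjust to the printed column_names.
print("System prompt:", example.get("system"))
print("Instruction:", example.get("instruction"))
print("Response:", example.get("output"))
```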
651
[ [ -0.0513916015625, -0.06585693359375, 0.010040283203125, -0.019866943359375, -0.024932861328125, -0.0207672119140625, 0.0098419189453125, -0.037506103515625, 0.0245513916015625, 0.053192138671875, -0.07379150390625, -0.016815185546875, -0.00846099853515625, -...
NischayDnk/bertvsllm_demodatav2
2023-07-23T19:40:44.000Z
[ "region:us" ]
NischayDnk
null
null
0
95
2023-07-23T19:40:42
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
skadewdl3/recipe-nlg-llama2
2023-10-04T07:40:19.000Z
[ "region:us" ]
skadewdl3
null
null
0
95
2023-09-20T07:17:54
--- dataset_info: features: - name: id dtype: int64 - name: title dtype: string - name: ingredients dtype: string - name: directions dtype: string - name: link dtype: string - name: source dtype: string - name: NER dtype: string - name: prompt dtype: string splits: - name: train num_bytes: 3317395276.3463464 num_examples: 2008027 - name: test num_bytes: 368600943.6536536 num_examples: 223115 download_size: 168971675 dataset_size: 3685996220.0 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* --- # Dataset Card for "recipe-nlg-llama2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
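A short, hedged loading sketch using the splits and columns declared above (the idea that the `prompt` column is meant for Llama-2-style fine-tuning is inferred from the dataset name, not stated on the card):

```python
from datasets import load_dataset

ds = load_dataset("skadewdl3/recipe-nlg-llama2")

print(ds)  # DatasetDict with "train" and "test" splits

sample = ds["train"][0]
print("Title:", sample["title"])
print("Ingredients:", sample["ingredients"])
print("Prompt (first 500 chars):", sample["prompt"][:500])
```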
822
[ [ -0.01824951171875, -0.0195159912109375, 0.0154876708984375, 0.03314208984375, -0.01617431640625, 0.0021209716796875, 0.0179443359375, -0.01290130615234375, 0.07537841796875, 0.041900634765625, -0.06549072265625, -0.062744140625, -0.05987548828125, 0.00163841...
LDJnr/LessWrong-Amplify-Instruct
2023-09-26T02:34:28.000Z
[ "task_categories:conversational", "task_categories:question-answering", "task_categories:text-generation", "size_categories:n<1K", "language:en", "license:apache-2.0", "Physics", "Biology", "Math", "Chemistry", "Culture", "Logic", "region:us" ]
LDJnr
null
null
17
95
2023-09-26T01:42:29
--- license: apache-2.0 task_categories: - conversational - question-answering - text-generation language: - en tags: - Physics - Biology - Math - Chemistry - Culture - Logic pretty_name: LessWrong-Amplify-Instruct size_categories: - n<1K --- ## This is the Official LessWrong-Amplify-Instruct dataset. Over 500 multi-turn examples, and many more coming soon! - This leverages the Amplify-Instruct method to extend thousands of scraped Less-Wrong posts into advanced, in-depth multi-turn conversations. - Comprised of over 500 highly filtered multi-turn conversations between GPT-4 and real humans. - Average context length per conversation is over 2,000 tokens. (will measure this more accurately soon) - Synthetically created using a newly developed pipeline that leverages GPT-4 to dynamically role play and inquire as the human and assistant. - Each conversation is optimized to amplify the raw knowledge retrieval of the model and delve deep into obscure and advanced topics. ## Purpose? - This dataset is not intended to be trained on by itself; however, its size and quality can work wonderfully as a supplementary addition to virtually any multi-turn-compatible dataset. I encourage this use; all I ask is that proper credit is given! ## Quality filtering and cleaning. - Extensive cleaning was done to filter out instances of overt AI moralizing or related behaviour, such as "As an AI language model" and "September 2021". ## Credits During the curation process, some steps can be relatively arduous when it comes to executing the best experiments and concepts for filtering examples out. Luckily, there are folks over at NousResearch who helped expedite this process with little to no sacrifice in quality; a big thank you to J-Supha specifically for making these significant contributions. ## Future Plans & How you can help! This is a relatively early build within the grand plans for what I plan to work on in the future! In the near future we plan on leveraging the help of domain-specific expert volunteers to eliminate any mathematically/verifiably incorrect answers from training curations of different types of datasets. If you have at least a bachelor's degree in mathematics, physics, biology, or chemistry and would like to volunteer even just 30 minutes of your time, please contact LDJ on discord!
2,394
[ [ -0.04852294921875, -0.0672607421875, 0.0298004150390625, 0.00569915771484375, -0.0184173583984375, -0.0035114288330078125, -0.01666259765625, -0.03314208984375, 0.0164947509765625, 0.04595947265625, -0.0623779296875, -0.0293121337890625, -0.03363037109375, 0...
mnoukhov/openai_summarize_comparisons_relabel_pythia7b
2023-10-04T19:20:46.000Z
[ "region:us" ]
mnoukhov
null
null
0
95
2023-10-04T19:20:42
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* dataset_info: features: - name: prompt dtype: string - name: chosen dtype: string - name: rejected dtype: string splits: - name: train num_bytes: 157425966 num_examples: 92534 - name: test num_bytes: 8367345 num_examples: 5000 download_size: 21804922 dataset_size: 165793311 --- # Dataset Card for "openai_summarize_comparisons_relabel_pythia7b" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
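A minimal sketch for inspecting the preference pairs, using the `prompt` / `chosen` / `rejected` columns declared above:

```python
from datasets import load_dataset

ds = load_dataset("mnoukhov/openai_summarize_comparisons_relabel_pythia7b", split="train")

# Each row pairs a prompt with a preferred and a rejected summary.
pair = ds[0]
print("Prompt:\n", pair["prompt"][:300])
print("\nChosen summary:\n", pair["chosen"])
print("\nRejected summary:\n", pair["rejected"])
```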
652
[ [ -0.028717041015625, -0.00762176513671875, 0.0007190704345703125, 0.01218414306640625, -0.0234222412109375, -0.01059722900390625, 0.01122283935546875, -0.00467681884765625, 0.0604248046875, 0.0217742919921875, -0.0286865234375, -0.0574951171875, -0.03964233398437...
Cubpaw/voxelgym_5c_42x42_500
2023-10-09T11:26:15.000Z
[ "region:us" ]
Cubpaw
null
null
0
95
2023-10-09T11:26:06
--- dataset_info: features: - name: image dtype: image - name: label dtype: image - name: rgb_label dtype: image - name: path_label dtype: image - name: path_rgb_label dtype: image splits: - name: train num_bytes: 373246.0 num_examples: 400 - name: validation num_bytes: 92510.0 num_examples: 100 download_size: 403202 dataset_size: 465756.0 --- # Dataset Card for "voxelgym_5c_42x42_500" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
579
[ [ -0.055938720703125, -0.006229400634765625, 0.0215911865234375, 0.01438140869140625, -0.01459503173828125, -0.0021800994873046875, 0.0118865966796875, 0.002735137939453125, 0.04620361328125, 0.040679931640625, -0.0518798828125, -0.07073974609375, -0.0295104980468...