id
stringlengths
2
115
lastModified
stringlengths
24
24
tags
list
author
stringlengths
2
42
description
stringlengths
0
6.67k
citation
stringlengths
0
10.7k
likes
int64
0
3.66k
downloads
int64
0
8.89M
created
timestamp[us]
card
stringlengths
11
977k
card_len
int64
11
977k
embeddings
list
CaoHaiNam/summarization_wikilingua_vi
2023-02-12T00:29:41.000Z
[ "region:us" ]
CaoHaiNam
null
null
0
4
2023-02-08T03:00:33
--- dataset_info: features: - name: summary dtype: string - name: document dtype: string - name: __index_level_0__ dtype: int64 splits: - name: train num_bytes: 61182829.83851693 num_examples: 17622 - name: test num_bytes: 6801564.16148307 num_examples: 1959 download_size: 34140923 dataset_size: 67984394.0 --- # Dataset Card for "summarization_wikilingua_vi" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
539
[ [ -0.041717529296875, -0.01163482666015625, 0.0005736351013183594, 0.0188446044921875, -0.0233001708984375, -0.00470733642578125, 0.003719329833984375, 0.0003457069396972656, 0.07330322265625, 0.0249176025390625, -0.05047607421875, -0.0489501953125, -0.04125976562...
IlyaGusev/ru_stackoverflow
2023-03-09T23:48:16.000Z
[ "task_categories:text-generation", "task_categories:question-answering", "size_categories:100K<n<1M", "language:ru", "license:other", "region:us" ]
IlyaGusev
null
null
8
4
2023-02-13T14:32:35
--- license: other task_categories: - text-generation - question-answering language: - ru size_categories: - 100K<n<1M dataset_info: features: - name: question_id dtype: uint32 - name: url dtype: string - name: answer_count dtype: uint32 - name: text_html dtype: string - name: text_markdown dtype: string - name: score dtype: int32 - name: title dtype: string - name: tags sequence: string - name: views dtype: uint64 - name: author dtype: string - name: timestamp dtype: uint64 - name: comments sequence: - name: text dtype: string - name: author dtype: string - name: comment_id dtype: uint32 - name: score dtype: int32 - name: timestamp dtype: uint64 - name: answers sequence: - name: answer_id dtype: uint32 - name: is_accepted dtype: uint8 - name: text_html dtype: string - name: text_markdown dtype: string - name: score dtype: int32 - name: author dtype: string - name: timestamp dtype: uint64 - name: comments sequence: - name: text dtype: string - name: author dtype: string - name: comment_id dtype: uint32 - name: score dtype: int32 - name: timestamp dtype: uint64 splits: - name: train num_bytes: 3013377174 num_examples: 437604 download_size: 670468664 dataset_size: 3013377174 --- # Russian StackOverflow dataset ## Table of Contents - [Table of Contents](#table-of-contents) - [Description](#description) - [Usage](#usage) - [Data Instances](#data-instances) - [Source Data](#source-data) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Licensing Information](#licensing-information) ## Description **Summary:** Dataset of questions, answers, and comments from [ru.stackoverflow.com](https://ru.stackoverflow.com/). **Script:** [create_stackoverflow.py](https://github.com/IlyaGusev/rulm/blob/hf/data_processing/create_stackoverflow.py) **Point of Contact:** [Ilya Gusev](ilya.gusev@phystech.edu) **Languages:** The dataset is in Russian with some programming code. 
## Usage Prerequisites: ```bash pip install datasets zstandard jsonlines pysimdjson ``` Loading: ```python from datasets import load_dataset dataset = load_dataset('IlyaGusev/ru_stackoverflow', split="train") for example in dataset: print(example["text_markdown"]) print() ``` ## Data Instances ``` { "question_id": 11235, "answer_count": 1, "url": "https://ru.stackoverflow.com/questions/11235", "score": 2, "tags": ["c++", "сериализация"], "title": "Извлечение из файла, запись в файл", "views": 1309, "author": "...", "timestamp": 1303205289, "text_html": "...", "text_markdown": "...", "comments": { "text": ["...", "...", "author": ["...", "..."], "comment_id": [11236, 11237], "score": [0, 0], "timestamp": [1303205411, 1303205678] }, "answers": { "answer_id": [11243, 11245], "timestamp": [1303207791, 1303207792], "is_accepted": [1, 0], "text_html": ["...", "..."], "text_markdown": ["...", "..."], "score": [3, 0], "author": ["...", "..."], "comments": { "text": ["...", "..."], "author": ["...", "..."], "comment_id": [11246, 11249], "score": [0, 0], "timestamp": [1303207961, 1303207800] } } } ``` You can use this little helper to unflatten sequences: ```python def revert_flattening(records): fixed_records = [] for key, values in records.items(): if not fixed_records: fixed_records = [{} for _ in range(len(values))] for i, value in enumerate(values): fixed_records[i][key] = value return fixed_records ``` The original JSONL is already unflattened. ## Source Data * The data source is the [Russian StackOverflow](https://ru.stackoverflow.com/) website. * Original XMLs: [ru.stackoverflow.com.7z](https://ia600107.us.archive.org/27/items/stackexchange/ru.stackoverflow.com.7z). * Processing script is [here](https://github.com/IlyaGusev/rulm/blob/hf/data_processing/create_stackoverflow.py). ## Personal and Sensitive Information The dataset is not anonymized, so individuals' names can be found in the dataset. 
Information about the original authors is included in the dataset where possible. ## Licensing Information According to the license of original data, this dataset is distributed under [CC BY-SA 2.5](https://creativecommons.org/licenses/by-sa/2.5/).
4,582
[ [ -0.0265960693359375, -0.0374755859375, 0.0198211669921875, 0.0106964111328125, -0.01485443115234375, 0.01218414306640625, -0.0230712890625, -0.0019073486328125, 0.01715087890625, 0.0240936279296875, -0.03826904296875, -0.05633544921875, -0.018951416015625, 0...
rcds/swiss_doc2doc_ir
2023-07-20T07:33:37.000Z
[ "task_categories:text-classification", "task_ids:entity-linking-classification", "annotations_creators:machine-generated", "language_creators:expert-generated", "multilinguality:multilingual", "size_categories:100K<n<1M", "source_datasets:original", "language:de", "language:fr", "language:it", "...
rcds
null
null
0
4
2023-02-13T15:51:17
--- annotations_creators: - machine-generated language: - de - fr - it language_creators: - expert-generated license: - cc-by-sa-4.0 multilinguality: - multilingual pretty_name: 'Swiss Doc2doc Information Retrieval' size_categories: - 100K<n<1M source_datasets: - original tags: [] task_categories: - text-classification task_ids: - entity-linking-classification --- https://huggingface.co/spaces/huggingface/datasets-tagging # Dataset Card for Swiss Doc2doc Information Retrieval ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary Swiss Doc2doc Information Retrieval is a multilingual, diachronic dataset of 131K Swiss Federal Supreme Court (FSCS) cases annotated with law citations and ruling citations, posing a challenging text classification task. 
As unique label we are using decision_id of cited rulings and uuid of cited law articles, which can be found in the SwissCourtRulingCorpus. We also provide additional metadata, i.e., the publication year, the legal area and the canton of origin per case, to promote robustness and fairness studies on the critical area of legal NLP. ### Supported Tasks and Leaderboards Swiss Doc2Doc IR can be used as information retrieval task using documents in Swiss Legislation (https://huggingface.co/datasets/rcds/swiss_legislation) and Swiss Leading desicions (https://huggingface.co/datasets/rcds/swiss_leading_decisions). ### Languages Switzerland has four official languages with three languages (German 86K, French 30k and Italian 10k) being represented. The decisions are written by the judges and clerks in the language of the proceedings. ## Dataset Structure ### Data Instances ``` { "decision_id": "000127ef-17d2-4ded-8621-c0c962c18fd5", "language": de, "year": 2018, "chamber": "CH_BGer_008", "region": "Federation", "origin_chamber": 47, "origin_court": 8, "origin_canton": 151, "law_area": "social_law", "law_sub_area": , "laws": "['75488867-c001-4eb9-93b9-04264ea91f55', 'e6b06567-1236-4210-adb3-e11c26e497d5', '04bf6369-99cb-41fa-8aff-413679bc8c18', ...], "cited_rulings": "['fe8a76b3-8b0f-4f27-a277-2d887140e7ab', '16fef75e-e8d5-4a51-8230-a9ca3676c8a9', '6d21b282-3b23-41dd-9350-6ba5386df9b1', '302fd9f3-e78a-4a9f-9f8d-cde51fcbdfe7']", "facts": "Sachverhalt: A. A._, geboren 1954, war ab November 2002 als Pflegehilfe im Altersheim C._ angestellt. Am 23. Dezember 2002 meldete sie sich erstmals unter Hinweis auf Depressionen ...", "considerations": "Erwägungen: 1. 1.1. Die Beschwerde kann wegen Rechtsverletzung gemäss Art. 95 und Art. 96 BGG erhoben werden. Das Bundesgericht wendet das ...", "rulings": "Demnach erkennt das Bundesgericht: 1. Die Beschwerde wird abgewiesen. 2. Die Gerichtskosten von Fr. 
800.- werden der Beschwerdeführerin ...", } ``` ### Data Fields ``` decision_id: (str) a unique identifier of the for the document language: (str) one of (de, fr, it) year: (int) the publication year chamber: (str) the chamber of the case region: (str) the region of the case origin_chamber: (str) the chamber of the origin case origin_court: (str) the court of the origin case origin_canton: (str) the canton of the origin case law_area: (str) the law area of the case law_sub_area:(str) the law sub area of the case laws: (str) a list of law ids cited rulings: (str) a list of cited rulings ids facts: (str) the facts of the case considerations: (str) the considerations of the case rulings: (str) the rulings of the case ``` ### Data Splits The dataset was split date-stratisfied - Train: 2002-2015 - Validation: 2016-2017 - Test: 2018-2022 | Language | Subset | Number of Documents (Training/Validation/Test) | |------------|------------|------------------------------------------------| | German | **de** | 86'832 (59'170 / 19'002 / 8'660) | | French | **fr** | 46'203 (30'513 / 10'816 / 4'874) | | Italian | **it** | 8'306 (5'673 / 1'855 / 778) | ## Dataset Creation ### Curation Rationale The dataset was created by Stern et al. (2023). ### Source Data #### Initial Data Collection and Normalization The original data are available at the Swiss Federal Supreme Court (https://www.bger.ch) in unprocessed formats (HTML). The documents were downloaded from the Entscheidsuche portal (https://entscheidsuche.ch) in HTML. #### Who are the source language producers? The original data are published from the Swiss Federal Supreme Court (https://www.bger.ch) in unprocessed formats (HTML). The documents were downloaded from the Entscheidsuche portal (https://entscheidsuche.ch) in HTML. ### Annotations #### Annotation process The decisions have been annotated with the citation ids using html tags and parsers. For more details on laws (rcds/swiss_legislation) and rulings (rcds/swiss_rulings). 
#### Who are the annotators? Stern annotated the citations. Metadata is published by the Swiss Federal Supreme Court (https://www.bger.ch). ### Personal and Sensitive Information The dataset contains publicly available court decisions from the Swiss Federal Supreme Court. Personal or sensitive information has been anonymized by the court before publication according to the following guidelines: https://www.bger.ch/home/juridiction/anonymisierungsregeln.html. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information We release the data under CC-BY-4.0 which complies with the court licensing (https://www.bger.ch/files/live/sites/bger/files/pdf/de/urteilsveroeffentlichung_d.pdf) © Swiss Federal Supreme Court, 2002-2022 The copyright for the editorial content of this website and the consolidated texts, which is owned by the Swiss Federal Supreme Court, is licensed under the Creative Commons Attribution 4.0 International licence. This means that you can re-use the content provided you acknowledge the source and indicate any changes you have made. Source: https://www.bger.ch/files/live/sites/bger/files/pdf/de/urteilsveroeffentlichung_d.pdf ### Citation Information Please cite our [ArXiv-Preprint](https://arxiv.org/abs/2306.09237) ``` @misc{rasiah2023scale, title={SCALE: Scaling up the Complexity for Advanced Language Model Evaluation}, author={Vishvaksenan Rasiah and Ronja Stern and Veton Matoshi and Matthias Stürmer and Ilias Chalkidis and Daniel E. Ho and Joel Niklaus}, year={2023}, eprint={2306.09237}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ### Contributions Thanks to [@Stern5497](https://github.com/stern5497) for adding this dataset.
7,950
[ [ -0.0290069580078125, -0.049163818359375, 0.03582763671875, 0.02178955078125, -0.0296783447265625, -0.02069091796875, -0.0207366943359375, -0.01409912109375, 0.0212860107421875, 0.040496826171875, -0.03948974609375, -0.06976318359375, -0.05615234375, 0.013320...
Piro17/affectnethq
2023-02-16T06:56:12.000Z
[ "region:us" ]
Piro17
null
null
2
4
2023-02-16T06:47:30
--- dataset_info: features: - name: image dtype: image - name: label dtype: class_label: names: '0': anger '1': disgust '2': fear '3': happy '4': neutral '5': sad '6': surprise splits: - name: train num_bytes: 5858852632.634 num_examples: 27823 download_size: 0 dataset_size: 5858852632.634 --- # Dataset Card for "affectnethq" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
574
[ [ -0.034698486328125, -0.01230621337890625, 0.0048675537109375, 0.0202789306640625, -0.005344390869140625, -0.00962066650390625, 0.0224456787109375, -0.0123748779296875, 0.0777587890625, 0.0316162109375, -0.061370849609375, -0.048614501953125, -0.03790283203125, ...
KocLab-Bilkent/turkish-constitutional-court
2023-02-20T19:53:46.000Z
[ "task_categories:text-classification", "annotations_creators:found", "language_creators:found", "multilinguality:monolingual", "size_categories:10M<n<100M", "source_datasets:original", "language:tr", "license:cc-by-4.0", "region:us" ]
KocLab-Bilkent
null
null
0
4
2023-02-18T15:03:21
--- license: cc-by-4.0 task_categories: - text-classification annotations_creators: - found language_creators: - found multilinguality: - monolingual language: - tr size_categories: - 10M<n<100M pretty_name: predicting-turkish-constitutional-court-decisions source_datasets: - original --- ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) - **Homepage:** - **Repository:** https://github.com/koc-lab/law-turk - **Paper:** https://doi.org/10.1016/j.ipm.2021.102684 - **Point of Contact:** [Ceyhun Emre Öztürk](mailto:ceyhun.ozturk@bilkent.edu.tr) ### Dataset Summary This dataset is extracted from the following Github repo, which was created for the journal paper with URL https://www.sciencedirect.com/science/article/abs/pii/S0306457321001692. https://github.com/koc-lab/law-turk The dataset includes 1290 court case decision texts from the Turkish Court of Cassation. Each sample has one label, which is the ruling of the court. The possible rulings are "Violation" and "No violation". 
There are 1290 samples. 1141 of these samples are labeled as "Violation". ### Supported Tasks and Leaderboards Legal Judgment Prediction ### Languages Turkish ## Dataset Structure ### Data Instances The file format is jsonl and three data splits are present (train, validation and test) for each configuration. ### Data Fields The dataset contains the following fields: - `Text`: Legal case decision texts - `Label`: The ruling of the court. - 'Violation': The court decides for the legal case that there is a violation of the constitution. - 'No violation': The court decides for the legal case that there is no violation of the constitution. ### Data Splits The data has been split randomly into 70% train (903), 15% validation (195), 15% test (195). ## Dataset Creation ### Curation Rationale This dataset was created to further the research on developing models for predicting Brazilian court decisions that are also able to predict whether the decision will be unanimous. ### Source Data The data were collected from *Türkiye Cumhuriyeti Anayasa Mahkemesi* (T.C. AYM, Turkish Constitutional Court). #### Initial Data Collection and Normalization The data were collected from the official website of the Turkish Contitutional Court: https://www.anayasa.gov.tr/tr/kararlar-bilgi-bankasi/. #### Who are the source language producers? The source language producers are judges. ### Annotations #### Annotation process The dataset was not annotated. #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information The court decisions might contain sensitive information about individuals. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ### Dataset Curators The data collection was done by Emre Mumcuoğlu ([Email](mailto:mumcuoglu@ee.bilkent.edu.tr)). 
### Licensing Information No licensing information was provided for this dataset. However, please make sure that you use the dataset according to Turkish law. ### Citation Information ``` @article{mumcuoglu21natural, title = {{Natural language processing in law: Prediction of outcomes in the higher courts of Turkey}}, journal = {Information Processing \& Management}, volume = {58}, number = {5}, year = {2021}, author = {Mumcuoğlu, Emre and Öztürk, Ceyhun E. and Ozaktas, Haldun M. and Koç, Aykut} } ```
4,577
[ [ -0.031402587890625, -0.046356201171875, 0.0226593017578125, 0.0175018310546875, -0.025909423828125, -0.0272369384765625, -0.01580810546875, -0.0212249755859375, 0.01172637939453125, 0.049652099609375, -0.035430908203125, -0.0667724609375, -0.057830810546875, ...
Brendan/nlp244_french_snli
2023-02-21T07:32:38.000Z
[ "region:us" ]
Brendan
null
null
1
4
2023-02-21T07:32:09
--- dataset_info: features: - name: premise dtype: string - name: hypothesis dtype: string - name: label dtype: class_label: names: '0': entailment '1': neutral '2': contradiction - name: fr_premise dtype: string - name: fr_hypothesis dtype: string splits: - name: test num_bytes: 2298242 num_examples: 10000 - name: train num_bytes: 122710788 num_examples: 550152 - name: validation num_bytes: 2305275 num_examples: 10000 download_size: 40406975 dataset_size: 127314305 --- # Dataset Card for "nlp244_french_snli" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
759
[ [ -0.03271484375, -0.005863189697265625, 0.01110076904296875, 0.04345703125, -0.0001735687255859375, 0.0041656494140625, -0.002361297607421875, -0.01073455810546875, 0.065673828125, 0.041473388671875, -0.059051513671875, -0.046875, -0.045867919921875, 0.001828...
vietgpt/ted_talks_iwslt_vi
2023-04-03T01:15:01.000Z
[ "task_categories:text-generation", "size_categories:1K<n<10K", "language:vi", "LM", "region:us" ]
vietgpt
null
null
0
4
2023-02-21T21:20:41
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 23236337 num_examples: 1566 download_size: 11586233 dataset_size: 23236337 task_categories: - text-generation language: - vi tags: - LM size_categories: - 1K<n<10K --- # Ted Talks - Source: https://huggingface.co/datasets/ted_talks_iwslt - Num examples: 1,566 - Language: Vietnamese ```python from datasets import load_dataset load_dataset("tdtunlp/ted_talks_iwslt_vi") ```
488
[ [ -0.01299285888671875, -0.04278564453125, 0.0153656005859375, 0.04083251953125, -0.039794921875, 0.01280975341796875, -0.012908935546875, 0.0016241073608398438, 0.0203704833984375, 0.0309906005859375, -0.042724609375, -0.038482666015625, -0.046661376953125, 0...
vietgpt/wiktionary_en
2023-03-30T18:41:38.000Z
[ "task_categories:text-generation", "size_categories:100K<n<1M", "language:en", "LM", "region:us" ]
vietgpt
null
null
0
4
2023-02-21T21:25:18
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 398822275 num_examples: 194570 download_size: 239743805 dataset_size: 398822275 task_categories: - text-generation language: - en tags: - LM size_categories: - 100K<n<1M --- # wiktionary - Source: https://huggingface.co/datasets/bigscience-data/roots_en_wiktionary - Num examples: 194,570 - Language: English ```python from datasets import load_dataset load_dataset("tdtunlp/wiktionary_en") ```
509
[ [ -0.00959014892578125, -0.0267181396484375, 0.0017786026000976562, 0.03564453125, -0.03570556640625, 0.005069732666015625, -0.01110076904296875, -0.0029277801513671875, 0.041015625, 0.0135040283203125, -0.044219970703125, -0.0242767333984375, -0.0384521484375, ...
lansinuote/nlp.1.predict_last_word
2023-02-22T11:26:30.000Z
[ "region:us" ]
lansinuote
null
null
0
4
2023-02-22T06:22:11
--- dataset_info: features: - name: input_ids sequence: int32 - name: attention_mask sequence: int8 - name: labels sequence: int64 splits: - name: train num_bytes: 4628980 num_examples: 39905 - name: validation num_bytes: 98368 num_examples: 848 - name: test num_bytes: 200680 num_examples: 1730 download_size: 0 dataset_size: 4928028 --- # Dataset Card for "1.predict_last_word" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
568
[ [ -0.034149169921875, -0.02691650390625, 0.0246429443359375, 0.004550933837890625, -0.02294921875, -0.00518035888671875, 0.007205963134765625, -0.01213836669921875, 0.056365966796875, 0.041534423828125, -0.067138671875, -0.0623779296875, -0.05853271484375, -0....
vietgpt/wikiquote_en
2023-03-30T18:37:26.000Z
[ "task_categories:text-generation", "size_categories:10K<n<100K", "language:en", "LM", "region:us" ]
vietgpt
null
null
0
4
2023-02-22T09:48:00
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 375096657 num_examples: 31929 download_size: 231902050 dataset_size: 375096657 task_categories: - text-generation language: - en tags: - LM size_categories: - 10K<n<100K --- # wikiquote_filtered - Source: https://huggingface.co/datasets/bigscience-data/roots_en_wikiquote - Num examples: 31,929 - Language: English ```python from datasets import load_dataset load_dataset("tdtunlp/wikiquote_en") ```
514
[ [ -0.02337646484375, -0.04571533203125, 0.0223541259765625, 0.023590087890625, -0.048126220703125, 0.0012769699096679688, -0.0067138671875, -0.0151519775390625, 0.037933349609375, 0.03863525390625, -0.05682373046875, -0.03546142578125, -0.0361328125, 0.0481262...
vietgpt/wikivoyage_en
2023-03-30T18:39:38.000Z
[ "task_categories:text-generation", "size_categories:10K<n<100K", "language:en", "LM", "region:us" ]
vietgpt
null
null
0
4
2023-02-22T09:51:06
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 240563228 num_examples: 24838 download_size: 148244766 dataset_size: 240563228 task_categories: - text-generation language: - en tags: - LM size_categories: - 10K<n<100K --- # wikivoyage_filtered - Source: https://huggingface.co/datasets/bigscience-data/roots_en_wikivoyage - Num examples: 24,838 - Language: English ```python from datasets import load_dataset load_dataset("tdtunlp/wikivoyage_en") ```
517
[ [ -0.03363037109375, -0.0296173095703125, 0.015350341796875, 0.0286865234375, -0.041107177734375, -0.01519012451171875, -0.0089874267578125, -0.00592803955078125, 0.037567138671875, 0.03131103515625, -0.06475830078125, -0.050048828125, -0.04705810546875, 0.036...
philschmid/flanv2
2023-02-22T19:39:49.000Z
[ "license:apache-2.0", "flan", "flan 2022", "flan v2", "arxiv:2301.13688", "region:us" ]
philschmid
null
null
23
4
2023-02-22T19:38:58
--- license: apache-2.0 tags: - flan - flan 2022 - flan v2 pretty_name: Flan v2 duplicated_from: SirNeural/flan_v2 --- # Fork of [SirNeural/flan_v2](https://huggingface.co/datasets/SirNeural/flan_v2) just in case it gets deleted. # Dataset Card for Flan V2 ## Dataset Description - **Homepage:** https://ai.googleblog.com/2023/02/the-flan-collection-advancing-open.html - **Repository:** https://github.com/google-research/FLAN/tree/main/flan/v2 - **Paper:** https://arxiv.org/abs/2301.13688 - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This is a processed version of the Flan V2 dataset. I'm not affiliated with the creators, I'm just releasing the files in an easier-to-access format after processing. The authors of the Flan Collection recommend experimenting with different mixing ratio's of tasks to get optimal results downstream. This current version I've processed is missing a few datasets compared to the main branch of the flan v2 repo: - cs-en WMT translation task requires manual download and I wasn't able to get the credentials - q_re_cc dataset preprocessing for the dialog task wasn't working - These are minor hits to the total size of the collection (orders of MB compared to GB) but once those are fixed I will upload a complete version. ## Dataset Structure ### Data Instances Flan 2021 (flan), P3 (t0), Super-Natural Instructions (niv2), Chain-of-thought (cot), and Dialog (dialog) ### Data Fields Instruction data comes in a few formats: - Few Shot (fs) - Zero Shot (zs) - Options Provided in context (i.e. multiple choice pick one) (opt) - No Options Provided (noopt) Each combination of the above tasks + formats are saved as a JSONL with following schema `{"input": ..., "target": ..., "task": ...}` ### Data Splits Everything is saved as a train split
1,819
[ [ -0.0255889892578125, -0.03997802734375, 0.0178375244140625, 0.0028820037841796875, -0.0085601806640625, -0.01114654541015625, -0.0224761962890625, -0.047576904296875, 0.0391845703125, 0.046478271484375, -0.06549072265625, -0.015533447265625, -0.035369873046875, ...
jerteh/SrpELTeC
2023-09-10T06:13:01.000Z
[ "size_categories:1M<n<10M", "language:sr", "license:cc-by-4.0", "region:us" ]
jerteh
null
null
1
4
2023-02-24T17:41:53
--- license: cc-by-4.0 language: - sr field: "sents" pretty_name: Serbian Literary Text Collection size_categories: - 1M<n<10M --- SrpELTeC is a corpus of old Serbian novels for the first time published in the period 1840-1920. years of digitized within COST ACTION CO16204: Distant Reading for European Literary History, 2018-2022. The corpus includes 120 novels with 5,263.071 words, 22700 pages, 2557 chapters, 158,317 passages, 567 songs, 2972 verses, 803 segments in foreign language and 949 mentioned works. Dataset is constituted of JSON files, where the textual sentences are located in the "sents" attribute of the object root and can be obtianed via: ```python from json import load with open("ELTeC.json") as jf: sentences = load(jf)["sents"] ```
765
[ [ -0.00006175041198730469, -0.01788330078125, 0.0229034423828125, 0.0184326171875, -0.01393890380859375, -0.01006317138671875, -0.016510009765625, -0.033477783203125, 0.0305023193359375, 0.0428466796875, -0.063232421875, -0.042510986328125, -0.0408935546875, 0...
gokuls/wiki_book_corpus_raw_dataset_medium
2023-02-25T20:10:20.000Z
[ "region:us" ]
gokuls
null
null
0
4
2023-02-25T19:38:11
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 12250082590.5 num_examples: 40231449 download_size: 7774316723 dataset_size: 12250082590.5 --- # Dataset Card for "wiki_book_corpus_raw_dataset_medium" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
396
[ [ -0.037078857421875, -0.021026611328125, -0.0012578964233398438, 0.00618743896484375, -0.022613525390625, -0.00498199462890625, -0.0207366943359375, -0.004802703857421875, 0.053497314453125, 0.037353515625, -0.03802490234375, -0.061492919921875, -0.041015625, ...
trelent/the-stack-dedup-python-docstrings-1.0-percent-unified
2023-02-26T01:33:38.000Z
[ "region:us" ]
trelent
null
null
0
4
2023-02-26T01:31:05
--- dataset_info: features: - name: body_hash dtype: string - name: body dtype: string - name: docstring dtype: string - name: path dtype: string - name: name dtype: string - name: repository_name dtype: string - name: repository_stars dtype: float64 - name: lang dtype: string - name: body_without_docstring dtype: string - name: unified dtype: string splits: - name: train num_bytes: 680876286 num_examples: 237074 download_size: 247316903 dataset_size: 680876286 --- # Dataset Card for "the-stack-dedup-python-docstrings-1.0-percent-unified" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
755
[ [ -0.03509521484375, -0.0276336669921875, -0.01035308837890625, 0.0172576904296875, -0.0205230712890625, -0.01154327392578125, 0.00838470458984375, 0.00504302978515625, 0.04986572265625, 0.0266571044921875, -0.04339599609375, -0.06231689453125, -0.035675048828125,...
marianna13/biorxiv
2023-03-02T08:01:54.000Z
[ "region:us" ]
marianna13
null
null
2
4
2023-03-02T08:00:02
Entry not found
15
[ [ -0.021392822265625, -0.01494598388671875, 0.05718994140625, 0.028839111328125, -0.0350341796875, 0.046539306640625, 0.052490234375, 0.00507354736328125, 0.051361083984375, 0.01702880859375, -0.052093505859375, -0.01494598388671875, -0.06036376953125, 0.03790...
biglam/european_art
2023-08-03T09:39:40.000Z
[ "task_categories:object-detection", "task_categories:image-classification", "size_categories:10K<n<100K", "license:cc-by-nc-2.0", "lam", "art", "historical", "arxiv:2211.01226", "region:us" ]
biglam
Yalt AI Tabular Dataset
@dataset{clerice_thibault_2022_6827706, author = {Clérice, Thibault}, title = {YALTAi: Tabular Dataset}, month = jul, year = 2022, publisher = {Zenodo}, version = {1.0.0}, doi = {10.5281/zenodo.6827706}, url = {https://doi.org/10.5281/zenodo.6827706} }
3
4
2023-03-04T15:05:33
--- dataset_info: - config_name: raw features: - name: image dtype: image - name: source dtype: string - name: width dtype: int16 - name: height dtype: int16 - name: dept dtype: int8 - name: segmented dtype: int8 - name: objects list: - name: name dtype: class_label: names: '0': zebra '1': tree '2': nude '3': crucifixion '4': scroll '5': head '6': swan '7': shield '8': lily '9': mouse '10': knight '11': dragon '12': horn '13': dog '14': palm '15': tiara '16': helmet '17': sheep '18': deer '19': person '20': sword '21': rooster '22': bear '23': halo '24': lion '25': monkey '26': prayer '27': crown of thorns '28': elephant '29': zucchetto '30': unicorn '31': holy shroud '32': cat '33': apple '34': banana '35': chalice '36': bird '37': eagle '38': pegasus '39': crown '40': camauro '41': saturno '42': arrow '43': dove '44': centaur '45': horse '46': hands '47': skull '48': orange '49': monk '50': trumpet '51': key of heaven '52': fish '53': cow '54': angel '55': devil '56': book '57': stole '58': butterfly '59': serpent '60': judith '61': mitre '62': banner '63': donkey '64': shepherd '65': boat '66': god the father '67': crozier '68': jug '69': lance - name: pose dtype: class_label: names: '0': stand '1': sit '2': partial '3': Unspecified '4': squats '5': lie '6': bend '7': fall '8': walk '9': push '10': pray '11': undefined '12': kneel '13': unrecognize '14': unknown '15': other '16': ride - name: diffult dtype: int32 - name: xmin dtype: float64 - name: ymin dtype: float64 - name: xmax dtype: float64 - name: ymax dtype: float64 splits: - name: train num_bytes: 9046918 num_examples: 15156 download_size: 18160510195 dataset_size: 9046918 - config_name: coco features: - name: image dtype: image - name: source dtype: string - name: width dtype: int16 - name: height dtype: int16 - name: dept dtype: int8 - name: segmented dtype: int8 - name: objects list: - name: category_id dtype: class_label: names: '0': zebra '1': tree '2': nude '3': crucifixion '4': scroll '5': head 
'6': swan '7': shield '8': lily '9': mouse '10': knight '11': dragon '12': horn '13': dog '14': palm '15': tiara '16': helmet '17': sheep '18': deer '19': person '20': sword '21': rooster '22': bear '23': halo '24': lion '25': monkey '26': prayer '27': crown of thorns '28': elephant '29': zucchetto '30': unicorn '31': holy shroud '32': cat '33': apple '34': banana '35': chalice '36': bird '37': eagle '38': pegasus '39': crown '40': camauro '41': saturno '42': arrow '43': dove '44': centaur '45': horse '46': hands '47': skull '48': orange '49': monk '50': trumpet '51': key of heaven '52': fish '53': cow '54': angel '55': devil '56': book '57': stole '58': butterfly '59': serpent '60': judith '61': mitre '62': banner '63': donkey '64': shepherd '65': boat '66': god the father '67': crozier '68': jug '69': lance - name: image_id dtype: string - name: area dtype: int64 - name: bbox sequence: float32 length: 4 - name: segmentation list: list: float32 - name: iscrowd dtype: bool - name: image_id dtype: string splits: - name: train num_bytes: 8285204 num_examples: 15156 download_size: 18160510195 dataset_size: 8285204 license: cc-by-nc-2.0 task_categories: - object-detection - image-classification tags: - lam - art - historical pretty_name: 'DEArt: Dataset of European Art' size_categories: - 10K<n<100K --- # Dataset Card for DEArt: Dataset of European Art ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for 
Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** https://doi.org/10.5281/zenodo.6984525 - **Paper:** https://arxiv.org/abs/2211.01226 - **Leaderboard:** - **Point of Contact:** ### Dataset Summary > DEArt is an object detection and pose classification dataset meant to be a reference for paintings between the XIIth and the XVIIIth centuries. It contains more than 15000 images, about 80% non-iconic, aligned with manual annotations for the bounding boxes identifying all instances of 69 classes as well as 12 possible poses for boxes identifying human-like objects. Of these, more than 50 classes are cultural heritage specific and thus do not appear in other datasets; these reflect imaginary beings, symbolic entities and other categories related to art. ### Supported Tasks and Leaderboards - `object-detection`: This dataset can be used to train or evaluate models for object-detection on historical document images. - `image-classification`: This dataset can be used for image classification tasks by using only the labels and not the bounding box information ## Dataset Structure This dataset has two configurations. These configurations both cover the same data and annotations but provide these annotations in different forms to make it easier to integrate the data with existing processing pipelines. - The first configuration, `raw, uses the data's original format. - The second configuration converts the annotations into a format that is closer to the `COCO` annotation format. 
This is done to make it easier to work with the [`image_processors`](https://huggingface.co/docs/transformers/main_classes/image_processor) (formerly known as`feature_extractor`s) from the `Transformers` models for object detection, which expects data to be in a COCO-style format. ### Data Instances An instance from the `raw` config: ```python {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=1019x1680>, 'source': 'Europeana Collection', 'width': 1019, 'height': 1680, 'dept': 3, 'segmented': None, 'objects': [{'name': 40, 'pose': 3, 'diffult': 0, 'xmin': 259.0, 'ymin': 166.0, 'xmax': 679.0, 'ymax': 479.0}, {'name': 19, 'pose': 2, 'diffult': 0, 'xmin': 115.0, 'ymin': 354.0, 'xmax': 882.0, 'ymax': 1168.0}, {'name': 15, 'pose': 3, 'diffult': 0, 'xmin': 445.0, 'ymin': 1170.0, 'xmax': 579.0, 'ymax': 1302.0}, {'name': 51, 'pose': 3, 'diffult': 0, 'xmin': 354.0, 'ymin': 1196.0, 'xmax': 445.0, 'ymax': 1330.0}, {'name': 51, 'pose': 3, 'diffult': 0, 'xmin': 580.0, 'ymin': 1203.0, 'xmax': 701.0, 'ymax': 1326.0}, {'name': 57, 'pose': 3, 'diffult': 0, 'xmin': 203.0, 'ymin': 642.0, 'xmax': 882.0, 'ymax': 1172.0}]} ``` An instance from the `coco` config: ```python {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=1019x1680>, 'source': 'Europeana Collection', 'width': 1019, 'height': 1680, 'dept': 3, 'segmented': None, 'image_id': '0', 'annotations': [{'category_id': 40, 'image_id': '0', 'area': 131460, 'bbox': [259.0, 166.0, 420.0, 313.0], 'segmentation': [], 'iscrowd': False}, {'category_id': 19, 'image_id': '0', 'area': 624338, 'bbox': [115.0, 354.0, 767.0, 814.0], 'segmentation': [], 'iscrowd': False}, {'category_id': 15, 'image_id': '0', 'area': 17688, 'bbox': [445.0, 1170.0, 134.0, 132.0], 'segmentation': [], 'iscrowd': False}, {'category_id': 51, 'image_id': '0', 'area': 12194, 'bbox': [354.0, 1196.0, 91.0, 134.0], 'segmentation': [], 'iscrowd': False}, {'category_id': 51, 'image_id': '0', 'area': 14883, 'bbox': [580.0, 1203.0, 121.0, 123.0], 
'segmentation': [], 'iscrowd': False}, {'category_id': 57, 'image_id': '0', 'area': 359870, 'bbox': [203.0, 642.0, 679.0, 530.0], 'segmentation': [], 'iscrowd': False}]} ``` ### Data Fields The fields for the COCO config: - `image`: The Image being annotated - `source`: source of the image i.e.'Europeana Collection' - `width`: width of the image - `height`: height of the image - `dept`: number of channels in the image - `segmented`: Whether the image has been segmented - `image_id`: ID for the image - `annotations`: annotations in coco format, consisting of a list containing dictionaries with the following keys: - `bbox`: bounding boxes for the images - `category_id`: a label for the image - `image_id`: id for the image - `iscrowd`: COCO `iscrowd` flag - `segmentation`: COCO segmentation annotations (empty in this case but kept for compatibility with other processing scripts) ### Data Splits The dataset doesn't define set splits, so only a train split is provided. The paper associated with the dataset does discuss a train and validation split, but it doesn't appear this split was shared with the dataset or associated paper. ## Dataset Creation ### Curation Rationale The creators of the dataset authors outline some of their motivations for creating the dataset in the abstract for their paper: > Large datasets that were made publicly available to the research community over the last 20 years have been a key enabling factor for the advances in deep learning algorithms for NLP or computer vision. These datasets are generally pairs of aligned image / manually annotated metadata, where images are photographs of everyday life. Scholarly and historical content, on the other hand, treat subjects that are not necessarily popular to a general audience, they may not always contain a large number of data points, and new data may be difficult or impossible to collect. 
Some exceptions do exist, for instance, scientific or health data, but this is not the case for cultural heritage (CH). The poor performance of the best models in computer vision - when tested over artworks - coupled with the lack of extensively annotated datasets for CH, and the fact that artwork images depict objects and actions not captured by photographs, indicate that a CH-specific dataset would be highly valuable for this community. We propose DEArt, at this point primarily an object detection and pose classification dataset meant to be a reference for paintings between the XIIth and the XVIIIth centuries. It contains more than 15000 images, about 80% non-iconic, aligned with manual annotations for the bounding boxes identifying all instances of 69 classes as well as 12 possible poses for boxes identifying human-like objects. Of these, more than 50 classes are CH-specific and thus do not appear in other datasets; these reflect imaginary beings, symbolic entities and other categories related to art. Additionally, existing datasets do not include pose annotations. ### Source Data The source data comes from several cultural heritage institutions that have shared openly licenced images. The dictionary below shows the institutions and the frequency with which they are the provider of images in the dataset. 
```python {'National Museum in Warsaw': 2030, 'Europeana Collection': 1991, 'The Art Institute of Chicago': 1237, 'The Metropolitan Museum of Art': 1218, 'Rijksmuseum': 1066, 'National Gallery of Art': 871, 'Philadelphia Museum of Art': 774, 'WikiArt': 687, 'National museum in Krakow': 661, 'National Gallery of Denmark': 660, 'British Museum': 618, 'Victoria and Albert Museum': 561, 'Paul Mellon Centre': 391, 'National Gallery of Scotland': 384, 'Yale University Art Gallery': 376, 'Museo Nacional Thyssen-Bornemisza': 332, 'Harvard Art Museum': 279, 'The National Museum of Norvay': 270, 'LACMA': 262, 'The Cleveland Museum of Art': 242, 'The Leiden Collection': 159, 'The Clark Museum': 77, 'Pharos': 6, 'Wikimedia Commons': 2, 'Wikipedia': 1, 'Unknown': 1} ``` #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
15,283
[ [ -0.053009033203125, -0.0345458984375, 0.00547027587890625, 0.0005965232849121094, -0.031829833984375, -0.0016069412231445312, -0.0060272216796875, -0.057037353515625, 0.03387451171875, 0.03485107421875, -0.039764404296875, -0.07080078125, -0.0384521484375, 0...
its5Q/habr_qna
2023-03-11T04:43:35.000Z
[ "task_categories:text-generation", "task_categories:question-answering", "task_ids:language-modeling", "task_ids:open-domain-qa", "annotations_creators:crowdsourced", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language...
its5Q
null
null
2
4
2023-03-09T14:02:50
--- annotations_creators: - crowdsourced language: - ru language_creators: - crowdsourced license: - cc0-1.0 multilinguality: - monolingual pretty_name: Habr QnA size_categories: - 100K<n<1M source_datasets: - original tags: [] task_categories: - text-generation - question-answering task_ids: - language-modeling - open-domain-qa --- # Dataset Card for Habr QnA ## Table of Contents - [Dataset Card for Habr QnA](#dataset-card-for-habr-qna) - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) ## Dataset Description - **Repository:** https://github.com/its5Q/habr-qna-parser ### Dataset Summary This is a dataset of questions and answers scraped from [Habr QnA](https://qna.habr.com/). There are 723430 asked questions with answers, comments and other metadata. ### Languages The dataset is mostly Russian with source code in different languages. ## Dataset Structure ### Data Fields Data fields can be previewed on the dataset card page. ### Data Splits All 723430 examples are in the train split, there is no validation split. ## Dataset Creation The data was scraped with a script, located in [my GitHub repository](https://github.com/its5Q/habr-qna-parser) ## Additional Information ### Dataset Curators - https://github.com/its5Q
1,618
[ [ -0.033447265625, -0.024810791015625, 0.00771331787109375, 0.01261138916015625, -0.038665771484375, 0.0214080810546875, 0.0209197998046875, -0.00794219970703125, 0.051727294921875, 0.032318115234375, -0.046966552734375, -0.054656982421875, -0.010406494140625, ...
pcuenq/face_synthetics
2023-03-13T09:37:52.000Z
[ "region:us" ]
pcuenq
null
null
1
4
2023-03-12T21:37:41
--- dataset_info: features: - name: image dtype: image - name: image_seg dtype: image - name: landmarks dtype: string splits: - name: train num_bytes: 33730885609.0 num_examples: 100000 download_size: 34096881533 dataset_size: 33730885609.0 --- # Dataset Card for `face_synthetics` This is a copy of [Microsoft FaceSynthetics dataset](https://github.com/microsoft/FaceSynthetics), uploaded to Hugging Face Datasets for convenience. Please, refer to the original [license](LICENSE.txt), which we replicate in this repo. The dataset was uploaded using the following code, which assumes the original `zip` file was uncompressed to `/data/microsoft_face_synthetics`: ```Python from datasets import Dataset from pathlib import Path from PIL import Image face_synthetics = Path("/data/microsoft_face_synthetics") def entry_for_id(entry_id): if type(entry_id) == int: entry_id = f"{entry_id:06}" image = Image.open(face_synthetics/f"{entry_id}.png") image_seg = Image.open(face_synthetics/f"{entry_id}_seg.png") with open(face_synthetics/f"{entry_id}_ldmks.txt") as f: landmarks = f.read() return { "image": image, "image_seg": image_seg, "landmarks": landmarks, } def generate_entries(): for x in range(100000): yield entry_for_id(x) ds = Dataset.from_generator(generate_entries) ds.push_to_hub('pcuenq/face_synthetics') ``` Note that `image_seg`, the segmented images, appear to be black because each pixel contains a number between `0` to `18` corresponging to the different categories, see the [original README]() for details. We haven't created visualization code yet.
1,688
[ [ -0.0280914306640625, -0.022796630859375, 0.02435302734375, 0.030548095703125, -0.026092529296875, 0.01001739501953125, 0.001739501953125, -0.0279083251953125, 0.03851318359375, 0.02703857421875, -0.072021484375, -0.047393798828125, -0.024444580078125, 0.0112...
mweiss/mnist_ambiguous
2023-03-16T19:12:38.000Z
[ "task_categories:image-classification", "annotations_creators:machine-generated", "size_categories:10K<n<100K", "source_datasets:extended|mnist", "language:en", "license:cc-by-sa-3.0", "arxiv:2207.10495", "region:us" ]
mweiss
The images were created such that they have an unclear ground truth, i.e., such that they are similar to multiple - but not all - of the datasets classes. Robust and uncertainty-aware models should be able to detect and flag these ambiguous images. As such, the dataset should be merged / mixed with the original dataset and we provide such 'mixed' splits for convenience. Please refer to the dataset card for details.
@misc{https://doi.org/10.48550/arxiv.2207.10495, doi = {10.48550/ARXIV.2207.10495}, url = {https://arxiv.org/abs/2207.10495}, author = {Weiss, Michael and Gómez, André García and Tonella, Paolo}, title = {A Forgotten Danger in DNN Supervision Testing: Generating and Detecting True Ambiguity}, publisher = {arXiv}, year = {2022} }
0
4
2023-03-16T08:44:53
--- license: cc-by-sa-3.0 task_categories: - image-classification language: - en pretty_name: mnist_ambigous size_categories: - 10K<n<100K source_datasets: - extended|mnist annotations_creators: - machine-generated --- # Mnist-Ambiguous This dataset contains mnist-like images, but with an unclear ground truth. For each image, there are two classes which could be considered true. Robust and uncertainty-aware DNNs should thus detect and flag these issues. ### Features Same as mnist, the supervised dataset has an `image` (28x28 int array) and a `label` (int). Additionally, the following features are exposed for your convenience: - `text_label` (str): A textual representation of the probabilistic label, e.g. `p(0)=0.54, p(5)=0.46` - `p_label` (list of floats): Ground-Truth probabilities for each class (two nonzero values for our ambiguous images) - `is_ambiguous` (bool): Flag indicating if this is one of our ambiguous images (see 'splits' below) ### Splits We provide four splits: - `test`: 10'000 ambiguous images - `train`: 10'000 ambiguous images - adding ambiguous images to the training set makes sure test-time ambiguous images are in-distribution. - `test_mixed`: 20'000 images, consisting of the (shuffled) concatenation of our ambiguous `test` set and the nominal mnist test set by LeCun et. al., - `train_mixed`: 70'000 images, consisting of the (shuffled) concatenation of our ambiguous `training` and the nominal training set. Note that the ambiguous test images are highly ambiguous (i.e., the two classes have very similar ground truth likelihoods), the training set images allow for more unbalanced ambiguity. This is to make the training set more closely connected to the nominal data, while still keeping the test set clearly ambiguous. For research targeting explicitly aleatoric uncertainty, we recommend training the model using `train_mixed`. Otherwise, our `test` set will lead to both epistemic and aleatoric uncertainty. 
In related literature, such 'mixed' splits are sometimes denoted as *dirty* splits. ### Assessment and Validity For a brief discussion of the strength and weaknesses of this dataset, including a quantitative comparison to the (only) other ambiguous datasets available in the literature, we refer to our paper. ### Paper Pre-print here: [https://arxiv.org/abs/2207.10495](https://arxiv.org/abs/2207.10495) Citation: ``` @misc{https://doi.org/10.48550/arxiv.2207.10495, doi = {10.48550/ARXIV.2207.10495}, url = {https://arxiv.org/abs/2207.10495}, author = {Weiss, Michael and Gómez, André García and Tonella, Paolo}, title = {A Forgotten Danger in DNN Supervision Testing: Generating and Detecting True Ambiguity}, publisher = {arXiv}, year = {2022} } ``` ### License As this is a derivative work of mnist, which is CC-BY-SA 3.0 licensed, our dataset is released using the same license.
2,874
[ [ -0.0267181396484375, -0.05157470703125, 0.0262603759765625, 0.0165863037109375, -0.0226287841796875, 0.00011557340621948242, 0.0023193359375, -0.02825927734375, 0.006927490234375, 0.0184783935546875, -0.043731689453125, -0.0328369140625, -0.057403564453125, ...
mweiss/fashion_mnist_ambiguous
2023-03-16T12:43:23.000Z
[ "task_categories:image-classification", "annotations_creators:machine-generated", "size_categories:10K<n<100K", "source_datasets:extended|mnist", "language:en", "license:mit", "arxiv:2207.10495", "region:us" ]
mweiss
The images were created such that they have an unclear ground truth, i.e., such that they are similar to multiple - but not all - of the datasets classes. Robust and uncertainty-aware models should be able to detect and flag these ambiguous images. As such, the dataset should be merged / mixed with the original dataset and we provide such 'mixed' splits for convenience. Please refer to the dataset card for details.
@misc{https://doi.org/10.48550/arxiv.2207.10495, doi = {10.48550/ARXIV.2207.10495}, url = {https://arxiv.org/abs/2207.10495}, author = {Weiss, Michael and Gómez, André García and Tonella, Paolo}, title = {A Forgotten Danger in DNN Supervision Testing: Generating and Detecting True Ambiguity}, publisher = {arXiv}, year = {2022} }
0
4
2023-03-16T12:22:41
--- license: mit task_categories: - image-classification language: - en pretty_name: mnist_ambigous size_categories: - 10K<n<100K source_datasets: - extended|mnist annotations_creators: - machine-generated --- # Fashion-Mnist-Ambiguous This dataset contains fashion-mnist-like images, but with an unclear ground truth. For each image, there are two classes that could be considered true. Robust and uncertainty-aware DNNs should thus detect and flag these issues. ### Features Same as fashion-mnist, the supervised dataset has an `image` (28x28 int array) and a `label` (int). Additionally, the following features are exposed for your convenience: - `text_label` (str): A textual representation of the probabilistic label, e.g. `p(Pullover)=0.54, p(Shirt)=0.46` - `p_label` (list of floats): Ground-Truth probabilities for each class (two nonzero values for our ambiguous images) - `is_ambiguous` (bool): Flag indicating if this is one of our ambiguous images (see 'splits' below) ### Splits We provide four splits: - `test`: 10'000 ambiguous images - `train`: 10'000 ambiguous images - adding ambiguous images to the training set makes sure test-time ambiguous images are in-distribution. - `test_mixed`: 20'000 images, consisting of the (shuffled) concatenation of our ambiguous `test` set and the nominal *original* fashion mnist test set - `train_mixed`: 70'000 images, consisting of the (shuffled) concatenation of our ambiguous `training` and the nominal training set. Note that the ambiguous train images are highly ambiguous (i.e., the two classes have very similar ground truth likelihoods), the training set images allow for more unbalanced ambiguity. This is to make the training set more closely connected to the nominal data, while still keeping the test set clearly ambiguous. For research targeting explicitly aleatoric uncertainty, we recommend training the model using `train_mixed`. Otherwise, our `test` set will lead to both epistemic and aleatoric uncertainty. 
In related literature, such 'mixed' splits are sometimes denoted as *dirty* splits. ### Assessment and Validity For a brief discussion of the strength and weaknesses of this dataset we refer to our paper. Please note that our images are not typically realistic - i.e., while they represent multiple classes and thus have an ambiguous ground truth, they do not resemble real-world photographs. ### Paper Pre-print here: [https://arxiv.org/abs/2207.10495](https://arxiv.org/abs/2207.10495) Citation: ``` @misc{https://doi.org/10.48550/arxiv.2207.10495, doi = {10.48550/ARXIV.2207.10495}, url = {https://arxiv.org/abs/2207.10495}, author = {Weiss, Michael and Gómez, André García and Tonella, Paolo}, title = {A Forgotten Danger in DNN Supervision Testing: Generating and Detecting True Ambiguity}, publisher = {arXiv}, year = {2022} } ``` ### Related Datasets - Ambiguous Mnist Dataset: [https://huggingface.co/datasets/mweiss/mnist_ambiguous](https://huggingface.co/datasets/mweiss/mnist_ambiguous) - Corrupted Fashion-Mnist Dataset: [https://huggingface.co/datasets/mweiss/fashion_mnist_corrupted](https://huggingface.co/datasets/mweiss/fashion_mnist_corrupted)
3,178
[ [ -0.0254364013671875, -0.0535888671875, 0.0243988037109375, 0.020782470703125, -0.0235748291015625, 0.0020294189453125, 0.0017900466918945312, -0.033660888671875, 0.00804901123046875, 0.01116180419921875, -0.057159423828125, -0.03741455078125, -0.05078125, 0....
SKyu/my-image-captioning-dataset
2023-03-20T06:24:06.000Z
[ "size_categories:1K<n<10K", "region:us" ]
SKyu
null
null
0
4
2023-03-20T05:45:04
--- dataset_info: features: - name: image dtype: image - name: prompt dtype: string splits: - name: train num_bytes: 417257082.9 num_examples: 3100 download_size: 480865927 dataset_size: 417257082.9 pretty_name: jl_pics size_categories: - 1K<n<10K --- # Dataset Card for "my-image-captioning-dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
463
[ [ -0.04901123046875, -0.004383087158203125, 0.00873565673828125, 0.0220794677734375, -0.024993896484375, 0.01268768310546875, 0.0255889892578125, -0.0021686553955078125, 0.06292724609375, 0.0457763671875, -0.05450439453125, -0.0413818359375, -0.044952392578125, ...
thegoodfellas/mc4-pt-cleaned
2023-04-13T13:35:19.000Z
[ "task_categories:fill-mask", "task_categories:text-generation", "size_categories:10M<n<100M", "language:pt", "license:apache-2.0", "region:us" ]
thegoodfellas
null
null
1
4
2023-03-20T14:06:42
--- license: apache-2.0 task_categories: - fill-mask - text-generation language: - pt size_categories: - 10M<n<100M --- ## Description This is a clenned version of AllenAI mC4 PtBR section. The original dataset can be found here https://huggingface.co/datasets/allenai/c4 ## Clean procedure We applied the same clenning procedure as explained here: https://gitlab.com/yhavinga/c4nlpreproc.git The repository offers two strategies. The first one, found in the main.py file, uses pyspark to create a dataframe that can both clean the text and create a pseudo mix on the entire dataset. We found this strategy clever, but it is time/resource-consuming. To overcome this we jumped into the second approach consisting in leverage the singlefile.py script and parallel all together. We did the following: ``` GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/datasets/allenai/c4 cd c4 git lfs pull --include "multilingual/c4-pt.*.json.gz" ls c4-nl* | parallel --gnu --jobs 96 --progress python ~/c4nlpreproc/singlefile.py {} ``` Be advice you should install parallel first if you want to reproduce this dataset, or to create another in a different language. ## Dataset Structure We kept the same structure as the original, so it is like this: ``` { 'timestamp': '2020-02-22T22:24:31Z', 'url': 'https://url here', 'text': 'the content' } ``` ## Considerations for Using the Data We do not perform any procedure to remove bad words, vulgarity, or profanity. it must be considered that model trained on this scraped corpus will inevitably reflect biases present in blog articles and comments on the Internet. This makes the corpus especially interesting in the context of studying data biases and how to limit their impacts.
1,749
[ [ -0.0261383056640625, -0.054779052734375, 0.0220947265625, 0.0274658203125, -0.0213623046875, -0.00884246826171875, -0.01165771484375, -0.02691650390625, 0.0479736328125, 0.038330078125, -0.04327392578125, -0.040557861328125, -0.035888671875, 0.03408813476562...
mrojas/task1a
2023-03-20T16:07:43.000Z
[ "region:us" ]
mrojas
null
null
0
4
2023-03-20T15:52:09
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
nastyboget/synthetic_cyrillic
2023-03-23T18:45:21.000Z
[ "task_categories:image-to-text", "size_categories:100K<n<1M", "language:ru", "license:mit", "region:us" ]
nastyboget
null
null
0
4
2023-03-22T08:18:18
--- license: mit task_categories: - image-to-text language: - ru size_categories: - 100K<n<1M --- Dataset generated using handwritten fonts ========================================= Number of images: 300000 Sources: * [Handwriting generation code](https://github.com/NastyBoget/HandwritingGeneration) The code was executed with `cyrillic` option (more augmentations)
371
[ [ -0.004001617431640625, -0.018646240234375, 0.016693115234375, 0.00962066650390625, -0.04803466796875, 0.007472991943359375, 0.0062408447265625, -0.01499176025390625, 0.01299285888671875, 0.06390380859375, -0.058013916015625, -0.057891845703125, -0.03768920898437...
cahya/instructions-id-small
2023-03-22T09:28:34.000Z
[ "region:us" ]
cahya
null
null
0
4
2023-03-22T09:28:19
--- dataset_info: features: - name: id dtype: int64 - name: text dtype: string splits: - name: train num_bytes: 48844.8 num_examples: 90 - name: test num_bytes: 2713.6 num_examples: 5 - name: validation num_bytes: 2713.6 num_examples: 5 download_size: 36845 dataset_size: 54272.0 --- # Dataset Card for "instructions-id-small" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
509
[ [ -0.03570556640625, -0.0282440185546875, 0.030120849609375, 0.014892578125, -0.0192108154296875, -0.02203369140625, 0.00939178466796875, 0.004730224609375, 0.057220458984375, 0.026092529296875, -0.07159423828125, -0.050872802734375, -0.032379150390625, -0.014...
qanastek/MORFITT
2023-08-25T10:41:05.000Z
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:fr", "license:apache-2.0", "medical", "biology", "region:us" ]
qanastek
This article presents MORFITT, the first multi-label corpus in French annotated in specialties in the medical field. MORFITT is composed of 3~624 abstracts of scientific articles from PubMed, annotated in 12 specialties for a total of 5,116 annotations. We detail the corpus, the experiments and the preliminary results obtained using a classifier based on the pre-trained language model CamemBERT. These preliminary results demonstrate the difficulty of the task, with a weighted average F1-score of 61.78%.
ddd
0
4
2023-03-22T18:42:06
--- license: apache-2.0 task_categories: - text-classification language: - fr tags: - medical - biology pretty_name: MORFITT size_categories: - 1K<n<10K --- # MORFITT ## Data ([Zenodo](https://zenodo.org/record/7893841#.ZFLFDnZBybg)) | Publication ([HAL](https://hal.science/hal-04131591/)) [Yanis LABRAK](https://www.linkedin.com/in/yanis-labrak-8a7412145/), [Richard DUFOUR](https://cv.hal.science/richard-dufour), [Mickaël ROUVIER](https://cv.hal.science/mickael-rouvier) [![](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/115EixHBcjf-se6xQeaTwZWE1i4idTNbm?usp=sharing) or [![Python](https://img.shields.io/badge/python-3670A0?style=for-the-badge&logo=python&logoColor=ffdd54)](https://github.com/qanastek/MORFITT/blob/main/TrainTransformers.py) We introduce MORFITT, the first multi-label corpus for the classification of specialties in the medical field, in French. MORFITT is composed of 3,624 summaries of scientific articles from PubMed, annotated in 12 specialties. The article details the corpus, the experiments and the preliminary results obtained using a classifier based on the pre-trained language model CamemBERT. 
For more details, please refer to our paper: **MORFITT: A multi-label topic classification for French Biomedical literature** ([HAL](https://hal.science/hal-04131591/)) # Key Features ## Documents distribution | Train | Dev | Test | |-------|-------|-------| | 1,514 | 1,022 | 1,088 | ## Multi-label distribution | | Train | Dev | Test | Total | |:----------------------:|:--------------:|:--------------:|:--------------:|:--------------:| | Vétérinaire | 320 | 250 | 254 | 824 | | Étiologie | 317 | 202 | 222 | 741 | | Psychologie | 255 | 175 | 179 | 609 | | Chirurgie | 223 | 169 | 157 | 549 | | Génétique | 207 | 139 | 159 | 505 | | Physiologie | 217 | 125 | 148 | 490 | | Pharmacologie | 112 | 84 | 103 | 299 | | Microbiologie | 115 | 72 | 86 | 273 | | Immunologie | 106 | 86 | 70 | 262 | | Chimie | 94 | 53 | 65 | 212 | | Virologie | 76 | 57 | 67 | 200 | | Parasitologie | 68 | 34 | 50 | 152 | | Total | 2,110 | 1,446 | 1,560 | 5,116 | ## Number of labels per document distribution <p align="left"> <img src="https://github.com/qanastek/MORFITT/raw/main/images/distributions_nbr_elements_colors.png" alt="drawing" width="400"/> </p> ## Co-occurences distribution <p align="left"> <img src="https://github.com/qanastek/MORFITT/raw/main/images/distributions_co-references-fixed.png" alt="drawing" width="400"/> </p> # If you use HuggingFace Transformers ```python from datasets import load_dataset dataset = load_dataset("qanastek/MORFITT") print(dataset) ``` or ```python from datasets import load_dataset dataset_base = load_dataset( 'csv', data_files={ 'train': f"./train.tsv", 'validation': f"./dev.tsv", 'test': f"./test.tsv", }, delimiter="\t", ) ``` # License and Citation The code is under [Apache-2.0 License](./LICENSE). The MORFITT dataset is licensed under *Attribution-ShareAlike 4.0 International* ([CC BY-SA 4.0](https://creativecommons.org/licenses/by/4.0/)). 
If you find this project useful in your research, please cite the following papers: ```plain Labrak, Y., Rouvier, M., & Dufour, R. (2023). MORFITT : Un corpus multi-labels d’articles scientifiques français dans le domaine biomédical. In F. Boudin, B. Daille, R. Dufour, O. Khettari, M. Houbre, L. Jourdan, & N. Kooli (Eds.), 18e Conférence en Recherche d’Information et Applications – 16e Rencontres Jeunes Chercheurs en RI – 30e Conférence sur le Traitement Automatique des Langues Naturelles – 25e Rencontre des Étudiants Chercheurs en Informatique pour le Traitement Automatique des Langues (pp. 66–70). ATALA. https://hal.science/hal-04131591 ``` or using the bibtex: ```bibtex @inproceedings{labrak:hal-04131591, TITLE = {{MORFITT : Un corpus multi-labels d'articles scientifiques fran{\c c}ais dans le domaine biom{\'e}dical}}, AUTHOR = {Labrak, Yanis and Rouvier, Mickael and Dufour, Richard}, URL = {https://hal.science/hal-04131591}, BOOKTITLE = {{18e Conf{\'e}rence en Recherche d'Information et Applications -- 16e Rencontres Jeunes Chercheurs en RI -- 30e Conf{\'e}rence sur le Traitement Automatique des Langues Naturelles -- 25e Rencontre des {\'E}tudiants Chercheurs en Informatique pour le Traitement Automatique des Langues}}, ADDRESS = {Paris, France}, EDITOR = {Boudin, Florian and Daille, B{\'e}atrice and Dufour, Richard and Khettari, Oumaima and Houbre, Ma{\"e}l and Jourdan, L{\'e}ane and Kooli, Nihel}, PUBLISHER = {{ATALA}}, PAGES = {66-70}, YEAR = {2023}, KEYWORDS = {Analyse de documents scientifiques ; Jeux de donn{\'e}es compos{\'e}s des textes scientifiques}, PDF = {https://hal.science/hal-04131591/file/1465546.pdf}, HAL_ID = {hal-04131591}, HAL_VERSION = {v1}, } ```
5,354
[ [ -0.031890869140625, -0.019805908203125, 0.0087127685546875, 0.007541656494140625, -0.0019054412841796875, 0.0007152557373046875, 0.0025691986083984375, -0.023040771484375, 0.040069580078125, 0.00885009765625, -0.01959228515625, -0.051971435546875, -0.04592895507...
yangwooko/github-issues
2023-03-23T04:47:27.000Z
[ "task_categories:text-classification", "task_ids:semantic-similarity-classification", "task_ids:sentiment-classification", "multilinguality:monolingual", "size_categories:n<1K", "language:en", "region:us" ]
yangwooko
null
null
0
4
2023-03-23T04:22:45
--- annotations_creators: [] language: - en language_creators: [] license: [] multilinguality: - monolingual pretty_name: github issues very small size_categories: - n<1K source_datasets: [] tags: [] task_categories: - text-classification task_ids: - semantic-similarity-classification - sentiment-classification --- dataset_info: features: - name: url dtype: string - name: repository_url dtype: string - name: labels_url dtype: string - name: comments_url dtype: string - name: events_url dtype: string - name: html_url dtype: string - name: id dtype: int64 - name: node_id dtype: string - name: number dtype: int64 - name: title dtype: string - name: user struct: - name: login dtype: string - name: id dtype: int64 - name: node_id dtype: string - name: avatar_url dtype: string - name: gravatar_id dtype: string - name: url dtype: string - name: html_url dtype: string - name: followers_url dtype: string - name: following_url dtype: string - name: gists_url dtype: string - name: starred_url dtype: string - name: subscriptions_url dtype: string - name: organizations_url dtype: string - name: repos_url dtype: string - name: events_url dtype: string - name: received_events_url dtype: string - name: type dtype: string - name: site_admin dtype: bool - name: labels list: - name: id dtype: int64 - name: node_id dtype: string - name: url dtype: string - name: name dtype: string - name: color dtype: string - name: default dtype: bool - name: description dtype: string - name: state dtype: string - name: locked dtype: bool - name: assignee struct: - name: login dtype: string - name: id dtype: int64 - name: node_id dtype: string - name: avatar_url dtype: string - name: gravatar_id dtype: string - name: url dtype: string - name: html_url dtype: string - name: followers_url dtype: string - name: following_url dtype: string - name: gists_url dtype: string - name: starred_url dtype: string - name: subscriptions_url dtype: string - name: organizations_url dtype: string - name: repos_url 
dtype: string - name: events_url dtype: string - name: received_events_url dtype: string - name: type dtype: string - name: site_admin dtype: bool - name: assignees list: - name: login dtype: string - name: id dtype: int64 - name: node_id dtype: string - name: avatar_url dtype: string - name: gravatar_id dtype: string - name: url dtype: string - name: html_url dtype: string - name: followers_url dtype: string - name: following_url dtype: string - name: gists_url dtype: string - name: starred_url dtype: string - name: subscriptions_url dtype: string - name: organizations_url dtype: string - name: repos_url dtype: string - name: events_url dtype: string - name: received_events_url dtype: string - name: type dtype: string - name: site_admin dtype: bool - name: milestone dtype: 'null' - name: comments dtype: int64 - name: created_at dtype: timestamp[s] - name: updated_at dtype: timestamp[s] - name: closed_at dtype: timestamp[s] - name: author_association dtype: string - name: active_lock_reason dtype: 'null' - name: body dtype: string - name: reactions struct: - name: url dtype: string - name: total_count dtype: int64 - name: '+1' dtype: int64 - name: '-1' dtype: int64 - name: laugh dtype: int64 - name: hooray dtype: int64 - name: confused dtype: int64 - name: heart dtype: int64 - name: rocket dtype: int64 - name: eyes dtype: int64 - name: timeline_url dtype: string - name: performed_via_github_app dtype: 'null' - name: state_reason dtype: string - name: draft dtype: bool - name: pull_request struct: - name: url dtype: string - name: html_url dtype: string - name: diff_url dtype: string - name: patch_url dtype: string - name: merged_at dtype: timestamp[s] - name: is_pull_request dtype: bool splits: - name: train num_bytes: 201451 num_examples: 60 download_size: 0 dataset_size: 201451 --- # Dataset Card for "github-issues" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
4,942
[ [ -0.032989501953125, -0.0304107666015625, 0.012359619140625, 0.003314971923828125, 0.0013027191162109375, 0.00872802734375, 0.0013828277587890625, -0.00862884521484375, 0.04119873046875, 0.0273284912109375, -0.0469970703125, -0.06854248046875, -0.0457763671875, ...
abhi28577/nennepedia
2023-06-24T08:27:44.000Z
[ "task_categories:question-answering", "size_categories:n<1K", "language:en", "license:openrail", "region:us" ]
abhi28577
null
null
0
4
2023-03-23T10:55:02
--- license: openrail task_categories: - question-answering language: - en pretty_name: nennepedia size_categories: - n<1K --- # Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
1,658
[ [ -0.038177490234375, -0.02984619140625, -0.0036067962646484375, 0.027130126953125, -0.0323486328125, 0.0037822723388671875, -0.01727294921875, -0.02020263671875, 0.049041748046875, 0.04046630859375, -0.0634765625, -0.08062744140625, -0.052947998046875, 0.0020...
s-nlp/ru_non_detoxified
2023-09-08T08:36:46.000Z
[ "task_categories:text-classification", "language:ru", "license:openrail++", "region:us" ]
s-nlp
null
null
0
4
2023-03-24T14:48:00
--- license: openrail++ task_categories: - text-classification language: - ru --- # ParaDetox: Detoxification with Parallel Data (Russian). Paraphrase Task Negative Results This repository contains information about **Paraphrase Task** markup from [Russian Paradetox dataset](https://huggingface.co/datasets/s-nlp/ru_paradetox) collection pipeline. ## ParaDetox Collection Pipeline The ParaDetox Dataset collection was done via [Yandex.Toloka](https://toloka.yandex.com/) crowdsource platform. The collection was done in three steps: * *Task 1:* **Generation of Paraphrases**: The first crowdsourcing task asks users to eliminate toxicity in a given sentence while keeping the content. * *Task 2:* **Content Preservation Check**: We show users the generated paraphrases along with their original variants and ask them to indicate if they have close meanings. * *Task 3:* **Toxicity Check**: Finally, we check if the workers succeeded in removing toxicity. Specifically this repo contains the results of **Task 1: Generation of Paraphrases**. The general size of the dataset is about 11,446 samples. Here, the samples that were marked by annotators that they cannot detoxify are present. The reason for this can be following: * *non-toxic*: the text is simply non toxic, can be with negative sentiment, however, without any obscene or rude lexicon; * *toxic content*: the text is passive aggressive, sarcastic, or other, so the insult is deeply incorporated in the message. To detoxify it, you need to change the meaning dramantically. * *unclear*: the text is only about obscene lexicon, random words, or any other tokens combination that makes it difficult to understand the main content. Annotators could select several options. 
## Citation ``` @inproceedings{logacheva-etal-2022-study, title = "A Study on Manual and Automatic Evaluation for Text Style Transfer: The Case of Detoxification", author = "Logacheva, Varvara and Dementieva, Daryna and Krotova, Irina and Fenogenova, Alena and Nikishina, Irina and Shavrina, Tatiana and Panchenko, Alexander", booktitle = "Proceedings of the 2nd Workshop on Human Evaluation of NLP Systems (HumEval)", month = may, year = "2022", address = "Dublin, Ireland", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2022.humeval-1.8", doi = "10.18653/v1/2022.humeval-1.8", pages = "90--101", abstract = "It is often difficult to reliably evaluate models which generate text. Among them, text style transfer is a particularly difficult to evaluate, because its success depends on a number of parameters.We conduct an evaluation of a large number of models on a detoxification task. We explore the relations between the manual and automatic metrics and find that there is only weak correlation between them, which is dependent on the type of model which generated text. Automatic metrics tend to be less reliable for better-performing models. However, our findings suggest that, ChrF and BertScore metrics can be used as a proxy for human evaluation of text detoxification to some extent.", } ``` ## Contacts For any questions, please contact: Daryna Dementieva (dardem96@gmail.com)
3,258
[ [ -0.0017232894897460938, -0.042388916015625, 0.0452880859375, 0.022491455078125, -0.026824951171875, -0.0016651153564453125, -0.01300811767578125, -0.027008056640625, 0.01372528076171875, 0.047027587890625, -0.03338623046875, -0.053009033203125, -0.04437255859375...
vietgpt/alpaca_vi
2023-07-03T13:49:13.000Z
[ "task_categories:text-generation", "size_categories:10K<n<100K", "language:vi", "SFT", "region:us" ]
vietgpt
null
null
0
4
2023-03-27T18:32:58
--- dataset_info: features: - name: text dtype: string - name: instruction dtype: string - name: input dtype: string - name: output dtype: string splits: - name: train num_bytes: 60333806.54451752 num_examples: 51548 download_size: 28605089 dataset_size: 60333806.54451752 task_categories: - text-generation language: - vi tags: - SFT size_categories: - 10K<n<100K --- # Alpaca-Cleaned - Source: https://huggingface.co/datasets/yahma/alpaca-cleaned - Num examples: 51,548 - Language: Vietnamese ```python from datasets import load_dataset load_dataset("vietgpt/alpaca_vi") ``` - Format for Instruction ```python def preprocess( sample, instruction_key="### Instruction:", input_key="Input:", response_key="### Response:", end_key="<|endoftext|>" ): instruction = sample['instruction'] input = sample['input'] response = sample['output'] if input: return {'text': """Dưới đây là một hướng dẫn mô tả một tác vụ, được ghép nối với một đầu vào cung cấp thêm ngữ cảnh. Viết một phản hồi hoàn thành yêu cầu một cách thích hợp. {instruction_key} {instruction} {input_key} {input} {response_key} {response} {end_key}""".format( instruction_key=instruction_key, instruction=instruction, input_key=input_key, input=input, response_key=response_key, response=response, end_key=end_key, )} else: return {'text': """Dưới đây là một hướng dẫn mô tả một nhiệm vụ. Viết một phản hồi hoàn thành yêu cầu một cách thích hợp. {instruction_key} {instruction} {response_key} {response} {end_key}""".format( instruction_key=instruction_key, instruction=instruction, response_key=response_key, response=response, end_key=end_key, )} """ Dưới đây là một hướng dẫn mô tả một nhiệm vụ. Viết một phản hồi hoàn thành yêu cầu một cách thích hợp. ### Instruction: Đưa ra ba lời khuyên để giữ gìn sức khỏe. ### Response: 1. Ăn một chế độ ăn cân bằng và chắc chắn bao gồm đủ rau và hoa quả. 2. Tập thể dục thường xuyên để giữ cho cơ thể của bạn hoạt động và khỏe mạnh. 3. Ngủ đủ giấc và duy trì lịch trình ngủ ổn định. 
<|endoftext|> """ ```
2,169
[ [ -0.0272674560546875, -0.0638427734375, 0.0153656005859375, 0.0274658203125, -0.02789306640625, -0.0155487060546875, -0.00397491455078125, -0.0124969482421875, 0.0330810546875, 0.0400390625, -0.044647216796875, -0.049774169921875, -0.050567626953125, 0.021759...
cartesinus/leyzer-fedcsis-translated
2023-03-27T21:52:34.000Z
[ "task_categories:text-classification", "size_categories:10K<n<100K", "language:pl", "license:cc-by-4.0", "natural-language-understanding", "region:us" ]
cartesinus
Leyzer is a multilingual text corpus designed to study multilingual and cross-lingual natural language understanding (NLU) models and the strategies of localization of virtual assistants. It consists of 20 domains across three languages: English, Spanish and Polish, with 186 intents and a wide range of samples, ranging from 1 to 672 sentences per intent.
@inproceedings{sowanski2020leyzer, title={Leyzer: A Dataset for Multilingual Virtual Assistants}, author={Sowa{\'n}ski, Marcin and Janicki, Artur}, booktitle={International Conference on Text, Speech, and Dialogue}, pages={477--486}, year={2020}, organization={Springer} }
0
4
2023-03-27T21:51:34
--- license: cc-by-4.0 task_categories: - text-classification language: - pl tags: - natural-language-understanding size_categories: - 10K<n<100K --- # Leyzer: A Dataset for Multilingual Virtual Assistants Leyzer is a multilingual text corpus designed to study multilingual and cross-lingual natural language understanding (NLU) models and the strategies of localization of virtual assistants. It consists of 20 domains across three languages: English, Spanish and Polish, with 186 intents and a wide range of samples, ranging from 1 to 672 sentences per intent. For more stats please refer to wiki.
601
[ [ -0.041229248046875, -0.05029296875, 0.0305328369140625, 0.0276336669921875, 0.00914764404296875, 0.01450347900390625, 0.00872802734375, -0.0255126953125, 0.021759033203125, 0.03717041015625, -0.06396484375, -0.04937744140625, -0.0233306884765625, 0.033386230...
patrickramos/conceptual_captions
2023-03-28T07:44:47.000Z
[ "region:us" ]
patrickramos
null
null
0
4
2023-03-28T02:41:41
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
gligen/gqa_tsv
2023-03-29T00:35:42.000Z
[ "region:us" ]
gligen
null
null
0
4
2023-03-28T23:56:57
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
akadhim-ai/martin_valen_dataset
2023-03-29T05:15:50.000Z
[ "region:us" ]
akadhim-ai
null
null
0
4
2023-03-29T05:15:43
--- dataset_info: features: - name: image dtype: image - name: text dtype: string splits: - name: train num_bytes: 82775.0 num_examples: 10 download_size: 82229 dataset_size: 82775.0 --- # Dataset Card for "martin_valen_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
391
[ [ -0.042938232421875, -0.023162841796875, 0.018035888671875, 0.01389312744140625, -0.00782012939453125, -0.00943756103515625, 0.02545166015625, -0.005504608154296875, 0.06475830078125, 0.046539306640625, -0.055999755859375, -0.061309814453125, -0.04150390625, ...
vietgpt/alpaca_en
2023-07-03T13:48:57.000Z
[ "task_categories:text-generation", "size_categories:10K<n<100K", "language:en", "SFT", "region:us" ]
vietgpt
null
null
1
4
2023-03-29T15:52:38
--- dataset_info: features: - name: instruction dtype: string - name: input dtype: string - name: output dtype: string - name: text dtype: string splits: - name: train num_bytes: 46071779.64893658 num_examples: 51848 download_size: 24154901 dataset_size: 46071779.64893658 task_categories: - text-generation language: - en tags: - SFT size_categories: - 10K<n<100K --- # Alpaca-Cleaned - Source: https://huggingface.co/datasets/yahma/alpaca-cleaned - Num examples: 51,848 - Language: English ```python from datasets import load_dataset load_dataset("tdtunlp/alpaca_en") ``` - Format for Instruction task ```python def preprocess( sample, instruction_key="### Instruction:", input_key="Input:", response_key="### Response:", end_key="<|endoftext|>" ): instruction = sample['instruction'] input = sample['input'] response = sample['output'] if input: return {'text': """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. {instruction_key} {instruction} {input_key} {input} {response_key} {response} {end_key}""".format( instruction_key=instruction_key, instruction=instruction, input_key=input_key, input=input, response_key=response_key, response=response, end_key=end_key, )} else: return {'text': """Below is an instruction that describes a task. Write a response that appropriately completes the request. {instruction_key} {instruction} {response_key} {response} {end_key}""".format( instruction_key=instruction_key, instruction=instruction, response_key=response_key, response=response, end_key=end_key, )} """ Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: Give three tips for staying healthy. ### Response: 1.Eat a balanced diet and make sure to include plenty of fruits and vegetables. 2. Exercise regularly to keep your body active and strong. 3. Get enough sleep and maintain a consistent sleep schedule. 
<|endoftext|> """ ```
2,181
[ [ -0.017578125, -0.064697265625, 0.01081085205078125, 0.031768798828125, -0.0146636962890625, -0.0262908935546875, -0.014862060546875, -0.0235137939453125, 0.032623291015625, 0.041717529296875, -0.059356689453125, -0.04022216796875, -0.046844482421875, 0.02148...
turuta/Multi30k-uk
2023-05-04T19:11:45.000Z
[ "task_categories:translation", "task_categories:text-generation", "size_categories:10K<n<100K", "language:uk", "language:en", "license:unknown", "common", "multi30k", "ukrainian", "region:us" ]
turuta
Ukrainian Multi30k
\
3
4
2023-03-29T20:26:58
--- license: unknown task_categories: - translation - text-generation language: - uk - en pretty_name: ukr-multi30k size_categories: - 10K<n<100K tags: - common - multi30k - ukrainian --- ## Dataset Multi30k: English-Ukrainian variation Multi30K dataset is designed to develop multilingual multimodal researches. Initially this dataset extends the Flickr30K dataset by adding German translations. The descriptions were collected from a crowdsourcing platform, while the translations were collected from professionally contracted translators. We present a variation of this dataset manually translated for Ukrainian language. Paper: ```python @inproceedings{saichyshyna-etal-2023-extension, title = "Extension {M}ulti30{K}: Multimodal Dataset for Integrated Vision and Language Research in {U}krainian", author = "Saichyshyna, Nataliia and Maksymenko, Daniil and Turuta, Oleksii and Yerokhin, Andriy and Babii, Andrii and Turuta, Olena", booktitle = "Proceedings of the Second Ukrainian Natural Language Processing Workshop (UNLP)", month = may, year = "2023", address = "Dubrovnik, Croatia", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.unlp-1.7", pages = "54--61", abstract = "We share the results of the project within the well-known Multi30k dataset dedicated to improving machine translation of text from English into Ukrainian. The main task was to manually prepare the dataset and improve the translation of texts. The importance of collecting such datasets for low-resource languages for improving the quality of machine translation has been discussed. We also studied the features of translations of words and sentences with ambiguous meanings.The collection of multimodal datasets is essential for natural language processing tasks because it allows the development of more complex and comprehensive machine learning models that can understand and analyze different types of data. 
These models can learn from a variety of data types, including images, text, and audio, for more accurate and meaningful results.", } ```
2,159
[ [ -0.02978515625, -0.0073394775390625, 0.016326904296875, 0.01055145263671875, -0.0228424072265625, 0.0161895751953125, -0.02508544921875, -0.028778076171875, 0.0012807846069335938, 0.029998779296875, -0.059967041015625, -0.04095458984375, -0.029693603515625, ...
rcds/swiss_leading_decisions
2023-07-20T07:38:35.000Z
[ "task_categories:text-classification", "annotations_creators:machine-generated", "language_creators:expert-generated", "multilinguality:multilingual", "size_categories:10K<n<100K", "source_datasets:original", "language:de", "language:it", "language:fr", "license:cc-by-sa-4.0", "arxiv:2306.09237"...
rcds
null
null
2
4
2023-03-30T08:38:35
--- license: cc-by-sa-4.0 language: - de - it - fr size_categories: - 10K<n<100K annotations_creators: - machine-generated language_creators: - expert-generated multilinguality: - multilingual source_datasets: - original task_categories: - text-classification pretty_name: Swiss Leading Decisions --- # Dataset Card for Swiss Leading Decisions ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary Swiss Leading Decisions is a multilingual, diachronic dataset of 21K Swiss Federal Supreme Court (FSCS) cases. This dataset is part of a challenging text classification task. We also provide additional metadata as the publication year, the law area and the canton of origin per case, to promote robustness and fairness studies on the critical area of legal NLP. 
### Supported Tasks and Leaderboards Swiss Leading Decisions hepled in a text classification task ### Languages Switzerland has four official languages with three languages German, French and Italian being represenated. The decisions are written by the judges and clerks in the language of the proceedings. | Language | Subset | Number of Documents | |------------|------------|----------------------| | German | **de** | 14K | | French | **fr** | 6K | | Italian | **it** | 1K | ## Dataset Structure ### Data Fields ``` decision_id: (str) a unique identifier of the for the document language: (int64) one of (0,1,2) chamber_id: (int64) id to identfy chamber file_id: (int64) id to identify file date: (int64) topic: (string) year: (float64) language: (string) facts: (string) text section of the full text facts_num_tokens_bert: (int64) facts_num_tokens_spacy: (int64) considerations: (string) text section of the full text considerations_num_tokens_bert: (int64) considerations_num_tokens_spacy: (int64) rulings: (string) text section of the full text rulings_num_tokens_bert: (int64) rulings_num_tokens_spacy: (int64) chamber (string): court: (string) canton: (string) region: (string) file_name: (string) html_url: (string) pdf_url: (string) file_number: (string) ``` ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits ## Dataset Creation ### Curation Rationale The dataset was created by Stern (2023). ### Source Data #### Initial Data Collection and Normalization The original data are published from the Swiss Federal Supreme Court (https://www.bger.ch) in unprocessed formats (HTML). The documents were downloaded from the Entscheidsuche portal (https://entscheidsuche.ch) in HTML. #### Who are the source language producers? The decisions are written by the judges and clerks in the language of the proceedings. ### Annotations #### Annotation process #### Who are the annotators? 
Metadata is published by the Swiss Federal Supreme Court (https://www.bger.ch). ### Personal and Sensitive Information The dataset contains publicly available court decisions from the Swiss Federal Supreme Court. Personal or sensitive information has been anonymized by the court before publication according to the following guidelines: https://www.bger.ch/home/juridiction/anonymisierungsregeln.html. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information We release the data under CC-BY-4.0 which complies with the court licensing (https://www.bger.ch/files/live/sites/bger/files/pdf/de/urteilsveroeffentlichung_d.pdf) © Swiss Federal Supreme Court, 2002-2022 The copyright for the editorial content of this website and the consolidated texts, which is owned by the Swiss Federal Supreme Court, is licensed under the Creative Commons Attribution 4.0 International licence. This means that you can re-use the content provided you acknowledge the source and indicate any changes you have made. Source: https://www.bger.ch/files/live/sites/bger/files/pdf/de/urteilsveroeffentlichung_d.pdf ### Citation Information Please cite our [ArXiv-Preprint](https://arxiv.org/abs/2306.09237) ``` @misc{rasiah2023scale, title={SCALE: Scaling up the Complexity for Advanced Language Model Evaluation}, author={Vishvaksenan Rasiah and Ronja Stern and Veton Matoshi and Matthias Stürmer and Ilias Chalkidis and Daniel E. Ho and Joel Niklaus}, year={2023}, eprint={2306.09237}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ### Contributions Thanks to [@Stern5497](https://github.com/stern5497) for adding this dataset.
5,878
[ [ -0.02178955078125, -0.045440673828125, 0.025848388671875, 0.022369384765625, -0.0318603515625, -0.0080718994140625, -0.023223876953125, -0.01294708251953125, 0.007556915283203125, 0.044097900390625, -0.04754638671875, -0.07025146484375, -0.054901123046875, 0...
Francesco/coins-1apki
2023-03-30T09:10:17.000Z
[ "task_categories:object-detection", "annotations_creators:crowdsourced", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "license:cc", "rf100", "region:us" ]
Francesco
null
null
0
4
2023-03-30T09:08:48
--- dataset_info: features: - name: image_id dtype: int64 - name: image dtype: image - name: width dtype: int32 - name: height dtype: int32 - name: objects sequence: - name: id dtype: int64 - name: area dtype: int64 - name: bbox sequence: float32 length: 4 - name: category dtype: class_label: names: '0': coins '1': coin '2': nail '3': nut '4': screw annotations_creators: - crowdsourced language_creators: - found language: - en license: - cc multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - object-detection task_ids: [] pretty_name: coins-1apki tags: - rf100 --- # Dataset Card for coins-1apki ** The original COCO dataset is stored at `dataset.tar.gz`** ## Dataset Description - **Homepage:** https://universe.roboflow.com/object-detection/coins-1apki - **Point of Contact:** francesco.zuppichini@gmail.com ### Dataset Summary coins-1apki ### Supported Tasks and Leaderboards - `object-detection`: The dataset can be used to train a model for Object Detection. ### Languages English ## Dataset Structure ### Data Instances A data point comprises an image and its object annotations. ``` { 'image_id': 15, 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x640 at 0x2373B065C18>, 'width': 964043, 'height': 640, 'objects': { 'id': [114, 115, 116, 117], 'area': [3796, 1596, 152768, 81002], 'bbox': [ [302.0, 109.0, 73.0, 52.0], [810.0, 100.0, 57.0, 28.0], [160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0] ], 'category': [4, 4, 0, 0] } } ``` ### Data Fields - `image`: the image id - `image`: `PIL.Image.Image` object containing the image. Note that when accessing the image column: `dataset[0]["image"]` the image file is automatically decoded. Decoding of a large number of image files might take a significant amount of time. 
Thus it is important to first query the sample index before the `"image"` column, *i.e.* `dataset[0]["image"]` should **always** be preferred over `dataset["image"][0]` - `width`: the image width - `height`: the image height - `objects`: a dictionary containing bounding box metadata for the objects present on the image - `id`: the annotation id - `area`: the area of the bounding box - `bbox`: the object's bounding box (in the [coco](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) format) - `category`: the object's category. #### Who are the annotators? Annotators are Roboflow users ## Additional Information ### Licensing Information See original homepage https://universe.roboflow.com/object-detection/coins-1apki ### Citation Information ``` @misc{ coins-1apki, title = { coins 1apki Dataset }, type = { Open Source Dataset }, author = { Roboflow 100 }, howpublished = { \url{ https://universe.roboflow.com/object-detection/coins-1apki } }, url = { https://universe.roboflow.com/object-detection/coins-1apki }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { nov }, note = { visited on 2023-03-29 }, }" ``` ### Contributions Thanks to [@mariosasko](https://github.com/mariosasko) for adding this dataset.
3,387
[ [ -0.04193115234375, -0.043914794921875, 0.0118560791015625, -0.008819580078125, -0.038909912109375, -0.00618743896484375, -0.00821685791015625, -0.04107666015625, 0.0311431884765625, 0.0379638671875, -0.043914794921875, -0.06414794921875, -0.0433349609375, 0....
Francesco/aerial-cows
2023-03-30T09:12:41.000Z
[ "task_categories:object-detection", "annotations_creators:crowdsourced", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "license:cc", "rf100", "region:us" ]
Francesco
null
null
0
4
2023-03-30T09:11:54
--- dataset_info: features: - name: image_id dtype: int64 - name: image dtype: image - name: width dtype: int32 - name: height dtype: int32 - name: objects sequence: - name: id dtype: int64 - name: area dtype: int64 - name: bbox sequence: float32 length: 4 - name: category dtype: class_label: names: '0': aerial-cows '1': cow annotations_creators: - crowdsourced language_creators: - found language: - en license: - cc multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - object-detection task_ids: [] pretty_name: aerial-cows tags: - rf100 --- # Dataset Card for aerial-cows ** The original COCO dataset is stored at `dataset.tar.gz`** ## Dataset Description - **Homepage:** https://universe.roboflow.com/object-detection/aerial-cows - **Point of Contact:** francesco.zuppichini@gmail.com ### Dataset Summary aerial-cows ### Supported Tasks and Leaderboards - `object-detection`: The dataset can be used to train a model for Object Detection. ### Languages English ## Dataset Structure ### Data Instances A data point comprises an image and its object annotations. ``` { 'image_id': 15, 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x640 at 0x2373B065C18>, 'width': 964043, 'height': 640, 'objects': { 'id': [114, 115, 116, 117], 'area': [3796, 1596, 152768, 81002], 'bbox': [ [302.0, 109.0, 73.0, 52.0], [810.0, 100.0, 57.0, 28.0], [160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0] ], 'category': [4, 4, 0, 0] } } ``` ### Data Fields - `image`: the image id - `image`: `PIL.Image.Image` object containing the image. Note that when accessing the image column: `dataset[0]["image"]` the image file is automatically decoded. Decoding of a large number of image files might take a significant amount of time. 
Thus it is important to first query the sample index before the `"image"` column, *i.e.* `dataset[0]["image"]` should **always** be preferred over `dataset["image"][0]` - `width`: the image width - `height`: the image height - `objects`: a dictionary containing bounding box metadata for the objects present on the image - `id`: the annotation id - `area`: the area of the bounding box - `bbox`: the object's bounding box (in the [coco](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) format) - `category`: the object's category. #### Who are the annotators? Annotators are Roboflow users ## Additional Information ### Licensing Information See original homepage https://universe.roboflow.com/object-detection/aerial-cows ### Citation Information ``` @misc{ aerial-cows, title = { aerial cows Dataset }, type = { Open Source Dataset }, author = { Roboflow 100 }, howpublished = { \url{ https://universe.roboflow.com/object-detection/aerial-cows } }, url = { https://universe.roboflow.com/object-detection/aerial-cows }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { nov }, note = { visited on 2023-03-29 }, }" ``` ### Contributions Thanks to [@mariosasko](https://github.com/mariosasko) for adding this dataset.
3,326
[ [ -0.051788330078125, -0.0301513671875, -0.01015472412109375, -0.01540374755859375, -0.030242919921875, -0.01171875, 0.0089874267578125, -0.04052734375, 0.0286102294921875, 0.033050537109375, -0.04473876953125, -0.073486328125, -0.042205810546875, 0.0237121582...
Francesco/csgo-videogame
2023-03-30T09:15:55.000Z
[ "task_categories:object-detection", "annotations_creators:crowdsourced", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "license:cc", "rf100", "region:us" ]
Francesco
null
null
0
4
2023-03-30T09:15:12
--- dataset_info: features: - name: image_id dtype: int64 - name: image dtype: image - name: width dtype: int32 - name: height dtype: int32 - name: objects sequence: - name: id dtype: int64 - name: area dtype: int64 - name: bbox sequence: float32 length: 4 - name: category dtype: class_label: names: '0': CSGO '1': CT '2': T annotations_creators: - crowdsourced language_creators: - found language: - en license: - cc multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - object-detection task_ids: [] pretty_name: csgo-videogame tags: - rf100 --- # Dataset Card for csgo-videogame ** The original COCO dataset is stored at `dataset.tar.gz`** ## Dataset Description - **Homepage:** https://universe.roboflow.com/object-detection/csgo-videogame - **Point of Contact:** francesco.zuppichini@gmail.com ### Dataset Summary csgo-videogame ### Supported Tasks and Leaderboards - `object-detection`: The dataset can be used to train a model for Object Detection. ### Languages English ## Dataset Structure ### Data Instances A data point comprises an image and its object annotations. ``` { 'image_id': 15, 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x640 at 0x2373B065C18>, 'width': 964043, 'height': 640, 'objects': { 'id': [114, 115, 116, 117], 'area': [3796, 1596, 152768, 81002], 'bbox': [ [302.0, 109.0, 73.0, 52.0], [810.0, 100.0, 57.0, 28.0], [160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0] ], 'category': [4, 4, 0, 0] } } ``` ### Data Fields - `image`: the image id - `image`: `PIL.Image.Image` object containing the image. Note that when accessing the image column: `dataset[0]["image"]` the image file is automatically decoded. Decoding of a large number of image files might take a significant amount of time. 
Thus it is important to first query the sample index before the `"image"` column, *i.e.* `dataset[0]["image"]` should **always** be preferred over `dataset["image"][0]` - `width`: the image width - `height`: the image height - `objects`: a dictionary containing bounding box metadata for the objects present on the image - `id`: the annotation id - `area`: the area of the bounding box - `bbox`: the object's bounding box (in the [coco](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) format) - `category`: the object's category. #### Who are the annotators? Annotators are Roboflow users ## Additional Information ### Licensing Information See original homepage https://universe.roboflow.com/object-detection/csgo-videogame ### Citation Information ``` @misc{ csgo-videogame, title = { csgo videogame Dataset }, type = { Open Source Dataset }, author = { Roboflow 100 }, howpublished = { \url{ https://universe.roboflow.com/object-detection/csgo-videogame } }, url = { https://universe.roboflow.com/object-detection/csgo-videogame }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { nov }, note = { visited on 2023-03-29 }, }" ``` ### Contributions Thanks to [@mariosasko](https://github.com/mariosasko) for adding this dataset.
3,364
[ [ -0.045166015625, -0.043670654296875, 0.01093292236328125, -0.004207611083984375, -0.026397705078125, 0.0014505386352539062, -0.0168304443359375, -0.03509521484375, 0.01073455810546875, 0.022308349609375, -0.050811767578125, -0.07330322265625, -0.041534423828125,...
Francesco/robomasters-285km
2023-03-30T09:32:37.000Z
[ "task_categories:object-detection", "annotations_creators:crowdsourced", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:en", "license:cc", "rf100", "region:us" ]
Francesco
null
null
0
4
2023-03-30T09:31:52
--- dataset_info: features: - name: image_id dtype: int64 - name: image dtype: image - name: width dtype: int32 - name: height dtype: int32 - name: objects sequence: - name: id dtype: int64 - name: area dtype: int64 - name: bbox sequence: float32 length: 4 - name: category dtype: class_label: names: '0': robots '1': armor '2': base '3': car '4': rune '5': rune-blue '6': rune-gray '7': rune-grey '8': rune-red '9': watcher annotations_creators: - crowdsourced language_creators: - found language: - en license: - cc multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - object-detection task_ids: [] pretty_name: robomasters-285km tags: - rf100 --- # Dataset Card for robomasters-285km ** The original COCO dataset is stored at `dataset.tar.gz`** ## Dataset Description - **Homepage:** https://universe.roboflow.com/object-detection/robomasters-285km - **Point of Contact:** francesco.zuppichini@gmail.com ### Dataset Summary robomasters-285km ### Supported Tasks and Leaderboards - `object-detection`: The dataset can be used to train a model for Object Detection. ### Languages English ## Dataset Structure ### Data Instances A data point comprises an image and its object annotations. ``` { 'image_id': 15, 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x640 at 0x2373B065C18>, 'width': 964043, 'height': 640, 'objects': { 'id': [114, 115, 116, 117], 'area': [3796, 1596, 152768, 81002], 'bbox': [ [302.0, 109.0, 73.0, 52.0], [810.0, 100.0, 57.0, 28.0], [160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0] ], 'category': [4, 4, 0, 0] } } ``` ### Data Fields - `image`: the image id - `image`: `PIL.Image.Image` object containing the image. Note that when accessing the image column: `dataset[0]["image"]` the image file is automatically decoded. Decoding of a large number of image files might take a significant amount of time. 
Thus it is important to first query the sample index before the `"image"` column, *i.e.* `dataset[0]["image"]` should **always** be preferred over `dataset["image"][0]` - `width`: the image width - `height`: the image height - `objects`: a dictionary containing bounding box metadata for the objects present on the image - `id`: the annotation id - `area`: the area of the bounding box - `bbox`: the object's bounding box (in the [coco](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) format) - `category`: the object's category. #### Who are the annotators? Annotators are Roboflow users ## Additional Information ### Licensing Information See original homepage https://universe.roboflow.com/object-detection/robomasters-285km ### Citation Information ``` @misc{ robomasters-285km, title = { robomasters 285km Dataset }, type = { Open Source Dataset }, author = { Roboflow 100 }, howpublished = { \url{ https://universe.roboflow.com/object-detection/robomasters-285km } }, url = { https://universe.roboflow.com/object-detection/robomasters-285km }, journal = { Roboflow Universe }, publisher = { Roboflow }, year = { 2022 }, month = { nov }, note = { visited on 2023-03-29 }, }" ``` ### Contributions Thanks to [@mariosasko](https://github.com/mariosasko) for adding this dataset.
3,574
[ [ -0.054412841796875, -0.029998779296875, 0.019683837890625, -0.01236724853515625, -0.0289306640625, -0.01267242431640625, -0.01256561279296875, -0.039947509765625, 0.0213623046875, 0.032318115234375, -0.0595703125, -0.0672607421875, -0.044677734375, 0.0260772...
bigbio/cardiode
2023-04-05T01:14:13.000Z
[ "multilinguality:monolingual", "language:ger", "license:other", "region:us" ]
bigbio
First freely available and distributable large German clinical corpus from the cardiovascular domain.
@data{ data/AFYQDY_2022, author = {Christoph Dieterich}, publisher = {heiDATA}, title = {{CARDIO:DE}}, year = {2022}, version = {V5}, doi = {10.11588/data/AFYQDY}, url = {https://doi.org/10.11588/data/AFYQDY} }
4
4
2023-04-01T16:40:12
--- language: - ger bigbio_language: - German license: other multilinguality: monolingual pretty_name: CARDIO:DE homepage: https://heidata.uni-heidelberg.de/dataset.xhtml?persistentId=doi:10.11588/data/AFYQDY bigbio_pubmed: false bigbio_public: false bigbio_tasks: - NAMED_ENTITY_RECOGNITION --- # Dataset Card for CARDIO.DE ## Dataset Description - **Homepage:** https://heidata.uni-heidelberg.de/dataset.xhtml?persistentId=doi:10.11588/data/AFYQDY - **Pubmed:** False - **Public:** False - **Tasks:** NER We present CARDIO:DE, the first freely available and distributable large German clinical corpus from the cardiovascular domain. CARDIO:DE encompasses 500 clinical routine German doctor’s letters from Heidelberg University Hospital, which were manually annotated. Our prospective study design complies well with current data protection regulations and allows us to keep the original structure of clinical documents consistent. In order to ease access to our corpus, we manually de-identified all letters. To enable various information extraction tasks the temporal information in the documents was preserved. We added two high-quality manual annotation layers to CARDIO:DE, (1) medication information and (2) CDA-compliant section classes. ## Citation Information ``` @data{ data/AFYQDY_2022, author = {Christoph Dieterich}, publisher = {heiDATA}, title = {{CARDIO:DE}}, year = {2022}, version = {V5}, doi = {10.11588/data/AFYQDY}, url = {https://doi.org/10.11588/data/AFYQDY} } ```
1,536
[ [ -0.036712646484375, -0.036773681640625, 0.031524658203125, 0.006206512451171875, -0.031494140625, -0.00974273681640625, -0.0231781005859375, -0.039459228515625, 0.038543701171875, 0.0389404296875, -0.036529541015625, -0.0794677734375, -0.05731201171875, 0.01...
FourthBrainGenAI/Product-Descriptions-and-Ads
2023-04-04T20:26:04.000Z
[ "task_categories:text-generation", "size_categories:n<1K", "language:en", "license:openrail", "art", "region:us" ]
FourthBrainGenAI
null
null
2
4
2023-04-04T20:11:11
--- dataset_info: features: - name: product dtype: string - name: description dtype: string - name: ad dtype: string splits: - name: train num_bytes: 27511.2 num_examples: 90 - name: test num_bytes: 3056.8 num_examples: 10 download_size: 24914 dataset_size: 30568 license: openrail task_categories: - text-generation language: - en tags: - art pretty_name: Product Descriptions and Ads size_categories: - n<1K --- # Synthetic Dataset for Product Descriptions and Ads The basic process was as follows: 1. Prompt GPT-4 to create a list of 100 sample clothing items and descriptions for those items. 2. Split the output into desired format `{"product" : "<PRODUCT NAME>", "description" : "<DESCRIPTION>"} 3. Prompt GPT-4 to create adverts for each of the 100 samples based on their name and description. This data was not cleaned or verified manually.
897
[ [ -0.0380859375, -0.06268310546875, 0.0193328857421875, 0.009979248046875, -0.021759033203125, 0.01399993896484375, 0.0014925003051757812, -0.01361846923828125, 0.026092529296875, 0.04461669921875, -0.07928466796875, -0.037689208984375, -0.0019168853759765625, ...
nlplabtdtu/xlsum_en
2023-04-05T17:41:33.000Z
[ "region:us" ]
nlplabtdtu
null
null
0
4
2023-04-05T17:18:01
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Ariel4/related-drugs-network
2023-04-05T20:02:19.000Z
[ "size_categories:1K<n<10K", "license:cc-by-4.0", "chemistry", "biology", "graph", "network", "drugs", "region:us" ]
Ariel4
null
null
2
4
2023-04-05T19:08:30
--- license: cc-by-4.0 tags: - chemistry - biology - graph - network - drugs pretty_name: Network of Related Drugs from Drugs.com Database size_categories: - 1K<n<10K --- Dataset created by crawling the [Drugs.com](https://www.drugs.com/) database - please abide by their [Terms and Conditions](https://www.drugs.com/support/terms.html) ### How the Graph was Created Most drugs on Drugs.com have a **Related/Similar Drugs** page (e.g. [here](https://www.drugs.com/acetaminophen.html)). In my graph, nodes are drugs in the database, and edges are Related/Similar Drugs linked by the drug's description page. Note: Not all drugs in the dataset are part of the graph, as not all drugs have a "Related/Similar Drugs" section
724
[ [ -0.0180511474609375, -0.0426025390625, 0.050994873046875, -0.00887298583984375, -0.01084136962890625, 0.004913330078125, 0.015869140625, -0.034271240234375, 0.07037353515625, 0.058990478515625, -0.06634521484375, -0.079833984375, -0.0298309326171875, 0.01615...
ar852/scraped-chatgpt-conversations
2023-04-05T21:45:07.000Z
[ "task_categories:question-answering", "task_categories:text-generation", "task_categories:conversational", "size_categories:100K<n<1M", "region:us" ]
ar852
null
null
8
4
2023-04-05T19:10:58
--- task_categories: - question-answering - text-generation - conversational size_categories: - 100K<n<1M --- # Dataset Card for Dataset Name ## Dataset Description - **Repository: {https://github.com/ar852/chatgpt-scraping}** ### Dataset Summary scraped-chatgpt-conversations contains ~100k conversations between a user and chatgpt that were shared online through reddit, twitter, or sharegpt. For sharegpt, the conversations were directly scraped from the website. For reddit and twitter, images were downloaded from submissions, segmented, and run through an OCR pipeline to obtain a conversation list. For information on how the each json file is structured, please see `json_guides.md` ### Languages - twitter 1, twitter 2, and sharegpt json files are multilingual - reddit and twitter 2 json files are english only ## Dataset Structure - refer to *json_guide.txt* ## Dataset Creation This dataset was created by scraping images from twitter, reddit, and sharegpt.com using the pushshift and twitter APIs, respectively. The images are run through a filter to check if they contain a chatgpt conversation, then the image is processed and run through an OCR pipeline to obtain the conversation text. More info can be found in the repository. ### Source Data - twitter.com - reddit.com - sharegpt.com ## Considerations for Using the Data A significant amount of dicts created from parsing reddit and twitter images may be parsed incorrectly for a number of reasons: cropping done by the image poster, incorrectly classifying the image as containing a chatgpt conversation, incorrect image parsing (segmentation) by the parser, incorrect OCR by pytesseract. ### Licensing Information [More Information Needed] ### Contributions [More Information Needed]
1,780
[ [ -0.024932861328125, -0.06256103515625, 0.0137939453125, 0.02099609375, -0.0295257568359375, 0.0242767333984375, -0.01445770263671875, -0.03814697265625, 0.031768798828125, 0.02435302734375, -0.049896240234375, -0.04974365234375, -0.054718017578125, 0.0035915...
enesxgrahovac/the-feynman-lectures-on-physics
2023-04-07T20:56:25.000Z
[ "region:us" ]
enesxgrahovac
null
null
3
4
2023-04-07T20:22:57
--- dataset_info: features: - name: book_volume dtype: string - name: book_title dtype: string - name: chapter_number dtype: string - name: chapter_title dtype: string - name: section_number dtype: string - name: section_title dtype: string - name: section_text dtype: string splits: - name: train num_bytes: 4609643 num_examples: 641 download_size: 2276758 dataset_size: 4609643 --- # Dataset Card for "the-feynman-lectures-on-physics" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
629
[ [ -0.03662109375, -0.0130767822265625, 0.01934814453125, 0.00373077392578125, -0.01142120361328125, -0.007381439208984375, 0.026824951171875, 0.0014410018920898438, 0.049041748046875, 0.020599365234375, -0.048583984375, -0.027313232421875, -0.027557373046875, ...
MedIR/roco
2023-04-07T20:32:36.000Z
[ "region:us" ]
MedIR
null
null
0
4
2023-04-07T20:32:26
--- dataset_info: features: - name: id dtype: string - name: semtypes sequence: string - name: cuis sequence: string - name: caption dtype: string - name: image dtype: image splits: - name: test num_bytes: 170468574.0 num_examples: 8176 download_size: 167802110 dataset_size: 170468574.0 --- # Dataset Card for "roco" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
498
[ [ -0.0301055908203125, -0.009765625, 0.007114410400390625, 0.01474761962890625, -0.0232086181640625, -0.0037250518798828125, 0.0020389556884765625, -0.0281219482421875, 0.054290771484375, 0.04779052734375, -0.060028076171875, -0.059539794921875, -0.044769287109375...
nihalbaig/alpaca_bangla
2023-04-12T10:25:19.000Z
[ "region:us" ]
nihalbaig
null
null
0
4
2023-04-10T15:59:45
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
nasa-cisto-data-science-group/modis-lake-powell-raster-dataset
2023-04-11T18:19:51.000Z
[ "license:apache-2.0", "region:us" ]
nasa-cisto-data-science-group
null
null
0
4
2023-04-11T17:16:17
--- license: apache-2.0 --- # MODIS Water Lake Powell Raster Dataset ### Dataset Summary Raster dataset comprised of MODIS surface reflectance bands along with calculated indices and a label (water/not-water) ## Dataset Structure ### Data Fields - `water`: Label, water or not-water (binary) - `sur_refl_b01_1`: MODIS surface reflection band 1 (-100, 16000) - `sur_refl_b02_1`: MODIS surface reflection band 2 (-100, 16000) - `sur_refl_b03_1`: MODIS surface reflection band 3 (-100, 16000) - `sur_refl_b04_1`: MODIS surface reflection band 4 (-100, 16000) - `sur_refl_b05_1`: MODIS surface reflection band 5 (-100, 16000) - `sur_refl_b06_1`: MODIS surface reflection band 6 (-100, 16000) - `sur_refl_b07_1`: MODIS surface reflection band 7 (-100, 16000) - `ndvi`: Normalized differential vegetation index (-20000, 20000) - `ndwi1`: Normalized differential water index 1 (-20000, 20000) - `ndwi2`: Normalized differential water index 2 (-20000, 20000) ## Dataset Creation ## Source Data [MODIS MOD44W](https://lpdaac.usgs.gov/products/mod44wv006/) [MODIS MOD09GA](https://lpdaac.usgs.gov/products/mod09gav006/) [MODIS MOD09GQ](https://lpdaac.usgs.gov/products/mod09gqv006/) ## Annotation process Labels were created by using the MOD44W C6 product to designate pixels in MODIS surface reflectance products as land or water.
1,334
[ [ -0.059356689453125, -0.0221405029296875, 0.03753662109375, 0.0181121826171875, -0.039093017578125, -0.00965118408203125, 0.03497314453125, -0.0093841552734375, 0.0161590576171875, 0.037689208984375, -0.05548095703125, -0.0543212890625, -0.037200927734375, -0...
LEAP/ClimSim_high-res
2023-09-29T20:30:24.000Z
[ "license:cc-by-4.0", "arxiv:2306.08754", "doi:10.57967/hf/0739", "region:us" ]
LEAP
null
null
5
4
2023-04-12T18:27:42
--- license: cc-by-4.0 --- The corresponding GitHub repo can be found here:https://github.com/leap-stc/ClimSim Read more: https://arxiv.org/abs/2306.08754.
157
[ [ -0.034271240234375, -0.0133514404296875, 0.0271148681640625, 0.0104217529296875, -0.0133209228515625, -0.017120361328125, 0.0025787353515625, -0.02410888671875, 0.032928466796875, 0.0479736328125, -0.041290283203125, -0.043060302734375, -0.0391845703125, -0....
LEL-A/translated_german_alpaca_validation
2023-10-02T16:50:04.000Z
[ "language:de", "region:us" ]
LEL-A
null
null
0
4
2023-04-12T18:39:19
--- dataset_info: features: - name: text dtype: 'null' - name: inputs struct: - name: _instruction dtype: string - name: input dtype: string - name: output dtype: string - name: prediction list: - name: label dtype: string - name: score dtype: float64 - name: prediction_agent dtype: string - name: annotation dtype: string - name: annotation_agent dtype: string - name: vectors struct: - name: input sequence: float64 - name: instruction sequence: float64 - name: output sequence: float64 - name: multi_label dtype: bool - name: explanation dtype: 'null' - name: id dtype: string - name: metadata struct: - name: original_id dtype: int64 - name: translation_model dtype: string - name: status dtype: string - name: event_timestamp dtype: timestamp[us] - name: metrics struct: - name: text_length dtype: int64 splits: - name: train num_bytes: 152890 num_examples: 8 download_size: 0 dataset_size: 152890 language: - de --- # Dataset Card for "translated_german_alpaca_validation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1,324
[ [ -0.04638671875, -0.0369873046875, 0.021514892578125, 0.034515380859375, -0.0330810546875, -0.020416259765625, 0.01451873779296875, -0.0216827392578125, 0.05426025390625, 0.0284271240234375, -0.06292724609375, -0.0699462890625, -0.047027587890625, 0.000203251...
prashanthpillai/docvqa_train_and_val
2023-04-13T17:29:28.000Z
[ "region:us" ]
prashanthpillai
null
null
0
4
2023-04-13T17:19:13
--- dataset_info: features: - name: questionId dtype: int64 - name: question dtype: string - name: image sequence: sequence: sequence: uint8 - name: docId dtype: int64 - name: ucsf_document_id dtype: string - name: ucsf_document_page_no dtype: string - name: answers sequence: string - name: data_split dtype: string - name: words sequence: string - name: boxes sequence: sequence: int64 splits: - name: val num_bytes: 869361798 num_examples: 5349 - name: train num_bytes: 6381793673 num_examples: 39454 download_size: 2578887111 dataset_size: 7251155471 --- # Dataset Card for "docvqa_train_and_val" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
841
[ [ -0.0360107421875, -0.00928497314453125, 0.01548004150390625, 0.0062408447265625, -0.005107879638671875, -0.00598907470703125, 0.040557861328125, 0.004718780517578125, 0.03204345703125, 0.034423828125, -0.052215576171875, -0.0423583984375, -0.044891357421875, ...
aligeniewcp22/LCSTS
2023-04-14T04:51:09.000Z
[ "region:us" ]
aligeniewcp22
null
null
0
4
2023-04-14T04:47:24
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
biglab/webui-all
2023-05-05T02:24:25.000Z
[ "license:other", "region:us" ]
biglab
null
null
3
4
2023-04-15T20:08:49
--- license: other --- This data accompanies the WebUI project (https://dl.acm.org/doi/abs/10.1145/3544548.3581158) For more information, check out the project website: https://uimodeling.github.io/ To download this dataset, you need to install the huggingface-hub package ``` pip install huggingface-hub ``` Use snapshot_download ``` from huggingface_hub import snapshot_download snapshot_download(repo_id="biglab/webui-all", repo_type="dataset") ``` IMPORTANT * Before downloading and using, please review the copyright info here: https://github.com/js0nwu/webui/blob/main/COPYRIGHT.txt * Not all data samples have the same number of files (e.g., same number of device screenshots) due to the fact that the crawler used a timeout during collection * The dataset released on HuggingFace was filtered using a list of explicit words and therefore contains fewer samples than the experiments originally used in the paper. The raw dataset is currently available (https://drive.google.com/drive/folders/1hcO75W2FjsZoibsj2TIbKz67hy9JkOBz?usp=share_link) but may be removed in the future.
1,091
[ [ -0.03326416015625, -0.0496826171875, 0.007808685302734375, 0.0175018310546875, -0.010833740234375, -0.01366424560546875, -0.0003554821014404297, -0.0188446044921875, 0.034515380859375, 0.02618408203125, -0.05584716796875, -0.0379638671875, -0.0311126708984375, ...
Adam173/seinfeld-scripts
2023-04-16T10:25:15.000Z
[ "region:us" ]
Adam173
null
null
0
4
2023-04-16T10:25:13
--- dataset_info: features: - name: title dtype: string - name: script dtype: string splits: - name: train num_bytes: 3909219 num_examples: 176 download_size: 2212310 dataset_size: 3909219 --- # Dataset Card for "seinfeld-scripts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
393
[ [ -0.027862548828125, -0.0167999267578125, 0.0145111083984375, 0.02178955078125, 0.006450653076171875, -0.00928497314453125, 0.0053253173828125, 0.01143646240234375, 0.07574462890625, 0.04791259765625, -0.06756591796875, -0.04901123046875, -0.035614013671875, ...
chrisociepa/wikipedia-pl-20230401
2023-04-17T20:41:24.000Z
[ "task_categories:text-generation", "task_categories:fill-mask", "task_ids:language-modeling", "task_ids:masked-language-modeling", "size_categories:1M<n<10M", "language:pl", "license:cc-by-sa-3.0", "pretraining", "language modelling", "wikipedia", "web", "region:us" ]
chrisociepa
null
null
0
4
2023-04-17T17:14:21
--- license: cc-by-sa-3.0 dataset_info: features: - name: id dtype: string - name: url dtype: string - name: title dtype: string - name: text dtype: string splits: - name: train num_bytes: 2883878741 num_examples: 1562327 download_size: 1761971402 dataset_size: 2883878741 task_categories: - text-generation - fill-mask task_ids: - language-modeling - masked-language-modeling language: - pl pretty_name: Polish Wikipedia 2023-04-01 size_categories: - 1M<n<10M tags: - pretraining - language modelling - wikipedia - web --- # Dataset Card for April 2023 Polish Wikipedia Wikipedia dataset containing cleaned articles of Polish language. The dataset has been built from the Wikipedia dump (https://dumps.wikimedia.org/) using the [OLM Project](https://github.com/huggingface/olm-datasets). Each example contains the content of one full Wikipedia article with cleaning to strip markdown and unwanted sections (references, etc.). ### Licensing Information Most of Wikipedia's text and many of its images are co-licensed under the [Creative Commons Attribution-ShareAlike 3.0 Unported License](https://en.wikipedia.org/wiki/Wikipedia:Text_of_Creative_Commons_Attribution-ShareAlike_3.0_Unported_License) (CC BY-SA) and the [GNU Free Documentation License](https://en.wikipedia.org/wiki/Wikipedia:Text_of_the_GNU_Free_Documentation_License) (GFDL) (unversioned, with no invariant sections, front-cover texts, or back-cover texts). Some text has been imported only under CC BY-SA and CC BY-SA-compatible license and cannot be reused under GFDL; such text will be identified on the page footer, in the page history, or on the discussion page of the article that utilizes the text. ### Citation Information ``` @ONLINE{wikidump, author = "Wikimedia Foundation", title = "Wikimedia Downloads", url = "https://dumps.wikimedia.org" } ```
1,911
[ [ -0.050445556640625, -0.04034423828125, 0.0271453857421875, 0.0146636962890625, -0.04156494140625, -0.035552978515625, -0.032684326171875, -0.038726806640625, 0.020294189453125, 0.0518798828125, -0.06610107421875, -0.0489501953125, -0.00894927978515625, 0.029...
ranWang/UN_PDF_RECORD_SET
2023-04-18T14:08:03.000Z
[ "region:us" ]
ranWang
null
null
0
4
2023-04-18T12:59:26
--- dataset_info: features: - name: record dtype: int64 - name: language dtype: string - name: year_time dtype: int64 - name: file_name dtype: string - name: url dtype: string splits: - name: train num_bytes: 162579384 num_examples: 1338864 - name: 2000year num_bytes: 106669952.46696304 num_examples: 878442 download_size: 44831302 dataset_size: 269249336.46696305 --- # Dataset Card for "UN_PDF_RECORD_SET" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
599
[ [ -0.0276031494140625, 0.00452423095703125, -0.0011358261108398438, 0.006748199462890625, -0.0276336669921875, -0.0022907257080078125, 0.00922393798828125, 0.008453369140625, 0.0380859375, 0.046661376953125, -0.04669189453125, -0.06011962890625, -0.049957275390625...
diffusers/cat_toy_example
2023-04-18T14:24:58.000Z
[ "region:us" ]
diffusers
null
null
3
4
2023-04-18T14:16:57
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
nihalbaig/alpaca-bangla
2023-04-18T18:14:59.000Z
[ "region:us" ]
nihalbaig
null
null
0
4
2023-04-18T18:14:52
--- dataset_info: features: - name: text dtype: 'null' - name: inputs struct: - name: input dtype: string - name: instruction dtype: string - name: output dtype: string - name: prediction list: - name: label dtype: string - name: score dtype: float64 - name: prediction_agent dtype: 'null' - name: annotation dtype: 'null' - name: annotation_agent dtype: 'null' - name: vectors dtype: 'null' - name: multi_label dtype: bool - name: explanation dtype: 'null' - name: id dtype: string - name: metadata dtype: 'null' - name: status dtype: string - name: event_timestamp dtype: timestamp[us] - name: metrics dtype: 'null' splits: - name: train num_bytes: 36188108 num_examples: 18000 download_size: 13437852 dataset_size: 36188108 --- # Dataset Card for "alpaca-bangla" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1,050
[ [ -0.054046630859375, -0.03460693359375, -0.0027294158935546875, 0.033111572265625, -0.0292816162109375, -0.007022857666015625, 0.0232696533203125, -0.0310821533203125, 0.074951171875, 0.031494140625, -0.055511474609375, -0.054779052734375, -0.0548095703125, -...
iamketan25/gsm-general-qa-instructions
2023-04-19T16:53:17.000Z
[ "region:us" ]
iamketan25
null
null
0
4
2023-04-19T16:52:49
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
nihalbaig/alpaca-bangla_validation
2023-10-24T17:25:28.000Z
[ "region:us" ]
nihalbaig
null
null
0
4
2023-04-20T17:29:03
--- dataset_info: features: - name: text dtype: 'null' - name: inputs struct: - name: input dtype: string - name: instruction dtype: string - name: output dtype: string - name: prediction list: - name: label dtype: string - name: score dtype: float64 - name: prediction_agent dtype: string - name: annotation dtype: string - name: annotation_agent dtype: string - name: vectors dtype: 'null' - name: multi_label dtype: bool - name: explanation dtype: 'null' - name: id dtype: string - name: metadata dtype: 'null' - name: status dtype: string - name: event_timestamp dtype: timestamp[us] - name: metrics struct: - name: text_length dtype: int64 splits: - name: train num_bytes: 2487054 num_examples: 1258 download_size: 0 dataset_size: 2487054 --- # Dataset Card for "alpaca-bangla_validation" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
1,088
[ [ -0.04730224609375, -0.03607177734375, 0.003566741943359375, 0.0389404296875, -0.027862548828125, -0.00933837890625, 0.027435302734375, -0.0224456787109375, 0.052978515625, 0.0299072265625, -0.055145263671875, -0.054351806640625, -0.037628173828125, -0.004577...
Dahoas/sft-static
2023-04-20T21:13:57.000Z
[ "region:us" ]
Dahoas
null
null
3
4
2023-04-20T21:13:52
--- dataset_info: features: - name: prompt dtype: string - name: response dtype: string splits: - name: train num_bytes: 16012237 num_examples: 20000 download_size: 9353471 dataset_size: 16012237 --- # Dataset Card for "sft-static" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
394
[ [ -0.0447998046875, -0.0209808349609375, 0.0162353515625, 0.019775390625, -0.0272216796875, 0.018096923828125, 0.0150909423828125, -0.007686614990234375, 0.057403564453125, 0.031097412109375, -0.06756591796875, -0.0447998046875, -0.0196533203125, -0.0072097778...
p1atdev/niji-v5
2023-07-09T07:04:20.000Z
[ "license:cc0-1.0", "region:us" ]
p1atdev
null
null
17
4
2023-04-23T08:06:03
--- license: cc0-1.0 --- 私がnijijourney v5で生成した画像。自由に使えます。(けど詐欺とか犯罪につかうのはやめてね) おすすめの使い方としては、とりあえず中の画像を見てみて好きなものだけ選んで使うとよいと思います。 全体の注意点として、必ずしもすべての画像にキャプションが付属してるとは限らないのと、キャプションがついていてもそのまま使うと問題が発生する可能性があるのであまり信用しないでください。 また人為的なミスにより、4分割されずに結合している画像があったり、過度に分割されている画像があったりするので注意してください。 ## vol1 だいたい2000枚くらいで、多分全部デフォルトスタイルのものです。 解答すると中にLAION Aesthetic v2のスコアでいくつかのフォルダに分類されています。`aesthetic_50` ならスコア0.5以上のものです。`not_aesthetic` は0.5未満のものです。 ただし、`exceptional` フォルダはチェリーピックした画像が入っており、`aesthetic_xx` の中のものと重複します。`exclude` フォルダは、主観で奇妙なものを除いたものです。 `aesthetic_xx` と `exceptional` にはキャプション(BLIP2、Tagger)ファイルがついていますが、いろいろ変な調整しているのでおそらく各自でキャプションをつけ直したほうがいいと思います。 ## vol2 だいたい1200枚くらいで、複数のスタイルモードで生成したものが含まれます。 手動でスタイルごとにフォルダを分けています。 `default`、`cute`、`expressive`、`scenic` はそれぞれのスタイルっぽい画像で分類していますが、たまに分類を間違えています(ごめん)。 `clear` と `rough painting` は、個人的に近いと思ったスタイルの画像を入れていて、4つのスタイルの画像とは重複しません。 |default|cute|expressive|scenic|clear|rough painting| |-|-|-|-|-|-| |<img src="https://s3.amazonaws.com/moonup/production/uploads/6305db1fcfbde33ef7d480ff/ROBUltJHEdadypi8JJ7QZ.jpeg" width="200px" />|<img src="https://s3.amazonaws.com/moonup/production/uploads/6305db1fcfbde33ef7d480ff/lPpxxFZggOD4QZLgQ03WS.jpeg" width="200px" />|<img src="https://s3.amazonaws.com/moonup/production/uploads/6305db1fcfbde33ef7d480ff/E5T2nAxwiYxSORoGov_8e.jpeg" width="200px" />|<img src="https://s3.amazonaws.com/moonup/production/uploads/6305db1fcfbde33ef7d480ff/juur651e8PS1TcDVwxITm.jpeg" width="200px" />|<img src="https://s3.amazonaws.com/moonup/production/uploads/6305db1fcfbde33ef7d480ff/j9GUce5nsKMVN4z2E14sW.jpeg" width="300px" />|<img src="https://s3.amazonaws.com/moonup/production/uploads/6305db1fcfbde33ef7d480ff/w3OrlxnDtFEiDEUe8ey5c.jpeg" width="300px" />| ## vol3 450枚くらいです。キャプションは一切ついていません。 雰囲気で分類しています。 - `background`: 背景のみで人物が写っていないもの - `comic`: 白黒だったり漫画風なもの(百合が多いので微妙に注意) - `ink painting`: 水墨画・水彩っぽいもの - `scenic`: scenic っぽい画像で、人物が写っているものも含む。`background` の画像と一部重複する。 含まれる画像の例 |background|comic|ink painting|scenic| 
|-|-|-|-| |<img src="https://s3.amazonaws.com/moonup/production/uploads/6305db1fcfbde33ef7d480ff/ZoH3PCg918_WhoMfKu-JJ.png" width="300px" />|<img src="https://s3.amazonaws.com/moonup/production/uploads/6305db1fcfbde33ef7d480ff/i5KLwkPJN0guLgval5aBr.png" width="200px" />|<img src="https://s3.amazonaws.com/moonup/production/uploads/6305db1fcfbde33ef7d480ff/MrGOEretLVNjM4ZaO8yPe.png" width="200px" />|<img src="https://s3.amazonaws.com/moonup/production/uploads/6305db1fcfbde33ef7d480ff/uHH7rswou_9ZzL1-phbip.png" width="200px" />| ## vol4 現在は48枚です。 - `minimalist`: 非常にシンプルな感じの画風の画像 含まれる画像の例 |minimalist| |-| |<img src="https://s3.amazonaws.com/moonup/production/uploads/6305db1fcfbde33ef7d480ff/UCuGYVyvkqS7JmUseKF3c.png" width="200px" />|
2,763
[ [ -0.06072998046875, -0.021759033203125, 0.04193115234375, 0.03094482421875, -0.0408935546875, -0.009033203125, 0.00449371337890625, -0.05078125, 0.05230712890625, 0.051025390625, -0.068603515625, -0.0369873046875, -0.025390625, 0.0222320556640625, 0.01484...
Aruno/guanaco_jp
2023-04-24T03:45:26.000Z
[ "task_categories:text-generation", "language:ja", "license:apache-2.0", "region:us" ]
Aruno
null
null
3
4
2023-04-24T03:07:04
--- license: apache-2.0 task_categories: - text-generation language: - ja pretty_name: Guanaco Japanese Prompt --- Japanese Prompt of [GuanacoDataset](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset) extracted using `langdetect`.
244
[ [ -0.0238189697265625, -0.059326171875, 0.0341796875, 0.0379638671875, -0.022003173828125, 0.0208740234375, -0.003704071044921875, -0.0160369873046875, 0.07366943359375, 0.0224456787109375, -0.08917236328125, -0.048187255859375, -0.03533935546875, 0.0121536254...
NicholasSynovic/Victorian-Era-Authorship-Attribution
2023-04-25T17:32:52.000Z
[ "task_categories:text-classification", "size_categories:10K<n<100K", "language:en", "region:us" ]
NicholasSynovic
null
null
0
4
2023-04-25T16:30:22
--- language: - en pretty_name: Victorian Era Authorship Attribution Data Set task_categories: - text-classification size_categories: - 10K<n<100K --- # Victorian Era Authorship Attribution Data Set > GUNGOR, ABDULMECIT, Benchmarking Authorship Attribution Techniques Using Over A Thousand Books by Fifty Victorian Era Novelists, Purdue Master of Thesis, 2018-04 ## NOTICE This dataset was downloaded from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php) at [this link](https://archive.ics.uci.edu/ml/datasets/Victorian+Era+Authorship+Attribution). The [description](#description) of this dataset was copied from the source's dataset card. However, I have applied Markdown styling to prettify it and make it easier to navigate. ## Description > **Abstract**: To create the largest authorship attribution dataset, we extracted works of 50 well-known authors. To have a non-exhaustive learning, in training there are 45 authors whereas, in the testing, it's 50 ### Source They're extracted from the GDELT database. The GDELT Project is an open platform for research and analysis of global society and thus all datasets released by the GDELT Project are available for unlimited and unrestricted use for any academic, commercial, or governmental use of any kind without fee. ### Data Set Information To decrease the bias and create a reliable authorship attribution dataset the following criteria have been chosen to filter out authors in Gdelt database: English language writing authors, authors that have enough books available (at least 5), 19th century authors. With these criteria 50 authors have been selected and their books were queried through Big Query Gdelt database. The next task has been cleaning the dataset due to OCR reading problems in the original raw form. To achieve that, firstly all books have been scanned through to get the overall number of unique words and each words frequencies. 
While scanning the texts, the first 500 words and the last 500 words have been removed to take out specific features such as the name of the author, the name of the book and other word specific features that could make the classification task easier. After this step, we have chosen top 10,000 words that occurred in the whole 50 authors text data corpus. The words that are not in top 10,000 words were removed while keeping the rest of the sentence structure intact. The entire book is split into text fragments with 1000 words each. We separately maintained author and book identification number for each one of them in different arrays. Text segments with less than 1000 words were filled with zeros to keep them in the dataset as well. 1000 words make approximately 2 pages of writing, which is long enough to extract a variety of features from the document. Each instance in the training set consists of a text piece of 1000 words and an author id attached. In the testing set, there is only the text piece of 1000 words to do authorship attribution. Training data consists of 45 authors and testing data has 50 information. %34 of testing data is the percentile of unknown authors in the testing set. ### Attribute Information Each instance consists of 1000 word sequences that are divided from the works of every author's book. In the training, the author id is also provided. ### Relevant Papers * E. Stamatatos, A Survey of Modern Authorship Attribution Methods. Journal of the American Society for Information Science and Technology, 2009. ## Citation Request: * `GUNGOR, ABDULMECIT, Benchmarking Authorship Attribution Techniques Using Over A Thousand Books by Fifty Victorian Era Novelists, Purdue Master of Thesis, 2018-04`
3,689
[ [ -0.0044097900390625, -0.0241546630859375, 0.0244903564453125, 0.00012290477752685547, -0.016632080078125, -0.02093505859375, -0.0050811767578125, -0.027679443359375, -0.007320404052734375, 0.056427001953125, -0.033172607421875, -0.0367431640625, -0.0367431640625...
NicholasSynovic/Free-AutoTrain-VEAA
2023-04-25T17:42:58.000Z
[ "task_categories:text-classification", "size_categories:1K<n<10K", "source_datasets:NicholasSynovic/Victorian-Era-Authorship-Attribution", "language:en", "license:agpl-3.0", "region:us" ]
NicholasSynovic
null
null
0
4
2023-04-25T17:33:55
--- license: agpl-3.0 task_categories: - text-classification language: - en pretty_name: Victorian Era Authorship Attribution Data Set (For Free AutoTrain Account) size_categories: - 1K<n<10K source_datasets: - NicholasSynovic/Victorian-Era-Authorship-Attribution --- # Free AutoTrain VEAA > Victorian Era Authorship Attribution Data Set (For Free AutoTrain Account) ## About See the [original HF-hosted dataset](https://huggingface.co/datasets/NicholasSynovic/Victorian-Era-Authorship-Attribution) for more information. The code to generate this dataset came from this [GitHub Repo](https://github.com/NicholasSynovic/nlp-victorianAuthor).
645
[ [ 0.00287628173828125, -0.007663726806640625, 0.033843994140625, 0.01343536376953125, 0.00765228271484375, -0.00676727294921875, 0.0180206298828125, -0.025360107421875, 0.05206298828125, 0.0537109375, -0.06878662109375, -0.026031494140625, -0.0204620361328125, ...
lucasmccabe-lmi/oig_small_chip2_python
2023-04-25T22:30:03.000Z
[ "task_categories:text-generation", "size_categories:1K<n<10K", "language:en", "license:apache-2.0", "code", "python", "code-generation", "region:us" ]
lucasmccabe-lmi
null
null
2
4
2023-04-25T22:14:09
--- dataset_info: features: - name: instruction dtype: string - name: input dtype: string - name: output dtype: string splits: - name: train num_bytes: 1930175 num_examples: 4742 download_size: 741759 dataset_size: 1930175 license: apache-2.0 task_categories: - text-generation language: - en tags: - code - python - code-generation size_categories: - 1K<n<10K --- # Dataset Card for "oig_small_chip2_python" ### Dataset Summary From [LAION's Open Instruction Generalist (OIG) dataset](https://huggingface.co/datasets/laion/OIG), we use a 4775-prompt segment pertaining to Python code generation. OIG text elements are formatted as dialogue excerpts between a "human" and "bot" agent. The code generation prompt is parsed from the initial "human" agent's statement and the resultant response from the "bot" agent's statement. We then reformat the text/response pairs according to the format of the original Alpaca dataset; that is, instruction/input/output triplets. In cases where the instruction field does not specify the code language, we provide "Write the code in Python" in the input field. Otherwise, the input field is left blank. The OIG dataset was prepared by LAION, and released under the Apache 2.0 license. Numbers: - **Prompts**: 4775 - **Tokens**: 578083 using the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer (counting instruction+input+output)
1,441
[ [ -0.0307464599609375, -0.045257568359375, 0.01232147216796875, 0.01226806640625, -0.0111846923828125, -0.034027099609375, -0.006591796875, -0.026611328125, 0.011199951171875, 0.03204345703125, -0.046539306640625, -0.03924560546875, -0.01190948486328125, 0.008...
itacasehold/itacasehold
2023-04-30T13:13:21.000Z
[ "task_categories:summarization", "task_categories:text-classification", "size_categories:n<1K", "language:it", "license:apache-2.0", "legal", "region:us" ]
itacasehold
null
null
0
4
2023-04-26T17:05:50
--- license: apache-2.0 dataset_info: features: - name: url dtype: string - name: title dtype: string - name: doc dtype: string - name: summary dtype: string - name: materia dtype: string splits: - name: train num_bytes: 25541563 num_examples: 792 - name: validation num_bytes: 2932410 num_examples: 88 - name: test num_bytes: 6870636 num_examples: 221 download_size: 18051772 dataset_size: 35344609 task_categories: - summarization - text-classification language: - it tags: - legal pretty_name: ita_casehold size_categories: - n<1K --- # ITA-CASEHOLD ## Dataset Summary - This dataset contains the data used in the research of the ITA-CASEHOLD model, an extractive summarization model to extract holdings from Italian Legal Administrative documents. - The research paper titled 'Legal Holding Extraction from Italian Case Documents using Italian-LEGAL-BERT Text Summarization' is accepted for ICAIL 23. - It consists of 1101 pairs of judgments and their official holdings between the years 2019 and 2022 from the archives of [Italian Administrative Justice](https://www.giustizia-amministrativa.it/it/web/guest/massime). 
- The Administrative Justice system in Italy covers a wide range of issues, including public contracts, environmental protection, public services, immigration, taxes, and compensation for damages caused by the State ### Download the dataset To download the dataset, use the following lines: from datasets import load_dataset dataset = load_dataset("itacasehold/itacasehold") To split the train, test, and validation dataset, use dataset = load_dataset("itacasehold/itacasehold", split = 'train') ### Supported Tasks and Leaderboards Summarization, Multi-class Text classification ### Languages Italian ### Data Fields The dataset consists of - **URL**: link to the document - **Document**: The document - **Summary**: The holding of the document - **Materia** : Legal subject - **Title** : Title of the document ### Data Splits - **Train** : 792 - **Validation** : 88 - **Test** : 221 ### Source Data The data is collected from ['Judicial Administration site'](https://www.giustizia-amministrativa.it/it/web/guest/massime). ### Social Impact of Dataset Legal holdings are considered the most essential part of a legal decision because they summarize it without going into the merits of the specific case, establish a legal principle and set a legal precedent. The holdings writing is carried out by legal experts who, starting from a judgment, set out the applied principle of law in a clear, precise, and concise manner. We approached the problem of extracting legal holdings as an Extractive text summarization task. This Dataset addresses the Legal holding Extraction topic and so far the first and the only one present in the Italian language. This dataset contributes to Summarization in the Italian language and Summarization tasks in Legal domains. Apart from this, the Dataset can also be used as a multi-class text classification task utilizing legal subjects. ### Dataset Limitation This Dataset specifically focuses on the Italian Legal domain, and it is only in Italian.
The documents are only from the period of 2019-2022. ## Additional Information ### Dataset Curators The Dataset was curated by researchers from Scoula Superiore Sant'Anna as a part of the project ['Guistizia Agile (Agile Justice)'](https://www.unitus.it/it/unitus/mappatura-della-ricerca/articolo/giustizia-agile) funded by the Italian Ministry of Justice. ### Licensing Information The data sets are distributed under the `Apache 2.0` License. More information about the terms of use of the original data sets is listed [here](https://www.apache.org/licenses/LICENSE-2.0). ### Citation Information If you use this dataset then, please, cite the following paper: Legal Holding Extraction from Italian Case Documents using Italian-LEGAL-BERT Text Summarization. The citation will be added soon.
4,026
[ [ -0.01320648193359375, -0.0224761962890625, 0.0169525146484375, -0.0009503364562988281, -0.04248046875, -0.0151214599609375, -0.0165863037109375, -0.0255279541015625, 0.03729248046875, 0.041046142578125, -0.01377105712890625, -0.058013916015625, -0.06402587890625...
stjarvie/question_to_sql_with_ddl
2023-04-26T20:22:59.000Z
[ "region:us" ]
stjarvie
null
null
4
4
2023-04-26T18:06:56
--- dataset_info: features: - name: question dtype: string - name: sql dtype: string - name: schema dtype: string splits: - name: train num_bytes: 1856 num_examples: 10 - name: test num_bytes: 2005 num_examples: 10 download_size: 6616 dataset_size: 3861 --- # Dataset Card for "question_to_sql_with_ddl" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
482
[ [ -0.043914794921875, -0.053802490234375, 0.0248260498046875, 0.01141357421875, -0.01947021484375, -0.004291534423828125, 0.0262603759765625, 0.005218505859375, 0.044769287109375, 0.042144775390625, -0.06365966796875, -0.054779052734375, -0.023712158203125, -0...
gkrishnan/Resume_Dataset
2023-05-10T02:22:52.000Z
[ "region:us" ]
gkrishnan
null
null
0
4
2023-04-26T20:28:58
--- dataset_info: features: - name: Category dtype: string - name: summarized_resume dtype: string splits: - name: train num_bytes: 69749 num_examples: 183 download_size: 10468 dataset_size: 69749 --- # Dataset Card for "Resume_Dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
399
[ [ -0.032867431640625, -0.00011283159255981445, 0.01415252685546875, 0.02880859375, -0.0012464523315429688, 0.00800323486328125, 0.01247406005859375, 0.01107025146484375, 0.06195068359375, 0.036590576171875, -0.06414794921875, -0.055511474609375, -0.03778076171875,...
metaeval/universal-joy
2023-04-27T10:58:46.000Z
[ "task_categories:text-classification", "license:gpl", "multilingual", "emotion", "region:us" ]
metaeval
null
null
2
4
2023-04-27T10:55:41
--- license: gpl task_categories: - text-classification tags: - multilingual - emotion --- ```bib @inproceedings{lamprinidis2021universal, title={Universal Joy A Dataset and Results for Classifying Emotions Across Languages}, author={Lamprinidis, Sotiris and Bianchi, Federico and Hardt, Daniel and Hovy, Dirk}, year={2021}, volume={11th Workshop on Computational Approaches to Subjectivity, Sentiment & Social Media Analysis (WASSA 2021)} organization={Association for Computational Linguistics} } ```
515
[ [ -0.018646240234375, -0.0156707763671875, 0.00775146484375, 0.03973388671875, -0.018096923828125, 0.006999969482421875, -0.047882080078125, -0.0308685302734375, 0.0271453857421875, 0.0002758502960205078, -0.03631591796875, -0.05487060546875, -0.03472900390625, ...
bigcode/santacoder-fim-task
2023-04-28T11:12:16.000Z
[ "license:openrail", "code", "arxiv:2301.03988", "region:us" ]
bigcode
null
null
1
4
2023-04-28T11:07:59
--- dataset_info: features: - name: name dtype: string - name: language dtype: string - name: prompt dtype: string - name: suffix dtype: string - name: canonical_solution dtype: string - name: tests dtype: string splits: - name: train num_bytes: 8627440 num_examples: 4792 download_size: 1918113 dataset_size: 8627440 license: openrail tags: - code --- # Dataset Card for "santacoder-fim-task" This is a dataset of prompts and solutions to the fill-in-the-middle (FIM) task presented in the [SantaCoder] paper. This dataset was generated using [this notebook](https://github.com/nuprl/MultiPL-E/blob/main/fill_in_the_middle/dataset_builder.ipynb). [SantaCoder]: https://arxiv.org/abs/2301.03988
751
[ [ -0.056396484375, -0.0297088623046875, 0.0077056884765625, 0.006282806396484375, -0.0191802978515625, 0.01531219482421875, 0.001422882080078125, 0.01114654541015625, 0.0262908935546875, 0.041778564453125, -0.08087158203125, -0.029022216796875, -0.0255279541015625...
enryu43/twitter100m_tweets
2023-05-02T16:44:34.000Z
[ "region:us" ]
enryu43
null
null
8
4
2023-04-30T13:59:41
--- dataset_info: features: - name: user dtype: string - name: id dtype: int64 - name: tweet dtype: string - name: replies dtype: int64 - name: retweets dtype: int64 - name: likes dtype: int64 - name: quotes dtype: int64 - name: date dtype: string splits: - name: train num_bytes: 20356236942 num_examples: 88084332 download_size: 9614694227 dataset_size: 20356236942 --- # Dataset Card for "twitter100m_tweets" Dataset with tweets for [this post](https://medium.com/@enryu9000/fun-with-large-scale-tweet-analysis-783c96b45df4).
595
[ [ -0.0250396728515625, -0.044219970703125, 0.025115966796875, 0.064208984375, -0.04266357421875, 0.01163482666015625, -0.0131988525390625, 0.0059661865234375, 0.06488037109375, 0.04974365234375, -0.053436279296875, -0.058258056640625, -0.04644775390625, -0.027...
akhmedsakip/ravdess-singing-emotions
2023-05-01T10:53:15.000Z
[ "region:us" ]
akhmedsakip
null
null
0
4
2023-05-01T10:48:18
--- dataset_info: features: - name: audio dtype: audio: sampling_rate: 16000 - name: label dtype: class_label: names: '0': fearful '1': neutral '2': calm '3': happy '4': sad '5': angry splits: - name: train num_bytes: 120236438.52470355 num_examples: 809 - name: test num_bytes: 30381090.47529644 num_examples: 203 download_size: 115056541 dataset_size: 150617529.0 --- # Dataset Card for "ravdess-singing-emotions" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
676
[ [ -0.039947509765625, -0.01271820068359375, -0.007045745849609375, 0.04071044921875, -0.00972747802734375, 0.000865936279296875, -0.002666473388671875, -0.0215911865234375, 0.068115234375, 0.02728271484375, -0.07550048828125, -0.054473876953125, -0.038726806640625...
bjoernp/wikipedia_sentence_level_en_de
2023-05-03T22:19:41.000Z
[ "region:us" ]
bjoernp
null
null
0
4
2023-05-03T22:14:51
--- dataset_info: features: - name: sentences dtype: string - name: de_sentences dtype: string splits: - name: train num_bytes: 18768712971 num_examples: 27736968 download_size: 11340576833 dataset_size: 18768712971 --- # Dataset Card for "wikipedia_sentence_level_en_de" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
434
[ [ -0.04425048828125, -0.039794921875, 0.014556884765625, 0.016021728515625, -0.007720947265625, -0.01922607421875, -0.00836181640625, -0.016845703125, 0.04913330078125, 0.0263671875, -0.053924560546875, -0.06732177734375, -0.040435791015625, 0.0041542053222656...
mnaguib/QuaeroFrenchMed
2023-09-13T20:01:06.000Z
[ "task_categories:token-classification", "language:fr", "medical", "region:us" ]
mnaguib
The QUAEROFrenchMed is a manually annotated corpus developed as a resource for named entity named recognition and normalization.
@article{neveol2014quaero, title={The QUAERO French medical corpus: A ressource for medical entity recognition and normalization}, author={N{\'e}v{\'e}ol, Aur{\'e}lie and Grouin, Cyril and Leixa, Jeremy and Rosset, Sophie and Zweigenbaum, Pierre}, journal={Proc of BioTextMining Work}, pages={24--30}, year={2014} }
1
4
2023-05-04T13:35:50
--- language: - fr task_categories: - token-classification tags: - medical --- ⚠️ **WARNING : THIS VERSION OF THE DATASET IS MODIFIED IN FORMAT AND CONTENT FROM THE ORIGINAL DATASET AVAILABLE [HERE](https://quaerofrenchmed.limsi.fr/). NESTED ENTITIES HAVE BEEN REMOVED AND THIS DATASET ONLY RETAINS THE LARGEST OF NESTED ENTITIES. OVERALL, THIS CORRESPONDS TO 80% OF THE ENTITIES ANNOTATED IN THE ORIGINAL DATASET.** ⚠️ The QUAERO French Medical Corpus has been initially developed as a resource for named entity recognition and normalization [1]. It was then improved with the purpose of creating a gold standard set of normalized entities for French biomedical text, that was used in the CLEF eHealth evaluation lab [2][3]. A selection of MEDLINE titles and EMEA documents were manually annotated. The annotation process was guided by concepts in the Unified Medical Language System (UMLS): 1. Ten types of clinical entities, as defined by the following UMLS Semantic Groups (Bodenreider and McCray 2003) were annotated: Anatomy (ANAT), Chemical and Drugs (CHEM), Devices (DEVI), Disorders (DISO), Geographic Areas (GEOG), Living Beings (LIVB), Objects (OBJC), Phenomena (PHEN), Physiology (PHYS), Procedures (PROC). 2. The annotations were made in a comprehensive fashion, so that nested entities were marked, and entities could be mapped to more than one UMLS concept. In particular: (a) If a mention can refer to more than one Semantic Group, all the relevant Semantic Groups should be annotated. For instance, the mention “récidive” (recurrence) in the phrase “prévention des récidives” (recurrence prevention) should be annotated with the category “DISORDER” (CUI C2825055) and the category “PHENOMENON” (CUI C0034897); (b) If a mention can refer to more than one UMLS concept within the same Semantic Group, all the relevant concepts should be annotated. 
For instance, the mention “maniaques” (obsessive) in the phrase “patients maniaques” (obsessive patients) should be annotated with CUIs C0564408 and C0338831 (category “DISORDER”); (c) Entities which span overlaps with that of another entity should still be annotated. For instance, in the phrase “infarctus du myocarde” (myocardial infarction), the mention “myocarde” (myocardium) should be annotated with category “ANATOMY” (CUI C0027061) and the mention “infarctus du myocarde” should be annotated with category “DISORDER” (CUI C0027051) For more details, please refer to [the official webpage](https://quaerofrenchmed.limsi.fr/). ⚠️ **WARNING : THIS VERSION OF THE DATASET IS MODIFIED IN FORMAT AND CONTENT FROM THE ORIGINAL DATASET AVAILABLE [HERE](https://quaerofrenchmed.limsi.fr/). NESTED ENTITIES HAVE BEEN REMOVED AND THIS DATASET ONLY RETAINS THE LARGEST OF NESTED ENTITIES. OVERALL, THIS CORRESPONDS TO 80% OF THE ENTITIES ANNOTATED IN THE ORIGINAL DATASET.** ⚠️ In this format, each word of the sentence has an associated ner_tag, corresponding to the type of clinical entity, here is the mapping : ``` 0: "O" 1: "DISO" 2: "PROC" 3: "ANAT" 4: "LIVB" 5: "CHEM" 6: "PHYS" 7: "PHEN" 8: "GEOG" 9: "DEVI" 10: "OBJC" ``` [1] Névéol A, Grouin C, Leixa J, Rosset S, Zweigenbaum P. The QUAERO French Medical Corpus: A Ressource for Medical Entity Recognition and Normalization. Fourth Workshop on Building and Evaluating Ressources for Health and Biomedical Text Processing - BioTxtM2014. 2014:24-30 [2] Névéol A, Grouin C, Tannier X, Hamon T, Kelly L, Goeuriot L, Zweigenbaum P. (2015) Task 1b of the CLEF eHealth Evaluation Lab 2015: Clinical Named Entity Recognition. CLEF 2015 Evaluation Labs and Workshop: Online Working Notes, CEUR-WS, September, 2015. [3] Névéol A, Cohen, KB, Grouin C, Hamon T, Lavergne T, Kelly L, Goeuriot L, Rey G, Robert A, Tannier X, Zweigenbaum P. Clinical Information Extraction at the CLEF eHealth Evaluation lab 2016. 
CLEF 2016, Online Working Notes, CEUR-WS 1609.2016:28-42.
3,894
[ [ -0.0286712646484375, -0.0271453857421875, 0.043548583984375, 0.01384735107421875, -0.018096923828125, -0.00882720947265625, -0.0196075439453125, -0.05462646484375, 0.031829833984375, 0.04937744140625, -0.011932373046875, -0.06756591796875, -0.053375244140625, ...
tekkithorse/mlp-show-scripts
2023-05-21T23:35:44.000Z
[ "region:us" ]
tekkithorse
null
null
0
4
2023-05-04T15:52:43
Taken from https://www.kaggle.com/datasets/liury123/my-little-pony-transcript?select=pony_synopsis.csv. Contains the show script and various data inferred from it.
163
[ [ 0.002193450927734375, -0.047515869140625, 0.01861572265625, -0.0006899833679199219, 0.0251617431640625, 0.005855560302734375, 0.0032787322998046875, -0.015167236328125, 0.061309814453125, 0.05902099609375, -0.08917236328125, -0.01413726806640625, -0.028091430664...
llm-book/jawiki-20220404-c400
2023-10-25T15:26:19.000Z
[ "task_categories:question-answering", "size_categories:10M<n<100M", "language:ja", "license:mit", "region:us" ]
llm-book
This dataset is used for AIO (AI王), a competition to promote research on question answering systems for the Japanese language. This dataset contains passages, each of which consists of consecutive sentences no longer than 400 characters from Japanese Wikipedia as of 2022-04-04.
null
0
4
2023-05-05T07:34:52
--- license: mit task_categories: - question-answering language: - ja size_categories: - 10M<n<100M --- # Dataset Card for jawiki-20220404-c400 This dataset contains passages, each of which consists of consecutive sentences no longer than 400 characters from Japanese Wikipedia as of 2022-04-04. This dataset is used in baseline systems for [the AI王 question answering competition](https://sites.google.com/view/project-aio/home), such as [cl-tohoku/AIO3_BPR_baseline](https://github.com/cl-tohoku/AIO3_BPR_baseline). Please refer to [the original repository](https://github.com/cl-tohoku/quiz-datasets) for further details.
627
[ [ -0.035247802734375, -0.04400634765625, 0.0299072265625, 0.01491546630859375, -0.03692626953125, -0.01026153564453125, -0.00797271728515625, -0.01885986328125, 0.00469207763671875, 0.05291748046875, -0.069091796875, -0.0309600830078125, -0.01544952392578125, ...
ybelkada/food101-tiny
2023-05-05T16:13:57.000Z
[ "region:us" ]
ybelkada
null
null
0
4
2023-05-05T16:13:56
--- dataset_info: features: - name: image dtype: image - name: label dtype: class_label: names: '0': apple_pie '1': baby_back_ribs '2': baklava '3': beef_carpaccio '4': beef_tartare '5': beet_salad '6': beignets '7': bibimbap '8': bread_pudding '9': breakfast_burrito '10': bruschetta '11': caesar_salad '12': cannoli '13': caprese_salad '14': carrot_cake '15': ceviche '16': cheesecake '17': cheese_plate '18': chicken_curry '19': chicken_quesadilla '20': chicken_wings '21': chocolate_cake '22': chocolate_mousse '23': churros '24': clam_chowder '25': club_sandwich '26': crab_cakes '27': creme_brulee '28': croque_madame '29': cup_cakes '30': deviled_eggs '31': donuts '32': dumplings '33': edamame '34': eggs_benedict '35': escargots '36': falafel '37': filet_mignon '38': fish_and_chips '39': foie_gras '40': french_fries '41': french_onion_soup '42': french_toast '43': fried_calamari '44': fried_rice '45': frozen_yogurt '46': garlic_bread '47': gnocchi '48': greek_salad '49': grilled_cheese_sandwich '50': grilled_salmon '51': guacamole '52': gyoza '53': hamburger '54': hot_and_sour_soup '55': hot_dog '56': huevos_rancheros '57': hummus '58': ice_cream '59': lasagna '60': lobster_bisque '61': lobster_roll_sandwich '62': macaroni_and_cheese '63': macarons '64': miso_soup '65': mussels '66': nachos '67': omelette '68': onion_rings '69': oysters '70': pad_thai '71': paella '72': pancakes '73': panna_cotta '74': peking_duck '75': pho '76': pizza '77': pork_chop '78': poutine '79': prime_rib '80': pulled_pork_sandwich '81': ramen '82': ravioli '83': red_velvet_cake '84': risotto '85': samosa '86': sashimi '87': scallops '88': seaweed_salad '89': shrimp_and_grits '90': spaghetti_bolognese '91': spaghetti_carbonara '92': spring_rolls '93': steak '94': strawberry_shortcake '95': sushi '96': tacos '97': takoyaki '98': tiramisu '99': tuna_tartare '100': waffles splits: - name: train num_bytes: 5343359.0 num_examples: 100 download_size: 5256650 dataset_size: 5343359.0 --- # Dataset 
Card for "food101-tiny" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
3,209
[ [ -0.035430908203125, -0.017425537109375, 0.0150604248046875, -0.0013837814331054688, 0.00482177734375, -0.011810302734375, 0.00841522216796875, -0.01201629638671875, 0.0804443359375, 0.0198516845703125, -0.05224609375, -0.0294647216796875, -0.0401611328125, -...
aneeshas/imsdb-comedy-movie-scripts
2023-05-07T21:27:39.000Z
[ "region:us" ]
aneeshas
null
null
0
4
2023-05-07T21:27:31
--- dataset_info: features: - name: Comedy dtype: string splits: - name: train num_bytes: 34816719 num_examples: 150 download_size: 15474490 dataset_size: 34816719 --- # Dataset Card for "imsdb-comedy-movie-scripts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
372
[ [ -0.037811279296875, -0.0110015869140625, 0.0090179443359375, 0.0257568359375, -0.0237579345703125, 0.00978851318359375, 0.01947021484375, 0.0239410400390625, 0.08135986328125, 0.044158935546875, -0.07830810546875, -0.049896240234375, -0.046905517578125, -0.0...
aneeshas/imsdb-500tokendrama-movie-scripts
2023-05-10T19:37:26.000Z
[ "region:us" ]
aneeshas
null
null
0
4
2023-05-10T19:37:25
--- dataset_info: features: - name: Drama dtype: string splits: - name: train num_bytes: 307903 num_examples: 652 download_size: 189402 dataset_size: 307903 --- # Dataset Card for "imsdb-500tokendrama-movie-scripts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
372
[ [ -0.04345703125, -0.0005283355712890625, 0.007526397705078125, 0.025543212890625, -0.01403045654296875, 0.01213836669921875, 0.0196533203125, 0.018310546875, 0.073974609375, 0.0447998046875, -0.0765380859375, -0.050384521484375, -0.04962158203125, -0.00327491...
lucasmccabe-lmi/sql-create-context_alpaca_style
2023-05-15T21:16:51.000Z
[ "region:us" ]
lucasmccabe-lmi
null
null
5
4
2023-05-12T02:32:40
--- dataset_info: features: - name: instruction dtype: string - name: input dtype: string - name: output dtype: string splits: - name: train num_bytes: 28203562.0 num_examples: 78577 download_size: 9312899 dataset_size: 28203562.0 --- # Dataset Card for "sql-create-context_alpaca_style" We provide a minor modification of the [sql-create-context](https://huggingface.co/datasets/b-mc2/sql-create-context) dataset. In particular, we 1) prepend each instruction with the phrase, "Write a SQL query that answers the following question: " and 2) prepend each context with the phrase, "The relevant table was constructed using the following SQL CREATE TABLE statement: ". ## Numbers: Prompts: 78577 Tokens: 6438971 using the EleutherAI/gpt-neox-20b tokenizer (counting instruction+input+output)
830
[ [ -0.05230712890625, -0.0802001953125, 0.01523590087890625, 0.028839111328125, -0.047454833984375, -0.035247802734375, 0.00732421875, -0.0158233642578125, 0.07257080078125, 0.0650634765625, -0.07305908203125, -0.04229736328125, -0.0138397216796875, -0.00079870...
lighteval/synthetic_reasoning_natural
2023-05-12T09:30:32.000Z
[ "region:us" ]
lighteval
3
4
2023-05-12T08:59:11
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
lighteval/DyckLanguage
2023-05-12T12:10:38.000Z
[ "region:us" ]
lighteval
0
4
2023-05-12T12:04:30
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
KakologArchives/KakologArchives
2023-11-03T01:19:28.000Z
[ "task_categories:text-classification", "language:ja", "license:mit", "region:us" ]
KakologArchives
null
null
2
4
2023-05-12T13:31:56
--- pretty_name: ニコニコ実況 過去ログアーカイブ license: mit language: - ja task_categories: - text-classification --- # ニコニコ実況 過去ログアーカイブ ニコニコ実況 過去ログアーカイブは、[ニコニコ実況](https://jk.nicovideo.jp)のサービス開始から現在までのすべての過去ログコメントを収集したデータセットです。 去る2020年12月、ニコニコ実況は[ニコニコ生放送内の一公式チャンネルとしてリニューアル](https://blog.nicovideo.jp/niconews/143148.html)されました。 これに伴い、2009年11月から運用されてきた旧システムは提供終了となり(事実上のサービス終了)、torne や BRAVIA などの家電への対応が軒並み終了する中、当時の生の声が詰まった約11年分の過去ログも同時に失われることとなってしまいました。 そこで 5ch の DTV 板の住民が中心となり、旧ニコニコ実況が終了するまでに11年分の全チャンネルの過去ログをアーカイブする計画が立ち上がりました。紆余曲折あり Nekopanda 氏が約11年分のラジオや BS も含めた全チャンネルの過去ログを完璧に取得してくださったおかげで、11年分の過去ログが電子の海に消えていく事態は回避できました。 しかし、旧 API が廃止されてしまったため過去ログを API 経由で取得することができなくなり、またアーカイブされた過去ログから見たい範囲のログを探す場合も、アーカイブのサイズが合計約 150GB もあることから、とても以前のように手軽に過去ログに触れることはできなくなってしまいました。 一方、ニコニコ生放送内の一公式チャンネルとして移行した新ニコニコ実況では、タイムシフト(旧ニコニコ実況での過去ログに相当)の視聴期限は3週間までとなっているため、その期限を過ぎると過去ログは視聴できなくなってしまいます。 また一般会員は事前にタイムシフト予約をしておく必要があるなど、以前のような利便性は失われています。 私たちは、ニコニコ実況に投稿された日本のテレビ放送についてのコメントは、当時の世相や時代背景を端的に表す、歴史的価値のある資料だと考えています。 このデータセットでは、ニコニコ実況のすべての過去ログを後世に残すべく、Nekopanda 氏が配布されていた旧ニコニコ実況の 2020/12/15 までのすべての過去ログに加え、コミュニティベースの番組も含めた新ニコニコ実況の当日分の過去ログを5分に1回収集し、随時反映しています。 過去ログをかんたんに取得するための [API](https://jikkyo.tsukumijima.net/) もあります。 よろしければそちらもご活用ください。 ## Dataset Structure ### Builder Config | Key | Value Type | Default Value | Description | | --------------- | ---------- | ------------- | ----------- | | channel_id | string | None | 過去ログを取得するニコニコ実況チャンネルの ID (省略時はすべてのチャンネル) | | year | int | None | 取得する過去ログの年 (省略時はすべての年) | | number_of_files | int | None | 取得する過去ログファイルの数 (省略時はすべてのファイル) | ### Data Splits | Split | Approximate Size | Description | | ------- | ---------------- | ----------- | | sample | 1GB | サンプルとして、2022年中に投稿された TOKYO MX (ID: jk9) のすべての過去ログコメントを取得します。1GB ほどあります。 | | all | 180GB | 全チャンネル/全期間のすべての過去ログコメントを取得します。180GB 近くあるため注意してください。 | ### Data Fields | Field | Type | Description | | --------------- | -------- | ----------- | | thread | string | コメントのスレッド ID | | no | int64 | コメント番号 (コメ番) | | vpos | int64 
| スレッド ID から起算したコメントの再生位置 (1/100秒) | | date | int64 | コメント投稿時間の UNIX タイムスタンプ | | date_usec | int64 | コメント投稿時間の小数点以下の時間 | | user_id | string | ユーザー ID (コマンドに 184 が指定されている場合は匿名化され、1週間ほどでシャッフルされる) | | mail | string | コメントのコマンド (184, red naka big など、省略されることもある) | | premium | boolean | コメントしたユーザーがプレミアム会員であれば True | | anonymity | boolean | 匿名コメントであれば True | | content | string | コメント本文 (AA など、まれに複数行コメントがあるので注意) | ## Example ```python from datasets import load_dataset dataset = load_dataset('KakologArchives/KakologArchives', 'all', channel_id='jk211', year=2023, number_of_files=10) for data in dataset['train']: print(data) ``` ## Licensing Information [MIT License](https://opensource.org/license/mit/)
2,966
[ [ -0.060272216796875, -0.0361328125, 0.016937255859375, 0.0284881591796875, -0.048004150390625, -0.0018281936645507812, -0.01189422607421875, -0.0277099609375, 0.05548095703125, 0.0288848876953125, -0.061492919921875, -0.041412353515625, -0.029754638671875, 0....
orionweller/NevIR
2023-05-26T14:53:16.000Z
[ "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:1K<n<10K", "language:en", "license:mit", "negation", "information_retrieval", "IR", "arxiv:2212.10002", "arxiv:2305.07614", "region:us" ]
orionweller
null
null
1
4
2023-05-12T19:40:48
--- license: mit language: - en language_creators: - crowdsourced multilinguality: - monolingual pretty_name: NevIR size_categories: - 1K<n<10K tags: - negation - information_retrieval - IR --- # Dataset Card for NevIR: Negation in Neural Information Retrieval ## Dataset Description - **Repository:** [https://github.com/orionw/NevIR](https://github.com/orionw/NevIR) - **Paper:** [https://arxiv.org/abs/2212.10002](https://arxiv.org/abs/2212.10002) - **Point of Contact:** oweller@cs.jhu.edu ## Dataset Summary Data from the paper: ["NevIR: Negation in Neural Information Retrieval"](https://arxiv.org/abs/2305.07614). If you use this dataset, we would appreciate you citing our work: ``` @inproceedings{weller-et-al-2023-nevir, title={NevIR: Negation in Neural Information Retrieval}, author={Weller, Orion and Lawrie, Dawn, and Van Durme, Benjamin}, year={2023}, eprint={2305.07614}, archivePrefix={arXiv}, year={2023} } ``` Please also consider citing the work that created the initial documents: ``` @inproceedings{ravichander-et-al-2022-condaqa, title={CONDAQA: A Contrastive Reading Comprehension Dataset for Reasoning about Negation}, author={‪Ravichander‬, Abhilasha and Gardner, Matt and Marasovi\'{c}, Ana}, proceedings={EMNLP 2022}, year={2022} } ``` From the paper: "Negation is a common everyday phenomena and has been a consistent area of weakness for language models (LMs). Although the Information Retrieval (IR) community has adopted LMs as the backbone of modern IR architectures, there has been little to no research in understanding how negation impacts neural IR. We therefore construct a straightforward benchmark on this theme: asking IR models to rank two documents that differ only by negation. We show that the results vary widely according to the type of IR architecture: cross-encoders perform best, followed by late-interaction models, and in last place are bi-encoder and sparse neural architectures. 
We find that most current information retrieval models do not consider negation, performing similarly or worse than randomly ranking.We show that although the obvious approach of continued fine-tuning on a dataset of contrastive documents containing negations increases performance (as does model size), there is still a large gap between machine and human performance." ### Supported Tasks and Leaderboards The task is to rank each query in the pair correctly, where only one query is relevant to one document in the pair. There is no official leaderboard. ### Language English ## Dataset Structure ### Data Instances Here's an example instance: ``` { "id": "1-2", "WorkerId": 0, "q1": "Which mayor did more vetoing than anticipated?", "q2": "Which mayor did less vetoing than anticipated?", "doc1": "In his first year as mayor, Medill received very little legislative resistance from the Chicago City Council. While he vetoed what was an unprecedented eleven City Council ordinances that year, most narrowly were involved with specific financial practices considered wasteful and none of the vetoes were overridden. He used his new powers to appoint the members of the newly constituted Chicago Board of Education and the commissioners of its constituted public library. His appointments were approved unanimously by the City Council.", "doc2": "In his first year as mayor, Medill received very little legislative resistance from the Chicago City Council. While some expected an unprecedented number of vetoes, in actuality he only vetoed eleven City Council ordinances that year, and most of those were narrowly involved with specific financial practices he considered wasteful and none of the vetoes were overridden. He used his new powers to appoint the members of the newly constituted Chicago Board of Education and the commissioners of its constituted public library. His appointments were approved unanimously by the City Council." 
} ``` ### Data Fields * `id`: unique ID for the pair, the first number indicates the document pair number in CondaQA and the second number indicates the PassageEditID in CondaQA. * `WorkerId`: The ID for the Worker who created the queries for the pair. * `q1`: the query that is only relevant to `doc1` * `q2`: the query that is only relevant to `doc2` * `doc1`: the original document, from CondaQA * `doc2`: the edited document, from CondaQA ### Data Splits Data splits can be accessed as: ``` from datasets import load_dataset train_set = load_dataset("orionweller/nevir", "train") dev_set = load_dataset("orionweller/nevir", "validation") test_set = load_dataset("orionweller/nevir", "test") ``` ## Dataset Creation Full details are in the paper: https://arxiv.org/abs/2305.07614
4,727
[ [ -0.04547119140625, -0.0435791015625, 0.0249176025390625, -0.00811004638671875, 0.003002166748046875, -0.023834228515625, -0.0068817138671875, -0.0247955322265625, 0.03289794921875, 0.04632568359375, -0.026519775390625, -0.049713134765625, -0.03887939453125, ...
Nan-Do/code-search-net-javascript
2023-05-15T00:57:43.000Z
[ "task_categories:text-generation", "task_categories:text2text-generation", "task_categories:summarization", "language:en", "license:apache-2.0", "code", "javascript", "CodeSearchNet", "summary", "region:us" ]
Nan-Do
null
null
1
4
2023-05-14T04:31:20
--- dataset_info: features: - name: repo dtype: string - name: path dtype: string - name: func_name dtype: string - name: original_string dtype: string - name: language dtype: string - name: code dtype: string - name: code_tokens sequence: string - name: docstring dtype: string - name: docstring_tokens sequence: string - name: sha dtype: string - name: url dtype: string - name: partition dtype: string - name: summary dtype: string splits: - name: train num_bytes: 543032741 num_examples: 138155 download_size: 182237165 dataset_size: 543032741 license: apache-2.0 task_categories: - text-generation - text2text-generation - summarization language: - en tags: - code - javascript - CodeSearchNet - summary pretty_name: JavaScript CodeSearchNet with Summaries --- # Dataset Card for "code-search-net-javascript" ## Dataset Description - **Homepage:** None - **Repository:** https://huggingface.co/datasets/Nan-Do/code-search-net-JavaScript - **Paper:** None - **Leaderboard:** None - **Point of Contact:** [@Nan-Do](https://github.com/Nan-Do) ### Dataset Summary This dataset is the JavaScript portion of the CodeSarchNet annotated with a summary column. The code-search-net dataset includes open source functions that include comments found at GitHub. The summary is a short description of what the function does. ### Languages The dataset's comments are in English and the functions are coded in JavaScript ### Data Splits Train, test, validation labels are included in the dataset as a column. ## Dataset Creation May of 2023 ### Curation Rationale This dataset can be used to generate instructional (or many other interesting) datasets that are useful to train LLMs ### Source Data The CodeSearchNet dataset can be found at https://www.kaggle.com/datasets/omduggineni/codesearchnet ### Annotations This datasets include a summary column including a short description of the function. 
#### Annotation process The annotation procedure was done using [Salesforce](https://huggingface.co/Salesforce) T5 summarization models. A sample notebook of the process can be found at https://github.com/Nan-Do/OpenAssistantInstructionResponsePython The annontations have been cleaned to make sure there are no repetitions and/or meaningless summaries. (some may still be present in the dataset) ### Licensing Information Apache 2.0
2,452
[ [ -0.023101806640625, -0.01715087890625, 0.0007348060607910156, 0.016937255859375, -0.00754547119140625, 0.0007338523864746094, -0.017242431640625, -0.0085601806640625, 0.053253173828125, 0.033660888671875, -0.04541015625, -0.07513427734375, -0.02734375, 0.006...
Englishman2022/prosocial-dialog-filtered
2023-05-14T17:48:49.000Z
[ "task_categories:conversational", "task_categories:text-classification", "task_ids:dialogue-generation", "task_ids:multi-class-classification", "language_creators:crowdsourced", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:Proso...
Englishman2022
null
null
1
4
2023-05-14T12:41:10
--- license: cc-by-4.0 task_categories: - conversational - text-classification language: - en source_datasets: - ProsocialDialog language_creators: - crowdsourced - machine-generated multilinguality: - monolingual pretty_name: ProsocialDialogFiltered tags: - dialogue - dialogue safety - social norm - rules-of-thumb size_categories: - 10K<n<100K task_ids: - dialogue-generation - multi-class-classification --- ## Dataset Summary ProsocialDialogFiltered is a filtered version of the ProsocialDialog dataset. Multiple versions are present: - In train_no_casual, rows with the label "casual" have been filtered out as a starting point. - In train_no_possibly, rows with "possibly needs caution" have been filtered out. - In train_no_probably, rows with "probably needs caution" have been filtered out, as I found those to be largely pointless as well, leaving only "needs caution" and "needs intervention". - In the final train dataset, rows containing multiple phrases such as "You should not" and "you should refrain from" have been filtered out. This is done in an attempt to reduce the number of refusals language models issue to the user, in order to create better, and more open models. ProsocialDialog is a large-scale multi-turn English dialogue dataset to teach conversational agents to respond to problematic content. **For more information on the source dataset, refer to the original official [huggingface](https://huggingface.co/datasets/allenai/prosocial-dialog) and [paper](https://arxiv.org/abs/2205.12688).** Possible drawbacks: - Some ending messages have been cut off. This is only of concern if you rely on the 'episode_done' indicator. ## Languages English ## Additional Information ### Citation ``` @inproceedings{kim2022prosocialdialog, title={ProsocialDialog: A Prosocial Backbone for Conversational Agents}, author={Hyunwoo Kim and Youngjae Yu and Liwei Jiang and Ximing Lu and Daniel Khashabi and Gunhee Kim and Yejin Choi and Maarten Sap}, booktitle={EMNLP}, year=2022 } ```
2,025
[ [ -0.026214599609375, -0.056884765625, 0.028076171875, 0.0281219482421875, -0.0309295654296875, -0.017791748046875, -0.01369476318359375, -0.0209197998046875, 0.0175018310546875, 0.057830810546875, -0.06658935546875, -0.03558349609375, -0.036712646484375, 0.02...
scaredmeow/shopee-reviews-tl-binary
2023-05-19T19:44:57.000Z
[ "task_categories:text-classification", "size_categories:10K<n<100K", "language:tl", "license:odc-by", "reviews", "shopee", "doi:10.57967/hf/0657", "region:us" ]
scaredmeow
null
null
0
4
2023-05-14T17:14:40
--- license: odc-by task_categories: - text-classification language: - tl tags: - reviews - shopee size_categories: - 10K<n<100K dataset_info: features: - name: text dtype: string - name: label dtype: class_label: names: '0': negative '1': positive --- # Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** [Enhancement to Low Resource Text Classification via Sequential Transfer Learning](#) - **Leaderboard:** - **Point of Contact:** [Neil Riego](mailto:neilchristianriego3@gmail.com) ### Dataset Summary This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances A typical data point, comprises of a text and the corresponding label. An example from the YelpReviewFull test set looks as follows: ``` { 'label': pos, 'text': 'Huyyy ang gandaaaaaaaaaaa. Grabe sobrang ganda talaga wala ako masabi. Complete orders pa pinadala sa akin. Buti hindi nabasag kahit walang bubble wrap. Okay na lang din para save mother earth and at least hindi nabasag hehe. Oorder ulit ako ang ganda eh' } ``` ### Data Fields - 'text': The review texts are escaped using double quotes ("), and any internal double quote is escaped by 2 double quotes (""). - 'label': Corresponds to the score associated with the review (between positive and negative). ### Data Splits The Shopee reviews tl binary dataset is constructed by randomly taking 14000 training samples and 3000 samples for testing and validation for each review star from neg and pos. In total there are 28000 training samples and 6000 each in validation and testing samples. 
## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
2,818
[ [ -0.02557373046875, -0.03582763671875, -0.00849151611328125, 0.0189666748046875, -0.027069091796875, -0.00374603271484375, -0.0177001953125, -0.035888671875, 0.025787353515625, 0.041229248046875, -0.045166015625, -0.05731201171875, -0.054351806640625, 0.01612...
lucasmccabe-lmi/FLAN_CoT_alpaca_style
2023-05-15T18:28:43.000Z
[ "arxiv:2210.11416", "region:us" ]
lucasmccabe-lmi
null
null
4
4
2023-05-15T18:22:04
--- dataset_info: features: - name: output dtype: string - name: instruction dtype: string - name: input dtype: string splits: - name: train num_bytes: 37140971 num_examples: 74771 download_size: 14062550 dataset_size: 37140971 --- # Dataset Card for "FLAN_CoT_alpaca_style" We provide a dataset representing the 9 chain-of-thought (reasoning) fine-tuning tasks from [FLAN](https://arxiv.org/pdf/2210.11416.pdf). Minor formatting has been applied: - We apply an Alpaca-style format (i.e. instruction/input/output fields) - If the question is multiple-choice, the options are provided in the input field - The phrase "Explain your reasoning step-by-step before providing the correct answer." is added to the end of the instruction field. Numbers: Prompts: 74771 Tokens: 9016176 using the EleutherAI/gpt-neox-20b tokenizer (counting instruction+input+output)
898
[ [ -0.06890869140625, -0.07818603515625, 0.0318603515625, 0.01042938232421875, -0.026123046875, -0.023193359375, -0.01013946533203125, -0.018402099609375, 0.03533935546875, 0.050323486328125, -0.07562255859375, -0.0265350341796875, -0.03155517578125, 0.00304222...
ai4bharat/Bhasha-Abhijnaanam
2023-06-22T08:01:44.000Z
[ "task_categories:text-generation", "language_creators:crowdsourced", "language_creators:expert-generated", "language_creators:machine-generated", "language_creators:found", "language_creators:other", "multilinguality:multilingual", "source_datasets:original", "language:asm", "language:ben", "lan...
ai4bharat
null
null
1
4
2023-05-17T04:43:57
--- license: cc0-1.0 annotations_creators: [] language_creators: - crowdsourced - expert-generated - machine-generated - found - other language: - asm - ben - brx - guj - hin - kan - kas - kok - mai - mal - mar - mni - nep - ori - pan - san - sat - sid - snd - tam - tel - urd multilinguality: - multilingual pretty_name: Bhasha-Abhijnaanam size_categories: [] source_datasets: - original task_categories: - text-generation task_ids: [] --- # Dataset Card for Aksharantar ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** https://github.com/AI4Bharat/IndicLID - **Paper:** [Bhasha-Abhijnaanam: Native-script and romanized Language Identification for 22 Indic languages](https://arxiv.org/abs/2305.15814) - **Leaderboard:** - **Point of Contact:** ### Dataset Summary Bhasha-Abhijnaanam is a language identification test set for native-script as well as Romanized text which spans 22 Indic languages. 
### Supported Tasks and Leaderboards [More Information Needed] ### Languages | <!-- --> | <!-- --> | <!-- --> | <!-- --> | <!-- --> | <!-- --> | | -------------- | -------------- | -------------- | --------------- | -------------- | ------------- | | Assamese (asm) | Hindi (hin) | Maithili (mai) | Nepali (nep) | Sanskrit (san) | Tamil (tam) | | Bengali (ben) | Kannada (kan) | Malayalam (mal)| Oriya (ori) | Santali (sat) | Telugu (tel) | | Bodo(brx) | Kashmiri (kas) | Manipuri (mni) | Punjabi (pan) | Sindhi (snd) | Urdu (urd) | | Gujarati (guj) | Konkani (kok) | Marathi (mar) ## Dataset Structure ### Data Instances ``` A random sample from Hindi (hin) Test dataset. { "unique_identifier": "hin1", "native sentence": "", "romanized sentence": "", "language": "Hindi", "script": "Devanagari", "source": "Dakshina", } ``` ### Data Fields - `unique_identifier` (string): 3-letter language code followed by a unique number in Test set. - `native sentence` (string): A sentence in Indic language. - `romanized sentence` (string): Transliteration of native sentence in English (Romanized sentence). - `language` (string): Language of native sentence. - `script` (string): Script in which native sentence is written. - `source` (string): Source of the data. 
For created data sources, depending on the destination/sampling method of a pair in a language, it will be one of: - Dakshina Dataset - Flores-200 - Manually Romanized - Manually generated ### Data Splits | Subset | asm | ben | brx | guj | hin | kan | kas (Perso-Arabic) | kas (Devanagari) | kok | mai | mal | mni (Bengali) | mni (Meetei Mayek) | mar | nep | ori | pan | san | sid | tam | tel | urd | |:------:|:---:|:---:|:---:|:---:|:---:|:---:|:------------------:|:----------------:|:---:|:---:|:---:|:-------------:|:------------------:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| | Native | 1012 | 5606 | 1500 | 5797 | 5617 | 5859 | 2511 | 1012 | 1500 | 2512 | 5628 | 1012 | 1500 | 5611 | 2512 | 1012 | 5776 | 2510 | 2512 | 5893 | 5779 | 5751 | 6883 | | Romanized | 512 | 4595 | 433 | 4785 | 4606 | 4848 | 450 | 0 | 444 | 439 | 4617 | 0 | 442 | 4603 | 423 | 512 | 4765 | 448 | 0 | 4881 | 4767 | 4741 | 4371 | ## Dataset Creation Information in the paper. [Bhasha-Abhijnaanam: Native-script and romanized Language Identification for 22 Indic languages](https://arxiv.org/abs/2305.15814) ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization Information in the paper. [Bhasha-Abhijnaanam: Native-script and romanized Language Identification for 22 Indic languages](https://arxiv.org/abs/2305.15814) #### Who are the source language producers? [More Information Needed] ### Annotations Information in the paper. [Bhasha-Abhijnaanam: Native-script and romanized Language Identification for 22 Indic languages](https://arxiv.org/abs/2305.15814) #### Who are the annotators? Information in the paper. 
[Bhasha-Abhijnaanam: Native-script and romanized Language Identification for 22 Indic languages](https://arxiv.org/abs/2305.15814) ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information <!-- <a rel="license" float="left" href="http://creativecommons.org/publicdomain/zero/1.0/"> <img src="https://licensebuttons.net/p/zero/1.0/88x31.png" style="border-style: none;" alt="CC0" width="100" /> <img src="https://mirrors.creativecommons.org/presskit/buttons/88x31/png/by.png" style="border-style: none;" alt="CC-BY" width="100" href="http://creativecommons.org/publicdomain/zero/1.0/"/> </a> <br/> --> This data is released under the following licensing scheme: - Manually collected data: Released under CC0 license. **CC0 License Statement** <a rel="license" float="left" href="https://creativecommons.org/about/cclicenses/"> <img src="https://licensebuttons.net/p/zero/1.0/88x31.png" style="border-style: none;" alt="CC0" width="100"/> </a> <br> <br> - We do not own any of the text from which this data has been extracted. - We license the actual packaging of manually collected data under the [Creative Commons CC0 license (“no rights reserved”)](http://creativecommons.org/publicdomain/zero/1.0). - To the extent possible under law, <a rel="dct:publisher" href="https://indicnlp.ai4bharat.org/"> <span property="dct:title">AI4Bharat</span></a> has waived all copyright and related or neighboring rights to <span property="dct:title">Aksharantar</span> manually collected data and existing sources. - This work is published from: India. ### Citation Information ``` @misc{madhani2023bhashaabhijnaanam, title={Bhasha-Abhijnaanam: Native-script and romanized Language Identification for 22 Indic languages}, author={Yash Madhani and Mitesh M. 
Khapra and Anoop Kunchukuttan}, year={2023}, eprint={2305.15814}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ### Contributions ---
7,254
[ [ -0.024139404296875, -0.0309906005859375, -0.0121002197265625, 0.0226287841796875, -0.0284576416015625, 0.027435302734375, -0.03021240234375, -0.03314208984375, 0.028167724609375, 0.0120697021484375, -0.0287628173828125, -0.056182861328125, -0.03765869140625, ...
0x22almostEvil/ws-semantics-simnrel
2023-05-20T09:35:49.000Z
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:en", "language:ru", "language:de", "language:it", "license:apache-2.0", "semantics", "arxiv:1508.00106", "region:us" ]
0x22almostEvil
null
null
0
4
2023-05-17T15:38:22
--- license: apache-2.0 task_categories: - text-classification language: - en - ru - de - it tags: - semantics size_categories: - 1K<n<10K --- # Dataset Card for WS353-semantics-sim-and-rel with ~2K entries. ### Dataset Summary License: Apache-2.0. Contains CSV of a list of word1, word2, their `connection score`, type of connection and language. - ### Original Datasets are available here: - https://leviants.com/multilingual-simlex999-and-wordsim353/ ### Paper of original Dataset: - https://arxiv.org/pdf/1508.00106v5.pdf
533
[ [ -0.03387451171875, -0.01904296875, 0.010284423828125, 0.035980224609375, -0.03240966796875, -0.0237274169921875, -0.01471710205078125, -0.035552978515625, 0.01053619384765625, 0.04144287109375, -0.061798095703125, -0.042388916015625, -0.026092529296875, 0.02...
Amiri/Google-Play-Reviews-for-Sentiment-Analysis
2023-05-18T18:51:17.000Z
[ "region:us" ]
Amiri
null
null
1
4
2023-05-18T18:41:30
Entry not found
15
[ [ -0.0213775634765625, -0.01494598388671875, 0.057159423828125, 0.02880859375, -0.0350341796875, 0.046478271484375, 0.052520751953125, 0.005077362060546875, 0.051361083984375, 0.0170135498046875, -0.05206298828125, -0.01494598388671875, -0.06036376953125, 0.03...
joey234/mmlu-professional_law
2023-08-23T04:52:47.000Z
[ "region:us" ]
joey234
null
null
0
4
2023-05-19T04:34:16
--- dataset_info: features: - name: question dtype: string - name: choices sequence: string - name: answer dtype: class_label: names: '0': A '1': B '2': C '3': D - name: negate_openai_prompt struct: - name: content dtype: string - name: role dtype: string - name: neg_question dtype: string - name: fewshot_context dtype: string - name: fewshot_context_neg dtype: string splits: - name: dev num_bytes: 14459 num_examples: 5 - name: test num_bytes: 14307400 num_examples: 1534 download_size: 2073867 dataset_size: 14321859 configs: - config_name: default data_files: - split: dev path: data/dev-* - split: test path: data/test-* --- # Dataset Card for "mmlu-professional_law" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
965
[ [ -0.02978515625, -0.01476287841796875, 0.01541900634765625, 0.002246856689453125, -0.0173797607421875, -0.002376556396484375, 0.0232391357421875, -0.004764556884765625, 0.0511474609375, 0.038787841796875, -0.057952880859375, -0.04925537109375, -0.03961181640625, ...
RossVermouth/chensu_test_dataset
2023-05-19T08:23:29.000Z
[ "task_categories:image-classification", "size_categories:1K<n<10K", "language:aa", "language:ae", "license:apache-2.0", "not-for-all-audiences", "region:us" ]
RossVermouth
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images.
@TECHREPORT{Krizhevsky09learningmultiple, author = {Alex Krizhevsky}, title = {Learning multiple layers of features from tiny images}, institution = {}, year = {2009} }
0
4
2023-05-19T07:58:00
--- license: apache-2.0 task_categories: - image-classification language: - aa - ae tags: - not-for-all-audiences size_categories: - 1K<n<10K --- # Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary just for test ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
1,459
[ [ -0.03265380859375, -0.03240966796875, -0.0035343170166015625, 0.0227813720703125, -0.019744873046875, 0.01214599609375, -0.021453857421875, -0.015625, 0.031829833984375, 0.049591064453125, -0.0609130859375, -0.07977294921875, -0.047943115234375, 0.0102310180...
taesiri/imagenet-hard-4K
2023-06-11T00:37:29.000Z
[ "task_categories:image-classification", "size_categories:10K<n<100K", "language:en", "license:mit", "OOD", "ImageNet", "Out Of Distribution", "arxiv:2304.05538", "region:us" ]
taesiri
null
null
2
4
2023-05-21T17:33:17
--- dataset_info: features: - name: id dtype: int64 - name: image dtype: image - name: label sequence: int64 - name: origin dtype: string - name: english_label sequence: string splits: - name: validation num_bytes: 70959420455.86 num_examples: 10980 download_size: 66129324319 dataset_size: 70959420455.86 license: mit task_categories: - image-classification language: - en tags: - OOD - ImageNet - Out Of Distribution pretty_name: ImageNet-Hard-4K size_categories: - 10K<n<100K --- # Dataset Card for "Imagenet-Hard-4K" [Project Page](https://taesiri.github.io/ZoomIsAllYouNeed/) - [Paper](https://arxiv.org/abs/2304.05538) - [Github](https://github.com/taesiri/ZoomIsAllYouNeed) **ImageNet-Hard-4K** is 4K version of the original [**ImageNet-Hard**](https://huggingface.co/datasets/taesiri/imagenet-hard) dataset, which is a new benchmark that comprises 10,980 images collected from various existing ImageNet-scale benchmarks (ImageNet, ImageNet-V2, ImageNet-Sketch, ImageNet-C, ImageNet-R, ImageNet-ReaL, ImageNet-A, and ObjectNet). This dataset poses a significant challenge to state-of-the-art vision models as merely zooming in often fails to improve their ability to classify images correctly. As a result, even the most advanced models, such as `CLIP-ViT-L/14@336px`, struggle to perform well on this dataset, achieving a mere `2.02%` accuracy. ## Upscaling Procedure We employed [GigaGAN](https://mingukkang.github.io/GigaGAN/) to upscale each image from the original ImageNet-Hard dataset to a resolution of 4K. 
### Dataset Distribution ![Dataset Distribution](https://taesiri.github.io/ZoomIsAllYouNeed/static/svg/imagenet_hard_distribution.svg) ### Classifiers Performance | Model | Accuracy | | ------------------- | -------- | | AlexNet | 7.08 | | VGG-16 | 11.32 | | ResNet-18 | 10.42 | | ResNet-50 | 13.93 | | ViT-B/32 | 18.12 | | EfficientNet-B0 | 12.94 | | EfficientNet-B7 | 18.67 | | EfficientNet-L2-Ns | 28.42 | | CLIP-ViT-L/14@224px | 1.81 | | CLIP-ViT-L/14@336px | 1.88 | | OpenCLIP-ViT-bigG-14| 14.33 | | OpenCLIP-ViT-L-14 | 13.04 | **Evaluation Code** * CLIP <a target="_blank" href="https://colab.research.google.com/github/taesiri/ZoomIsAllYouNeed/blob/main/src/ImageNet_Hard/Prompt_Engineering_for_ImageNet_Hard.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> * Other models <a target="_blank" href="https://colab.research.google.com/github/taesiri/ZoomIsAllYouNeed/blob/main/src/ImageNet_Hard/Benchmark_ImageNet_Hard.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> ## Supported Tasks - `image-classification`: The objective of this task is to classify an image into one or more classes, selected from 1000 ImageNet categories (allowing for multiple ground-truth labels per image). ## Languages The `english_label` field in the dataset are in English. ## Dataset Structure Data Instances An example looks like this: ```python { 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=575x409 at 0x7F09456B53A0>, 'label': [0], 'origin': 'imagenet_sketch', 'english_label': ['tench'] } ``` ### Data Fields The data instances have the following fields: - image: A PIL.Image.Image object containing the image. Note that when accessing the image column: dataset[0]["image"] the image file is automatically decoded. Decoding of a large number of image files might take a significant amount of time. Thus it is important to first query the sample index before the "image" column, i.e. 
dataset[0]["image"] should always be preferred over dataset["image"][0]. - label: A List[int] collection containing the ground-truth ids. - origin: A string containing source dataset. - english_label: A List[str] collection containg the english labels for the ground-truth classes. <details> <summary> Click here to see the full list of ImageNet class labels mapping: </summary> |id|Class| |--|-----| |0 | tench, Tinca tinca| |1 | goldfish, Carassius auratus| |2 | great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias| |3 | tiger shark, Galeocerdo cuvieri| |4 | hammerhead, hammerhead shark| |5 | electric ray, crampfish, numbfish, torpedo| |6 | stingray| |7 | cock| |8 | hen| |9 | ostrich, Struthio camelus| |10 | brambling, Fringilla montifringilla| |11 | goldfinch, Carduelis carduelis| |12 | house finch, linnet, Carpodacus mexicanus| |13 | junco, snowbird| |14 | indigo bunting, indigo finch, indigo bird, Passerina cyanea| |15 | robin, American robin, Turdus migratorius| |16 | bulbul| |17 | jay| |18 | magpie| |19 | chickadee| |20 | water ouzel, dipper| |21 | kite| |22 | bald eagle, American eagle, Haliaeetus leucocephalus| |23 | vulture| |24 | great grey owl, great gray owl, Strix nebulosa| |25 | European fire salamander, Salamandra salamandra| |26 | common newt, Triturus vulgaris| |27 | eft| |28 | spotted salamander, Ambystoma maculatum| |29 | axolotl, mud puppy, Ambystoma mexicanum| |30 | bullfrog, Rana catesbeiana| |31 | tree frog, tree-frog| |32 | tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui| |33 | loggerhead, loggerhead turtle, Caretta caretta| |34 | leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea| |35 | mud turtle| |36 | terrapin| |37 | box turtle, box tortoise| |38 | banded gecko| |39 | common iguana, iguana, Iguana iguana| |40 | American chameleon, anole, Anolis carolinensis| |41 | whiptail, whiptail lizard| |42 | agama| |43 | frilled lizard, Chlamydosaurus kingi| |44 | alligator lizard| |45 | 
Gila monster, Heloderma suspectum| |46 | green lizard, Lacerta viridis| |47 | African chameleon, Chamaeleo chamaeleon| |48 | Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis| |49 | African crocodile, Nile crocodile, Crocodylus niloticus| |50 | American alligator, Alligator mississipiensis| |51 | triceratops| |52 | thunder snake, worm snake, Carphophis amoenus| |53 | ringneck snake, ring-necked snake, ring snake| |54 | hognose snake, puff adder, sand viper| |55 | green snake, grass snake| |56 | king snake, kingsnake| |57 | garter snake, grass snake| |58 | water snake| |59 | vine snake| |60 | night snake, Hypsiglena torquata| |61 | boa constrictor, Constrictor constrictor| |62 | rock python, rock snake, Python sebae| |63 | Indian cobra, Naja naja| |64 | green mamba| |65 | sea snake| |66 | horned viper, cerastes, sand viper, horned asp, Cerastes cornutus| |67 | diamondback, diamondback rattlesnake, Crotalus adamanteus| |68 | sidewinder, horned rattlesnake, Crotalus cerastes| |69 | trilobite| |70 | harvestman, daddy longlegs, Phalangium opilio| |71 | scorpion| |72 | black and gold garden spider, Argiope aurantia| |73 | barn spider, Araneus cavaticus| |74 | garden spider, Aranea diademata| |75 | black widow, Latrodectus mactans| |76 | tarantula| |77 | wolf spider, hunting spider| |78 | tick| |79 | centipede| |80 | black grouse| |81 | ptarmigan| |82 | ruffed grouse, partridge, Bonasa umbellus| |83 | prairie chicken, prairie grouse, prairie fowl| |84 | peacock| |85 | quail| |86 | partridge| |87 | African grey, African gray, Psittacus erithacus| |88 | macaw| |89 | sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita| |90 | lorikeet| |91 | coucal| |92 | bee eater| |93 | hornbill| |94 | hummingbird| |95 | jacamar| |96 | toucan| |97 | drake| |98 | red-breasted merganser, Mergus serrator| |99 | goose| |100 | black swan, Cygnus atratus| |101 | tusker| |102 | echidna, spiny anteater, anteater| |103 | platypus, duckbill, duckbilled platypus, 
duck-billed platypus, Ornithorhynchus anatinus| |104 | wallaby, brush kangaroo| |105 | koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus| |106 | wombat| |107 | jellyfish| |108 | sea anemone, anemone| |109 | brain coral| |110 | flatworm, platyhelminth| |111 | nematode, nematode worm, roundworm| |112 | conch| |113 | snail| |114 | slug| |115 | sea slug, nudibranch| |116 | chiton, coat-of-mail shell, sea cradle, polyplacophore| |117 | chambered nautilus, pearly nautilus, nautilus| |118 | Dungeness crab, Cancer magister| |119 | rock crab, Cancer irroratus| |120 | fiddler crab| |121 | king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica| |122 | American lobster, Northern lobster, Maine lobster, Homarus americanus| |123 | spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish| |124 | crayfish, crawfish, crawdad, crawdaddy| |125 | hermit crab| |126 | isopod| |127 | white stork, Ciconia ciconia| |128 | black stork, Ciconia nigra| |129 | spoonbill| |130 | flamingo| |131 | little blue heron, Egretta caerulea| |132 | American egret, great white heron, Egretta albus| |133 | bittern| |134 | crane| |135 | limpkin, Aramus pictus| |136 | European gallinule, Porphyrio porphyrio| |137 | American coot, marsh hen, mud hen, water hen, Fulica americana| |138 | bustard| |139 | ruddy turnstone, Arenaria interpres| |140 | red-backed sandpiper, dunlin, Erolia alpina| |141 | redshank, Tringa totanus| |142 | dowitcher| |143 | oystercatcher, oyster catcher| |144 | pelican| |145 | king penguin, Aptenodytes patagonica| |146 | albatross, mollymawk| |147 | grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus| |148 | killer whale, killer, orca, grampus, sea wolf, Orcinus orca| |149 | dugong, Dugong dugon| |150 | sea lion| |151 | Chihuahua| |152 | Japanese spaniel| |153 | Maltese dog, Maltese terrier, Maltese| |154 | Pekinese, Pekingese, Peke| |155 | Shih-Tzu| |156 | Blenheim spaniel| |157 | papillon| 
|158 | toy terrier| |159 | Rhodesian ridgeback| |160 | Afghan hound, Afghan| |161 | basset, basset hound| |162 | beagle| |163 | bloodhound, sleuthhound| |164 | bluetick| |165 | black-and-tan coonhound| |166 | Walker hound, Walker foxhound| |167 | English foxhound| |168 | redbone| |169 | borzoi, Russian wolfhound| |170 | Irish wolfhound| |171 | Italian greyhound| |172 | whippet| |173 | Ibizan hound, Ibizan Podenco| |174 | Norwegian elkhound, elkhound| |175 | otterhound, otter hound| |176 | Saluki, gazelle hound| |177 | Scottish deerhound, deerhound| |178 | Weimaraner| |179 | Staffordshire bullterrier, Staffordshire bull terrier| |180 | American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier| |181 | Bedlington terrier| |182 | Border terrier| |183 | Kerry blue terrier| |184 | Irish terrier| |185 | Norfolk terrier| |186 | Norwich terrier| |187 | Yorkshire terrier| |188 | wire-haired fox terrier| |189 | Lakeland terrier| |190 | Sealyham terrier, Sealyham| |191 | Airedale, Airedale terrier| |192 | cairn, cairn terrier| |193 | Australian terrier| |194 | Dandie Dinmont, Dandie Dinmont terrier| |195 | Boston bull, Boston terrier| |196 | miniature schnauzer| |197 | giant schnauzer| |198 | standard schnauzer| |199 | Scotch terrier, Scottish terrier, Scottie| |200 | Tibetan terrier, chrysanthemum dog| |201 | silky terrier, Sydney silky| |202 | soft-coated wheaten terrier| |203 | West Highland white terrier| |204 | Lhasa, Lhasa apso| |205 | flat-coated retriever| |206 | curly-coated retriever| |207 | golden retriever| |208 | Labrador retriever| |209 | Chesapeake Bay retriever| |210 | German short-haired pointer| |211 | vizsla, Hungarian pointer| |212 | English setter| |213 | Irish setter, red setter| |214 | Gordon setter| |215 | Brittany spaniel| |216 | clumber, clumber spaniel| |217 | English springer, English springer spaniel| |218 | Welsh springer spaniel| |219 | cocker spaniel, English cocker spaniel, cocker| |220 | Sussex spaniel| 
|221 | Irish water spaniel| |222 | kuvasz| |223 | schipperke| |224 | groenendael| |225 | malinois| |226 | briard| |227 | kelpie| |228 | komondor| |229 | Old English sheepdog, bobtail| |230 | Shetland sheepdog, Shetland sheep dog, Shetland| |231 | collie| |232 | Border collie| |233 | Bouvier des Flandres, Bouviers des Flandres| |234 | Rottweiler| |235 | German shepherd, German shepherd dog, German police dog, alsatian| |236 | Doberman, Doberman pinscher| |237 | miniature pinscher| |238 | Greater Swiss Mountain dog| |239 | Bernese mountain dog| |240 | Appenzeller| |241 | EntleBucher| |242 | boxer| |243 | bull mastiff| |244 | Tibetan mastiff| |245 | French bulldog| |246 | Great Dane| |247 | Saint Bernard, St Bernard| |248 | Eskimo dog, husky| |249 | malamute, malemute, Alaskan malamute| |250 | Siberian husky| |251 | dalmatian, coach dog, carriage dog| |252 | affenpinscher, monkey pinscher, monkey dog| |253 | basenji| |254 | pug, pug-dog| |255 | Leonberg| |256 | Newfoundland, Newfoundland dog| |257 | Great Pyrenees| |258 | Samoyed, Samoyede| |259 | Pomeranian| |260 | chow, chow chow| |261 | keeshond| |262 | Brabancon griffon| |263 | Pembroke, Pembroke Welsh corgi| |264 | Cardigan, Cardigan Welsh corgi| |265 | toy poodle| |266 | miniature poodle| |267 | standard poodle| |268 | Mexican hairless| |269 | timber wolf, grey wolf, gray wolf, Canis lupus| |270 | white wolf, Arctic wolf, Canis lupus tundrarum| |271 | red wolf, maned wolf, Canis rufus, Canis niger| |272 | coyote, prairie wolf, brush wolf, Canis latrans| |273 | dingo, warrigal, warragal, Canis dingo| |274 | dhole, Cuon alpinus| |275 | African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus| |276 | hyena, hyaena| |277 | red fox, Vulpes vulpes| |278 | kit fox, Vulpes macrotis| |279 | Arctic fox, white fox, Alopex lagopus| |280 | grey fox, gray fox, Urocyon cinereoargenteus| |281 | tabby, tabby cat| |282 | tiger cat| |283 | Persian cat| |284 | Siamese cat, Siamese| |285 | Egyptian cat| |286 | cougar, puma, 
catamount, mountain lion, painter, panther, Felis concolor| |287 | lynx, catamount| |288 | leopard, Panthera pardus| |289 | snow leopard, ounce, Panthera uncia| |290 | jaguar, panther, Panthera onca, Felis onca| |291 | lion, king of beasts, Panthera leo| |292 | tiger, Panthera tigris| |293 | cheetah, chetah, Acinonyx jubatus| |294 | brown bear, bruin, Ursus arctos| |295 | American black bear, black bear, Ursus americanus, Euarctos americanus| |296 | ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus| |297 | sloth bear, Melursus ursinus, Ursus ursinus| |298 | mongoose| |299 | meerkat, mierkat| |300 | tiger beetle| |301 | ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle| |302 | ground beetle, carabid beetle| |303 | long-horned beetle, longicorn, longicorn beetle| |304 | leaf beetle, chrysomelid| |305 | dung beetle| |306 | rhinoceros beetle| |307 | weevil| |308 | fly| |309 | bee| |310 | ant, emmet, pismire| |311 | grasshopper, hopper| |312 | cricket| |313 | walking stick, walkingstick, stick insect| |314 | cockroach, roach| |315 | mantis, mantid| |316 | cicada, cicala| |317 | leafhopper| |318 | lacewing, lacewing fly| |319 | dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk| |320 | damselfly| |321 | admiral| |322 | ringlet, ringlet butterfly| |323 | monarch, monarch butterfly, milkweed butterfly, Danaus plexippus| |324 | cabbage butterfly| |325 | sulphur butterfly, sulfur butterfly| |326 | lycaenid, lycaenid butterfly| |327 | starfish, sea star| |328 | sea urchin| |329 | sea cucumber, holothurian| |330 | wood rabbit, cottontail, cottontail rabbit| |331 | hare| |332 | Angora, Angora rabbit| |333 | hamster| |334 | porcupine, hedgehog| |335 | fox squirrel, eastern fox squirrel, Sciurus niger| |336 | marmot| |337 | beaver| |338 | guinea pig, Cavia cobaya| |339 | sorrel| |340 | zebra| |341 | hog, pig, grunter, squealer, Sus scrofa| |342 | wild boar, boar, Sus scrofa| |343 | warthog| 
|344 | hippopotamus, hippo, river horse, Hippopotamus amphibius| |345 | ox| |346 | water buffalo, water ox, Asiatic buffalo, Bubalus bubalis| |347 | bison| |348 | ram, tup| |349 | bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis| |350 | ibex, Capra ibex| |351 | hartebeest| |352 | impala, Aepyceros melampus| |353 | gazelle| |354 | Arabian camel, dromedary, Camelus dromedarius| |355 | llama| |356 | weasel| |357 | mink| |358 | polecat, fitch, foulmart, foumart, Mustela putorius| |359 | black-footed ferret, ferret, Mustela nigripes| |360 | otter| |361 | skunk, polecat, wood pussy| |362 | badger| |363 | armadillo| |364 | three-toed sloth, ai, Bradypus tridactylus| |365 | orangutan, orang, orangutang, Pongo pygmaeus| |366 | gorilla, Gorilla gorilla| |367 | chimpanzee, chimp, Pan troglodytes| |368 | gibbon, Hylobates lar| |369 | siamang, Hylobates syndactylus, Symphalangus syndactylus| |370 | guenon, guenon monkey| |371 | patas, hussar monkey, Erythrocebus patas| |372 | baboon| |373 | macaque| |374 | langur| |375 | colobus, colobus monkey| |376 | proboscis monkey, Nasalis larvatus| |377 | marmoset| |378 | capuchin, ringtail, Cebus capucinus| |379 | howler monkey, howler| |380 | titi, titi monkey| |381 | spider monkey, Ateles geoffroyi| |382 | squirrel monkey, Saimiri sciureus| |383 | Madagascar cat, ring-tailed lemur, Lemur catta| |384 | indri, indris, Indri indri, Indri brevicaudatus| |385 | Indian elephant, Elephas maximus| |386 | African elephant, Loxodonta africana| |387 | lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens| |388 | giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca| |389 | barracouta, snoek| |390 | eel| |391 | coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch| |392 | rock beauty, Holocanthus tricolor| |393 | anemone fish| |394 | sturgeon| |395 | gar, garfish, garpike, billfish, Lepisosteus osseus| |396 | lionfish| |397 | puffer, pufferfish, blowfish, 
globefish| |398 | abacus| |399 | abaya| |400 | academic gown, academic robe, judge's robe| |401 | accordion, piano accordion, squeeze box| |402 | acoustic guitar| |403 | aircraft carrier, carrier, flattop, attack aircraft carrier| |404 | airliner| |405 | airship, dirigible| |406 | altar| |407 | ambulance| |408 | amphibian, amphibious vehicle| |409 | analog clock| |410 | apiary, bee house| |411 | apron| |412 | ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin| |413 | assault rifle, assault gun| |414 | backpack, back pack, knapsack, packsack, rucksack, haversack| |415 | bakery, bakeshop, bakehouse| |416 | balance beam, beam| |417 | balloon| |418 | ballpoint, ballpoint pen, ballpen, Biro| |419 | Band Aid| |420 | banjo| |421 | bannister, banister, balustrade, balusters, handrail| |422 | barbell| |423 | barber chair| |424 | barbershop| |425 | barn| |426 | barometer| |427 | barrel, cask| |428 | barrow, garden cart, lawn cart, wheelbarrow| |429 | baseball| |430 | basketball| |431 | bassinet| |432 | bassoon| |433 | bathing cap, swimming cap| |434 | bath towel| |435 | bathtub, bathing tub, bath, tub| |436 | beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon| |437 | beacon, lighthouse, beacon light, pharos| |438 | beaker| |439 | bearskin, busby, shako| |440 | beer bottle| |441 | beer glass| |442 | bell cote, bell cot| |443 | bib| |444 | bicycle-built-for-two, tandem bicycle, tandem| |445 | bikini, two-piece| |446 | binder, ring-binder| |447 | binoculars, field glasses, opera glasses| |448 | birdhouse| |449 | boathouse| |450 | bobsled, bobsleigh, bob| |451 | bolo tie, bolo, bola tie, bola| |452 | bonnet, poke bonnet| |453 | bookcase| |454 | bookshop, bookstore, bookstall| |455 | bottlecap| |456 | bow| |457 | bow tie, bow-tie, bowtie| |458 | brass, memorial tablet, plaque| |459 | brassiere, bra, bandeau| |460 | breakwater, groin, groyne, mole, bulwark, seawall, jetty| |461 | breastplate, aegis, 
egis| |462 | broom| |463 | bucket, pail| |464 | buckle| |465 | bulletproof vest| |466 | bullet train, bullet| |467 | butcher shop, meat market| |468 | cab, hack, taxi, taxicab| |469 | caldron, cauldron| |470 | candle, taper, wax light| |471 | cannon| |472 | canoe| |473 | can opener, tin opener| |474 | cardigan| |475 | car mirror| |476 | carousel, carrousel, merry-go-round, roundabout, whirligig| |477 | carpenter's kit, tool kit| |478 | carton| |479 | car wheel| |480 | cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM| |481 | cassette| |482 | cassette player| |483 | castle| |484 | catamaran| |485 | CD player| |486 | cello, violoncello| |487 | cellular telephone, cellular phone, cellphone, cell, mobile phone| |488 | chain| |489 | chainlink fence| |490 | chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour| |491 | chain saw, chainsaw| |492 | chest| |493 | chiffonier, commode| |494 | chime, bell, gong| |495 | china cabinet, china closet| |496 | Christmas stocking| |497 | church, church building| |498 | cinema, movie theater, movie theatre, movie house, picture palace| |499 | cleaver, meat cleaver, chopper| |500 | cliff dwelling| |501 | cloak| |502 | clog, geta, patten, sabot| |503 | cocktail shaker| |504 | coffee mug| |505 | coffeepot| |506 | coil, spiral, volute, whorl, helix| |507 | combination lock| |508 | computer keyboard, keypad| |509 | confectionery, confectionary, candy store| |510 | container ship, containership, container vessel| |511 | convertible| |512 | corkscrew, bottle screw| |513 | cornet, horn, trumpet, trump| |514 | cowboy boot| |515 | cowboy hat, ten-gallon hat| |516 | cradle| |517 | crane_1| |518 | crash helmet| |519 | crate| |520 | crib, cot| |521 | Crock Pot| |522 | croquet ball| |523 | crutch| |524 | cuirass| |525 | dam, dike, dyke| |526 | desk| |527 | desktop computer| |528 | dial telephone, dial phone| |529 | diaper, nappy, napkin| |530 | digital 
clock| |531 | digital watch| |532 | dining table, board| |533 | dishrag, dishcloth| |534 | dishwasher, dish washer, dishwashing machine| |535 | disk brake, disc brake| |536 | dock, dockage, docking facility| |537 | dogsled, dog sled, dog sleigh| |538 | dome| |539 | doormat, welcome mat| |540 | drilling platform, offshore rig| |541 | drum, membranophone, tympan| |542 | drumstick| |543 | dumbbell| |544 | Dutch oven| |545 | electric fan, blower| |546 | electric guitar| |547 | electric locomotive| |548 | entertainment center| |549 | envelope| |550 | espresso maker| |551 | face powder| |552 | feather boa, boa| |553 | file, file cabinet, filing cabinet| |554 | fireboat| |555 | fire engine, fire truck| |556 | fire screen, fireguard| |557 | flagpole, flagstaff| |558 | flute, transverse flute| |559 | folding chair| |560 | football helmet| |561 | forklift| |562 | fountain| |563 | fountain pen| |564 | four-poster| |565 | freight car| |566 | French horn, horn| |567 | frying pan, frypan, skillet| |568 | fur coat| |569 | garbage truck, dustcart| |570 | gasmask, respirator, gas helmet| |571 | gas pump, gasoline pump, petrol pump, island dispenser| |572 | goblet| |573 | go-kart| |574 | golf ball| |575 | golfcart, golf cart| |576 | gondola| |577 | gong, tam-tam| |578 | gown| |579 | grand piano, grand| |580 | greenhouse, nursery, glasshouse| |581 | grille, radiator grille| |582 | grocery store, grocery, food market, market| |583 | guillotine| |584 | hair slide| |585 | hair spray| |586 | half track| |587 | hammer| |588 | hamper| |589 | hand blower, blow dryer, blow drier, hair dryer, hair drier| |590 | hand-held computer, hand-held microcomputer| |591 | handkerchief, hankie, hanky, hankey| |592 | hard disc, hard disk, fixed disk| |593 | harmonica, mouth organ, harp, mouth harp| |594 | harp| |595 | harvester, reaper| |596 | hatchet| |597 | holster| |598 | home theater, home theatre| |599 | honeycomb| |600 | hook, claw| |601 | hoopskirt, crinoline| |602 | horizontal bar, high bar| |603 
| horse cart, horse-cart| |604 | hourglass| |605 | iPod| |606 | iron, smoothing iron| |607 | jack-o'-lantern| |608 | jean, blue jean, denim| |609 | jeep, landrover| |610 | jersey, T-shirt, tee shirt| |611 | jigsaw puzzle| |612 | jinrikisha, ricksha, rickshaw| |613 | joystick| |614 | kimono| |615 | knee pad| |616 | knot| |617 | lab coat, laboratory coat| |618 | ladle| |619 | lampshade, lamp shade| |620 | laptop, laptop computer| |621 | lawn mower, mower| |622 | lens cap, lens cover| |623 | letter opener, paper knife, paperknife| |624 | library| |625 | lifeboat| |626 | lighter, light, igniter, ignitor| |627 | limousine, limo| |628 | liner, ocean liner| |629 | lipstick, lip rouge| |630 | Loafer| |631 | lotion| |632 | loudspeaker, speaker, speaker unit, loudspeaker system, speaker system| |633 | loupe, jeweler's loupe| |634 | lumbermill, sawmill| |635 | magnetic compass| |636 | mailbag, postbag| |637 | mailbox, letter box| |638 | maillot| |639 | maillot, tank suit| |640 | manhole cover| |641 | maraca| |642 | marimba, xylophone| |643 | mask| |644 | matchstick| |645 | maypole| |646 | maze, labyrinth| |647 | measuring cup| |648 | medicine chest, medicine cabinet| |649 | megalith, megalithic structure| |650 | microphone, mike| |651 | microwave, microwave oven| |652 | military uniform| |653 | milk can| |654 | minibus| |655 | miniskirt, mini| |656 | minivan| |657 | missile| |658 | mitten| |659 | mixing bowl| |660 | mobile home, manufactured home| |661 | Model T| |662 | modem| |663 | monastery| |664 | monitor| |665 | moped| |666 | mortar| |667 | mortarboard| |668 | mosque| |669 | mosquito net| |670 | motor scooter, scooter| |671 | mountain bike, all-terrain bike, off-roader| |672 | mountain tent| |673 | mouse, computer mouse| |674 | mousetrap| |675 | moving van| |676 | muzzle| |677 | nail| |678 | neck brace| |679 | necklace| |680 | nipple| |681 | notebook, notebook computer| |682 | obelisk| |683 | oboe, hautboy, hautbois| |684 | ocarina, sweet potato| |685 | odometer, 
hodometer, mileometer, milometer| |686 | oil filter| |687 | organ, pipe organ| |688 | oscilloscope, scope, cathode-ray oscilloscope, CRO| |689 | overskirt| |690 | oxcart| |691 | oxygen mask| |692 | packet| |693 | paddle, boat paddle| |694 | paddlewheel, paddle wheel| |695 | padlock| |696 | paintbrush| |697 | pajama, pyjama, pj's, jammies| |698 | palace| |699 | panpipe, pandean pipe, syrinx| |700 | paper towel| |701 | parachute, chute| |702 | parallel bars, bars| |703 | park bench| |704 | parking meter| |705 | passenger car, coach, carriage| |706 | patio, terrace| |707 | pay-phone, pay-station| |708 | pedestal, plinth, footstall| |709 | pencil box, pencil case| |710 | pencil sharpener| |711 | perfume, essence| |712 | Petri dish| |713 | photocopier| |714 | pick, plectrum, plectron| |715 | pickelhaube| |716 | picket fence, paling| |717 | pickup, pickup truck| |718 | pier| |719 | piggy bank, penny bank| |720 | pill bottle| |721 | pillow| |722 | ping-pong ball| |723 | pinwheel| |724 | pirate, pirate ship| |725 | pitcher, ewer| |726 | plane, carpenter's plane, woodworking plane| |727 | planetarium| |728 | plastic bag| |729 | plate rack| |730 | plow, plough| |731 | plunger, plumber's helper| |732 | Polaroid camera, Polaroid Land camera| |733 | pole| |734 | police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria| |735 | poncho| |736 | pool table, billiard table, snooker table| |737 | pop bottle, soda bottle| |738 | pot, flowerpot| |739 | potter's wheel| |740 | power drill| |741 | prayer rug, prayer mat| |742 | printer| |743 | prison, prison house| |744 | projectile, missile| |745 | projector| |746 | puck, hockey puck| |747 | punching bag, punch bag, punching ball, punchball| |748 | purse| |749 | quill, quill pen| |750 | quilt, comforter, comfort, puff| |751 | racer, race car, racing car| |752 | racket, racquet| |753 | radiator| |754 | radio, wireless| |755 | radio telescope, radio reflector| |756 | rain barrel| |757 | recreational vehicle, RV, R.V.| |758 | 
reel| |759 | reflex camera| |760 | refrigerator, icebox| |761 | remote control, remote| |762 | restaurant, eating house, eating place, eatery| |763 | revolver, six-gun, six-shooter| |764 | rifle| |765 | rocking chair, rocker| |766 | rotisserie| |767 | rubber eraser, rubber, pencil eraser| |768 | rugby ball| |769 | rule, ruler| |770 | running shoe| |771 | safe| |772 | safety pin| |773 | saltshaker, salt shaker| |774 | sandal| |775 | sarong| |776 | sax, saxophone| |777 | scabbard| |778 | scale, weighing machine| |779 | school bus| |780 | schooner| |781 | scoreboard| |782 | screen, CRT screen| |783 | screw| |784 | screwdriver| |785 | seat belt, seatbelt| |786 | sewing machine| |787 | shield, buckler| |788 | shoe shop, shoe-shop, shoe store| |789 | shoji| |790 | shopping basket| |791 | shopping cart| |792 | shovel| |793 | shower cap| |794 | shower curtain| |795 | ski| |796 | ski mask| |797 | sleeping bag| |798 | slide rule, slipstick| |799 | sliding door| |800 | slot, one-armed bandit| |801 | snorkel| |802 | snowmobile| |803 | snowplow, snowplough| |804 | soap dispenser| |805 | soccer ball| |806 | sock| |807 | solar dish, solar collector, solar furnace| |808 | sombrero| |809 | soup bowl| |810 | space bar| |811 | space heater| |812 | space shuttle| |813 | spatula| |814 | speedboat| |815 | spider web, spider's web| |816 | spindle| |817 | sports car, sport car| |818 | spotlight, spot| |819 | stage| |820 | steam locomotive| |821 | steel arch bridge| |822 | steel drum| |823 | stethoscope| |824 | stole| |825 | stone wall| |826 | stopwatch, stop watch| |827 | stove| |828 | strainer| |829 | streetcar, tram, tramcar, trolley, trolley car| |830 | stretcher| |831 | studio couch, day bed| |832 | stupa, tope| |833 | submarine, pigboat, sub, U-boat| |834 | suit, suit of clothes| |835 | sundial| |836 | sunglass| |837 | sunglasses, dark glasses, shades| |838 | sunscreen, sunblock, sun blocker| |839 | suspension bridge| |840 | swab, swob, mop| |841 | sweatshirt| |842 | swimming trunks, 
bathing trunks| |843 | swing| |844 | switch, electric switch, electrical switch| |845 | syringe| |846 | table lamp| |847 | tank, army tank, armored combat vehicle, armoured combat vehicle| |848 | tape player| |849 | teapot| |850 | teddy, teddy bear| |851 | television, television system| |852 | tennis ball| |853 | thatch, thatched roof| |854 | theater curtain, theatre curtain| |855 | thimble| |856 | thresher, thrasher, threshing machine| |857 | throne| |858 | tile roof| |859 | toaster| |860 | tobacco shop, tobacconist shop, tobacconist| |861 | toilet seat| |862 | torch| |863 | totem pole| |864 | tow truck, tow car, wrecker| |865 | toyshop| |866 | tractor| |867 | trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi| |868 | tray| |869 | trench coat| |870 | tricycle, trike, velocipede| |871 | trimaran| |872 | tripod| |873 | triumphal arch| |874 | trolleybus, trolley coach, trackless trolley| |875 | trombone| |876 | tub, vat| |877 | turnstile| |878 | typewriter keyboard| |879 | umbrella| |880 | unicycle, monocycle| |881 | upright, upright piano| |882 | vacuum, vacuum cleaner| |883 | vase| |884 | vault| |885 | velvet| |886 | vending machine| |887 | vestment| |888 | viaduct| |889 | violin, fiddle| |890 | volleyball| |891 | waffle iron| |892 | wall clock| |893 | wallet, billfold, notecase, pocketbook| |894 | wardrobe, closet, press| |895 | warplane, military plane| |896 | washbasin, handbasin, washbowl, lavabo, wash-hand basin| |897 | washer, automatic washer, washing machine| |898 | water bottle| |899 | water jug| |900 | water tower| |901 | whiskey jug| |902 | whistle| |903 | wig| |904 | window screen| |905 | window shade| |906 | Windsor tie| |907 | wine bottle| |908 | wing| |909 | wok| |910 | wooden spoon| |911 | wool, woolen, woollen| |912 | worm fence, snake fence, snake-rail fence, Virginia fence| |913 | wreck| |914 | yawl| |915 | yurt| |916 | web site, website, internet site, site| |917 | comic book| |918 | crossword puzzle, crossword| |919 | 
street sign| |920 | traffic light, traffic signal, stoplight| |921 | book jacket, dust cover, dust jacket, dust wrapper| |922 | menu| |923 | plate| |924 | guacamole| |925 | consomme| |926 | hot pot, hotpot| |927 | trifle| |928 | ice cream, icecream| |929 | ice lolly, lolly, lollipop, popsicle| |930 | French loaf| |931 | bagel, beigel| |932 | pretzel| |933 | cheeseburger| |934 | hotdog, hot dog, red hot| |935 | mashed potato| |936 | head cabbage| |937 | broccoli| |938 | cauliflower| |939 | zucchini, courgette| |940 | spaghetti squash| |941 | acorn squash| |942 | butternut squash| |943 | cucumber, cuke| |944 | artichoke, globe artichoke| |945 | bell pepper| |946 | cardoon| |947 | mushroom| |948 | Granny Smith| |949 | strawberry| |950 | orange| |951 | lemon| |952 | fig| |953 | pineapple, ananas| |954 | banana| |955 | jackfruit, jak, jack| |956 | custard apple| |957 | pomegranate| |958 | hay| |959 | carbonara| |960 | chocolate sauce, chocolate syrup| |961 | dough| |962 | meat loaf, meatloaf| |963 | pizza, pizza pie| |964 | potpie| |965 | burrito| |966 | red wine| |967 | espresso| |968 | cup| |969 | eggnog| |970 | alp| |971 | bubble| |972 | cliff, drop, drop-off| |973 | coral reef| |974 | geyser| |975 | lakeside, lakeshore| |976 | promontory, headland, head, foreland| |977 | sandbar, sand bar| |978 | seashore, coast, seacoast, sea-coast| |979 | valley, vale| |980 | volcano| |981 | ballplayer, baseball player| |982 | groom, bridegroom| |983 | scuba diver| |984 | rapeseed| |985 | daisy| |986 | yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum| |987 | corn| |988 | acorn| |989 | hip, rose hip, rosehip| |990 | buckeye, horse chestnut, conker| |991 | coral fungus| |992 | agaric| |993 | gyromitra| |994 | stinkhorn, carrion fungus| |995 | earthstar| |996 | hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa| |997 | bolete| |998 | ear, spike, capitulum| |999 | toilet tissue, toilet paper, bathroom tissue| </details> 
### Data Splits This dataset is a validation-only set. ## Dataset Creation ### Source Data This dataset is sourced from ImageNet, ImageNet-ReaL, ImageNet-V2, ImageNet-A, ImageNet-C, ImageNet-R, ImageNet-Sketch, and ObjectNet. ## Citation Information ``` @article{taesiri2023zoom, title={ImageNet-Hard: The Hardest Images Remaining from a Study of the Power of Zoom and Spatial Biases in Image Classification}, author={Taesiri, Mohammad Reza and Nguyen, Giang and Habchi, Sarra and Bezemer, Cor-Paul and Nguyen, Anh}, journal={arXiv preprint arXiv:2304.05538}, year={2023} } ```
36,306
[ [ -0.04913330078125, -0.0190582275390625, -0.0156707763671875, 0.0019273757934570312, -0.00574493408203125, 0.00574493408203125, 0.0030879974365234375, -0.033599853515625, 0.0474853515625, 0.0160369873046875, -0.0216522216796875, -0.046051025390625, -0.04855346679...
ttxy/sts
2023-05-22T11:02:07.000Z
[ "task_categories:text-classification", "language:code", "license:bsd", "sts", "region:us" ]
ttxy
null
null
0
4
2023-05-22T09:52:45
--- language: - code pretty_name: "semantic text similarity" tags: - sts license: "bsd" task_categories: - text-classification --- sts 2012-2016 datasets
156
[ [ -0.0007109642028808594, -0.0039520263671875, 0.033935546875, 0.007228851318359375, -0.0106964111328125, 0.023284912109375, 0.01375579833984375, -0.014984130859375, 0.01036834716796875, 0.04620361328125, -0.0548095703125, -0.049468994140625, -0.01702880859375, ...
camenduru/hdvila_test
2023-05-22T14:39:53.000Z
[ "region:us" ]
camenduru
null
null
0
4
2023-05-22T14:39:37
--- dataset_info: features: - name: video_id dtype: string - name: url dtype: string - name: clip list: - name: clip_id dtype: string - name: span sequence: string splits: - name: train num_bytes: 764532309 num_examples: 300000 download_size: 236034101 dataset_size: 764532309 --- # Dataset Card for "hdvila_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
502
[ [ -0.05743408203125, -0.04010009765625, 0.0018367767333984375, 0.0170745849609375, -0.01325225830078125, -0.008636474609375, 0.033233642578125, -0.007232666015625, 0.0594482421875, 0.0278472900390625, -0.037811279296875, -0.048309326171875, -0.02435302734375, ...