author
stringlengths
2
29
cardData
null
citation
stringlengths
0
9.58k
description
stringlengths
0
5.93k
disabled
bool
1 class
downloads
float64
1
1M
gated
bool
2 classes
id
stringlengths
2
108
lastModified
stringlengths
24
24
paperswithcode_id
stringlengths
2
45
private
bool
2 classes
sha
stringlengths
40
40
siblings
list
tags
list
readme_url
stringlengths
57
163
readme
stringlengths
0
977k
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-ag_news-default-684001-14155939
2022-08-29T12:47:36.000Z
null
false
269ed925eb51425013b692d0ac25ef66f51611d5
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:ag_news" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-ag_news-default-684001-14155939/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - ag_news eval_info: task: multi_class_classification model: mrm8488/bert-mini-finetuned-age_news-classification metrics: [] dataset_name: ag_news dataset_config: default dataset_split: test col_mapping: text: text target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: mrm8488/bert-mini-finetuned-age_news-classification * Dataset: ag_news * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-boolq-default-049b58-14205948
2022-08-29T14:36:34.000Z
null
false
01ca83ee3481af6129dca76258ee734f20013aa4
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:boolq" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-boolq-default-049b58-14205948/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - boolq eval_info: task: natural_language_inference model: andi611/distilbert-base-uncased-qa-boolq metrics: [] dataset_name: boolq dataset_config: default dataset_split: validation col_mapping: text1: question text2: passage target: answer --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: andi611/distilbert-base-uncased-qa-boolq * Dataset: boolq * Config: default * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-emotion-default-63bd40-14245951
2022-08-29T16:05:35.000Z
null
false
b509d87f11b98dee9d10d6f037479b98824e9fbe
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:emotion" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-emotion-default-63bd40-14245951/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - emotion eval_info: task: multi_class_classification model: bergum/xtremedistil-emotion metrics: [] dataset_name: emotion dataset_config: default dataset_split: test col_mapping: text: text target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: bergum/xtremedistil-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
munggok
null
\
false
1
false
munggok/KoPI
2022-08-30T19:42:36.000Z
oscar
false
d288945200ebed82f502b5695f50a7cec61f2e1e
[]
[ "license:cc", "annotations_creators:no-annotation", "language_creators:found", "multilinguality:monolingual", "language:id", "source_datasets:original", "task_ids:language-modeling" ]
https://huggingface.co/datasets/munggok/KoPI/resolve/main/README.md
--- license: cc annotations_creators: - no-annotation language_creators: - found multilinguality: - monolingual language: - id source_datasets: - original task_categories: - sequence-modeling task_ids: - language-modeling paperswithcode_id: oscar --- KoPI (Korpus Perayapan Indonesia) is Indonesian general corpora for sequence language modelling Subset of KoPI corpora: KoPI-CC + KoPI-CC-NEWS + KoPI-Mc4 + KoPI-Wiki + KoPI-Leipzig + KoPI-Paper
jonaskoenig
null
null
null
false
null
false
jonaskoenig/future_time_references
2022-08-29T18:17:36.000Z
null
false
1d6374d26b730848ac2e01cf6bbca222f6e973f1
[]
[ "license:mit" ]
https://huggingface.co/datasets/jonaskoenig/future_time_references/resolve/main/README.md
--- license: mit ---
mschi
null
null
null
false
1
false
mschi/blogspot_raw
2022-09-13T08:48:23.000Z
null
false
062592d41bbc04c0715c50f75184907f2adc70ca
[]
[ "language:en", "language_creators:other", "license:mit", "multilinguality:monolingual", "size_categories:1M<n<10M", "source_datasets:original", "tags:blogspot", "tags:blogger", "tags:texts", "task_categories:text-classification", "task_categories:text-retrieval", "task_categories:text-generati...
https://huggingface.co/datasets/mschi/blogspot_raw/resolve/main/README.md
--- annotations_creators: [] language: - en language_creators: - other license: - mit multilinguality: - monolingual pretty_name: Blogspot_raw_texts size_categories: - 1M<n<10M source_datasets: - original tags: - blogspot - blogger - texts task_categories: - text-classification - text-retrieval - text-generation - time-series-forecasting task_ids: [] --- # Dataset Card for blogspot raw dataset ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary This dataset is a corpus of raw blogposts from [blogspot](https://blogger.com) mostly in the English language. It was obtained by scraping corpora of [webarchive](https://archive.org) and [commoncrawl](https://commoncrawl.org). ### Supported Tasks and Leaderboards The dataset may be used for training language models or serve other research interests. 
### Languages Mostly English language, but some outliers may occur. ## Dataset Structure [Distribution](https://huggingface.co/datasets/mschi/blogspot_raw/blob/main/blospot_comm_dist.png) The distribution of the blog posts over time can be viewed at ./blogspot_dist_comm.png ### Data Instances [More Information Needed] ### Data Fields text: string URL: string date: string comment: int ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale The dataset was constructed by utilizing the [WARC-dl pipeline](https://github.com/webis-de/web-archive-keras). It was executed on cluster architecture. The corpora of archive.org and commoncrawl.org contain WARC files that contain HTML which gets parsed by the pipeline. The pipeline extracts HTML from the WARC files and applies distributed filtering to efficiently filter for the desired content. ### Source Data #### Initial Data Collection and Normalization The corpora "corpus-commoncrawl-main-2022-05" and "corpus-iwo-internet-archive-wide00001" have been searched for the content present in this dataset. Search terms have been inserted into the preciously mentioned pipeline to filter URLs for "blogspot.com" and characteristic timestamp information contained in the URL (e.g. "/01/2007"). The HTML documents were parsed for specific tags to obtain the timestamps. Further, the data was labeled with the "comment" label if there were some comment markers in the URL, indicating that the retrieved text is from the main text of a blog post or from the comments section. The texts are stored raw and no further processing has been done. #### Who are the source language producers? Since [blogspot](https://blogger.com) provides a high-level framework to allow people everywhere in the world to set up and maintain a blog, the producers of the texts may not be further specified. ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? 
[More Information Needed] ### Personal and Sensitive Information Texts are raw and unfiltered, thus personal and sensitive information, as well as explicit language, may be present in the dataset. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases The retrieval of the timestamps from the HTML documents was not 100% accurate, so a small proportion of wrong or nonsense timestamps can be present in the data. Also we can not guarantee the correctness of the timestamps as well as the "comment" labels. ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators The dataset was constructed during the course "Big Data and Language Technologies" of the Text Mining and Retrieval Group, Department of Computer Science at the University of Leipzig. ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@jonaskonig](https://github.com/jonaskonig), [@maschirmer](https://github.com/maschirmer) and [@1BlattPapier](https://github.com/1BlattPapier) for contributing.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-anli-plain_text-c507f2-14355972
2022-08-29T20:25:09.000Z
null
false
c8b72f8c242a0d8e052de3041c50c5a5e8f2a38e
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:anli" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-anli-plain_text-c507f2-14355972/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - anli eval_info: task: natural_language_inference model: MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli metrics: [] dataset_name: anli dataset_config: plain_text dataset_split: test_r3 col_mapping: text1: premise text2: hypothesis target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli * Dataset: anli * Config: plain_text * Split: test_r3 To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@MoritzLaurer](https://huggingface.co/MoritzLaurer) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-anli-plain_text-1f482c-14395973
2022-08-29T20:37:44.000Z
null
false
0aa1d1e2793c68feafc3ea0267ffbdbb6e145bd2
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:anli" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-anli-plain_text-1f482c-14395973/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - anli eval_info: task: natural_language_inference model: MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli metrics: [] dataset_name: anli dataset_config: plain_text dataset_split: test_r2 col_mapping: text1: premise text2: hypothesis target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli * Dataset: anli * Config: plain_text * Split: test_r2 To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@MoritzLaurer](https://huggingface.co/MoritzLaurer) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-anli-plain_text-dfb10f-14405974
2022-08-29T20:37:45.000Z
null
false
53ffa6b0c5abc115794bc3ac6d4524487cf12499
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:anli" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-anli-plain_text-dfb10f-14405974/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - anli eval_info: task: natural_language_inference model: MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli metrics: [] dataset_name: anli dataset_config: plain_text dataset_split: test_r1 col_mapping: text1: premise text2: hypothesis target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli * Dataset: anli * Config: plain_text * Split: test_r1 To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@MoritzLaurer](https://huggingface.co/MoritzLaurer) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-multi_nli-default-68c6a6-14415975
2022-08-29T20:51:17.000Z
null
false
2b52953aaf495435ed9e0a4beeaf3190b7149f09
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:multi_nli" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-multi_nli-default-68c6a6-14415975/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - multi_nli eval_info: task: natural_language_inference model: MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli metrics: [] dataset_name: multi_nli dataset_config: default dataset_split: validation_matched col_mapping: text1: premise text2: hypothesis target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli * Dataset: multi_nli * Config: default * Split: validation_matched To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@MoritzLaurer](https://huggingface.co/MoritzLaurer) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-multi_nli-default-4a02ee-14425976
2022-08-29T20:51:17.000Z
null
false
b6aeba317590bd7a8fb11ba1d41bbcb1788dd388
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:multi_nli" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-multi_nli-default-4a02ee-14425976/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - multi_nli eval_info: task: natural_language_inference model: MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli metrics: [] dataset_name: multi_nli dataset_config: default dataset_split: validation_mismatched col_mapping: text1: premise text2: hypothesis target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli * Dataset: multi_nli * Config: default * Split: validation_mismatched To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@MoritzLaurer](https://huggingface.co/MoritzLaurer) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-glue-mrpc-4a87ed-14445977
2022-08-30T02:40:01.000Z
null
false
4d80aed9505bdcfd4f7bfa577c66467fb71db4c2
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-mrpc-4a87ed-14445977/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: Intel/roberta-base-mrpc metrics: [] dataset_name: glue dataset_config: mrpc dataset_split: validation col_mapping: text1: sentence1 text2: sentence2 target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: Intel/roberta-base-mrpc * Dataset: glue * Config: mrpc * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@xinhe](https://huggingface.co/xinhe) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-glue-mrpc-71a11b-14455978
2022-08-30T02:40:01.000Z
null
false
19ab9a4e0a4ad3dce1adbc4f0e6595d7c9ebc0d9
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-mrpc-71a11b-14455978/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: Intel/bert-base-uncased-mrpc metrics: [] dataset_name: glue dataset_config: mrpc dataset_split: validation col_mapping: text1: sentence1 text2: sentence2 target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: Intel/bert-base-uncased-mrpc * Dataset: glue * Config: mrpc * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@xinhe](https://huggingface.co/xinhe) for evaluating this model.
Bingsu
null
null
null
false
1
false
Bingsu/national_library_of_korea_book_info
2022-08-30T08:32:14.000Z
null
false
64a374c848cde26885e77f50fa7de87d58697d5d
[]
[ "language:ko", "license:other", "multilinguality:monolingual", "size_categories:1M<n<10M" ]
https://huggingface.co/datasets/Bingsu/national_library_of_korea_book_info/resolve/main/README.md
--- language: - ko license: - other multilinguality: - monolingual pretty_name: national_library_of_korea_book_info size_categories: - 1M<n<10M --- # national_library_of_korea_book_info ## Dataset Description - **Homepage** [문화 빅데이터 플랫폼](https://www.culture.go.kr/bigdata/user/data_market/detail.do?id=63513d7b-9b87-4ec1-a398-0a18ecc45411) - **Download Size** 759 MB - **Generated Size** 2.33 GB - **Total Size** 3.09 GB 국립중앙도서관에서 배포한, 국립중앙도서관에서 보관중인 도서 정보에 관한 데이터. ### License other ([KOGL](https://www.kogl.or.kr/info/license.do#05-tab) (Korea Open Government License) Type-1) ![KOGL_image](https://www.kogl.or.kr/images/front/sub/img_opencode1_m_en.jpg) - According to above KOGL, user can use public works freely and without fee regardless of its commercial use, and can change or modify to create secondary works when user complies with the terms provided as follows: <details> <summary>KOGL Type 1</summary> 1. Source Indication Liability - Users who use public works shall indicate source or copyright as follows: - EX : “000(public institution's name)'s public work is used according to KOGL” - The link shall be provided when online hyperlink for the source website is available. - Marking shall not be used to misguide the third party that the user is sponsored by public institution or user has a special relationship with public institutions. 2. Use Prohibited Information - Personal information that is protected by Personal Information Protection Act, Promotion for Information Network Use and Information Protection Act, etc. - Credit information protected by the Use and Protection of Credit Information Act, etc. - Military secrets protected by Military Secret Protection Act, etc. - Information that is the object of other rights such as trademark right, design right, design right or patent right, etc., or that is owned by third party's copyright. - Other information that is use prohibited information according to other laws. 3. 
Public Institution's Liability Exemption - Public institution does not guarantee the accuracy or continued service of public works. - Public institution and its employees do not have any liability for any kind of damage or disadvantage that may arise by using public works. 4. Effect of Use Term Violation - The use permission is automatically terminated when user violates any of the KOGL's Use Terms, and the user shall immediately stop using public works. </details> ## Data Structure ### Data Instance ```python >>> from datasets import load_dataset >>> >>> ds = load_dataset("Bingsu/national_library_of_korea_book_info", split="train") >>> ds Dataset({ features: ['isbn13', 'vol', 'title', 'author', 'publisher', 'price', 'img_url', 'description'], num_rows: 7919278 }) ``` ```python >>> ds.features {'isbn13': Value(dtype='string', id=None), 'vol': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None), 'author': Value(dtype='string', id=None), 'publisher': Value(dtype='string', id=None), 'price': Value(dtype='string', id=None), 'img_url': Value(dtype='string', id=None), 'description': Value(dtype='string', id=None)} ``` or ```python >>> import pandas as pd >>> >>> url = "https://huggingface.co/datasets/Bingsu/national_library_of_korea_book_info/resolve/main/train.csv.gz" >>> df = pd.read_csv(url, low_memory=False) ``` ```python >>> df.info() <class 'pandas.core.frame.DataFrame'> RangeIndex: 7919278 entries, 0 to 7919277 Data columns (total 8 columns): # Column Dtype --- ------ ----- 0 isbn13 object 1 vol object 2 title object 3 author object 4 publisher object 5 price object 6 img_url object 7 description object dtypes: object(8) memory usage: 483.4+ MB ``` ### Null data ```python >>> df.isnull().sum() isbn13 3277 vol 5933882 title 19662 author 122998 publisher 1007553 price 3096535 img_url 3182882 description 4496194 dtype: int64 ``` ### Note ```python >>> df[df["description"].str.contains("[해외주문원서]", regex=False) == True].head()["description"] 10773 
[해외주문원서] 고객님의 요청으로 수입 주문하는 도서이므로, 주문취소 및 반품이 불... 95542 [해외주문원서] 고객님의 요청으로 수입 주문하는 도서이므로, 주문취소 및 반품이 불... 95543 [해외주문원서] 고객님의 요청으로 수입 주문하는 도서이므로, 주문취소 및 반품이 불... 96606 [해외주문원서] 고객님의 요청으로 수입 주문하는 도서이므로, 주문취소 및 반품이 불... 96678 [해외주문원서] 고객님의 요청으로 수입 주문하는 도서이므로, 주문취소 및 반품이 불... Name: description, dtype: object ```
alexandrainst
null
# @InProceedings{huggingface:dataset, # title = {ScandiQA: A Scandinavian Question Answering Dataset}, # author={Dan Saattrup Nielsen}, # year={2022} # } #
ScandiQA is a dataset of questions and answers in the Danish, Norwegian, and Swedish languages. All samples come from the Natural Questions (NQ) dataset, which is a large question answering dataset from Google searches. The Scandinavian questions and answers come from the MKQA dataset, where 10,000 NQ samples were manually translated into, among others, Danish, Norwegian, and Swedish. However, this did not include a translated context, hindering the training of extractive question answering models. We merged the NQ dataset with the MKQA dataset, and extracted contexts as either "long answers" from the NQ dataset, being the paragraph in which the answer was found, or otherwise we extract the context by locating the paragraphs which have the largest cosine similarity to the question, and which contains the desired answer. Further, many answers in the MKQA dataset were "language normalised": for instance, all date answers were converted to the format "YYYY-MM-DD", meaning that in most cases these answers are not appearing in any paragraphs. We solve this by extending the MKQA answers with plausible "answer candidates", being slight perturbations or translations of the answer. With the contexts extracted, we translated these to Danish, Swedish and Norwegian using the DeepL translation service for Danish and Swedish, and the Google Translation service for Norwegian. After translation we ensured that the Scandinavian answers do indeed occur in the translated contexts. As we are filtering the MKQA samples at both the "merging stage" and the "translation stage", we are not able to fully convert the 10,000 samples to the Scandinavian languages, and instead get roughly 8,000 samples per language. These have further been split into a training, validation and test split, with the former two containing roughly 750 samples. The splits have been created in such a way that the proportion of samples without an answer is roughly the same in each split.
false
518
false
alexandrainst/scandiqa
2022-11-01T11:12:10.000Z
null
false
c0f82536badc6d25932513fb8a314f167e65d77a
[]
[ "language:da", "language:sv", "language:no", "license:cc-by-sa-4.0", "multilinguality:multilingual", "size_categories:1K<n<10K", "source_datasets:mkqa", "source_datasets:natural_questions", "task_categories:question-answering", "task_ids:extractive-qa" ]
https://huggingface.co/datasets/alexandrainst/scandiqa/resolve/main/README.md
--- pretty_name: ScandiQA language: - da - sv - no license: - cc-by-sa-4.0 multilinguality: - multilingual size_categories: - 1K<n<10K source_datasets: - mkqa - natural_questions task_categories: - question-answering task_ids: - extractive-qa --- # Dataset Card for ScandiQA ## Dataset Description - **Repository:** <https://github.com/alexandrainst/scandi-qa> - **Point of Contact:** [Dan Saattrup Nielsen](mailto:dan.nielsen@alexandra.dk) - **Size of downloaded dataset files:** 69 MB - **Size of the generated dataset:** 67 MB - **Total amount of disk used:** 136 MB ### Dataset Summary ScandiQA is a dataset of questions and answers in the Danish, Norwegian, and Swedish languages. All samples come from the Natural Questions (NQ) dataset, which is a large question answering dataset from Google searches. The Scandinavian questions and answers come from the MKQA dataset, where 10,000 NQ samples were manually translated into, among others, Danish, Norwegian, and Swedish. However, this did not include a translated context, hindering the training of extractive question answering models. We merged the NQ dataset with the MKQA dataset, and extracted contexts as either "long answers" from the NQ dataset, being the paragraph in which the answer was found, or otherwise we extract the context by locating the paragraphs which have the largest cosine similarity to the question, and which contains the desired answer. Further, many answers in the MKQA dataset were "language normalised": for instance, all date answers were converted to the format "YYYY-MM-DD", meaning that in most cases these answers are not appearing in any paragraphs. We solve this by extending the MKQA answers with plausible "answer candidates", being slight perturbations or translations of the answer. 
With the contexts extracted, we translated these to Danish, Swedish and Norwegian using the [DeepL translation service](https://www.deepl.com/pro-api?cta=header-pro-api) for Danish and Swedish, and the [Google Translation service](https://cloud.google.com/translate/docs/reference/rest/) for Norwegian. After translation we ensured that the Scandinavian answers do indeed occur in the translated contexts. As we are filtering the MKQA samples at both the "merging stage" and the "translation stage", we are not able to fully convert the 10,000 samples to the Scandinavian languages, and instead get roughly 8,000 samples per language. These have further been split into a training, validation and test split, with the latter two containing roughly 750 samples. The splits have been created in such a way that the proportion of samples without an answer is roughly the same in each split. ### Supported Tasks and Leaderboards Training machine learning models for extractive question answering is the intended task for this dataset. No leaderboard is active at this point. ### Languages The dataset is available in Danish (`da`), Swedish (`sv`) and Norwegian (`no`). ## Dataset Structure ### Data Instances - **Size of downloaded dataset files:** 69 MB - **Size of the generated dataset:** 67 MB - **Total amount of disk used:** 136 MB An example from the `train` split of the `da` subset looks as follows. ``` { 'example_id': 123, 'question': 'Er dette en test?', 'answer': 'Dette er en test', 'answer_start': 0, 'context': 'Dette er en testkontekst.', 'answer_en': 'This is a test', 'answer_start_en': 0, 'context_en': "This is a test context.", 'title_en': 'Train test' } ``` ### Data Fields The data fields are the same among all splits. - `example_id`: an `int64` feature. - `question`: a `string` feature. - `answer`: a `string` feature. - `answer_start`: an `int64` feature. - `context`: a `string` feature. - `answer_en`: a `string` feature. - `answer_start_en`: an `int64` feature. 
- `context_en`: a `string` feature. - `title_en`: a `string` feature. ### Data Splits | name | train | validation | test | |----------|------:|-----------:|-----:| | da | 6311 | 749 | 750 | | sv | 6299 | 750 | 749 | | no | 6314 | 749 | 750 | ## Dataset Creation ### Curation Rationale The Scandinavian languages does not have any gold standard question answering dataset. This is not quite gold standard, but the fact both the questions and answers are all manually translated, it is a solid silver standard dataset. ### Source Data The original data was collected from the [MKQA](https://github.com/apple/ml-mkqa/) and [Natural Questions](https://ai.google.com/research/NaturalQuestions) datasets from Apple and Google, respectively. ## Additional Information ### Dataset Curators [Dan Saattrup Nielsen](https://saattrupdan.github.io/) from the [The Alexandra Institute](https://alexandra.dk/) curated this dataset. ### Licensing Information The dataset is licensed under the [CC BY-SA 4.0 license](https://creativecommons.org/licenses/by-sa/4.0/).
pokameswaran
null
null
null
false
4
false
pokameswaran/ami-6h
2022-08-31T09:17:59.000Z
null
false
7b18a94b22ed20a4651164cb34365c94f35441d0
[]
[ "license:cc-by-4.0" ]
https://huggingface.co/datasets/pokameswaran/ami-6h/resolve/main/README.md
--- license: cc-by-4.0 ---
roskoN
null
\
\
false
10
false
roskoN/stereoset_german
2022-08-30T14:53:55.000Z
null
false
d322d49c6234ed7c3fd867ef57a2aed1539a5b20
[]
[ "license:cc-by-sa-4.0" ]
https://huggingface.co/datasets/roskoN/stereoset_german/resolve/main/README.md
--- license: cc-by-sa-4.0 ---
demo-org
null
null
null
false
1
false
demo-org/diabetes
2022-08-30T21:08:59.000Z
null
false
2c1e9e1a4deba071907e637095df2467c0c29472
[]
[ "language:en", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "task_categories:text-classification" ]
https://huggingface.co/datasets/demo-org/diabetes/resolve/main/README.md
--- language: - en multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification paperswithcode_id: null pretty_name: Diabetes --- # Dataset Card for Diabetes This file is a copy; the original version is hosted at [data.world](https://data.world/rshah/diabetes)
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-billsum-default-dd3eba-14585981
2022-08-31T07:44:21.000Z
null
false
03a3c90f11ff6485cd4955a23f0a6e07b5158936
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:billsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-billsum-default-dd3eba-14585981/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - billsum eval_info: task: summarization model: pszemraj/long-t5-tglobal-base-16384-booksum-V11-big_patent-V2 metrics: [] dataset_name: billsum dataset_config: default dataset_split: test col_mapping: text: text target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-base-16384-booksum-V11-big_patent-V2 * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-billsum-default-3fec5f-14625986
2022-09-01T10:02:44.000Z
null
false
c2248d5acd8782d3046775ac52db8eb3dad50305
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:billsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-billsum-default-3fec5f-14625986/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - billsum eval_info: task: summarization model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP11 metrics: [] dataset_name: billsum dataset_config: default dataset_split: test col_mapping: text: text target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP11 * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-billsum-default-3fec5f-14625985
2022-09-01T04:09:46.000Z
null
false
f4f32ebb0db7da41e075f69405e7e396dd93d2d0
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:billsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-billsum-default-3fec5f-14625985/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - billsum eval_info: task: summarization model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP metrics: [] dataset_name: billsum dataset_config: default dataset_split: test col_mapping: text: text target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-xsum-default-6f5db0-14615984
2022-09-01T13:24:17.000Z
null
false
7977d7e4d2c8bd3f9da965a99d6057387f58875a
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:xsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-xsum-default-6f5db0-14615984/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - xsum eval_info: task: summarization model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP13 metrics: [] dataset_name: xsum dataset_config: default dataset_split: test col_mapping: text: document target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP13 * Dataset: xsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-billsum-default-3fec5f-14625987
2022-09-01T08:04:11.000Z
null
false
0cd9acdb0ea6acb0442697499b54a323105dc95d
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:billsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-billsum-default-3fec5f-14625987/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - billsum eval_info: task: summarization model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP13 metrics: [] dataset_name: billsum dataset_config: default dataset_split: test col_mapping: text: text target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP13 * Dataset: billsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-samsum-samsum-f593d1-14645991
2022-08-31T01:18:28.000Z
null
false
d51d497dbd52f384789619ba69627cd55541ecd9
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:samsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-samsum-samsum-f593d1-14645991/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - samsum eval_info: task: summarization model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP11 metrics: [] dataset_name: samsum dataset_config: samsum dataset_split: test col_mapping: text: dialogue target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP11 * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-samsum-samsum-f593d1-14645992
2022-08-31T01:33:07.000Z
null
false
22b0f359dc343c3842ae0b3b25410185a06dc368
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:samsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-samsum-samsum-f593d1-14645992/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - samsum eval_info: task: summarization model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP13 metrics: [] dataset_name: samsum dataset_config: samsum dataset_split: test col_mapping: text: dialogue target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: pszemraj/long-t5-tglobal-large-pubmed-3k-booksum-16384-WIP13 * Dataset: samsum * Config: samsum * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model.
pixta-ai
null
null
null
false
null
false
pixta-ai/Plane-images-in-multiple-scenes
2022-09-05T04:23:05.000Z
null
false
626afad55214c9e1949031f8a19c13834f5b817f
[]
[]
https://huggingface.co/datasets/pixta-ai/Plane-images-in-multiple-scenes/resolve/main/README.md
--- YAML tags: - copy-paste the tags obtained with the tagging app: https://github.com/huggingface/datasets-tagging --- # Dataset Card for pixta-ai/Plane-images-in-multiple-scenes ## Dataset Description - **Homepage:** https://www.pixta.ai/ - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary 4,000 plane images in multiple scenes, covering multiple types of planes in disproportionate amounts; passenger planes are the majority. Each image contains from 1 to 10 visible planes. For more details, please refer to the link: https://www.pixta.ai/ Or send your inquiries to contact@pixta.ai ### Supported Tasks and Leaderboards object-detection, computer-vision: The dataset can be used to train or enhance models for object detection. ### Languages English ### License Academic & commercial usage
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-squad-plain_text-d52fee-14655993
2022-08-31T06:45:10.000Z
null
false
44488c9a08a774143dca37c60c28116c766e48fd
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad-plain_text-d52fee-14655993/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad eval_info: task: extractive_question_answering model: mrp/bert-finetuned-squad metrics: ['bleu', 'rouge'] dataset_name: squad dataset_config: plain_text dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: mrp/bert-finetuned-squad * Dataset: squad * Config: plain_text * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@saminaminaeheh](https://huggingface.co/saminaminaeheh) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-glue-mrpc-e15d1b-14665994
2022-08-31T07:31:19.000Z
null
false
a8b2fb9790419752e26300ce37c9eabc36411bd4
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-mrpc-e15d1b-14665994/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: sgugger/glue-mrpc metrics: [] dataset_name: glue dataset_config: mrpc dataset_split: validation col_mapping: text1: sentence1 text2: sentence2 target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: sgugger/glue-mrpc * Dataset: glue * Config: mrpc * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-glue-mrpc-e15d1b-14665997
2022-08-31T07:33:42.000Z
null
false
1a3a5ca04db7486f9737e64f16c54c1d2b48fba4
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-mrpc-e15d1b-14665997/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: Intel/camembert-base-mrpc metrics: [] dataset_name: glue dataset_config: mrpc dataset_split: validation col_mapping: text1: sentence1 text2: sentence2 target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: Intel/camembert-base-mrpc * Dataset: glue * Config: mrpc * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-glue-mrpc-e15d1b-14666001
2022-08-31T07:36:29.000Z
null
false
55730ed50204cd1be2d9f3d0f828b34a762f6ae9
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-mrpc-e15d1b-14666001/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: sgugger/bert-finetuned-mrpc metrics: [] dataset_name: glue dataset_config: mrpc dataset_split: validation col_mapping: text1: sentence1 text2: sentence2 target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: sgugger/bert-finetuned-mrpc * Dataset: glue * Config: mrpc * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-glue-qqp-c973af-14676003
2022-08-31T07:38:38.000Z
null
false
09a1805befbcdb794978a12558e99ea3d8dd2cb1
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-qqp-c973af-14676003/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: Alireza1044/mobilebert_qqp metrics: [] dataset_name: glue dataset_config: qqp dataset_split: validation col_mapping: text1: question1 text2: question2 target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: Alireza1044/mobilebert_qqp * Dataset: glue * Config: qqp * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-glue-qqp-c973af-14676011
2022-08-31T07:43:33.000Z
null
false
0434b76db92af9825be658211a80b3ce2fcb41ba
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-qqp-c973af-14676011/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: gchhablani/bert-base-cased-finetuned-qqp metrics: [] dataset_name: glue dataset_config: qqp dataset_split: validation col_mapping: text1: question1 text2: question2 target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: gchhablani/bert-base-cased-finetuned-qqp * Dataset: glue * Config: qqp * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-glue-mnli-026a6e-14686015
2022-08-31T07:44:58.000Z
null
false
0b092afb93ac87046ff0da854e0f025408b23915
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-mnli-026a6e-14686015/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: Alireza1044/mobilebert_mnli metrics: [] dataset_name: glue dataset_config: mnli dataset_split: validation_matched col_mapping: text1: premise text2: hypothesis target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: Alireza1044/mobilebert_mnli * Dataset: glue * Config: mnli * Split: validation_matched To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-glue-mnli-026a6e-14686017
2022-08-31T07:48:14.000Z
null
false
8d30d6afd086cb75a9a24e114001dcbadd64c5b4
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-mnli-026a6e-14686017/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: Jiva/xlm-roberta-large-it-mnli metrics: [] dataset_name: glue dataset_config: mnli dataset_split: validation_matched col_mapping: text1: premise text2: hypothesis target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: Jiva/xlm-roberta-large-it-mnli * Dataset: glue * Config: mnli * Split: validation_matched To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-glue-mnli-026a6e-14686020
2022-08-31T07:51:25.000Z
null
false
b55cb6fad539ade72ccb0bf50f7cc661dc764116
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-mnli-026a6e-14686020/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: nbhimte/tiny-bert-mnli-distilled metrics: [] dataset_name: glue dataset_config: mnli dataset_split: validation_matched col_mapping: text1: premise text2: hypothesis target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: nbhimte/tiny-bert-mnli-distilled * Dataset: glue * Config: mnli * Split: validation_matched To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-glue-qnli-1747ab-14696022
2022-08-31T07:53:56.000Z
null
false
2207c72eb1dfd42516e8bb8e8e428a1f15fc0f9e
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-qnli-1747ab-14696022/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: JeremiahZ/roberta-base-qnli metrics: [] dataset_name: glue dataset_config: qnli dataset_split: validation col_mapping: text1: question text2: sentence target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: JeremiahZ/roberta-base-qnli * Dataset: glue * Config: qnli * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-project-a741994f-efcd-40c8-8652-be4f42ba26cd-31
2022-08-31T08:10:00.000Z
null
false
7bcd8d67060c921ea89a52433ce80e7dc753784c
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:emotion" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-project-a741994f-efcd-40c8-8652-be4f42ba26cd-31/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - emotion eval_info: task: multi_class_classification model: autoevaluate/multi-class-classification metrics: ['matthews_correlation'] dataset_name: emotion dataset_config: default dataset_split: test col_mapping: text: text target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: autoevaluate/multi-class-classification * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-glue-qnli-1747ab-14696030
2022-08-31T08:10:09.000Z
null
false
9ad5d61faaa69bf55d889259015496b6d39ea90a
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:glue" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-glue-qnli-1747ab-14696030/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - glue eval_info: task: natural_language_inference model: gchhablani/bert-base-cased-finetuned-qnli metrics: [] dataset_name: glue dataset_config: qnli dataset_split: validation col_mapping: text1: question text2: sentence target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Natural Language Inference * Model: gchhablani/bert-base-cased-finetuned-qnli * Dataset: glue * Config: qnli * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model.
lucifertrj
null
null
null
false
null
false
lucifertrj/AnimeQuotes
2022-08-31T11:03:26.000Z
null
false
adc9a8b5f8384baca023be9e41de453cdecb5c01
[]
[ "license:apache-2.0" ]
https://huggingface.co/datasets/lucifertrj/AnimeQuotes/resolve/main/README.md
--- license: apache-2.0 ---
cakiki
null
null
null
false
1
false
cakiki/ORCAS
2022-08-31T11:44:09.000Z
null
false
17fac3405c9f2fd59b18ef5cbb6f73fede1f3c40
[]
[ "license:cc-by-4.0" ]
https://huggingface.co/datasets/cakiki/ORCAS/resolve/main/README.md
--- license: cc-by-4.0 ---
khalidalt
null
null
null
false
1
false
khalidalt/SANAD
2022-09-03T19:36:00.000Z
null
false
cc04efc6edd44fc890b7625b82e36e023a353c59
[]
[ "license:cc-by-4.0" ]
https://huggingface.co/datasets/khalidalt/SANAD/resolve/main/README.md
--- license: cc-by-4.0 --- # Dataset Card for SANAD ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:https://data.mendeley.com/datasets/57zpx667y9/2** ### Dataset Summary SANAD Dataset is a large collection of Arabic news articles that can be used in different Arabic NLP tasks such as Text Classification and Word Embedding. The articles were collected using Python scripts written specifically for three popular news websites: AlKhaleej, AlArabiya and Akhbarona. All datasets have seven categories [Culture, Finance, Medical, Politics, Religion, Sports and Tech], except AlArabiya which doesn’t have [Religion]. SANAD contains a total number of 190k+ articles. 
### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information license: cc-by-4.0 ### Citation Information ``` @article{einea2019sanad, title={Sanad: Single-label arabic news articles dataset for automatic text categorization}, author={Einea, Omar and Elnagar, Ashraf and Al Debsi, Ridhwan}, journal={Data in brief}, volume={25}, pages={104076}, year={2019}, publisher={Elsevier} } ``` ### Contributions
Ramamurthi
null
null
null
false
1
false
Ramamurthi/yelp_reviews_encoded_hidden_outputs
2022-08-31T21:31:04.000Z
null
false
a9ce33acf817e1d68c82a1fd3ab615c3515f0852
[]
[ "license:mit" ]
https://huggingface.co/datasets/Ramamurthi/yelp_reviews_encoded_hidden_outputs/resolve/main/README.md
--- license: mit ---
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906065
2022-08-31T21:51:46.000Z
null
false
78d2052bec6926a380c29fafca8557bced46ad43
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906065/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/tinyroberta-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/tinyroberta-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906066
2022-08-31T21:52:06.000Z
null
false
ce4204c2bd9b8eb2d0872b9b0ea63f0200030771
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906066/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/roberta-base-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/roberta-base-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906067
2022-08-31T21:53:49.000Z
null
false
101220450c4e9337566488a595372390246937c9
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906067/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/roberta-large-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/roberta-large-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906068
2022-08-31T21:55:28.000Z
null
false
72b9520267fa0633669d76cdf4968d6c25521b96
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906068/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/xlm-roberta-base-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/xlm-roberta-base-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906069
2022-08-31T21:57:53.000Z
null
false
cca945ceb6b114937af9e69853666dc3d12ef1c0
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906069/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/xlm-roberta-large-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/xlm-roberta-large-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906070
2022-08-31T21:57:24.000Z
null
false
b66f3c90f539de1eb33ae4b3b6e84c86e67d644a
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906070/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/roberta-base-squad2-covid metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/roberta-base-squad2-covid * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906071
2022-08-31T21:58:49.000Z
null
false
ec55bc782a252819ffe12f8097640286e5130157
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906071/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/roberta-base-squad2-distilled metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/roberta-base-squad2-distilled * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906072
2022-08-31T22:01:20.000Z
null
false
85b74f86f553a969c7d22d22ee177c07739ede2f
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906072/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/xlm-roberta-base-squad2-distilled metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/xlm-roberta-base-squad2-distilled * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916074
2022-08-31T22:01:33.000Z
null
false
6441cc0b487b62b88a44999da1d1a6df5051db1d
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916074/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/bert-base-cased-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/bert-base-cased-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906073
2022-08-31T22:02:14.000Z
null
false
9e7039c7a58178ec63a3938b449bbd35ebf912df
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-76c05b-14906073/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepakvk/roberta-base-squad2-finetuned-squad metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepakvk/roberta-base-squad2-finetuned-squad * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916077
2022-08-31T22:04:31.000Z
null
false
30825b4b2d8e9b5671ec15a8218bdda56f470b0b
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916077/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/bert-medium-squad2-distilled metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/bert-medium-squad2-distilled * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916076
2022-08-31T22:06:09.000Z
null
false
ac5b4b0694f05ab94ed402208b645204dbc7f685
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916076/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/bert-base-uncased-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/bert-base-uncased-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916078
2022-08-31T22:10:07.000Z
null
false
70eb6800ed3b65b6ef9c1b424928669979a9e322
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916078/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/bert-large-uncased-whole-word-masking-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/bert-large-uncased-whole-word-masking-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916080
2022-08-31T22:13:11.000Z
null
false
4ae1a5e50013521e0d49bacbc0e4759230b2e0c7
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916080/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/deberta-v3-large-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/deberta-v3-large-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916075
2022-08-31T22:11:47.000Z
null
false
2455dc91a08af79fa79ed41e9a60ceec159629c0
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916075/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/tinybert-6l-768d-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/tinybert-6l-768d-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916079
2022-08-31T22:14:24.000Z
null
false
e3290585c7c08b65826dbf628bb64eb9e3d60e92
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916079/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/deberta-v3-base-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/deberta-v3-base-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916081
2022-08-31T22:14:00.000Z
null
false
6403e178c742dcd7c2b572e9e4df8f33577eb62d
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916081/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/electra-base-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/electra-base-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916082
2022-08-31T22:15:10.000Z
null
false
6ec84a0ec5da70e845deca75ffa6141a28839907
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-38b250-14916082/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: deepset/minilm-uncased-squad2 metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: deepset/minilm-uncased-squad2 * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
evaluate
null
@inproceedings{wang2019glue, title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding}, author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.}, note={In the Proceedings of ICLR.}, year={2019} }
GLUE, the General Language Understanding Evaluation benchmark (https://gluebenchmark.com/) is a collection of resources for training, evaluating, and analyzing natural language understanding systems.
false
1
false
evaluate/glue-ci
2022-09-15T20:12:43.000Z
glue
false
ba06dc05a1b91c497f489bfa9793acdfb4ce06ec
[]
[ "annotations_creators:other", "language_creators:other", "language:en", "license:cc-by-4.0", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "task_categories:text-classification", "task_ids:acceptability-classification", "task_ids:natural-language-inference...
https://huggingface.co/datasets/evaluate/glue-ci/resolve/main/README.md
--- annotations_creators: - other language_creators: - other language: - en license: - cc-by-4.0 multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - original task_categories: - text-classification task_ids: - acceptability-classification - natural-language-inference - semantic-similarity-scoring - sentiment-classification - text-classification-other-coreference-nli - text-classification-other-paraphrase-identification - text-classification-other-qa-nli - text-scoring paperswithcode_id: glue pretty_name: GLUE (General Language Understanding Evaluation benchmark) train-eval-index: - config: cola task: text-classification task_id: binary_classification splits: train_split: train eval_split: validation col_mapping: sentence: text label: target - config: sst2 task: text-classification task_id: binary_classification splits: train_split: train eval_split: validation col_mapping: sentence: text label: target - config: mrpc task: text-classification task_id: natural_language_inference splits: train_split: train eval_split: validation col_mapping: sentence1: text1 sentence2: text2 label: target - config: qqp task: text-classification task_id: natural_language_inference splits: train_split: train eval_split: validation col_mapping: question1: text1 question2: text2 label: target - config: stsb task: text-classification task_id: natural_language_inference splits: train_split: train eval_split: validation col_mapping: sentence1: text1 sentence2: text2 label: target - config: mnli task: text-classification task_id: natural_language_inference splits: train_split: train eval_split: validation_matched col_mapping: premise: text1 hypothesis: text2 label: target - config: mnli_mismatched task: text-classification task_id: natural_language_inference splits: train_split: train eval_split: validation col_mapping: premise: text1 hypothesis: text2 label: target - config: mnli_matched task: text-classification task_id: natural_language_inference splits: 
train_split: train eval_split: validation col_mapping: premise: text1 hypothesis: text2 label: target - config: qnli task: text-classification task_id: natural_language_inference splits: train_split: train eval_split: validation col_mapping: question: text1 sentence: text2 label: target - config: rte task: text-classification task_id: natural_language_inference splits: train_split: train eval_split: validation col_mapping: sentence1: text1 sentence2: text2 label: target - config: wnli task: text-classification task_id: natural_language_inference splits: train_split: train eval_split: validation col_mapping: sentence1: text1 sentence2: text2 label: target configs: - ax - cola - mnli - mnli_matched - mnli_mismatched - mrpc - qnli - qqp - rte - sst2 - stsb - wnli --- # Dataset Card for GLUE ## Table of Contents - [Dataset Card for GLUE](#dataset-card-for-glue) - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [ax](#ax) - [cola](#cola) - [mnli](#mnli) - [mnli_matched](#mnli_matched) - [mnli_mismatched](#mnli_mismatched) - [mrpc](#mrpc) - [qnli](#qnli) - [qqp](#qqp) - [rte](#rte) - [sst2](#sst2) - [stsb](#stsb) - [wnli](#wnli) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [ax](#ax-1) - [cola](#cola-1) - [mnli](#mnli-1) - [mnli_matched](#mnli_matched-1) - [mnli_mismatched](#mnli_mismatched-1) - [mrpc](#mrpc-1) - [qnli](#qnli-1) - [qqp](#qqp-1) - [rte](#rte-1) - [sst2](#sst2-1) - [stsb](#stsb-1) - [wnli](#wnli-1) - [Data Fields](#data-fields) - [ax](#ax-2) - [cola](#cola-2) - [mnli](#mnli-2) - [mnli_matched](#mnli_matched-2) - [mnli_mismatched](#mnli_mismatched-2) - [mrpc](#mrpc-2) - [qnli](#qnli-2) - [qqp](#qqp-2) - [rte](#rte-2) - [sst2](#sst2-2) - [stsb](#stsb-2) - [wnli](#wnli-2) - [Data Splits](#data-splits) - [ax](#ax-3) - [cola](#cola-3) - [mnli](#mnli-3) - 
[mnli_matched](#mnli_matched-3) - [mnli_mismatched](#mnli_mismatched-3) - [mrpc](#mrpc-3) - [qnli](#qnli-3) - [qqp](#qqp-3) - [rte](#rte-3) - [sst2](#sst2-3) - [stsb](#stsb-3) - [wnli](#wnli-3) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization) - [Who are the source language producers?](#who-are-the-source-language-producers) - [Annotations](#annotations) - [Annotation process](#annotation-process) - [Who are the annotators?](#who-are-the-annotators) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://nyu-mll.github.io/CoLA/](https://nyu-mll.github.io/CoLA/) - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) - **Size of downloaded dataset files:** 955.33 MB - **Size of the generated dataset:** 229.68 MB - **Total amount of disk used:** 1185.01 MB ### Dataset Summary GLUE, the General Language Understanding Evaluation benchmark (https://gluebenchmark.com/) is a collection of resources for training, 
evaluating, and analyzing natural language understanding systems. ### Supported Tasks and Leaderboards The leaderboard for the GLUE benchmark can be found [at this address](https://gluebenchmark.com/). It comprises the following tasks: #### ax A manually-curated evaluation dataset for fine-grained analysis of system performance on a broad range of linguistic phenomena. This dataset evaluates sentence understanding through Natural Language Inference (NLI) problems. Use a model trained on MulitNLI to produce predictions for this dataset. #### cola The Corpus of Linguistic Acceptability consists of English acceptability judgments drawn from books and journal articles on linguistic theory. Each example is a sequence of words annotated with whether it is a grammatical English sentence. #### mnli The Multi-Genre Natural Language Inference Corpus is a crowdsourced collection of sentence pairs with textual entailment annotations. Given a premise sentence and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are gathered from ten different sources, including transcribed speech, fiction, and government reports. The authors of the benchmark use the standard test set, for which they obtained private labels from the RTE authors, and evaluate on both the matched (in-domain) and mismatched (cross-domain) section. They also uses and recommend the SNLI corpus as 550k examples of auxiliary training data. #### mnli_matched The matched validation and test splits from MNLI. See the "mnli" BuilderConfig for additional information. #### mnli_mismatched The mismatched validation and test splits from MNLI. See the "mnli" BuilderConfig for additional information. 
#### mrpc The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of sentence pairs automatically extracted from online news sources, with human annotations for whether the sentences in the pair are semantically equivalent. #### qnli The Stanford Question Answering Dataset is a question-answering dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn from Wikipedia) contains the answer to the corresponding question (written by an annotator). The authors of the benchmark convert the task into sentence pair classification by forming a pair between each question and each sentence in the corresponding context, and filtering out pairs with low lexical overlap between the question and the context sentence. The task is to determine whether the context sentence contains the answer to the question. This modified version of the original task removes the requirement that the model select the exact answer, but also removes the simplifying assumptions that the answer is always present in the input and that lexical overlap is a reliable cue. #### qqp The Quora Question Pairs2 dataset is a collection of question pairs from the community question-answering website Quora. The task is to determine whether a pair of questions are semantically equivalent. #### rte The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual entailment challenges. The authors of the benchmark combined the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009). Examples are constructed based on news and Wikipedia text. The authors of the benchmark convert all datasets to a two-class split, where for three-class datasets they collapse neutral and contradiction into not entailment, for consistency. #### sst2 The Stanford Sentiment Treebank consists of sentences from movie reviews and human annotations of their sentiment. 
The task is to predict the sentiment of a given sentence. It uses the two-way (positive/negative) class split, with only sentence-level labels. #### stsb The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of sentence pairs drawn from news headlines, video and image captions, and natural language inference data. Each pair is human-annotated with a similarity score from 1 to 5. #### wnli The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task in which a system must read a sentence with a pronoun and select the referent of that pronoun from a list of choices. The examples are manually constructed to foil simple statistical methods: Each one is contingent on contextual information provided by a single word or phrase in the sentence. To convert the problem into sentence pair classification, the authors of the benchmark construct sentence pairs by replacing the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the pronoun substituted is entailed by the original sentence. They use a small evaluation set consisting of new examples derived from fiction books that was shared privately by the authors of the original corpus. While the included training set is balanced between two classes, the test set is imbalanced between them (65% not entailment). Also, due to a data quirk, the development set is adversarial: hypotheses are sometimes shared between training and development examples, so if a model memorizes the training examples, they will predict the wrong label on corresponding development set example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence between a model's score on this task and its score on the unconverted original task. The authors of the benchmark call converted dataset WNLI (Winograd NLI). 
### Languages The language data in GLUE is in English (BCP-47 `en`) ## Dataset Structure ### Data Instances #### ax - **Size of downloaded dataset files:** 0.21 MB - **Size of the generated dataset:** 0.23 MB - **Total amount of disk used:** 0.44 MB An example of 'test' looks as follows. ``` { "premise": "The cat sat on the mat.", "hypothesis": "The cat did not sit on the mat.", "label": -1, "idx: 0 } ``` #### cola - **Size of downloaded dataset files:** 0.36 MB - **Size of the generated dataset:** 0.58 MB - **Total amount of disk used:** 0.94 MB An example of 'train' looks as follows. ``` { "sentence": "Our friends won't buy this analysis, let alone the next one we propose.", "label": 1, "id": 0 } ``` #### mnli - **Size of downloaded dataset files:** 298.29 MB - **Size of the generated dataset:** 78.65 MB - **Total amount of disk used:** 376.95 MB An example of 'train' looks as follows. ``` { "premise": "Conceptually cream skimming has two basic dimensions - product and geography.", "hypothesis": "Product and geography are what make cream skimming work.", "label": 1, "idx": 0 } ``` #### mnli_matched - **Size of downloaded dataset files:** 298.29 MB - **Size of the generated dataset:** 3.52 MB - **Total amount of disk used:** 301.82 MB An example of 'test' looks as follows. ``` { "premise": "Hierbas, ans seco, ans dulce, and frigola are just a few names worth keeping a look-out for.", "hypothesis": "Hierbas is a name worth looking out for.", "label": -1, "idx": 0 } ``` #### mnli_mismatched - **Size of downloaded dataset files:** 298.29 MB - **Size of the generated dataset:** 3.73 MB - **Total amount of disk used:** 302.02 MB An example of 'test' looks as follows. 
``` { "premise": "What have you decided, what are you going to do?", "hypothesis": "So what's your decision?, "label": -1, "idx": 0 } ``` #### mrpc [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### qnli [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### qqp [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### rte [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### sst2 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### stsb [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### wnli [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Data Fields The data fields are the same among all splits. #### ax - `premise`: a `string` feature. - `hypothesis`: a `string` feature. - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2). - `idx`: a `int32` feature. #### cola - `sentence`: a `string` feature. - `label`: a classification label, with possible values including `unacceptable` (0), `acceptable` (1). - `idx`: a `int32` feature. #### mnli - `premise`: a `string` feature. - `hypothesis`: a `string` feature. - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2). - `idx`: a `int32` feature. #### mnli_matched - `premise`: a `string` feature. - `hypothesis`: a `string` feature. 
- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2). - `idx`: a `int32` feature. #### mnli_mismatched - `premise`: a `string` feature. - `hypothesis`: a `string` feature. - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2). - `idx`: a `int32` feature. #### mrpc [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### qnli [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### qqp [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### rte [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### sst2 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### stsb [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### wnli [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Data Splits #### ax | |test| |---|---:| |ax |1104| #### cola | |train|validation|test| |----|----:|---------:|---:| |cola| 8551| 1043|1063| #### mnli | |train |validation_matched|validation_mismatched|test_matched|test_mismatched| |----|-----:|-----------------:|--------------------:|-----------:|--------------:| |mnli|392702| 9815| 9832| 9796| 9847| #### mnli_matched | |validation|test| |------------|---------:|---:| |mnli_matched| 9815|9796| #### mnli_mismatched | |validation|test| |---------------|---------:|---:| |mnli_mismatched| 9832|9847| #### mrpc [More Information 
Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### qnli [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### qqp [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### rte [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### sst2 [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### stsb [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### wnli [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Dataset Creation ### Curation Rationale [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Source Data #### Initial Data Collection and Normalization [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### Who are the source language producers? [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Annotations #### Annotation process [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) #### Who are the annotators? 
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Personal and Sensitive Information [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Discussion of Biases [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Other Known Limitations [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Additional Information ### Dataset Curators [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Licensing Information [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Citation Information ``` @article{warstadt2018neural, title={Neural Network Acceptability Judgments}, author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R}, journal={arXiv preprint arXiv:1805.12471}, year={2018} } @inproceedings{wang2019glue, title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding}, author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.}, note={In the Proceedings of ICLR.}, year={2019} } Note that each GLUE dataset has its own citation. Please see the source to see the correct citation for each contained dataset. 
``` ### Contributions Thanks to [@patpizio](https://github.com/patpizio), [@jeswan](https://github.com/jeswan), [@thomwolf](https://github.com/thomwolf), [@patrickvonplaten](https://github.com/patrickvonplaten), [@mariamabarham](https://github.com/mariamabarham) for adding this dataset.
EricPeter
null
null
null
false
1
false
EricPeter/comments
2022-08-31T22:49:02.000Z
null
false
7146c03d31dcc036af4e2b78631a3ba1bd10b883
[]
[ "license:cc0-1.0" ]
https://huggingface.co/datasets/EricPeter/comments/resolve/main/README.md
--- license: cc0-1.0 ---
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-c9381c-14936084
2022-08-31T23:16:12.000Z
null
false
fdf89d9ab61732bcb253768750a35dcf7bba9a9e
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-c9381c-14936084/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: ptnv-s/biobert_squad2_cased-finetuned-squad metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: ptnv-s/biobert_squad2_cased-finetuned-squad * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-c9381c-14936085
2022-08-31T23:49:22.000Z
null
false
f73936e33d1c4ee021cb17b21e16ffff0ca95b80
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:squad_v2" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-squad_v2-squad_v2-c9381c-14936085/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - squad_v2 eval_info: task: extractive_question_answering model: gerardozq/biobert_v1.1_pubmed-finetuned-squad metrics: ['bertscore'] dataset_name: squad_v2 dataset_config: squad_v2 dataset_split: validation col_mapping: context: context question: question answers-text: answers.text answers-answer_start: answers.answer_start --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Question Answering * Model: gerardozq/biobert_v1.1_pubmed-finetuned-squad * Dataset: squad_v2 * Config: squad_v2 * Split: validation To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@nonchalant-nagavalli](https://huggingface.co/nonchalant-nagavalli) for evaluating this model.
autoevaluate
null
null
null
false
null
false
autoevaluate/autoeval-staging-eval-cnn_dailymail-3.0.0-d7ce16-14946086
2022-09-01T01:06:48.000Z
null
false
d5bf79983aff9a4a44953c5edf97a05393c8ab58
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:cnn_dailymail" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-cnn_dailymail-3.0.0-d7ce16-14946086/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - cnn_dailymail eval_info: task: summarization model: facebook/bart-large-cnn metrics: ['mse'] dataset_name: cnn_dailymail dataset_config: 3.0.0 dataset_split: test col_mapping: text: article target: highlights --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: facebook/bart-large-cnn * Dataset: cnn_dailymail * Config: 3.0.0 * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@samuelallen123](https://huggingface.co/samuelallen123) for evaluating this model.
Chr0my
null
null
null
false
2
false
Chr0my/Epidemic_sounds
2022-09-01T01:19:57.000Z
null
false
8591fdc2d9f94cfcd336feedb3002b0fdbc1f3d8
[]
[ "license:mit" ]
https://huggingface.co/datasets/Chr0my/Epidemic_sounds/resolve/main/README.md
--- license: mit ---
ElKulako
null
null
null
false
1
false
ElKulako/cryptobert-posttrain
2022-09-01T04:22:42.000Z
null
false
19c35918209a49548c54478695bbe6b8f0dc758e
[]
[ "license:afl-3.0" ]
https://huggingface.co/datasets/ElKulako/cryptobert-posttrain/resolve/main/README.md
--- license: afl-3.0 --- This is the dataset used to post-train the [BERTweet](https://huggingface.co/cardiffnlp/twitter-roberta-base) language model on a Masked Language Modeling (MLM) task, resulting in the [CryptoBERT](https://huggingface.co/ElKulako/cryptobert) language model. The dataset contains 3.207 million unique posts from the language domain of cryptocurrency-related social media text. The dataset contains 1.865 million StockTwits posts, 496 thousand tweets, 172 thousand Reddit comments and 664 thousand Telegram messages.
nanom
null
@dataset{jose_canete_2019_3247731, author = {José Cañete}, title = {Compilation of Large Spanish Unannotated Corpora}, month = may, year = 2019, publisher = {Zenodo}, doi = {10.5281/zenodo.3247731}, url = {https://doi.org/10.5281/zenodo.3247731} }
null
false
1
false
nanom/spanish3bwc
2022-09-05T21:03:49.000Z
null
false
2eb9b928c3a7cd9a92918353e0453b2c9d1a512a
[]
[ "language:es", "multilinguality:monolingual", "license:mit" ]
https://huggingface.co/datasets/nanom/spanish3bwc/resolve/main/README.md
--- language: - 'es' multilinguality: - monolingual pretty_name: "Unannotated Spanish 3 Billion Words Corpora" license: - mit --- # Dataset Card for Unannotated Spanish 3 Billion Words Corpora ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Languages](#languages) - [Source Data](#source-data) - [Data Subset](#data-subset) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Repository:** https://github.com/josecannete/spanish-corpora - **Paper:** https://users.dcc.uchile.cl/~jperez/papers/pml4dc2020.pdf ### Dataset Summary * Number of lines: 300904000 (300M) * Number of tokens: 2996016962 (3B) * Number of chars: 18431160978 (18.4B) ### Languages * Spanish ### Source Data * Available to download here: [Zenodo](https://doi.org/10.5281/zenodo.3247731) ### Data Subset * Spanish Wikis: Wich include Wikipedia, Wikinews, Wikiquotes and more. These were first processed with wikiextractor (https://github.com/josecannete/wikiextractorforBERT) using the wikis dump of 20/04/2019. 
* ParaCrawl: Spanish portion of ParaCrawl (http://opus.nlpl.eu/ParaCrawl.php) * EUBookshop: Spanish portion of EUBookshop (http://opus.nlpl.eu/EUbookshop.php) * MultiUN: Spanish portion of MultiUN (http://opus.nlpl.eu/MultiUN.php) * OpenSubtitles: Spanish portion of OpenSubtitles2018 (http://opus.nlpl.eu/OpenSubtitles-v2018.php) * DGC: Spanish portion of DGT (http://opus.nlpl.eu/DGT.php) * DOGC: Spanish portion of DOGC (http://opus.nlpl.eu/DOGC.php) * ECB: Spanish portion of ECB (http://opus.nlpl.eu/ECB.php) * EMEA: Spanish portion of EMEA (http://opus.nlpl.eu/EMEA.php) * Europarl: Spanish portion of Europarl (http://opus.nlpl.eu/Europarl.php) * GlobalVoices: Spanish portion of GlobalVoices (http://opus.nlpl.eu/GlobalVoices.php) * JRC: Spanish portion of JRC (http://opus.nlpl.eu/JRC-Acquis.php) * News-Commentary11: Spanish portion of NCv11 (http://opus.nlpl.eu/News-Commentary-v11.php) * TED: Spanish portion of TED (http://opus.nlpl.eu/TED2013.php) * UN: Spanish portion of UN (http://opus.nlpl.eu/UN.php) ## Additional Information ### Licensing Information * [MIT Licence](https://github.com/josecannete/spanish-corpora/blob/master/LICENSE) ### Citation Information ``` @dataset{jose_canete_2019_3247731, author = {José Cañete}, title = {Compilation of Large Spanish Unannotated Corpora}, month = may, year = 2019, publisher = {Zenodo}, doi = {10.5281/zenodo.3247731}, url = {https://doi.org/10.5281/zenodo.3247731} } @inproceedings{CaneteCFP2020, title={Spanish Pre-Trained BERT Model and Evaluation Data}, author={Cañete, José and Chaperon, Gabriel and Fuentes, Rodrigo and Ho, Jou-Hui and Kang, Hojin and Pérez, Jorge}, booktitle={PML4DC at ICLR 2020}, year={2020} } ```
Exterus
null
null
null
false
1
false
Exterus/Language
2022-09-01T12:33:41.000Z
null
false
6c66817025509e853c1c7f3ea268f9fed96e240c
[]
[ "license:other" ]
https://huggingface.co/datasets/Exterus/Language/resolve/main/README.md
--- license: other ---
mteb
null
@article{muennighoff2022mteb, doi = {10.48550/ARXIV.2210.07316}, url = {https://arxiv.org/abs/2210.07316}, author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Lo{\"\i}c and Reimers, Nils}, title = {MTEB: Massive Text Embedding Benchmark}, publisher = {arXiv}, journal={arXiv preprint arXiv:2210.07316}, year = {2022} }
Results on MTEB
false
847
false
mteb/results
2022-11-05T16:41:14.000Z
null
false
5739c153726064a641b8f92526059e633a0841bd
[]
[ "benchmark:mteb", "type:evaluation", "submission_name:MTEB" ]
https://huggingface.co/datasets/mteb/results/resolve/main/README.md
--- benchmark: mteb type: evaluation submission_name: MTEB ---
climatebert
null
null
null
false
16
false
climatebert/environmental_claims
2022-09-02T09:12:00.000Z
null
false
7752b2a4fa0fcfe4529e1ff76d6be5db2c8637ce
[]
[ "arxiv:2209.00507", "license:cc-by-nc-sa-4.0", "annotations_creators:expert-generated", "language_creators:found", "language:en", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "task_categories:text-classification", "task_ids:text-classification" ]
https://huggingface.co/datasets/climatebert/environmental_claims/resolve/main/README.md
--- license: cc-by-nc-sa-4.0 annotations_creators: - expert-generated language_creators: - found language: - en multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - text-classification pretty_name: EnvironmentalClaims --- # Dataset Card for environmental_claims ## Dataset Description - **Homepage:** [climatebert.ai](https://climatebert.ai) - **Repository:** - **Paper:** [arxiv.org/abs/2209.00507](https://arxiv.org/abs/2209.00507) - **Leaderboard:** - **Point of Contact:** [Dominik Stammbach](mailto:dominsta@ethz.ch) ### Dataset Summary We introduce an expert-annotated dataset for detecting real-world environmental claims made by listed companies. ### Supported Tasks and Leaderboards The dataset supports a binary classification task of whether a given sentence is an environmental claim or not. ### Languages The text in the dataset is in English. ## Dataset Structure ### Data Instances ``` { "text": "It will enable E.ON to acquire and leverage a comprehensive understanding of the transfor- mation of the energy system and the interplay between the individual submarkets in regional and local energy supply sys- tems.", "label": 0 } ``` ### Data Fields - text: a sentence extracted from corporate annual reports, sustainability reports and earning calls transcripts - label: the label (0 -> no environmental claim, 1 -> environmental claim) ### Data Splits The dataset is split into: - train: 2,400 - validation: 300 - test: 300 ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization Our dataset contains environmental claims by firms, often in the financial domain. We collect text from corporate annual reports, sustainability reports, and earning calls transcripts. For more information regarding our sample selection, please refer to Appendix B of our paper, which is provided for [citation](#citation-information). 
#### Who are the source language producers? Mainly large listed companies. ### Annotations #### Annotation process For more information on our annotation process and annotation guidelines, please refer to Appendix C of our paper, which is provided for [citation](#citation-information). #### Who are the annotators? The authors and students at University of Zurich with majors in finance and sustainable finance. ### Personal and Sensitive Information Since our text sources contain public information, no personal and sensitive information should be included. ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators - Dominik Stammbach - Nicolas Webersinke - Julia Anna Bingler - Mathias Kraus - Markus Leippold ### Licensing Information This dataset is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International license (cc-by-nc-sa-4.0). To view a copy of this license, visit [creativecommons.org/licenses/by-nc-sa/4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/). If you are interested in commercial use of the dataset, please contact the ClimateBert team at [hello@climatebert.ai](mailto:hello@climatebert.ai). ### Citation Information ```bibtex @misc{stammbach2022environmentalclaims, title = {A Dataset for Detecting Real-World Environmental Claims}, author = {Stammbach, Dominik and Webersinke, Nicolas and Bingler, Julia Anna and Kraus, Mathias and Leippold, Markus}, year = {2022}, doi = {10.48550/ARXIV.2209.00507}, url = {https://arxiv.org/abs/2209.00507}, publisher = {arXiv}, } ``` ### Contributions Thanks to [@webersni](https://github.com/webersni) for adding this dataset.
cardiffnlp
null
@inproceedings{dimosthenis-etal-2022-twitter, title = "{T}witter {T}opic {C}lassification", author = "Antypas, Dimosthenis and Ushio, Asahi and Camacho-Collados, Jose and Neves, Leonardo and Silva, Vitor and Barbieri, Francesco", booktitle = "Proceedings of the 29th International Conference on Computational Linguistics", month = oct, year = "2022", address = "Gyeongju, Republic of Korea", publisher = "International Committee on Computational Linguistics" }
[TweetTopic](https://arxiv.org/abs/2209.09824)
false
326
false
cardiffnlp/tweet_topic_multi
2022-09-30T11:53:20.000Z
null
false
8b720cddf2bde9b9201225d2675a467cb3d9e6d7
[]
[ "arxiv:2209.09824", "language:en", "license:other", "multilinguality:monolingual", "size_categories:1k<10K", "task_categories:text-classification", "task_ids:sentiment-classification" ]
https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi/resolve/main/README.md
--- language: - en license: - other multilinguality: - monolingual size_categories: - 1k<10K task_categories: - text-classification task_ids: - sentiment-classification pretty_name: TweetTopicSingle --- # Dataset Card for "cardiffnlp/tweet_topic_multi" ## Dataset Description - **Paper:** [https://arxiv.org/abs/2209.09824](https://arxiv.org/abs/2209.09824) - **Dataset:** Tweet Topic Dataset - **Domain:** Twitter - **Number of Class:** 19 ### Dataset Summary This is the official repository of TweetTopic (["Twitter Topic Classification , COLING main conference 2022"](https://arxiv.org/abs/2209.09824)), a topic classification dataset on Twitter with 19 labels. Each instance of TweetTopic comes with a timestamp which distributes from September 2019 to August 2021. See [cardiffnlp/tweet_topic_single](https://huggingface.co/datasets/cardiffnlp/tweet_topic_single) for single label version of TweetTopic. The tweet collection used in TweetTopic is same as what used in [TweetNER7](https://huggingface.co/datasets/tner/tweetner7). The dataset is integrated in [TweetNLP](https://tweetnlp.org/) too. ### Preprocessing We pre-process tweets before the annotation to normalize some artifacts, converting URLs into a special token `{{URL}}` and non-verified usernames into `{{USERNAME}}`. For verified usernames, we replace its display name (or account name) with symbols `{@}`. For example, a tweet ``` Get the all-analog Classic Vinyl Edition of "Takin' Off" Album from @herbiehancock via @bluenoterecords link below: http://bluenote.lnk.to/AlbumOfTheWeek ``` is transformed into the following text. ``` Get the all-analog Classic Vinyl Edition of "Takin' Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below: {{URL}} ``` A simple function to format tweet follows below. 
```python import re from urlextract import URLExtract extractor = URLExtract() def format_tweet(tweet): # mask web urls urls = extractor.find_urls(tweet) for url in urls: tweet = tweet.replace(url, "{{URL}}") # format twitter account tweet = re.sub(r"\b(\s*)(@[\S]+)\b", r'\1{\2@}', tweet) return tweet target = """Get the all-analog Classic Vinyl Edition of "Takin' Off" Album from @herbiehancock via @bluenoterecords link below: http://bluenote.lnk.to/AlbumOfTheWeek""" target_format = format_tweet(target) print(target_format) 'Get the all-analog Classic Vinyl Edition of "Takin\' Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below: {{URL}}' ``` ### Data Splits | split | number of texts | description | |:------------------------|-----:|------:| | test_2020 | 573 | test dataset from September 2019 to August 2020 | | test_2021 | 1679 | test dataset from September 2020 to August 2021 | | train_2020 | 4585 | training dataset from September 2019 to August 2020 | | train_2021 | 1505 | training dataset from September 2020 to August 2021 | | train_all | 6090 | combined training dataset of `train_2020` and `train_2021` | | validation_2020 | 573 | validation dataset from September 2019 to August 2020 | | validation_2021 | 188 | validation dataset from September 2020 to August 2021 | | train_random | 4564 | randomly sampled training dataset with the same size as `train_2020` from `train_all` | | validation_random | 573 | randomly sampled training dataset with the same size as `validation_2020` from `validation_all` | | test_coling2022_random | 5536 | random split used in the COLING 2022 paper | | train_coling2022_random | 5731 | random split used in the COLING 2022 paper | | test_coling2022 | 5536 | temporal split used in the COLING 2022 paper | | train_coling2022 | 5731 | temporal split used in the COLING 2022 paper | For the temporal-shift setting, model should be trained on `train_2020` with `validation_2020` and evaluate on `test_2021`. 
In general, model would be trained on `train_all`, the most representative training set with `validation_2021` and evaluate on `test_2021`. **IMPORTANT NOTE:** To get a result that is comparable with the results of the COLING 2022 Tweet Topic paper, please use `train_coling2022` and `test_coling2022` for temporal-shift, and `train_coling2022_random` and `test_coling2022_random` fir random split (the coling2022 split does not have validation set). ### Models | model | training data | F1 | F1 (macro) | Accuracy | |:----------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------|---------:|-------------:|-----------:| | [cardiffnlp/roberta-large-tweet-topic-multi-all](https://huggingface.co/cardiffnlp/roberta-large-tweet-topic-multi-all) | all (2020 + 2021) | 0.763104 | 0.620257 | 0.536629 | | [cardiffnlp/roberta-base-tweet-topic-multi-all](https://huggingface.co/cardiffnlp/roberta-base-tweet-topic-multi-all) | all (2020 + 2021) | 0.751814 | 0.600782 | 0.531864 | | [cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-multi-all](https://huggingface.co/cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-multi-all) | all (2020 + 2021) | 0.762513 | 0.603533 | 0.547945 | | [cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-multi-all](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-multi-all) | all (2020 + 2021) | 0.759917 | 0.59901 | 0.536033 | | [cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-multi-all](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-multi-all) | all (2020 + 2021) | 0.764767 | 0.618702 | 0.548541 | | [cardiffnlp/roberta-large-tweet-topic-multi-2020](https://huggingface.co/cardiffnlp/roberta-large-tweet-topic-multi-2020) | 2020 only | 0.732366 | 0.579456 | 0.493746 | | 
[cardiffnlp/roberta-base-tweet-topic-multi-2020](https://huggingface.co/cardiffnlp/roberta-base-tweet-topic-multi-2020) | 2020 only | 0.725229 | 0.561261 | 0.499107 | | [cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-multi-2020](https://huggingface.co/cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-multi-2020) | 2020 only | 0.73671 | 0.565624 | 0.513401 | | [cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-multi-2020](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-multi-2020) | 2020 only | 0.729446 | 0.534799 | 0.50268 | | [cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-multi-2020](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-multi-2020) | 2020 only | 0.731106 | 0.532141 | 0.509827 | Model fine-tuning script can be found [here](https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi/blob/main/lm_finetuning.py). ## Dataset Structure ### Data Instances An example of `train` looks as follows. ```python { "date": "2021-03-07", "text": "The latest The Movie theater Daily! {{URL}} Thanks to {{USERNAME}} {{USERNAME}} {{USERNAME}} #lunchtimeread #amc1000", "id": "1368464923370676231", "label": [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "label_name": ["film_tv_&_video"] } ``` ### Label ID The label2id dictionary can be found at [here](https://huggingface.co/datasets/tner/tweet_topic_multi/raw/main/dataset/label.multi.json). 
```python { "arts_&_culture": 0, "business_&_entrepreneurs": 1, "celebrity_&_pop_culture": 2, "diaries_&_daily_life": 3, "family": 4, "fashion_&_style": 5, "film_tv_&_video": 6, "fitness_&_health": 7, "food_&_dining": 8, "gaming": 9, "learning_&_educational": 10, "music": 11, "news_&_social_concern": 12, "other_hobbies": 13, "relationships": 14, "science_&_technology": 15, "sports": 16, "travel_&_adventure": 17, "youth_&_student_life": 18 } ``` ### Citation Information ``` @inproceedings{dimosthenis-etal-2022-twitter, title = "{T}witter {T}opic {C}lassification", author = "Antypas, Dimosthenis and Ushio, Asahi and Camacho-Collados, Jose and Neves, Leonardo and Silva, Vitor and Barbieri, Francesco", booktitle = "Proceedings of the 29th International Conference on Computational Linguistics", month = oct, year = "2022", address = "Gyeongju, Republic of Korea", publisher = "International Committee on Computational Linguistics" } ```
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-emotion-default-139135-14996090
2022-09-01T15:39:48.000Z
null
false
2e11493c1b92c66b3d718b39d13d21c0bcbab1ba
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:emotion" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-emotion-default-139135-14996090/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - emotion eval_info: task: multi_class_classification model: bhadresh-savani/roberta-base-emotion metrics: ['roc_auc', 'mae'] dataset_name: emotion dataset_config: default dataset_split: test col_mapping: text: text target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: bhadresh-savani/roberta-base-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@gmoney](https://huggingface.co/gmoney) for evaluating this model.
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-emotion-default-139135-14996091
2022-09-01T15:39:53.000Z
null
false
aff1661b05d3101c728c5383a9c84111d2e1349f
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:emotion" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-emotion-default-139135-14996091/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - emotion eval_info: task: multi_class_classification model: ericntay/bert-finetuned-emotion metrics: ['roc_auc', 'mae'] dataset_name: emotion dataset_config: default dataset_split: test col_mapping: text: text target: label --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Multi-class Text Classification * Model: ericntay/bert-finetuned-emotion * Dataset: emotion * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@gmoney](https://huggingface.co/gmoney) for evaluating this model.
BAJIRAO
null
null
null
false
1
false
BAJIRAO/spam_data
2022-09-01T20:08:50.000Z
null
false
532014b9d4a1dd5c658db790758698c0810d9793
[]
[]
https://huggingface.co/datasets/BAJIRAO/spam_data/resolve/main/README.md
zeroshot
null
null
null
false
6
false
zeroshot/twitter-financial-news-sentiment
2022-09-07T18:49:28.000Z
null
false
b02e6b2a4decd7514b454f91e35399ab9631c9a7
[]
[ "annotations_creators:other", "language:en", "language_creators:other", "license:mit", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "tags:twitter", "tags:finance", "tags:markets", "tags:stocks", "tags:wallstreet", "tags:quant", "tags:hedgefunds",...
https://huggingface.co/datasets/zeroshot/twitter-financial-news-sentiment/resolve/main/README.md
--- annotations_creators: - other language: - en language_creators: - other license: - mit multilinguality: - monolingual pretty_name: twitter financial news size_categories: - 10K<n<100K source_datasets: - original tags: - twitter - finance - markets - stocks - wallstreet - quant - hedgefunds - markets task_categories: - text-classification task_ids: - multi-class-classification --- ### Dataset Description The Twitter Financial News dataset is an English-language dataset containing an annotated corpus of finance-related tweets. This dataset is used to classify finance-related tweets for their sentiment. 1. The dataset holds 11,932 documents annotated with 3 labels: ```python sentiments = { "LABEL_0": "Bearish", "LABEL_1": "Bullish", "LABEL_2": "Neutral" } ``` The data was collected using the Twitter API. The current dataset supports the multi-class classification task. ### Task: Sentiment Analysis # Data Splits There are 2 splits: train and validation. Below are the statistics: | Dataset Split | Number of Instances in Split | | ------------- | ------------------------------------------- | | Train | 9,938 | | Validation | 2,486 | # Licensing Information The Twitter Financial Dataset (sentiment) version 1.0.0 is released under the MIT License.
Lubub
null
null
null
false
1
false
Lubub/locutorxxinews
2022-09-01T23:56:34.000Z
null
false
bacd60959e6e00287ef74c0ebf49fba20dce61b9
[]
[ "license:apache-2.0" ]
https://huggingface.co/datasets/Lubub/locutorxxinews/resolve/main/README.md
--- license: apache-2.0 ---
Lubub
null
null
null
false
1
false
Lubub/testexxi
2022-09-02T00:05:12.000Z
null
false
dc06182a52cd5bbb6d30a5e2e62a1406dec583dc
[]
[ "license:apache-2.0" ]
https://huggingface.co/datasets/Lubub/testexxi/resolve/main/README.md
--- license: apache-2.0 ---
cardiffnlp
null
@inproceedings{dimosthenis-etal-2022-twitter, title = "{T}witter {T}opic {C}lassification", author = "Antypas, Dimosthenis and Ushio, Asahi and Camacho-Collados, Jose and Neves, Leonardo and Silva, Vitor and Barbieri, Francesco", booktitle = "Proceedings of the 29th International Conference on Computational Linguistics", month = oct, year = "2022", address = "Gyeongju, Republic of Korea", publisher = "International Committee on Computational Linguistics" }
[TweetTopic](https://arxiv.org/abs/2209.09824)
false
15
false
cardiffnlp/tweet_topic_single
2022-09-30T21:03:35.000Z
null
false
cb817076c21a18ec36b1fcfab365b2647b0fe43e
[]
[ "arxiv:2209.09824", "language:en", "license:other", "multilinguality:monolingual", "size_categories:1k<10K", "task_categories:text-classification", "task_ids:sentiment-classification" ]
https://huggingface.co/datasets/cardiffnlp/tweet_topic_single/resolve/main/README.md
--- language: - en license: - other multilinguality: - monolingual size_categories: - 1k<10K task_categories: - text-classification task_ids: - sentiment-classification pretty_name: TweetTopicSingle --- # Dataset Card for "cardiffnlp/tweet_topic_single" ## Dataset Description - **Paper:** [https://arxiv.org/abs/2209.09824](https://arxiv.org/abs/2209.09824) - **Dataset:** Tweet Topic Dataset - **Domain:** Twitter - **Number of Class:** 6 ### Dataset Summary This is the official repository of TweetTopic (["Twitter Topic Classification , COLING main conference 2022"](https://arxiv.org/abs/2209.09824)), a topic classification dataset on Twitter with 6 labels. Each instance of TweetTopic comes with a timestamp which distributes from September 2019 to August 2021. See [cardiffnlp/tweet_topic_multi](https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi) for multi label version of TweetTopic. The tweet collection used in TweetTopic is same as what used in [TweetNER7](https://huggingface.co/datasets/tner/tweetner7). The dataset is integrated in [TweetNLP](https://tweetnlp.org/) too. ### Preprocessing We pre-process tweets before the annotation to normalize some artifacts, converting URLs into a special token `{{URL}}` and non-verified usernames into `{{USERNAME}}`. For verified usernames, we replace its display name (or account name) with symbols `{@}`. For example, a tweet ``` Get the all-analog Classic Vinyl Edition of "Takin' Off" Album from @herbiehancock via @bluenoterecords link below: http://bluenote.lnk.to/AlbumOfTheWeek ``` is transformed into the following text. ``` Get the all-analog Classic Vinyl Edition of "Takin' Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below: {{URL}} ``` A simple function to format tweet follows below. 
```python import re from urlextract import URLExtract extractor = URLExtract() def format_tweet(tweet): # mask web urls urls = extractor.find_urls(tweet) for url in urls: tweet = tweet.replace(url, "{{URL}}") # format twitter account tweet = re.sub(r"\b(\s*)(@[\S]+)\b", r'\1{\2@}', tweet) return tweet target = """Get the all-analog Classic Vinyl Edition of "Takin' Off" Album from @herbiehancock via @bluenoterecords link below: http://bluenote.lnk.to/AlbumOfTheWeek""" target_format = format_tweet(target) print(target_format) 'Get the all-analog Classic Vinyl Edition of "Takin\' Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below: {{URL}}' ``` ### Data Splits | split | number of texts | description | |:------------------------|-----:|------:| | test_2020 | 376 | test dataset from September 2019 to August 2020 | | test_2021 | 1693 | test dataset from September 2020 to August 2021 | | train_2020 | 2858 | training dataset from September 2019 to August 2020 | | train_2021 | 1516 | training dataset from September 2020 to August 2021 | | train_all | 4374 | combined training dataset of `train_2020` and `train_2021` | | validation_2020 | 352 | validation dataset from September 2019 to August 2020 | | validation_2021 | 189 | validation dataset from September 2020 to August 2021 | | train_random | 2830 | randomly sampled training dataset with the same size as `train_2020` from `train_all` | | validation_random | 354 | randomly sampled training dataset with the same size as `validation_2020` from `validation_all` | | test_coling2022_random | 3399 | random split used in the COLING 2022 paper | | train_coling2022_random | 3598 | random split used in the COLING 2022 paper | | test_coling2022 | 3399 | temporal split used in the COLING 2022 paper | | train_coling2022 | 3598 | temporal split used in the COLING 2022 paper | For the temporal-shift setting, model should be trained on `train_2020` with `validation_2020` and evaluate on `test_2021`. 
In general, model would be trained on `train_all`, the most representative training set with `validation_2021` and evaluate on `test_2021`. **IMPORTANT NOTE:** To get a result that is comparable with the results of the COLING 2022 Tweet Topic paper, please use `train_coling2022` and `test_coling2022` for temporal-shift, and `train_coling2022_random` and `test_coling2022_random` fir random split (the coling2022 split does not have validation set). ### Models | model | training data | F1 | F1 (macro) | Accuracy | |:------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------|---------:|-------------:|-----------:| | [cardiffnlp/roberta-large-tweet-topic-single-all](https://huggingface.co/cardiffnlp/roberta-large-tweet-topic-single-all) | all (2020 + 2021) | 0.896043 | 0.800061 | 0.896043 | | [cardiffnlp/roberta-base-tweet-topic-single-all](https://huggingface.co/cardiffnlp/roberta-base-tweet-topic-single-all) | all (2020 + 2021) | 0.887773 | 0.79793 | 0.887773 | | [cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-single-all](https://huggingface.co/cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-single-all) | all (2020 + 2021) | 0.892499 | 0.774494 | 0.892499 | | [cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-single-all](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-single-all) | all (2020 + 2021) | 0.890136 | 0.776025 | 0.890136 | | [cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-single-all](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-single-all) | all (2020 + 2021) | 0.894861 | 0.800952 | 0.894861 | | [cardiffnlp/roberta-large-tweet-topic-single-2020](https://huggingface.co/cardiffnlp/roberta-large-tweet-topic-single-2020) | 2020 only | 0.878913 | 0.70565 | 0.878913 | | 
[cardiffnlp/roberta-base-tweet-topic-single-2020](https://huggingface.co/cardiffnlp/roberta-base-tweet-topic-single-2020) | 2020 only | 0.868281 | 0.729667 | 0.868281 | | [cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-single-2020](https://huggingface.co/cardiffnlp/twitter-roberta-base-2019-90m-tweet-topic-single-2020) | 2020 only | 0.882457 | 0.740187 | 0.882457 | | [cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-single-2020](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2020-tweet-topic-single-2020) | 2020 only | 0.87596 | 0.746275 | 0.87596 | | [cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-single-2020](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2021-tweet-topic-single-2020) | 2020 only | 0.877732 | 0.746119 | 0.877732 | Model fine-tuning script can be found [here](https://huggingface.co/datasets/cardiffnlp/tweet_topic_single/blob/main/lm_finetuning.py). ## Dataset Structure ### Data Instances An example of `train` looks as follows. ```python { "text": "Game day for {{USERNAME}} U18\u2019s against {{USERNAME}} U18\u2019s. Even though it\u2019s a \u2018home\u2019 game for the people that have settled in Mid Wales it\u2019s still a 4 hour round trip for us up to Colwyn Bay. Still enjoy it though!", "date": "2019-09-08", "label": 4, "id": "1170606779568463874", "label_name": "sports_&_gaming" } ``` ### Label ID The label2id dictionary can be found at [here](https://huggingface.co/datasets/tner/tweet_topic_single/raw/main/dataset/label.single.json). 
```python { "arts_&_culture": 0, "business_&_entrepreneurs": 1, "pop_culture": 2, "daily_life": 3, "sports_&_gaming": 4, "science_&_technology": 5 } ``` ### Citation Information ``` @inproceedings{dimosthenis-etal-2022-twitter, title = "{T}witter {T}opic {C}lassification", author = "Antypas, Dimosthenis and Ushio, Asahi and Camacho-Collados, Jose and Neves, Leonardo and Silva, Vitor and Barbieri, Francesco", booktitle = "Proceedings of the 29th International Conference on Computational Linguistics", month = oct, year = "2022", address = "Gyeongju, Republic of Korea", publisher = "International Committee on Computational Linguistics" } ```
xianbao
null
null
null
false
null
false
xianbao/test
2022-09-02T00:50:30.000Z
null
false
fd58a44fc0160dea934912d28c113b39279b92af
[]
[ "license:apache-2.0" ]
https://huggingface.co/datasets/xianbao/test/resolve/main/README.md
--- license: apache-2.0 ---
Chr0my
null
null
null
false
30
false
Chr0my/Epidemic_music
2022-09-02T02:25:43.000Z
null
false
3a80376302783b83edcba43d8ef53f49eadb0298
[]
[ "license:mit" ]
https://huggingface.co/datasets/Chr0my/Epidemic_music/resolve/main/README.md
--- license: mit ---
tobiaslee
null
null
null
false
1
false
tobiaslee/FiCLS
2022-09-02T03:14:32.000Z
null
false
69d51c85d30f6f0202c140ecdd40bd010027e59f
[]
[ "license:afl-3.0" ]
https://huggingface.co/datasets/tobiaslee/FiCLS/resolve/main/README.md
--- license: afl-3.0 ---
nid989
null
null
null
false
2
false
nid989/EssayFroum-Dataset
2022-09-02T04:45:37.000Z
null
false
73d805de8c0299677d1037085f4272949da330ef
[]
[ "license:apache-2.0" ]
https://huggingface.co/datasets/nid989/EssayFroum-Dataset/resolve/main/README.md
--- license: apache-2.0 ---
autoevaluate
null
null
null
false
1
false
autoevaluate/autoeval-staging-eval-xsum-default-21f5cd-15036097
2022-09-02T09:46:38.000Z
null
false
8b820b74765bc3a114dd3d1cbb344ed857bef73b
[]
[ "type:predictions", "tags:autotrain", "tags:evaluation", "datasets:xsum" ]
https://huggingface.co/datasets/autoevaluate/autoeval-staging-eval-xsum-default-21f5cd-15036097/resolve/main/README.md
--- type: predictions tags: - autotrain - evaluation datasets: - xsum eval_info: task: summarization model: sshleifer/distilbart-xsum-9-6 metrics: ['accuracy'] dataset_name: xsum dataset_config: default dataset_split: test col_mapping: text: document target: summary --- # Dataset Card for AutoTrain Evaluator This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset: * Task: Summarization * Model: sshleifer/distilbart-xsum-9-6 * Dataset: xsum * Config: default * Split: test To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator). ## Contributions Thanks to [@Rohil](https://huggingface.co/Rohil) for evaluating this model.
speech-seq2seq
null
@article{DBLP:journals/corr/abs-2106-06909, author = {Guoguo Chen and Shuzhou Chai and Guanbo Wang and Jiayu Du and Wei{-}Qiang Zhang and Chao Weng and Dan Su and Daniel Povey and Jan Trmal and Junbo Zhang and Mingjie Jin and Sanjeev Khudanpur and Shinji Watanabe and Shuaijiang Zhao and Wei Zou and Xiangang Li and Xuchen Yao and Yongqing Wang and Yujun Wang and Zhao You and Zhiyong Yan}, title = {GigaSpeech: An Evolving, Multi-domain {ASR} Corpus with 10, 000 Hours of Transcribed Audio}, journal = {CoRR}, volume = {abs/2106.06909}, year = {2021}, url = {https://arxiv.org/abs/2106.06909}, eprinttype = {arXiv}, eprint = {2106.06909}, timestamp = {Wed, 29 Dec 2021 14:29:26 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2106-06909.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} }
GigaSpeech is an evolving, multi-domain English speech recognition corpus with 10,000 hours of high quality labeled audio suitable for supervised training, and 40,000 hours of total audio suitable for semi-supervised and unsupervised training. Around 40,000 hours of transcribed audio is first collected from audiobooks, podcasts and YouTube, covering both read and spontaneous speaking styles, and a variety of topics, such as arts, science, sports, etc. A new forced alignment and segmentation pipeline is proposed to create sentence segments suitable for speech recognition training, and to filter out segments with low-quality transcription. For system training, GigaSpeech provides five subsets of different sizes, 10h, 250h, 1000h, 2500h, and 10000h. For our 10,000-hour XL training subset, we cap the word error rate at 4% during the filtering/validation stage, and for all our other smaller training subsets, we cap it at 0%. The DEV and TEST evaluation sets, on the other hand, are re-processed by professional human transcribers to ensure high transcription quality.
false
1
false
speech-seq2seq/ami
2022-09-06T23:03:11.000Z
null
false
7779c1f5ce465390fae18cef176c52cd371e8618
[]
[]
https://huggingface.co/datasets/speech-seq2seq/ami/resolve/main/README.md
# Unormalized AMI ```python from datasets import load_dataset ami = load_dataset("speech-seq2seq/ami", "ihm") ``` ## TODO(PVP) - explain exactly what normalization was accepted what wasn't
graphs-datasets
null
null
null
false
1
false
graphs-datasets/AIDS
2022-09-02T10:53:25.000Z
null
false
a2c431ddff4668df09beed0bd5450c77a87b7c27
[]
[ "arxiv:2007.08663", "licence:unknown" ]
https://huggingface.co/datasets/graphs-datasets/AIDS/resolve/main/README.md
--- licence: unknown --- # Dataset Card for AIDS ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](https://wiki.nci.nih.gov/display/NCIDTPdata/AIDS+Antiviral+Screen+Data)** - **Paper:**: (see citation) - **Leaderboard:**: [Papers with code leaderboard](https://paperswithcode.com/sota/graph-classification-on-aids) ### Dataset Summary The `AIDS` dataset is a dataset containing compounds checked for evidence of anti-HIV activity.. ### Supported Tasks and Leaderboards `AIDS` should be used for molecular classification, a binary classification task. The score used is accuracy with cross validation. 
## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | scale | medium | | #graphs | 1999 | | average #nodes | 15.5875 | | average #edges | 32.39 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: 1 x #labels): contains the number of labels available to predict (here 1, equal to zero or one) - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under license unknown. ### Citation Information ``` @inproceedings{Morris+2020, title={TUDataset: A collection of benchmark datasets for learning with graphs}, author={Christopher Morris and Nils M. Kriege and Franka Bause and Kristian Kersting and Petra Mutzel and Marion Neumann}, booktitle={ICML 2020 Workshop on Graph Representation Learning and Beyond (GRL+ 2020)}, archivePrefix={arXiv}, eprint={2007.08663}, url={www.graphlearning.io}, year={2020} } ``` ``` @InProceedings{10.1007/978-3-540-89689-0_33, author="Riesen, Kaspar and Bunke, Horst", editor="da Vitoria Lobo, Niels and Kasparis, Takis and Roli, Fabio and Kwok, James T. and Georgiopoulos, Michael and Anagnostopoulos, Georgios C. 
and Loog, Marco", title="IAM Graph Database Repository for Graph Based Pattern Recognition and Machine Learning", booktitle="Structural, Syntactic, and Statistical Pattern Recognition", year="2008", publisher="Springer Berlin Heidelberg", address="Berlin, Heidelberg", pages="287--297", abstract="In recent years the use of graph based representation has gained popularity in pattern recognition and machine learning. As a matter of fact, object representation by means of graphs has a number of advantages over feature vectors. Therefore, various algorithms for graph based machine learning have been proposed in the literature. However, in contrast with the emerging interest in graph based representation, a lack of standardized graph data sets for benchmarking can be observed. Common practice is that researchers use their own data sets, and this behavior cumbers the objective evaluation of the proposed methods. In order to make the different approaches in graph based machine learning better comparable, the present paper aims at introducing a repository of graph data sets and corresponding benchmarks, covering a wide spectrum of different applications.", isbn="978-3-540-89689-0" } ```
graphs-datasets
null
null
null
false
1
false
graphs-datasets/MD17-aspirin
2022-09-02T11:31:22.000Z
null
false
cd3e1fa2eda6616334e18e10fcbf0a93bd8ec174
[]
[ "arxiv:2007.08663", "licence:unknown" ]
https://huggingface.co/datasets/graphs-datasets/MD17-aspirin/resolve/main/README.md
--- licence: unknown --- # Dataset Card for aspirin ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](http://www.sgdml.org/#datasets)** - **Paper:**: (see citation) ### Dataset Summary The `aspirin` dataset is a molecular dynamics (MD) dataset. The total energy and force labels for each dataset were computed using the PBE+vdW-TS electronic structure method. All geometries are in Angstrom, energies and forces are given in kcal/mol and kcal/mol/A respectively. ### Supported Tasks and Leaderboards `aspirin` should be used for organic molecular property prediction, a regression task on 1 property. The score used is Mean absolute errors (in meV) for energy prediction. 
## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the full set dataset_pg_list = [Data(graph) for graph in dataset_hf["full"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | scale | big | | #graphs | 111762 | | average #nodes | 21.0 | | average #edges | 303.0447106824262 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under license unknown. ### Citation Information ``` @inproceedings{Morris+2020, title={TUDataset: A collection of benchmark datasets for learning with graphs}, author={Christopher Morris and Nils M. Kriege and Franka Bause and Kristian Kersting and Petra Mutzel and Marion Neumann}, booktitle={ICML 2020 Workshop on Graph Representation Learning and Beyond (GRL+ 2020)}, archivePrefix={arXiv}, eprint={2007.08663}, url={www.graphlearning.io}, year={2020} } ``` ``` @article{Chmiela_2017, doi = {10.1126/sciadv.1603015}, url = {https://doi.org/10.1126%2Fsciadv.1603015}, year = 2017, month = {may}, publisher = {American Association for the Advancement of Science ({AAAS})}, volume = {3}, number = {5}, author = {Stefan Chmiela and Alexandre Tkatchenko and Huziel E. Sauceda and Igor Poltavsky and Kristof T. 
Schütt and Klaus-Robert Müller}, title = {Machine learning of accurate energy-conserving molecular force fields}, journal = {Science Advances} } ```
graphs-datasets
null
null
null
false
null
false
graphs-datasets/MD17-benzene
2022-09-02T11:32:01.000Z
null
false
ae5b03a688d934c0226638734ad36f9131f88dff
[]
[ "arxiv:2007.08663", "licence:unknown" ]
https://huggingface.co/datasets/graphs-datasets/MD17-benzene/resolve/main/README.md
--- licence: unknown --- # Dataset Card for benzene ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](http://www.sgdml.org/#datasets)** - **Paper:**: (see citation) ### Dataset Summary The `benzene` dataset is molecular dynamics (MD) dataset. The total energy and force labels for each dataset were computed using the PBE+vdW-TS electronic structure method. All geometries are in Angstrom, energies and forces are given in kcal/mol and kcal/mol/A respectively. ### Supported Tasks and Leaderboards `benzene` should be used for organic molecular property prediction, a regression task on 1 property. The score used is Mean absolute errors (in meV) for energy prediction. 
## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | scale | big | | #graphs | 527983 | | average #nodes | 12.0 | | average #edges | 129.8848866632322 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under license unknown. ### Citation Information ``` @inproceedings{Morris+2020, title={TUDataset: A collection of benchmark datasets for learning with graphs}, author={Christopher Morris and Nils M. Kriege and Franka Bause and Kristian Kersting and Petra Mutzel and Marion Neumann}, booktitle={ICML 2020 Workshop on Graph Representation Learning and Beyond (GRL+ 2020)}, archivePrefix={arXiv}, eprint={2007.08663}, url={www.graphlearning.io}, year={2020} } ``` ``` @article{Chmiela_2017, doi = {10.1126/sciadv.1603015}, url = {https://doi.org/10.1126%2Fsciadv.1603015}, year = 2017, month = {may}, publisher = {American Association for the Advancement of Science ({AAAS})}, volume = {3}, number = {5}, author = {Stefan Chmiela and Alexandre Tkatchenko and Huziel E. 
Sauceda and Igor Poltavsky and Kristof T. Schütt and Klaus-Robert Müller}, title = {Machine learning of accurate energy-conserving molecular force fields}, journal = {Science Advances} } ```
graphs-datasets
null
null
null
false
1
false
graphs-datasets/MD17-ethanol
2022-09-02T11:36:14.000Z
null
false
b7f4194f478273b5e05bae2883fe8009f6d53fa8
[]
[ "arxiv:2007.08663", "licence:unknown" ]
https://huggingface.co/datasets/graphs-datasets/MD17-ethanol/resolve/main/README.md
--- licence: unknown --- # Dataset Card for ethanol ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](http://www.sgdml.org/#datasets)** - **Paper:**: (see citation) ### Dataset Summary The `ethanol` dataset is a molecular dynamics (MD) dataset. The total energy and force labels for each dataset were computed using the PBE+vdW-TS electronic structure method. All geometries are in Angstrom, energies and forces are given in kcal/mol and kcal/mol/A respectively. ### Supported Tasks and Leaderboards `ethanol` should be used for organic molecular property prediction, a regression task on 1 property. The score used is Mean absolute errors (in meV) for energy prediction. 
## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | scale | big | | #graphs | 455092 | | average #nodes | 9.0 | | average #edges | 72.0 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under license unknown. ### Citation Information ``` @inproceedings{Morris+2020, title={TUDataset: A collection of benchmark datasets for learning with graphs}, author={Christopher Morris and Nils M. Kriege and Franka Bause and Kristian Kersting and Petra Mutzel and Marion Neumann}, booktitle={ICML 2020 Workshop on Graph Representation Learning and Beyond (GRL+ 2020)}, archivePrefix={arXiv}, eprint={2007.08663}, url={www.graphlearning.io}, year={2020} } ``` ``` @article{Chmiela_2017, doi = {10.1126/sciadv.1603015}, url = {https://doi.org/10.1126%2Fsciadv.1603015}, year = 2017, month = {may}, publisher = {American Association for the Advancement of Science ({AAAS})}, volume = {3}, number = {5}, author = {Stefan Chmiela and Alexandre Tkatchenko and Huziel E. 
Sauceda and Igor Poltavsky and Kristof T. Schütt and Klaus-Robert Müller}, title = {Machine learning of accurate energy-conserving molecular force fields}, journal = {Science Advances} } ```
graphs-datasets
null
null
null
false
1
false
graphs-datasets/MD17-malonaldehyde
2022-09-02T12:14:41.000Z
null
false
6424deaa2dca5feabeac96c36b043429d4252312
[]
[ "arxiv:2007.08663", "licence:unknown" ]
https://huggingface.co/datasets/graphs-datasets/MD17-malonaldehyde/resolve/main/README.md
--- licence: unknown --- # Dataset Card for malonaldehyde ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](http://www.sgdml.org/#datasets)** - **Paper:**: (see citation) ### Dataset Summary The `malonaldehyde` dataset is a molecular dynamics (MD) dataset. The total energy and force labels for each dataset were computed using the PBE+vdW-TS electronic structure method. All geometries are in Angstrom, energies and forces are given in kcal/mol and kcal/mol/A respectively. ### Supported Tasks and Leaderboards `malonaldehyde` should be used for organic molecular property prediction, a regression task on 1 property. The score used is Mean absolute errors (in meV) for energy prediction. 
## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | scale | big | | #graphs | 893237 | | average #nodes | 9.0 | | average #edges | 71.99990148202383 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under license unknown. ### Citation Information ``` @inproceedings{Morris+2020, title={TUDataset: A collection of benchmark datasets for learning with graphs}, author={Christopher Morris and Nils M. Kriege and Franka Bause and Kristian Kersting and Petra Mutzel and Marion Neumann}, booktitle={ICML 2020 Workshop on Graph Representation Learning and Beyond (GRL+ 2020)}, archivePrefix={arXiv}, eprint={2007.08663}, url={www.graphlearning.io}, year={2020} } ``` ``` @article{Chmiela_2017, doi = {10.1126/sciadv.1603015}, url = {https://doi.org/10.1126%2Fsciadv.1603015}, year = 2017, month = {may}, publisher = {American Association for the Advancement of Science ({AAAS})}, volume = {3}, number = {5}, author = {Stefan Chmiela and Alexandre Tkatchenko and Huziel E. 
Sauceda and Igor Poltavsky and Kristof T. Schütt and Klaus-Robert Müller}, title = {Machine learning of accurate energy-conserving molecular force fields}, journal = {Science Advances} } ```
clips
null
null
null
false
8
false
clips/VaccinChatNL
2022-09-06T13:42:34.000Z
null
false
607e26b9ad9e36a1b1239aee8cc56b39210a6d27
[]
[ "annotations_creators:expert-generated", "language:nl", "language_creators:other", "license:cc-by-4.0", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "tags:covid-19", "tags:FAQ", "tags:question-answer pairs", "task_categories:text-classification", "task...
https://huggingface.co/datasets/clips/VaccinChatNL/resolve/main/README.md
--- annotations_creators: - expert-generated language: - nl language_creators: - other license: - cc-by-4.0 multilinguality: - monolingual pretty_name: VaccinChatNL size_categories: - 1K<n<10K source_datasets: - original tags: - covid-19 - FAQ - question-answer pairs task_categories: - text-classification task_ids: - intent-classification --- # Dataset Card for VaccinChatNL ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) <!-- - [Curation Rationale](#curation-rationale) --> <!-- - [Source Data](#source-data) --> - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) <!-- - [Social Impact of Dataset](#social-impact-of-dataset) --> - [Discussion of Biases](#discussion-of-biases) <!-- - [Other Known Limitations](#other-known-limitations) --> - [Additional Information](#additional-information) <!-- - [Dataset Curators](#dataset-curators) --> <!-- - [Licensing Information](#licensing-information) --> - [Citation Information](#citation-information) <!-- - [Contributions](#contributions) --> ## Dataset Description <!-- - **Homepage:** - **Repository:** - **Paper:** [To be added] - **Leaderboard:** --> - **Point of Contact:** [Jeska Buhmann](mailto:jeska.buhmann@uantwerpen.be) ### Dataset Summary VaccinChatNL is a Flemish Dutch FAQ dataset on the topic of COVID-19 vaccinations in Flanders. It consists of 12,833 user questions divided over 181 answer labels, thus providing large groups of semantically equivalent paraphrases (a many-to-one mapping of user questions to answer labels). 
VaccinChatNL is the first Dutch many-to-one FAQ dataset of this size. ### Supported Tasks and Leaderboards - 'text-classification': the dataset can be used to train a classification model for Dutch frequently asked questions on the topic of COVID-19 vaccination in Flanders. ### Languages Dutch (Flemish): the BCP-47 code for Dutch as generally spoken in Flanders (Belgium) is nl-BE. ## Dataset Structure ### Data Instances For each instance, there is a string for the user question and a string for the label of the annotated answer. See the [CLiPS / VaccinChatNL dataset viewer](https://huggingface.co/datasets/clips/VaccinChatNL/viewer/clips--VaccinChatNL/train). ``` {"sentence1": "Waar kan ik de bijsluiters van de vaccins vinden?", "label": "faq_ask_bijsluiter"} ``` ### Data Fields - `sentence1`: a string containing the user question - `label`: a string containing the name of the intent (the answer class) ### Data Splits The VaccinChatNL dataset has 3 splits: _train_, _valid_, and _test_. Below are the statistics for the dataset. | Dataset Split | Number of Labeled User Questions in Split | | ------------- | ------------------------------------------ | | Train | 10,542 | | Validation | 1,171 | | Test | 1,170 | ## Dataset Creation <!-- ### Curation Rationale [More Information Needed] --> <!-- ### Source Data [Perhaps a link to vaccinchat.be and some of the website that were used for information] --> <!-- #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] --> ### Annotations #### Annotation process Annotation was an iterative semi-automatic process. Starting from a very limited dataset with approximately 50 question-answer pairs (_sentence1-label_ pairs) a text classification model was trained and implemented in a publicly available chatbot. When the chatbot was used, the predicted labels for the new questions were checked and corrected if necessary. 
In addition, new answers were added to the dataset. After each round of corrections, the model was retrained on the updated dataset. This iterative approach led to the final dataset containing 12,883 user questions divided over 181 answer labels. #### Who are the annotators? The VaccinChatNL data were annotated by members and students of [CLiPS](https://www.uantwerpen.be/en/research-groups/clips/). All annotators have a background in Computational Linguistics. ### Personal and Sensitive Information The data are anonymized in the sense that a user question can never be traced back to a specific individual. ## Considerations for Using the Data <!-- ### Social Impact of Dataset [More Information Needed] --> ### Discussion of Biases This dataset contains real user questions, including a rather large section (7%) of out-of-domain questions or remarks (_label: nlu_fallback_). This class of user questions consists of ununderstandable questions, but also jokes and insulting remarks. <!-- ### Other Known Limitations [Perhaps some information of % of exact overlap between train and test set] --> ## Additional Information <!-- ### Dataset Curators [More Information Needed] --> <!-- ### Licensing Information [More Information Needed] --> ### Citation Information Will be added asap. <!-- ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset. -->
graphs-datasets
null
null
null
false
1
false
graphs-datasets/MD17-naphthalene
2022-09-02T11:54:50.000Z
null
false
c9d4ba90a4cf784feff32721f5c9072d49fed2a9
[]
[ "arxiv:2007.08663", "licence:unknown" ]
https://huggingface.co/datasets/graphs-datasets/MD17-naphthalene/resolve/main/README.md
--- licence: unknown --- # Dataset Card for naphthalene ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](http://www.sgdml.org/#datasets)** - **Paper:**: (see citation) ### Dataset Summary The `naphthalene` dataset is a molecular dynamics (MD) dataset. The total energy and force labels for each dataset were computed using the PBE+vdW-TS electronic structure method. All geometries are in Angstrom, energies and forces are given in kcal/mol and kcal/mol/A respectively. ### Supported Tasks and Leaderboards `naphthalene` should be used for organic molecular property prediction, a regression task on 1 property. The score used is Mean absolute errors (in meV) for energy prediction. 
## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | scale | big | | #graphs | 226255 | | average #nodes | 18.0 | | average #edges | 254.73246234354005 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under license unknown. ### Citation Information ``` @inproceedings{Morris+2020, title={TUDataset: A collection of benchmark datasets for learning with graphs}, author={Christopher Morris and Nils M. Kriege and Franka Bause and Kristian Kersting and Petra Mutzel and Marion Neumann}, booktitle={ICML 2020 Workshop on Graph Representation Learning and Beyond (GRL+ 2020)}, archivePrefix={arXiv}, eprint={2007.08663}, url={www.graphlearning.io}, year={2020} } ``` ``` @article{Chmiela_2017, doi = {10.1126/sciadv.1603015}, url = {https://doi.org/10.1126%2Fsciadv.1603015}, year = 2017, month = {may}, publisher = {American Association for the Advancement of Science ({AAAS})}, volume = {3}, number = {5}, author = {Stefan Chmiela and Alexandre Tkatchenko and Huziel E. 
Sauceda and Igor Poltavsky and Kristof T. Schütt and Klaus-Robert Müller}, title = {Machine learning of accurate energy-conserving molecular force fields}, journal = {Science Advances} } ```
graphs-datasets
null
null
null
false
null
false
graphs-datasets/MD17-salicylic_acid
2022-09-02T12:14:25.000Z
null
false
6800b85764a9982635ccc319c99edb38266b23c6
[]
[ "arxiv:2007.08663", "licence:unknown" ]
https://huggingface.co/datasets/graphs-datasets/MD17-salicylic_acid/resolve/main/README.md
--- licence: unknown --- # Dataset Card for salicylic_acid ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](http://www.sgdml.org/#datasets)** - **Paper:**: (see citation) ### Dataset Summary The `salicylic_acid` dataset is a molecular dynamics (MD) dataset. The total energy and force labels for each dataset were computed using the PBE+vdW-TS electronic structure method. All geometries are in Angstrom, energies and forces are given in kcal/mol and kcal/mol/A respectively. ### Supported Tasks and Leaderboards `salicylic_acid` should be used for organic molecular property prediction, a regression task on 1 property. The score used is Mean absolute errors (in meV) for energy prediction. 
## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | scale | big | | #graphs | 220231 | | average #nodes | 16.0 | | average #edges | 208.2681717461586 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under license unknown. ### Citation Information ``` @inproceedings{Morris+2020, title={TUDataset: A collection of benchmark datasets for learning with graphs}, author={Christopher Morris and Nils M. Kriege and Franka Bause and Kristian Kersting and Petra Mutzel and Marion Neumann}, booktitle={ICML 2020 Workshop on Graph Representation Learning and Beyond (GRL+ 2020)}, archivePrefix={arXiv}, eprint={2007.08663}, url={www.graphlearning.io}, year={2020} } ``` ``` @article{Chmiela_2017, doi = {10.1126/sciadv.1603015}, url = {https://doi.org/10.1126%2Fsciadv.1603015}, year = 2017, month = {may}, publisher = {American Association for the Advancement of Science ({AAAS})}, volume = {3}, number = {5}, author = {Stefan Chmiela and Alexandre Tkatchenko and Huziel E. 
Sauceda and Igor Poltavsky and Kristof T. Schütt and Klaus-Robert Müller}, title = {Machine learning of accurate energy-conserving molecular force fields}, journal = {Science Advances} } ```
graphs-datasets
null
null
null
false
1
false
graphs-datasets/MD17-toluene
2022-09-02T12:13:51.000Z
null
false
23193a565cb880fae47912ee75fe1e73a2886308
[]
[ "arxiv:2007.08663", "licence:unknown" ]
https://huggingface.co/datasets/graphs-datasets/MD17-toluene/resolve/main/README.md
--- licence: unknown --- # Dataset Card for toluene ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](http://www.sgdml.org/#datasets)** - **Paper:**: (see citation) ### Dataset Summary The `toluene` dataset is a molecular dynamics (MD) dataset. The total energy and force labels for each dataset were computed using the PBE+vdW-TS electronic structure method. All geometries are in Angstrom, energies and forces are given in kcal/mol and kcal/mol/A respectively. ### Supported Tasks and Leaderboards `toluene` should be used for organic molecular property prediction, a regression task on 1 property. The score used is Mean absolute errors (in meV) for energy prediction. 
## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | scale | big | | #graphs | 342790 | | average #nodes | 15.0 | | average #edges | 192.30698588936116 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: #labels): contains the number of labels available to predict - `num_nodes` (int): number of nodes of the graph ### Data Splits This data is not split, and should be used with cross validation. It comes from the PyGeometric version of the dataset. ## Additional Information ### Licensing Information The dataset has been released under license unknown. ### Citation Information ``` @inproceedings{Morris+2020, title={TUDataset: A collection of benchmark datasets for learning with graphs}, author={Christopher Morris and Nils M. Kriege and Franka Bause and Kristian Kersting and Petra Mutzel and Marion Neumann}, booktitle={ICML 2020 Workshop on Graph Representation Learning and Beyond (GRL+ 2020)}, archivePrefix={arXiv}, eprint={2007.08663}, url={www.graphlearning.io}, year={2020} } ``` ``` @article{Chmiela_2017, doi = {10.1126/sciadv.1603015}, url = {https://doi.org/10.1126%2Fsciadv.1603015}, year = 2017, month = {may}, publisher = {American Association for the Advancement of Science ({AAAS})}, volume = {3}, number = {5}, author = {Stefan Chmiela and Alexandre Tkatchenko and Huziel E. 
Sauceda and Igor Poltavsky and Kristof T. Schütt and Klaus-Robert Müller}, title = {Machine learning of accurate energy-conserving molecular force fields}, journal = {Science Advances} } ```
openclimatefix
null
null
null
false
1
false
openclimatefix/era5-reanalysis
2022-11-16T20:32:37.000Z
null
false
fe3d5cddf36a843472e077e83ff6b474ad028559
[]
[ "license:mit" ]
https://huggingface.co/datasets/openclimatefix/era5-reanalysis/resolve/main/README.md
--- license: mit --- This repo contains converted ECMWF ERA5 reanalysis files for both hourly atmospheric and land variables from Jan 2014 to October 2022. The data has been converted from the downloaded NetCDF files into Zarr using Xarray. Each file is 1 day of reanalysis, and so has 24 timesteps at a 0.25 degree grid resolution. All variables in the reanalysis are included here.
patrickfrank1
null
null
null
false
1
false
patrickfrank1/chess-pgn-games
2022-09-02T14:07:22.000Z
null
false
51204a59442e2b988dd4939ec1c89056f8c949b4
[]
[ "license:cc0-1.0" ]
https://huggingface.co/datasets/patrickfrank1/chess-pgn-games/resolve/main/README.md
--- license: cc0-1.0 ---
open-source-metrics
null
null
null
false
1
false
open-source-metrics/transformers-dependents
2022-11-09T17:21:28.000Z
null
false
f30a065014b9d5b82d13e7691efee55a56864b0f
[]
[ "license:apache-2.0", "tags:github-stars" ]
https://huggingface.co/datasets/open-source-metrics/transformers-dependents/resolve/main/README.md
--- license: apache-2.0 pretty_name: transformers metrics tags: - github-stars --- # transformers metrics This dataset contains metrics about the huggingface/transformers package. Number of repositories in the dataset: 27067 Number of packages in the dataset: 823 ## Package dependents This contains the data available in the [used-by](https://github.com/huggingface/transformers/network/dependents) tab on GitHub. ### Package & Repository star count This section shows the package and repository star count, individually. Package | Repository :-------------------------:|:-------------------------: ![transformers-dependent package star count](./transformers-dependents/resolve/main/transformers-dependent_package_star_count.png) | ![transformers-dependent repository star count](./transformers-dependents/resolve/main/transformers-dependent_repository_star_count.png) There are 65 packages that have more than 1000 stars. There are 140 repositories that have more than 1000 stars. The top 10 in each category are the following: *Package* [hankcs/HanLP](https://github.com/hankcs/HanLP): 26958 [fastai/fastai](https://github.com/fastai/fastai): 22774 [slundberg/shap](https://github.com/slundberg/shap): 17482 [fastai/fastbook](https://github.com/fastai/fastbook): 16052 [jina-ai/jina](https://github.com/jina-ai/jina): 16052 [huggingface/datasets](https://github.com/huggingface/datasets): 14101 [microsoft/recommenders](https://github.com/microsoft/recommenders): 14017 [borisdayma/dalle-mini](https://github.com/borisdayma/dalle-mini): 12872 [flairNLP/flair](https://github.com/flairNLP/flair): 12033 [allenai/allennlp](https://github.com/allenai/allennlp): 11198 *Repository* [huggingface/transformers](https://github.com/huggingface/transformers): 70487 [hankcs/HanLP](https://github.com/hankcs/HanLP): 26959 [ageron/handson-ml2](https://github.com/ageron/handson-ml2): 22886 [ray-project/ray](https://github.com/ray-project/ray): 22047 [jina-ai/jina](https://github.com/jina-ai/jina): 16052 
[RasaHQ/rasa](https://github.com/RasaHQ/rasa): 14844 [microsoft/recommenders](https://github.com/microsoft/recommenders): 14017 [deeplearning4j/deeplearning4j](https://github.com/deeplearning4j/deeplearning4j): 12617 [flairNLP/flair](https://github.com/flairNLP/flair): 12034 [allenai/allennlp](https://github.com/allenai/allennlp): 11198 ### Package & Repository fork count This section shows the package and repository fork count, individually. Package | Repository :-------------------------:|:-------------------------: ![transformers-dependent package forks count](./transformers-dependents/resolve/main/transformers-dependent_package_forks_count.png) | ![transformers-dependent repository forks count](./transformers-dependents/resolve/main/transformers-dependent_repository_forks_count.png) There are 55 packages that have more than 200 forks. There are 128 repositories that have more than 200 forks. The top 10 in each category are the following: *Package* [hankcs/HanLP](https://github.com/hankcs/HanLP): 7388 [fastai/fastai](https://github.com/fastai/fastai): 7297 [fastai/fastbook](https://github.com/fastai/fastbook): 6033 [slundberg/shap](https://github.com/slundberg/shap): 2646 [microsoft/recommenders](https://github.com/microsoft/recommenders): 2473 [allenai/allennlp](https://github.com/allenai/allennlp): 2218 [jina-ai/clip-as-service](https://github.com/jina-ai/clip-as-service): 1972 [jina-ai/jina](https://github.com/jina-ai/jina): 1967 [flairNLP/flair](https://github.com/flairNLP/flair): 1934 [huggingface/datasets](https://github.com/huggingface/datasets): 1841 *Repository* [huggingface/transformers](https://github.com/huggingface/transformers): 16159 [ageron/handson-ml2](https://github.com/ageron/handson-ml2): 11053 [hankcs/HanLP](https://github.com/hankcs/HanLP): 7389 [aws/amazon-sagemaker-examples](https://github.com/aws/amazon-sagemaker-examples): 5493 [deeplearning4j/deeplearning4j](https://github.com/deeplearning4j/deeplearning4j): 4933 
[RasaHQ/rasa](https://github.com/RasaHQ/rasa): 4106 [ray-project/ray](https://github.com/ray-project/ray): 3876 [apache/beam](https://github.com/apache/beam): 3648 [plotly/dash-sample-apps](https://github.com/plotly/dash-sample-apps): 2795 [microsoft/recommenders](https://github.com/microsoft/recommenders): 2473
kordless
null
null
null
false
null
false
kordless/steve
2022-09-02T13:24:32.000Z
null
false
501043cd0842281776bceb72effc91b75bca500c
[]
[]
https://huggingface.co/datasets/kordless/steve/resolve/main/README.md
lewtun
null
\
false
6
false
lewtun/music_classification
2022-09-02T17:08:02.000Z
null
false
30ec6a996b5554d1f4294ca4c6b2879926981728
[]
[ "license:unknown" ]
https://huggingface.co/datasets/lewtun/music_classification/resolve/main/README.md
--- license: unknown ---
Hemaxi
null
null
null
false
1
false
Hemaxi/3DU-Vec
2022-09-19T15:29:26.000Z
null
false
081009c3673cab07e4417b6442934d15d1004aa8
[]
[ "license:gpl-3.0" ]
https://huggingface.co/datasets/Hemaxi/3DU-Vec/resolve/main/README.md
--- license: gpl-3.0 --- This repository contains the dataset used in our [work](https://github.com/HemaxiN/3D_U-Vec).