Dataset schema (for string columns the range is value length in characters; for integer columns it is the value range):

| Column       | Type          | Min | Max   |
|--------------|---------------|-----|-------|
| id           | string        | 2   | 115   |
| lastModified | string        | 24  | 24    |
| tags         | list          |     |       |
| author       | string        | 2   | 42    |
| description  | string        | 0   | 6.67k |
| citation     | string        | 0   | 10.7k |
| likes        | int64         | 0   | 3.66k |
| downloads    | int64         | 0   | 8.89M |
| created      | timestamp[us] |     |       |
| card         | string        | 11  | 977k  |
| card_len     | int64         | 11  | 977k  |
| embeddings   | list          |     |       |
promptora11/data1
2023-10-18T09:35:33.000Z
[ "region:us" ]
promptora11
null
null
0
10
2023-10-18T09:35:31
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: input dtype: string - name: output dtype: string splits: - name: train num_bytes: 9403 num_examples: 112 download_size: 2356 dataset_size: 9403 --- # Dataset Card for "data1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
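Most of the auto-generated cards in this dump follow the pattern above: YAML front matter declaring a default config with a train split and string features. A minimal sketch of consuming such a record with the `datasets` library, using this record's repo id `promptora11/data1` and its declared `input`/`output` features (it assumes the repo is public and loadable as declared):

```python
from datasets import load_dataset

# Load the default config's train split declared in the YAML front matter above.
ds = load_dataset("promptora11/data1", split="train")

# Each row exposes the two string features named in dataset_info.
for row in ds.select(range(3)):
    print(row["input"], "->", row["output"])
```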
459
[ [ -0.044403076171875, -0.02447509765625, 0.0097808837890625, 0.0195770263671875, -0.0181884765625, -0.0034503936767578125, 0.030609130859375, -0.006931304931640625, 0.0670166015625, 0.0355224609375, -0.0714111328125, -0.056671142578125, -0.044921875, -0.018035...
peterbeamish/helm-charts-uniform
2023-10-19T01:14:36.000Z
[ "region:us" ]
peterbeamish
null
null
0
10
2023-10-18T18:45:59
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* dataset_info: features: - name: chart_name dtype: string - name: templates sequence: string - name: values dtype: string splits: - name: train num_bytes: 2972740 num_examples: 137 - name: test num_bytes: 3044774 num_examples: 138 download_size: 1781817 dataset_size: 6017514 --- # Dataset Card for "helm-charts-uniform" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
626
[ [ -0.039398193359375, -0.004955291748046875, 0.00853729248046875, 0.033477783203125, -0.0240020751953125, -0.002025604248046875, 0.01446533203125, -0.0238494873046875, 0.05499267578125, 0.040924072265625, -0.0540771484375, -0.07086181640625, -0.04327392578125, ...
YL95/REDL_LLM1_naive0
2023-10-18T19:32:04.000Z
[ "region:us" ]
YL95
null
null
0
10
2023-10-18T19:31:46
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
takojunior/tests
2023-10-19T02:00:02.000Z
[ "region:us" ]
takojunior
null
null
0
10
2023-10-19T01:56:59
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Kimata/GPT_Driver_dataset
2023-10-20T12:38:23.000Z
[ "region:us" ]
Kimata
null
null
0
10
2023-10-20T12:37:54
--- dataset_info: features: - name: messages list: - name: role dtype: string - name: content dtype: string splits: - name: train num_bytes: 49269209 num_examples: 23388 - name: test num_bytes: 10693369 num_examples: 5119 download_size: 7321936 dataset_size: 59962578 --- # Dataset Card for "GPT_Driver_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
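The `messages` feature above is a list of role/content pairs, i.e. the common chat-transcript layout. A short sketch of walking one transcript, assuming the repo loads directly with `datasets`:

```python
from datasets import load_dataset

ds = load_dataset("Kimata/GPT_Driver_dataset", split="train")

# Each example is a conversation: a list of {role, content} messages.
for message in ds[0]["messages"]:
    print(f"{message['role']}: {message['content'][:100]}")
```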
498
[ [ -0.044097900390625, -0.025115966796875, 0.031707763671875, 0.00936126708984375, -0.01480865478515625, -0.00923919677734375, 0.0192413330078125, -0.010986328125, 0.038421630859375, 0.01053619384765625, -0.05889892578125, -0.038818359375, -0.036773681640625, -...
shossain/merged-no-pad-text-16384
2023-10-21T06:10:21.000Z
[ "region:us" ]
shossain
null
null
0
10
2023-10-21T06:10:01
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 581129508 num_examples: 10486 download_size: 287686244 dataset_size: 581129508 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "merged-no-pad-text-16384" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
459
[ [ -0.053619384765625, -0.0226898193359375, 0.0208587646484375, 0.02838134765625, -0.034393310546875, 0.004833221435546875, 0.0130462646484375, -0.011688232421875, 0.0716552734375, 0.053436279296875, -0.05267333984375, -0.04815673828125, -0.045623779296875, -0....
dot-ammar/AR-dotted-mediumPlus
2023-10-24T02:32:35.000Z
[ "region:us" ]
dot-ammar
null
null
0
10
2023-10-21T22:18:57
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: clean dtype: string splits: - name: train num_bytes: 387187864 num_examples: 1625508 download_size: 214233397 dataset_size: 387187864 --- # Dataset Card for "AR-dotted-mediumPlus-arrow" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
464
[ [ -0.037078857421875, -0.0279693603515625, 0.005100250244140625, 0.00876617431640625, -0.020782470703125, 0.01251983642578125, 0.0205841064453125, -0.0122528076171875, 0.0716552734375, 0.0205841064453125, -0.05401611328125, -0.0596923828125, -0.048553466796875, ...
Phando/llava-filtered-cc3m-595k
2023-10-29T02:15:17.000Z
[ "region:us" ]
Phando
null
null
0
10
2023-10-22T09:31:05
Dataset transformed into the image-caption format from https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K
120
[ [ -0.0052490234375, -0.009765625, 0.0294189453125, 0.029083251953125, -0.060455322265625, -0.007450103759765625, -0.003887176513671875, -0.0179595947265625, 0.044464111328125, 0.0643310546875, -0.061126708984375, -0.030242919921875, -0.04248046875, 0.010360717...
AdapterOcean/physics_dataset_standardized_cluster_0_alpaca
2023-10-23T01:51:41.000Z
[ "region:us" ]
AdapterOcean
null
null
0
10
2023-10-22T18:30:30
--- dataset_info: features: - name: input dtype: string - name: output dtype: string splits: - name: train num_bytes: 4470624 num_examples: 1510 download_size: 0 dataset_size: 4470624 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "physics_dataset_standardized_cluster_0_alpaca" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
503
[ [ -0.041290283203125, -0.0253448486328125, 0.031951904296875, 0.0272674560546875, -0.0321044921875, -0.0088348388671875, 0.0369873046875, -0.01393890380859375, 0.08209228515625, 0.01427459716796875, -0.0478515625, -0.05718994140625, -0.0390625, -0.021942138671...
mauricioobgo/resume-qa-data-2023-10
2023-10-22T22:33:02.000Z
[ "region:us" ]
mauricioobgo
null
null
0
10
2023-10-22T22:32:57
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: context dtype: string - name: instruction dtype: string - name: category dtype: string - name: response dtype: string splits: - name: train num_bytes: 948965 num_examples: 260 download_size: 56207 dataset_size: 948965 --- # Dataset Card for "resume-qa-data-2023-10" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
562
[ [ -0.0321044921875, 0.00554656982421875, 0.031463623046875, 0.029693603515625, -0.0027332305908203125, 0.0072479248046875, 0.036285400390625, 0.00238037109375, 0.06109619140625, 0.037689208984375, -0.0635986328125, -0.050384521484375, -0.017181396484375, -0.00...
centroIA/MistralInstructScenarios
2023-10-22T23:18:11.000Z
[ "region:us" ]
centroIA
null
null
0
10
2023-10-22T23:18:09
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: input dtype: string - name: output dtype: string - name: text dtype: string splits: - name: train num_bytes: 2684487 num_examples: 967 download_size: 698243 dataset_size: 2684487 --- # Dataset Card for "MistralInstructScenarios" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
519
[ [ -0.041839599609375, -0.0104217529296875, 0.00760650634765625, 0.0263824462890625, -0.0137939453125, -0.0009150505065917969, 0.03277587890625, -0.01018524169921875, 0.05609130859375, 0.040618896484375, -0.059661865234375, -0.050140380859375, -0.0426025390625, ...
BEE-spoke-data/wikipedia-20230901.en-deduped
2023-10-24T22:55:47.000Z
[ "task_categories:text-generation", "task_categories:fill-mask", "task_categories:feature-extraction", "size_categories:1M<n<10M", "source_datasets:graelo/wikipedia", "language:en", "license:cc-by-sa-3.0", "wiki", "wikipedia", "pretrain", "region:us" ]
BEE-spoke-data
null
null
0
10
2023-10-23T02:31:02
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* - config_name: text-only data_files: - split: train path: text-only/train-* - split: validation path: text-only/validation-* - split: test path: text-only/test-* dataset_info: - config_name: default features: - name: id dtype: string - name: url dtype: string - name: title dtype: string - name: text dtype: string splits: - name: train num_bytes: 15368746858.779654 num_examples: 5673373 - name: validation num_bytes: 404439922.64724064 num_examples: 149299 - name: test num_bytes: 404442631.57310516 num_examples: 149300 download_size: 9703633440 dataset_size: 16177629413 - config_name: text-only features: - name: text dtype: string splits: - name: train num_bytes: 14834731398.280304 num_examples: 5673373 - name: validation num_bytes: 390386911.46022856 num_examples: 149299 - name: test num_bytes: 390389526.2594667 num_examples: 149300 download_size: 9374463601 dataset_size: 15615507835.999998 license: cc-by-sa-3.0 task_categories: - text-generation - fill-mask - feature-extraction language: - en tags: - wiki - wikipedia - pretrain size_categories: - 1M<n<10M source_datasets: graelo/wikipedia --- # wikipedia - 20230901.en - deduped > purpose: train with less data while maintaining (most) of the quality This is really more of a "high quality diverse sample" rather than _"we are trying to remove literal duplicate documents"_. Source dataset: [graelo/wikipedia](https://huggingface.co/datasets/graelo/wikipedia). ## configs ### `default` command: ```sh python -m text_dedup.minhash \ --path $ds_name \ --name $dataset_config \ --split $data_split \ --cache_dir "./cache" \ --output $out_dir \ --column $text_column \ --ngram 4 --threshold 0.6 \ --hash_func xxh3 --hash_bits 16 --num_perm 64 \ --batch_size 10000 ``` dedup: ```sh Fingerprinting... (num_proc=40): 100% 6705754/6705754 [06:57<00:00, 16063.27 examples/s] Iterating MinHashes...: 100% 671/671 [04:13<00:00, 2.65it/s] Clustering...: 100% 10/10 [00:21<00:00, 2.18s/it] Finding clusters... (num_proc=40): 100% 6705754/6705754 [06:38<00:00, 16839.42 examples/s] Filtering clusters... (num_proc=40): 100% 6705754/6705754 [02:25<00:00, 46058.39 examples/s] Saving the dataset (39/39 shards): 100% 5971972/5971972 [03:47<00:00, 26266.10 examples/s] [10/23/23 02:29:41] INFO Loading : 78.82s ``` result: ```python DatasetDict({ train: Dataset({ features: ['id', 'url', 'title', 'text'], num_rows: 5673373 }) validation: Dataset({ features: ['id', 'url', 'title', 'text'], num_rows: 149299 }) test: Dataset({ features: ['id', 'url', 'title', 'text'], num_rows: 149300 }) }) ``` ### text-only This is the same thing but with all columns except for 'text' removed. ```python from datasets import load_dataset # If the dataset is gated/private, make sure you have run huggingface-cli login config_name = "text-only" dataset = load_dataset("BEE-spoke-data/wikipedia-deduped", config_name) ``` ## token counts ### train Using `tiktoken` GPT-4 tokenizer, `train` split, and `text` column: | | num_tokens | |:------|----------------:| | count | 5.67337e+06 | | mean | 612.413 | | std | 739.331 | | min | 3 | | 25% | 163 | | 50% | 359 | | 75% | 761 | | max | 34298 | total: 3,474,446,396 ---
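The token-count table in the card names `tiktoken`'s GPT-4 tokenizer over the `text` column of the train split. A rough sketch of how such counts could be reproduced, using this record's repo id rather than the shorter id in the card's own example; this is a guess at the procedure, not the author's script:

```python
import tiktoken
from datasets import load_dataset

# The text-only config keeps just the `text` column (see the card above).
ds = load_dataset("BEE-spoke-data/wikipedia-20230901.en-deduped", "text-only", split="train")

enc = tiktoken.encoding_for_model("gpt-4")  # tokenizer named in the card

def count_tokens(batch):
    return {"num_tokens": [len(ids) for ids in enc.encode_ordinary_batch(batch["text"])]}

ds = ds.map(count_tokens, batched=True)
print(sum(ds["num_tokens"]))  # the card reports 3,474,446,396 total for train
```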
3,702
[ [ -0.053436279296875, -0.04168701171875, 0.0224151611328125, 0.007007598876953125, -0.031890869140625, -0.018829345703125, -0.017822265625, -0.017852783203125, 0.0308380126953125, 0.0174407958984375, -0.038116455078125, -0.06402587890625, -0.04510498046875, 0....
Rewcifer/ct_scans_90pct_3000_cutoff
2023-10-24T01:55:26.000Z
[ "region:us" ]
Rewcifer
null
null
0
10
2023-10-24T01:54:50
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 1064432300.5515866 num_examples: 213139 download_size: 233454097 dataset_size: 1064432300.5515866 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "ct_scans_90pct_3000_cutoff" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
480
[ [ -0.047882080078125, -0.002597808837890625, 0.036956787109375, 0.0180511474609375, -0.0234527587890625, -0.0296630859375, 0.0256805419921875, 0.00910186767578125, 0.0404052734375, 0.040802001953125, -0.0682373046875, -0.0504150390625, -0.0352783203125, -0.003...
sunjun/medmcqa_sj
2023-10-24T12:51:09.000Z
[ "region:us" ]
sunjun
null
null
0
10
2023-10-24T12:49:59
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* dataset_info: features: - name: id dtype: string - name: question dtype: string - name: cop dtype: class_label: names: '0': a '1': b '2': c '3': d - name: choice_type dtype: string - name: exp dtype: string - name: subject_name dtype: string - name: topic_name dtype: string - name: options struct: - name: A dtype: string - name: B dtype: string - name: C dtype: string - name: D dtype: string - name: answer_idx dtype: string - name: answer dtype: string - name: choices sequence: string - name: answer_index dtype: int64 splits: - name: train num_bytes: 155300377 num_examples: 182822 - name: test num_bytes: 2810502 num_examples: 4183 download_size: 102706169 dataset_size: 158110879 --- # Dataset Card for "medmcqa_sj" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
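In the schema above, `cop` (the correct option) is a `ClassLabel` with names `a` to `d`, while `options` is a struct keyed `A` to `D`. A small sketch, assuming those encodings, that recovers the answer text for a row:

```python
from datasets import load_dataset

ds = load_dataset("sunjun/medmcqa_sj", split="train")

# cop is stored as an int; the ClassLabel feature maps it back to a/b/c/d.
cop_feature = ds.features["cop"]
row = ds[0]
letter = cop_feature.int2str(row["cop"])
print(row["question"])
print("answer:", row["options"][letter.upper()])
```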
1,178
[ [ -0.031524658203125, -0.007190704345703125, 0.032501220703125, -0.0010623931884765625, -0.0136566162109375, 0.01058197021484375, 0.026611328125, 0.01201629638671875, 0.06158447265625, 0.0428466796875, -0.059906005859375, -0.05419921875, -0.049468994140625, -0...
Luciya/llama-2-clinc-oos-test
2023-10-24T12:52:25.000Z
[ "region:us" ]
Luciya
null
null
0
10
2023-10-24T12:52:24
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 12858700 num_examples: 5492 download_size: 1217377 dataset_size: 12858700 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "llama-2-clinc-oos-test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
452
[ [ -0.032867431640625, -0.0201416015625, 0.01248931884765625, 0.01070404052734375, -0.03314208984375, 0.0006279945373535156, 0.0295562744140625, -0.02642822265625, 0.053802490234375, 0.034210205078125, -0.05224609375, -0.053253173828125, -0.04638671875, -0.0038...
shermansiu/sgd_dst
2023-10-24T22:52:03.000Z
[ "language:en", "license:cc-by-sa-4.0", "natural-language-processing", "dialogue-state-tracking", "region:us" ]
shermansiu
null
null
0
10
2023-10-24T18:04:56
--- license: cc-by-sa-4.0 tags: - natural-language-processing - dialogue-state-tracking language: - en pretty_name: Schema-Guided Dialogue dataset - Dialogue State Tracking configs: - config_name: sgd-sdt data_files: - split: train path: "sdt/sgd_sdt_v0_train.tsv" - split: dev path: "sdt/sgd_sdt_v0_dev.tsv" - split: test path: "sdt/sgd_sdt_v0_test.tsv" - config_name: sgd-d3st data_files: - split: train path: "d3st/sgd_d3st_v0_train.tsv" - split: dev path: "d3st/sgd_d3st_v0_dev.tsv" - split: test path: "d3st/sgd_d3st_v0_test.tsv" dataset_info: - config_name: sgd-sdt features: - name: prompt dtype: string - name: target dtype: string - name: dialogue_id dtype: string - name: turn_id dtype: string - name: frame_id dtype: string splits: - name: train num_bytes: 269690581 num_examples: 175780 - name: dev num_bytes: 38376392 num_examples: 26077 - name: test num_bytes: 72134551 num_examples: 46116 - config_name: sgd-d3st features: - name: prompt dtype: string - name: target dtype: string - name: dialogue_id dtype: string - name: turn_id dtype: string - name: frame_id dtype: string splits: - name: train num_bytes: 226090375 num_examples: 175780 - name: dev num_bytes: 32170435 num_examples: 26077 - name: test num_bytes: 59997238 num_examples: 46116 --- Schema-Guided Dialogue dataset - Dialogue State Tracking - Description-Driven Dialogue State Tracking (D3ST) - Show, Don't Tell (SDT)
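The card above declares two configs backed by TSV files. A minimal sketch of selecting one by name; the config names, splits, and features come straight from the YAML:

```python
from datasets import load_dataset

# Pick a config by its declared name; each has train/dev/test splits.
sdt = load_dataset("shermansiu/sgd_dst", "sgd-sdt", split="train")
d3st = load_dataset("shermansiu/sgd_dst", "sgd-d3st", split="dev")

print(sdt[0]["prompt"][:200])
print(d3st[0]["target"])
```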
1,733
[ [ -0.01103973388671875, -0.037567138671875, 0.062286376953125, 0.01450347900390625, -0.023895263671875, 0.01096343994140625, 0.02264404296875, 0.0190887451171875, 0.0308990478515625, 0.06689453125, -0.0775146484375, -0.047332763671875, -0.026763916015625, -0.0...
Jing24/sort_low_all_train
2023-10-24T21:12:54.000Z
[ "region:us" ]
Jing24
null
null
0
10
2023-10-24T21:12:51
--- dataset_info: features: - name: id dtype: string - name: title dtype: string - name: context dtype: string - name: question dtype: string - name: answers struct: - name: answer_start sequence: int64 - name: text sequence: string splits: - name: train num_bytes: 79661311 num_examples: 87599 download_size: 46213808 dataset_size: 79661311 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "sort_low_all_train" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
672
[ [ -0.04669189453125, -0.0094451904296875, 0.0233917236328125, 0.01006317138671875, -0.0160980224609375, -0.002643585205078125, 0.0174560546875, -0.01332855224609375, 0.0604248046875, 0.0246429443359375, -0.05242919921875, -0.049896240234375, -0.06634521484375, ...
quyanh/helm-samsum-dolly-lima-cot
2023-10-25T09:11:30.000Z
[ "region:us" ]
quyanh
null
null
0
10
2023-10-25T09:07:09
--- dataset_info: features: - name: prompt dtype: string splits: - name: train num_bytes: 28230011.540207386 num_examples: 30963 download_size: 19770554 dataset_size: 28230011.540207386 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "helm-samsum-dolly-lima-cot" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
480
[ [ -0.03216552734375, -0.01201629638671875, 0.01125335693359375, 0.032623291015625, -0.04168701171875, -0.002979278564453125, 0.02044677734375, -0.0181121826171875, 0.06536865234375, 0.043304443359375, -0.0556640625, -0.0787353515625, -0.07122802734375, -0.0093...
TanmaySah/bias
2023-10-25T13:11:47.000Z
[ "region:us" ]
TanmaySah
null
null
0
10
2023-10-25T12:53:45
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
octa-cba/codigo_procesal_laboral_etiquetas2
2023-10-25T20:35:27.000Z
[ "region:us" ]
octa-cba
null
null
0
10
2023-10-25T20:34:35
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
rishiraj/predict-the-llm
2023-10-28T07:53:45.000Z
[ "region:us" ]
rishiraj
null
null
0
10
2023-10-26T16:57:05
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
CJWeiss/multitiny
2023-10-26T21:32:27.000Z
[ "region:us" ]
CJWeiss
null
null
0
10
2023-10-26T21:32:01
--- dataset_info: features: - name: id dtype: string - name: sources sequence: string - name: summary/long dtype: string - name: summary/short dtype: string - name: summary/tiny dtype: string splits: - name: train num_bytes: 489812218.2614571 num_examples: 1207 - name: test num_bytes: 97877726.43171807 num_examples: 251 - name: valid num_bytes: 63699346.36563877 num_examples: 145 download_size: 465403499 dataset_size: 651389291.0588139 --- # Dataset Card for "multitiny" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
675
[ [ -0.0474853515625, -0.018646240234375, 0.01432037353515625, 0.0234832763671875, -0.014129638671875, 0.01212310791015625, 0.005405426025390625, -0.01873779296875, 0.07147216796875, 0.0291748046875, -0.06011962890625, -0.04071044921875, -0.042816162109375, -0.0...
CJWeiss/multishort
2023-10-26T21:34:51.000Z
[ "region:us" ]
CJWeiss
null
null
0
10
2023-10-26T21:34:18
--- dataset_info: features: - name: id dtype: string - name: sources sequence: string - name: summary/long dtype: string - name: summary/short dtype: string - name: summary/tiny dtype: string splits: - name: train num_bytes: 949594524.2185664 num_examples: 2340 - name: test num_bytes: 189516235.24229074 num_examples: 486 - name: valid num_bytes: 137063421.14537445 num_examples: 312 download_size: 762638149 dataset_size: 1276174180.6062317 --- # Dataset Card for "multishort" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
679
[ [ -0.04241943359375, -0.01209259033203125, 0.0219573974609375, 0.039703369140625, -0.0274810791015625, 0.0054473876953125, 0.0214691162109375, -0.01519775390625, 0.053985595703125, 0.01873779296875, -0.055908203125, -0.049591064453125, -0.0513916015625, -0.025...
mwitiderrick/lamini_mistral
2023-10-27T07:05:54.000Z
[ "language:en", "license:apache-2.0", "region:us" ]
mwitiderrick
null
null
0
10
2023-10-27T07:03:20
--- license: apache-2.0 language: - en --- # Lamini Mistral Lamini Docs dataset formatted for fine-tuning with Mistral-7B Instruct model
138
[ [ -0.0232391357421875, -0.02716064453125, 0.00331878662109375, 0.03009033203125, -0.033172607421875, -0.045013427734375, -0.0001628398895263672, 0.00679779052734375, -0.0241241455078125, 0.071044921875, -0.0304718017578125, -0.04278564453125, -0.0137481689453125, ...
bobbybelajar/AmazonReviewVisualization
2023-10-27T08:23:57.000Z
[ "region:us" ]
bobbybelajar
null
null
0
10
2023-10-27T08:23:41
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
slaee/c-tutor
2023-10-28T15:10:18.000Z
[ "region:us" ]
slaee
null
null
0
10
2023-10-28T14:25:53
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
cxllin/economics
2023-10-28T22:27:36.000Z
[ "region:us" ]
cxllin
null
null
0
10
2023-10-28T21:49:43
--- # For reference on dataset card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1 # Doc / guide: https://huggingface.co/docs/hub/datasets-cards {} --- # Dataset Card for cxllin/economics This dataset aims to represent knowledge within the realm of economics. ## Dataset Details Featuring Macro, Micro, and Math textbooks. ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. --> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. 
--> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
4,399
[ [ -0.03466796875, -0.044464111328125, 0.026092529296875, 0.0142669677734375, -0.0109405517578125, -0.0006723403930664062, -0.01226043701171875, -0.04693603515625, 0.026397705078125, 0.058990478515625, -0.0506591796875, -0.07086181640625, -0.034271240234375, 0....
jay401521/domain_balance
2023-10-30T08:26:13.000Z
[ "region:us" ]
jay401521
null
null
0
10
2023-10-30T08:26:09
--- dataset_info: features: - name: id dtype: int64 - name: domain dtype: int64 - name: label dtype: int64 - name: rank dtype: string - name: sentence dtype: string splits: - name: train num_bytes: 6252293 num_examples: 72276 download_size: 3387340 dataset_size: 6252293 --- # Dataset Card for "domain_balance" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
491
[ [ -0.043609619140625, -0.0237579345703125, -0.00640106201171875, 0.0181427001953125, -0.01358795166015625, 0.005603790283203125, 0.0244903564453125, -0.01409149169921875, 0.05633544921875, 0.037994384765625, -0.064453125, -0.044036865234375, -0.043914794921875, ...
Yama/c_tree
2023-10-30T14:43:49.000Z
[ "region:us" ]
Yama
null
@misc{huggingface_cppe_5, author = {Hugging Face}, title = {CPPE-5 dataset code}, url = {https://huggingface.co/datasets/cppe-5/blob/main/cppe-5.py}, year = {2023}, }
0
10
2023-10-30T13:04:55
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
ayoub999/dataset_for_orange_factures
2023-11-02T13:07:33.000Z
[ "region:us" ]
ayoub999
null
null
0
10
2023-10-30T15:15:47
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* dataset_info: features: - name: id dtype: string - name: image dtype: image - name: bboxes sequence: sequence: int64 - name: ner_tags sequence: class_label: names: '0': O '1': Ref '2': NumFa '3': Fourniss '4': DateFa '5': DateLim '6': TotalHT '7': TVA '8': TotalTTc '9': unitP '10': Qt '11': TVAP '12': Désignation '13': Adresse - name: tokens sequence: string splits: - name: train num_bytes: 1168888.6666666667 num_examples: 4 - name: test num_bytes: 584444.3333333334 num_examples: 2 download_size: 0 dataset_size: 1753333.0 --- # Dataset Card for "dataset_for_orange_factures" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
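This record is a token-classification layout: parallel `tokens`, `bboxes`, and integer `ner_tags` drawn from the 14-name class list above. A sketch, assuming the repo loads as declared, that prints tokens with their decoded tags:

```python
from datasets import load_dataset

ds = load_dataset("ayoub999/dataset_for_orange_factures", split="train")

# ner_tags is a Sequence of ClassLabel; the inner feature recovers label names.
tag_feature = ds.features["ner_tags"].feature
example = ds[0]
for token, tag_id, box in zip(example["tokens"], example["ner_tags"], example["bboxes"]):
    print(token, tag_feature.int2str(tag_id), box)
```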
1,059
[ [ -0.040313720703125, -0.0109710693359375, 0.01393890380859375, 0.033233642578125, -0.013671875, -0.0171966552734375, 0.0178070068359375, -0.019195556640625, 0.06494140625, 0.043609619140625, -0.038909912109375, -0.055572509765625, -0.03118896484375, -0.006565...
banghua/all_cot_cve
2023-10-31T22:01:17.000Z
[ "region:us" ]
banghua
null
null
0
10
2023-10-30T17:48:14
--- dataset_info: features: - name: Original_GT dtype: bool - name: Input dtype: string - name: Output dtype: string - name: Cot dtype: string splits: - name: train num_bytes: 530255 num_examples: 417 download_size: 150055 dataset_size: 530255 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "all_cot_cve" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
541
[ [ -0.03753662109375, -0.01068878173828125, 0.035308837890625, 0.0196075439453125, -0.0297698974609375, -0.003566741943359375, 0.02685546875, -0.00980377197265625, 0.056976318359375, 0.044921875, -0.0433349609375, -0.08203125, -0.052093505859375, -0.02542114257...
stsudharsan/veshti-controlnet-v2-sammed-fingers
2023-10-30T19:02:00.000Z
[ "region:us" ]
stsudharsan
null
null
0
10
2023-10-30T19:01:48
--- dataset_info: features: - name: image dtype: image - name: conditioning_img dtype: image - name: caption dtype: string splits: - name: train num_bytes: 42872715.0 num_examples: 143 download_size: 42037622 dataset_size: 42872715.0 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "veshti-controlnet-v2-sammed-fingers" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
549
[ [ -0.0157470703125, -0.0127716064453125, 0.01404571533203125, 0.007656097412109375, -0.02587890625, 0.014129638671875, 0.026580810546875, -0.01467132568359375, 0.078369140625, 0.038482666015625, -0.05548095703125, -0.0440673828125, -0.044189453125, -0.01338195...
PrasoonPratham/aktarkhan
2023-10-31T06:53:13.000Z
[ "region:us" ]
PrasoonPratham
null
null
0
10
2023-10-31T06:44:59
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Adi-0-0-Gupta/Onion
2023-10-31T08:29:38.000Z
[ "region:us" ]
Adi-0-0-Gupta
null
null
0
10
2023-10-31T08:29:01
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: valid path: data/valid-* dataset_info: features: - name: image struct: - name: bytes dtype: binary - name: path dtype: 'null' - name: label dtype: int64 splits: - name: train num_bytes: 366111294 num_examples: 3932 - name: valid num_bytes: 11432314 num_examples: 127 download_size: 368399242 dataset_size: 377543608 --- # Dataset Card for "Onion" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
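Note that the `image` column above is a plain struct of `bytes`/`path`, not a decoded `Image` feature, so the raw bytes have to be opened manually. A minimal sketch assuming PIL is available:

```python
import io

from datasets import load_dataset
from PIL import Image

ds = load_dataset("Adi-0-0-Gupta/Onion", split="valid")

# image is {bytes, path}; decode the raw bytes into a PIL image.
record = ds[0]
img = Image.open(io.BytesIO(record["image"]["bytes"]))
print(img.size, "label:", record["label"])
```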
644
[ [ -0.025848388671875, -0.02496337890625, 0.01904296875, 0.00984954833984375, 0.0014581680297851562, 0.01129150390625, 0.023529052734375, -0.01141357421875, 0.06781005859375, 0.0293121337890625, -0.047943115234375, -0.051361083984375, -0.040802001953125, -0.019...
Lollitor/MyPubChem
2023-10-31T09:20:14.000Z
[ "region:us" ]
Lollitor
null
null
0
10
2023-10-31T09:20:11
--- dataset_info: config_name: Lollitor features: - name: text dtype: string splits: - name: train num_bytes: 34191420 num_examples: 207932 download_size: 7873702 dataset_size: 34191420 configs: - config_name: Lollitor data_files: - split: train path: Lollitor/train-* --- # Dataset Card for "MyPubChem" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
470
[ [ -0.04876708984375, -0.0195159912109375, 0.01415252685546875, 0.0227813720703125, -0.00954437255859375, -0.00007253885269165039, 0.01824951171875, -0.005615234375, 0.06488037109375, 0.03973388671875, -0.057403564453125, -0.04547119140625, -0.033172607421875, ...
Adi-0-0-Gupta/Potato
2023-10-31T10:43:12.000Z
[ "region:us" ]
Adi-0-0-Gupta
null
null
0
10
2023-10-31T10:42:03
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: valid path: data/valid-* dataset_info: features: - name: image struct: - name: bytes dtype: binary - name: path dtype: 'null' - name: label dtype: int64 splits: - name: train num_bytes: 735307536 num_examples: 7742 - name: valid num_bytes: 25841416 num_examples: 278 download_size: 733447508 dataset_size: 761148952 --- # Dataset Card for "Potato" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
645
[ [ -0.048004150390625, -0.0236053466796875, 0.01099395751953125, 0.01153564453125, -0.007686614990234375, 0.0189056396484375, 0.027099609375, -0.01099395751953125, 0.07537841796875, 0.03228759765625, -0.044403076171875, -0.04730224609375, -0.0545654296875, -0.0...
Phaedrus/rsna_fixed
2023-11-01T13:25:09.000Z
[ "region:us" ]
Phaedrus
null
null
0
10
2023-11-01T13:11:23
--- dataset_info: features: - name: image dtype: image - name: label1 dtype: image - name: label2 dtype: image - name: label3 dtype: image - name: label4 dtype: image - name: label5 dtype: image - name: label6 dtype: image - name: label7 dtype: image - name: label8 dtype: image - name: label9 dtype: image - name: label10 dtype: image - name: label11 dtype: image - name: label12 dtype: image - name: label13 dtype: image - name: label14 dtype: image splits: - name: train num_bytes: 29579297463.0 num_examples: 2000 download_size: 1123982789 dataset_size: 29579297463.0 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "rsna_fixed" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
934
[ [ -0.030242919921875, -0.001834869384765625, 0.0016326904296875, 0.0084228515625, -0.03131103515625, 0.0092315673828125, 0.006587982177734375, 0.00007796287536621094, 0.07843017578125, 0.03985595703125, -0.056671142578125, -0.037933349609375, -0.034423828125, ...
rajammanabrolu/ultra-split
2023-11-01T21:02:41.000Z
[ "region:us" ]
rajammanabrolu
null
null
0
10
2023-11-01T21:02:13
--- dataset_info: features: - name: source dtype: string - name: instruction dtype: string - name: models sequence: string - name: completions list: - name: annotations struct: - name: instruction_following struct: - name: Rating dtype: string - name: Rationale dtype: string - name: honesty struct: - name: Rating dtype: string - name: Rationale dtype: string - name: truthfulness struct: - name: Type sequence: string - name: Rationale dtype: string - name: Rating dtype: string - name: Rationale For Rating dtype: string - name: helpfulness struct: - name: Type sequence: string - name: Rationale dtype: string - name: Rating dtype: string - name: Rationale For Rating dtype: string - name: custom_system_prompt dtype: string - name: model dtype: string - name: principle dtype: string - name: response dtype: string - name: critique dtype: string - name: overall_score dtype: float64 - name: correct_answers sequence: string - name: incorrect_answers sequence: string splits: - name: train num_bytes: 757820544.4612066 num_examples: 57570 - name: test num_bytes: 84206670.53879344 num_examples: 6397 download_size: 333284347 dataset_size: 842027215.0 --- # Dataset Card for "ultra-split" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
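The schema above nests per-model completions, each carrying a rating and rationale along four aspects (a structure resembling UltraFeedback). A short sketch, assuming the declared fields, that walks one row's annotations:

```python
from datasets import load_dataset

ds = load_dataset("rajammanabrolu/ultra-split", split="train")

# Each completion carries per-aspect annotations with a Rating and a Rationale.
row = ds[0]
for completion in row["completions"]:
    honesty = completion["annotations"]["honesty"]
    print(completion["model"], "honesty:", honesty["Rating"])
```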
1,730
[ [ -0.0498046875, -0.030426025390625, 0.01181793212890625, 0.00637054443359375, -0.025970458984375, 0.0210418701171875, 0.0228729248046875, -0.016326904296875, 0.062469482421875, 0.038970947265625, -0.05767822265625, -0.035247802734375, -0.01904296875, -0.02180...
Nathamon/CommentwordExpo_Eng
2023-11-02T08:17:10.000Z
[ "region:us" ]
Nathamon
null
null
0
10
2023-11-02T08:17:09
--- dataset_info: features: - name: id dtype: string - name: sentence dtype: string - name: cleaned_sentence dtype: string - name: __index_level_0__ dtype: int64 splits: - name: train num_bytes: 2858226 num_examples: 12407 download_size: 1570070 dataset_size: 2858226 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "CommentwordExpo_Eng" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
573
[ [ -0.044189453125, 0.00611114501953125, 0.020172119140625, 0.027923583984375, -0.0266571044921875, -0.0008916854858398438, -0.0026721954345703125, -0.02801513671875, 0.0675048828125, 0.03997802734375, -0.06500244140625, -0.049468994140625, -0.04437255859375, 0...
TonyJPk7/Chat-PCR_CNNDaily_clear_Paraphrase
2023-11-02T09:39:32.000Z
[ "region:us" ]
TonyJPk7
null
null
0
10
2023-11-02T09:39:03
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
ranpox/xfund
2021-09-08T11:15:02.000Z
[ "region:us" ]
ranpox
null
null
3
9
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
svakulenk0/qrecc
2022-07-02T17:35:21.000Z
[ "task_categories:question-answering", "task_ids:open-domain-qa", "language_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "source_datasets:extended|natural_questions", "source_datasets:extended|quac", "language:en", "license:cc-by-3.0", "arxiv:2010.04898", ...
svakulenk0
null
null
7
9
2022-03-02T23:29:22
--- pretty_name: QReCC language_creators: - expert-generated - found language: - en license: - cc-by-3.0 multilinguality: - monolingual source_datasets: - extended|natural_questions - extended|quac task_categories: - question-answering task_ids: - open-domain-qa --- # Dataset Card for QReCC: Question Rewriting in Conversational Context ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Source Data](#source-data) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - [**Repository:**](https://github.com/apple/ml-qrecc) - [**Paper:**](https://arxiv.org/pdf/2010.04898.pdf) - [**Leaderboard:**](https://www.tira.io/task/scai-qrecc/dataset/scai-qrecc21-test-dataset-2021-07-20) ### Dataset Summary QReCC (Question Rewriting in Conversational Context) is an end-to-end open-domain question answering dataset comprising 14K conversations with 81K question-answer pairs. The goal of this dataset is to provide a challenging benchmark for end-to-end conversational question answering that includes the individual subtasks of question rewriting, passage retrieval and reading comprehension. The task in QReCC is to find answers to conversational questions within a collection of 10M web pages split into 54M passages. Answers to questions in the same conversation may be distributed across several web pages. The passage collection should be downloaded from [**Zenodo**](https://zenodo.org/record/5115890#.YaeD7C8RppR) (passages.zip). ### Supported Tasks and Leaderboards `question-answering` ### Languages English ## Dataset Structure ### Data Instances An example from the dataset looks as follows: ``` { "Context": [ "What are the pros and cons of electric cars?", "Some pros are: They're easier on the environment. Electricity is cheaper than gasoline. Maintenance is less frequent and less expensive. They're very quiet. You'll get tax credits. They can shorten your commute time. Some cons are: Most EVs have pretty short ranges. Recharging can take a while." ], "Question": "Tell me more about Tesla", "Rewrite": "Tell me more about Tesla the car company.", "Answer": "Tesla Inc. is an American automotive and energy company based in Palo Alto, California. The company specializes in electric car manufacturing and, through its SolarCity subsidiary, solar panel manufacturing.", "Answer_URL": "https://en.wikipedia.org/wiki/Tesla,_Inc.", "Conversation_no": 74, "Turn_no": 2, "Conversation_source": "trec" } ``` ### Data Splits - train: 63501 - test: 16451 ## Dataset Creation ### Source Data - QuAC - TREC CAsT - Natural Questions ## Additional Information ### Licensing Information [CC BY-SA 3.0](http://creativecommons.org/licenses/by-sa/3.0/) ### Citation Information ``` @inproceedings{ qrecc, title={Open-Domain Question Answering Goes Conversational via Question Rewriting}, author={Anantha, Raviteja and Vakulenko, Svitlana and Tu, Zhucheng and Longpre, Shayne and Pulman, Stephen and Chappidi, Srinivas}, booktitle={ NAACL }, year={2021} } ```
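Each row in QReCC is a single turn; `Conversation_no` and `Turn_no` (visible in the instance above) let you rebuild whole conversations. A sketch under the assumption the repo loads with `datasets`; conversation 74 is the one from the sample instance:

```python
from collections import defaultdict

from datasets import load_dataset

ds = load_dataset("svakulenk0/qrecc", split="train")

# Group single turns back into full conversations.
conversations = defaultdict(list)
for row in ds:
    conversations[row["Conversation_no"]].append(row)

for turn in sorted(conversations[74], key=lambda r: r["Turn_no"]):
    print(turn["Turn_no"], turn["Rewrite"], "->", turn["Answer"][:80])
```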
3,466
[ [ -0.032012939453125, -0.06597900390625, 0.0214691162109375, -0.00971221923828125, -0.01215362548828125, 0.0142974853515625, -0.005859375, -0.0265655517578125, 0.00789642333984375, 0.034912109375, -0.0675048828125, -0.038726806640625, 0.0088958740234375, 0.017...
zapsdcn/hyperpartisan_news
2021-12-08T20:17:10.000Z
[ "region:us" ]
zapsdcn
null
null
0
9
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
crystina-z/nocs-mrtydi-corpus
2022-03-07T17:15:52.000Z
[ "region:us" ]
crystina-z
null
null
0
9
2022-03-06T01:55:04
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
crystina-z/no-nonself-title-mrtydi-corpus
2022-03-11T22:55:53.000Z
[ "region:us" ]
crystina-z
null
null
0
9
2022-03-11T22:40:38
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
victor/autonlp-data-tweet-sentiment
2022-10-25T10:03:17.000Z
[ "task_categories:text-classification", "language:en", "region:us" ]
victor
null
null
2
9
2022-03-15T11:10:29
--- language: - en task_categories: - text-classification --- # AutoNLP Dataset for project: tweet-sentiment ## Table of Contents - [Dataset Description](#dataset-description) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) ## Dataset Description This dataset has been automatically processed by AutoNLP for project tweet-sentiment. ### Languages The BCP-47 code for the dataset's language is en. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "text": "I am going to see how long I can do this for.", "target": 8 }, { "text": "@anitabora yeah, right. What if our politicians start using uploading their pics, lots of inside sto[...]", "target": 8 } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "text": "Value(dtype='string', id=None)", "target": "ClassLabel(num_classes=13, names=['anger', 'boredom', 'empty', 'enthusiasm', 'fun', 'happiness', 'hate', 'love', 'neutral', 'relief', 'sadness', 'surprise', 'worry'], id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follows: | Split name | Num samples | | ------------ | ------------------- | | train | 31995 | | valid | 8005 |
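Since `target` is a 13-way `ClassLabel`, the integer in each sample maps back to an emotion name (the samples above have `target` 8, which is `neutral` in the name list). A quick sketch of tallying the label distribution:

```python
from collections import Counter

from datasets import load_dataset

ds = load_dataset("victor/autonlp-data-tweet-sentiment", split="train")

# Map each integer target back to its emotion name and count.
names = ds.features["target"].names
counts = Counter(names[t] for t in ds["target"])
print(counts.most_common())
```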
1,435
[ [ -0.035064697265625, -0.01059722900390625, 0.01039886474609375, 0.030517578125, -0.033233642578125, 0.0296783447265625, -0.0196533203125, -0.017669677734375, 0.01593017578125, 0.0310211181640625, -0.034149169921875, -0.050567626953125, -0.050018310546875, 0.0...
pietrolesci/conj_nli
2022-04-25T13:27:25.000Z
[ "region:us" ]
pietrolesci
null
null
0
9
2022-03-25T10:17:37
## Overview The original dataset can be found [here](https://github.com/swarnaHub/ConjNLI). It has been proposed in [ConjNLI: Natural Language Inference Over Conjunctive Sentences](https://aclanthology.org/2020.emnlp-main.661/). This dataset is a stress test for natural language inference over conjunctive sentences, where the premise differs from the hypothesis by conjuncts removed, added, or replaced. ## Dataset curation The label mapping is the usual `{"entailment": 0, "neutral": 1, "contradiction": 2}` used in NLI datasets. Note that labels for `test` split are not available. Also, the `train` split is originally named `adversarial_train_15k`. There are 2 instances (join on "premise", "hypothesis", "label") present both in `train` and `dev`. The `test` split does not have labels. Finally, in the `train` set there are a few instances without a label, they are removed. ## Code to create the dataset ```python import pandas as pd from datasets import Dataset, ClassLabel, Value, Features, DatasetDict # download data from repo https://github.com/swarnaHub/ConjNLI paths = { "train": "<path_to_folder>/ConjNLI-master/data/NLI/adversarial_train_15k.tsv", "dev": "<path_to_folder>/ConjNLI-master/data/NLI/conj_dev.tsv", "test": "<path_to_folder>/ConjNLI-master/data/NLI/conj_test.tsv", } dataset_splits = {} for split, path in paths.items(): # load data df = pd.read_csv(paths[split], sep="\t") # encode labels using the default mapping used by other nli datasets # i.e, entailment: 0, neutral: 1, contradiction: 2 df.columns = df.columns.str.lower() if "test" in path: df["label"] = -1 else: # remove empty labels df = df.loc[~df["label"].isna()] # encode labels df["label"] = df["label"].map({"entailment": 0, "neutral": 1, "contradiction": 2}) # cast to dataset features = Features({ "premise": Value(dtype="string", id=None), "hypothesis": Value(dtype="string", id=None), "label": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]), }) dataset = Dataset.from_pandas(df, features=features) dataset_splits[split] = dataset conj_nli = DatasetDict(dataset_splits) conj_nli.push_to_hub("pietrolesci/conj_nli", token="<token>") # check overlap between splits from itertools import combinations for i, j in combinations(conj_nli.keys(), 2): print( f"{i} - {j}: ", pd.merge( conj_nli[i].to_pandas(), conj_nli[j].to_pandas(), on=["premise", "hypothesis", "label"], how="inner" ).shape[0], ) #> train - dev: 2 #> train - test: 0 #> dev - test: 0 ```
2,699
[ [ -0.041107177734375, -0.055816650390625, 0.002834320068359375, 0.0211639404296875, -0.01229095458984375, -0.01263427734375, -0.0251312255859375, -0.0252838134765625, 0.02923583984375, 0.0260467529296875, -0.04931640625, -0.038177490234375, -0.040008544921875, ...
huggan/few-shot-obama
2022-04-12T14:05:43.000Z
[ "arxiv:2101.04775", "region:us" ]
huggan
null
null
0
9
2022-04-01T11:33:51
# Citation ``` @article{DBLP:journals/corr/abs-2101-04775, author = {Bingchen Liu and Yizhe Zhu and Kunpeng Song and Ahmed Elgammal}, title = {Towards Faster and Stabilized {GAN} Training for High-fidelity Few-shot Image Synthesis}, journal = {CoRR}, volume = {abs/2101.04775}, year = {2021}, url = {https://arxiv.org/abs/2101.04775}, eprinttype = {arXiv}, eprint = {2101.04775}, timestamp = {Fri, 22 Jan 2021 15:16:00 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2101-04775.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
676
[ [ -0.038360595703125, -0.055694580078125, 0.0012750625610351562, 0.0233154296875, -0.0065155029296875, -0.012420654296875, -0.00557708740234375, -0.0200042724609375, 0.00550079345703125, -0.0029239654541015625, -0.024627685546875, -0.0238189697265625, -0.02734375,...
lm233/humor_train
2022-04-08T18:13:45.000Z
[ "region:us" ]
lm233
null
null
1
9
2022-04-08T18:10:37
--- annotations_creators: [] language_creators: [] languages: [] licenses: [] multilinguality: [] pretty_name: humor_train size_categories: [] source_datasets: [] task_categories: [] task_ids: [] ---
191
[ [ -0.02325439453125, -0.00820159912109375, 0.007350921630859375, 0.04656982421875, -0.036773681640625, 0.021087646484375, -0.021270751953125, -0.00989532470703125, 0.047698974609375, 0.046112060546875, -0.031829833984375, -0.042388916015625, -0.055938720703125, ...
taln-ls2n/wikinews-fr-100
2022-09-23T07:38:18.000Z
[ "task_categories:text-generation", "annotations_creators:unknown", "language_creators:unknown", "multilinguality:monolingual", "size_categories:n<1K", "language:fr", "license:cc-by-4.0", "region:us" ]
taln-ls2n
Wikinews-fr-100 benchmark dataset for keyphrase extraction and generation.
@inproceedings{bougouin-etal-2013-topicrank, title = "{T}opic{R}ank: Graph-Based Topic Ranking for Keyphrase Extraction", author = "Bougouin, Adrien and Boudin, Florian and Daille, B{\'e}atrice", booktitle = "Proceedings of the Sixth International Joint Conference on Natural Language Processing", month = oct, year = "2013", address = "Nagoya, Japan", publisher = "Asian Federation of Natural Language Processing", url = "https://aclanthology.org/I13-1062", pages = "543--551", }
1
9
2022-04-19T11:55:39
--- annotations_creators: - unknown language_creators: - unknown language: - fr license: - cc-by-4.0 multilinguality: - monolingual task_categories: - text-mining - text-generation task_ids: - keyphrase-generation - keyphrase-extraction size_categories: - n<1K pretty_name: Wikinews-fr-100 --- # Wikinews-fr-100 Benchmark Dataset for Keyphrase Generation ## About Wikinews-fr-100 is a dataset for benchmarking keyphrase extraction and generation models. The dataset is composed of 100 news articles in French collected from [wikinews](https://fr.wikinews.org/wiki/Accueil). Keyphrases were annotated by readers (students in computer science) in an uncontrolled setting (that is, not limited to thesaurus entries). Details about the dataset can be found in the original paper [(Bougouin et al., 2013)][bougouin-2013]. Reference (indexer-assigned) keyphrases are also categorized under the PRMU (<u>P</u>resent-<u>R</u>eordered-<u>M</u>ixed-<u>U</u>nseen) scheme as proposed in [(Boudin and Gallina, 2021)][boudin-2021]. Present reference keyphrases are also ordered by their order of apparition in the concatenation of title and abstract. Text pre-processing (tokenization) is carried out using `spacy` (`fr_core_news_sm` model) with a special rule to avoid splitting words with hyphens (e.g. graph-based is kept as one token). Stemming (Snowball stemmer implementation for french provided in `nltk`) is applied before reference keyphrases are matched against the source text. Details about the process can be found in `prmu.py`. ## Content and statistics The dataset contains the following test split: | Split | # documents | #words | # keyphrases | % Present | % Reordered | % Mixed | % Unseen | | :--------- | ----------: | -----: | -----------: | --------: | ----------: | ------: | -------: | | Test | 100 | 306.9 | 9.64 | 95.91 | 1.40 | 0.85 | 1.84 | The following data fields are available : - **id**: unique identifier of the document. - **title**: title of the document. - **abstract**: abstract of the document. - **keyphrases**: list of reference keyphrases. - **prmu**: list of <u>P</u>resent-<u>R</u>eordered-<u>M</u>ixed-<u>U</u>nseen categories for reference keyphrases. ## References - (Bougouin et al., 2013) Adrien Bougouin, Florian Boudin, and Béatrice Daille. 2013. [TopicRank: Graph-Based Topic Ranking for Keyphrase Extraction][bougouin-2013]. In Proceedings of the Sixth International Joint Conference on Natural Language Processing, pages 543–551, Nagoya, Japan. Asian Federation of Natural Language Processing. - (Boudin and Gallina, 2021) Florian Boudin and Ygor Gallina. 2021. [Redefining Absent Keyphrases and their Effect on Retrieval Effectiveness][boudin-2021]. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4185–4193, Online. Association for Computational Linguistics. [bougouin-2013]: https://aclanthology.org/I13-1062/ [boudin-2021]: https://aclanthology.org/2021.naacl-main.330/
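A rough sketch of the preprocessing the card describes (spacy tokenization with `fr_core_news_sm`, then French Snowball stemming via `nltk`); the exact hyphen-preserving tokenizer rule lives in the dataset's `prmu.py` and is only gestured at here:

```python
import spacy
from nltk.stem.snowball import SnowballStemmer

nlp = spacy.load("fr_core_news_sm")   # tokenizer named in the card
stemmer = SnowballStemmer("french")   # stemming applied before keyphrase matching

def preprocess(text: str) -> list[str]:
    # The card adds a special rule so hyphenated words (e.g. "graph-based")
    # stay single tokens; see prmu.py in the dataset repo for the real rule.
    return [stemmer.stem(tok.text) for tok in nlp(text) if not tok.is_space]

print(preprocess("Extraction de mots-clés dans des articles de presse"))
```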
3,087
[ [ -0.0258941650390625, -0.050018310546875, 0.0167083740234375, 0.01678466796875, -0.0220794677734375, 0.01171112060546875, -0.01171875, 0.002246856689453125, 0.0156402587890625, 0.025604248046875, -0.040313720703125, -0.056884765625, -0.04034423828125, 0.03668...
h4iku/coconut_javascript2010_preprocessed
2022-04-21T20:34:35.000Z
[ "region:us" ]
h4iku
null
null
0
9
2022-04-21T20:05:05
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
pietrolesci/dnc
2022-04-25T08:59:06.000Z
[ "region:us" ]
pietrolesci
null
null
0
9
2022-04-25T08:54:56
## Overview Original dataset [here](https://github.com/decompositional-semantics-initiative/DNC). This dataset has been proposed in [Collecting Diverse Natural Language Inference Problems for Sentence Representation Evaluation](https://www.aclweb.org/anthology/D18-1007/). ## Dataset curation This version of the dataset does not include the `type-of-inference` "KG" as its label set is `[1, 2, 3, 4, 5]` while here we focus on NLI-related label sets, i.e. `[entailed, not-entailed]`. For this reason, I named the dataset DNLI for _Diverse_ NLI, as in [Liu et al. 2020](https://aclanthology.org/2020.conll-1.48/), instead of DNC. This version of the dataset contains columns from the `*_data.json` and the `*_metadata.json` files available in the repo. In the original repo, each data file has the following keys and values: - `context`: The context sentence for the NLI pair. The context is already tokenized. - `hypothesis`: The hypothesis sentence for the NLI pair. The hypothesis is already tokenized. - `label`: The label for the NLI pair - `label-set`: The set of possible labels for the specific NLI pair - `binary-label`: A `True` or `False` label. See the paper for details on how we convert the `label` into a binary label. - `split`: This can be `train`, `dev`, or `test`. - `type-of-inference`: A string indicating what type of inference is tested in this example. - `pair-id`: A unique integer id for the NLI pair. The `pair-id` is used to find the corresponding metadata for any given NLI pair while each metadata file has the following columns: - `pair-id`: A unique integer id for the NLI pair. - `corpus`: The original corpus where this example came from. - `corpus-sent-id`: The id of the sentence (or example) in the original dataset that we recast. - `corpus-license`: The license for the data from the original dataset. - `creation-approach`: Determines the method used to recast this example. Options are `automatic`, `manual`, or `human-labeled`. - `misc`: A dictionary of other relevant information. This is an optional field. The files are merged on the `pair-id` key. I **do not** include the `misc` column as it is not essential for NLI. NOTE: the label mapping is **not** the custom 3-class mapping used for NLI tasks. The original uses a binary target, which I encoded with the following mapping `{"not-entailed": 0, "entailed": 1}`. NOTE: some instances are present in multiple splits (matching performed by exact matching on "context", "hypothesis", and "label"). 
## Code to create the dataset ```python import pandas as pd from datasets import Dataset, ClassLabel, Value, Features, DatasetDict, Sequence from pathlib import Path paths = { "train": "<path_to_folder>/DNC-master/train", "dev": "<path_to_folder>/DNC-master/dev", "test": "<path_to_folder>/DNC-master/test", } # read all data files dfs = [] for split, path in paths.items(): for f_name in Path(path).rglob("*_data.json"): df = pd.read_json(str(f_name)) df["file_split_data"] = split dfs.append(df) data = pd.concat(dfs, ignore_index=False, axis=0) # read all metadata files meta_dfs = [] for split, path in paths.items(): for f_name in Path(path).rglob("*_metadata.json"): df = pd.read_json(str(f_name)) meta_dfs.append(df) metadata = pd.concat(meta_dfs, ignore_index=False, axis=0) # merge dataset = pd.merge(data, metadata, on="pair-id", how="left") # check that the split column reflects file splits assert sum(dataset["split"] != dataset["file_split_data"]) == 0 dataset = dataset.drop(columns=["file_split_data"]) # fix `binary-label` column dataset.loc[~dataset["label"].isin(["entailed", "not-entailed"]), "binary-label"] = False dataset.loc[dataset["label"].isin(["entailed", "not-entailed"]), "binary-label"] = True # fix datatype dataset["corpus-sent-id"] = dataset["corpus-sent-id"].astype(str) # order columns as shown in the README.md columns = [ "context", "hypothesis", "label", "label-set", "binary-label", "split", "type-of-inference", "pair-id", "corpus", "corpus-sent-id", "corpus-license", "creation-approach", "misc", ] dataset = dataset.loc[:, columns] # remove misc column dataset = dataset.drop(columns=["misc"]) # remove KG for NLI dataset.loc[(dataset["label"].isin([1, 2, 3, 4, 5])), "type-of-inference"].value_counts() # > the only split with label-set [1, 2, 3, 4, 5], so remove as we focus on NLI dataset = dataset.loc[~(dataset["type-of-inference"] == "KG")] # encode labels dataset["label"] = dataset["label"].map({"not-entailed": 0, "entailed": 1}) # fill NA in label-set dataset["label-set"] = dataset["label-set"].ffill() features = Features( { "context": Value(dtype="string"), "hypothesis": Value(dtype="string"), "label": ClassLabel(num_classes=2, names=["not-entailed", "entailed"]), "label-set": Sequence(length=2, feature=Value(dtype="string")), "binary-label": Value(dtype="bool"), "split": Value(dtype="string"), "type-of-inference": Value(dtype="string"), "pair-id": Value(dtype="int64"), "corpus": Value(dtype="string"), "corpus-sent-id": Value(dtype="string"), "corpus-license": Value(dtype="string"), "creation-approach": Value(dtype="string"), } ) dataset_splits = {} for split in ("train", "dev", "test"): df_split = dataset.loc[dataset["split"] == split] dataset_splits[split] = Dataset.from_pandas(df_split, features=features) dataset_splits = DatasetDict(dataset_splits) dataset_splits.push_to_hub("pietrolesci/dnli", token="<your token>") # check overlap between splits from itertools import combinations for i, j in combinations(dataset_splits.keys(), 2): print( f"{i} - {j}: ", pd.merge( dataset_splits[i].to_pandas(), dataset_splits[j].to_pandas(), on=["context", "hypothesis", "label"], how="inner", ).shape[0], ) #> train - dev: 127 #> train - test: 55 #> dev - test: 54 ```
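For completeness, a minimal loading sketch; note that the script above pushes the result to `pietrolesci/dnli`, so adjust the hub id if the dataset was published under a different name:

```python
from datasets import load_dataset

# Assumes the dataset was pushed as in the creation script above.
ds = load_dataset("pietrolesci/dnli")
print(ds)                                   # train / dev / test splits
print(ds["train"].features["label"].names)  # ['not-entailed', 'entailed']
```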
6,055
[ [ -0.04010009765625, -0.046783447265625, 0.02069091796875, 0.0198211669921875, -0.0135498046875, 0.0010366439819335938, -0.0166015625, -0.01171112060546875, 0.040435791015625, 0.039642333984375, -0.043060302734375, -0.06390380859375, -0.04132080078125, 0.03057...
mrm8488/ImageNet1K-train
2022-04-28T11:06:11.000Z
[ "region:us" ]
mrm8488
null
null
0
9
2022-04-27T20:03:48
mapping: ``` n01440764 tench, Tinca tinca n01443537 goldfish, Carassius auratus n01484850 great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias n01491361 tiger shark, Galeocerdo cuvieri n01494475 hammerhead, hammerhead shark n01496331 electric ray, crampfish, numbfish, torpedo n01498041 stingray n01514668 cock n01514859 hen n01518878 ostrich, Struthio camelus n01530575 brambling, Fringilla montifringilla n01531178 goldfinch, Carduelis carduelis n01532829 house finch, linnet, Carpodacus mexicanus n01534433 junco, snowbird n01537544 indigo bunting, indigo finch, indigo bird, Passerina cyanea n01558993 robin, American robin, Turdus migratorius n01560419 bulbul n01580077 jay n01582220 magpie n01592084 chickadee n01601694 water ouzel, dipper n01608432 kite n01614925 bald eagle, American eagle, Haliaeetus leucocephalus n01616318 vulture n01622779 great grey owl, great gray owl, Strix nebulosa n01629819 European fire salamander, Salamandra salamandra n01630670 common newt, Triturus vulgaris n01631663 eft n01632458 spotted salamander, Ambystoma maculatum n01632777 axolotl, mud puppy, Ambystoma mexicanum n01641577 bullfrog, Rana catesbeiana n01644373 tree frog, tree-frog n01644900 tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui n01664065 loggerhead, loggerhead turtle, Caretta caretta n01665541 leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea n01667114 mud turtle n01667778 terrapin n01669191 box turtle, box tortoise n01675722 banded gecko n01677366 common iguana, iguana, Iguana iguana n01682714 American chameleon, anole, Anolis carolinensis n01685808 whiptail, whiptail lizard n01687978 agama n01688243 frilled lizard, Chlamydosaurus kingi n01689811 alligator lizard n01692333 Gila monster, Heloderma suspectum n01693334 green lizard, Lacerta viridis n01694178 African chameleon, Chamaeleo chamaeleon n01695060 Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis n01697457 African crocodile, Nile crocodile, Crocodylus niloticus n01698640 American alligator, Alligator mississipiensis n01704323 triceratops n01728572 thunder snake, worm snake, Carphophis amoenus n01728920 ringneck snake, ring-necked snake, ring snake n01729322 hognose snake, puff adder, sand viper n01729977 green snake, grass snake n01734418 king snake, kingsnake n01735189 garter snake, grass snake n01737021 water snake n01739381 vine snake n01740131 night snake, Hypsiglena torquata n01742172 boa constrictor, Constrictor constrictor n01744401 rock python, rock snake, Python sebae n01748264 Indian cobra, Naja naja n01749939 green mamba n01751748 sea snake n01753488 horned viper, cerastes, sand viper, horned asp, Cerastes cornutus n01755581 diamondback, diamondback rattlesnake, Crotalus adamanteus n01756291 sidewinder, horned rattlesnake, Crotalus cerastes n01768244 trilobite n01770081 harvestman, daddy longlegs, Phalangium opilio n01770393 scorpion n01773157 black and gold garden spider, Argiope aurantia n01773549 barn spider, Araneus cavaticus n01773797 garden spider, Aranea diademata n01774384 black widow, Latrodectus mactans n01774750 tarantula n01775062 wolf spider, hunting spider n01776313 tick n01784675 centipede n01795545 black grouse n01796340 ptarmigan n01797886 ruffed grouse, partridge, Bonasa umbellus n01798484 prairie chicken, prairie grouse, prairie fowl n01806143 peacock n01806567 quail n01807496 partridge n01817953 African grey, African gray, Psittacus erithacus n01818515 macaw n01819313 sulphur-crested cockatoo, Kakatoe galerita, 
Cacatua galerita n01820546 lorikeet n01824575 coucal n01828970 bee eater n01829413 hornbill n01833805 hummingbird n01843065 jacamar n01843383 toucan n01847000 drake n01855032 red-breasted merganser, Mergus serrator n01855672 goose n01860187 black swan, Cygnus atratus n01871265 tusker n01872401 echidna, spiny anteater, anteater n01873310 platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus n01877812 wallaby, brush kangaroo n01882714 koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus n01883070 wombat n01910747 jellyfish n01914609 sea anemone, anemone n01917289 brain coral n01924916 flatworm, platyhelminth n01930112 nematode, nematode worm, roundworm n01943899 conch n01944390 snail n01945685 slug n01950731 sea slug, nudibranch n01955084 chiton, coat-of-mail shell, sea cradle, polyplacophore n01968897 chambered nautilus, pearly nautilus, nautilus n01978287 Dungeness crab, Cancer magister n01978455 rock crab, Cancer irroratus n01980166 fiddler crab n01981276 king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica n01983481 American lobster, Northern lobster, Maine lobster, Homarus americanus n01984695 spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish n01985128 crayfish, crawfish, crawdad, crawdaddy n01986214 hermit crab n01990800 isopod n02002556 white stork, Ciconia ciconia n02002724 black stork, Ciconia nigra n02006656 spoonbill n02007558 flamingo n02009229 little blue heron, Egretta caerulea n02009912 American egret, great white heron, Egretta albus n02011460 bittern n02012849 crane n02013706 limpkin, Aramus pictus n02017213 European gallinule, Porphyrio porphyrio n02018207 American coot, marsh hen, mud hen, water hen, Fulica americana n02018795 bustard n02025239 ruddy turnstone, Arenaria interpres n02027492 red-backed sandpiper, dunlin, Erolia alpina n02028035 redshank, Tringa totanus n02033041 dowitcher n02037110 oystercatcher, oyster catcher n02051845 pelican n02056570 king penguin, Aptenodytes patagonica n02058221 albatross, mollymawk n02066245 grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus n02071294 killer whale, killer, orca, grampus, sea wolf, Orcinus orca n02074367 dugong, Dugong dugon n02077923 sea lion n02085620 Chihuahua n02085782 Japanese spaniel n02085936 Maltese dog, Maltese terrier, Maltese n02086079 Pekinese, Pekingese, Peke n02086240 Shih-Tzu n02086646 Blenheim spaniel n02086910 papillon n02087046 toy terrier n02087394 Rhodesian ridgeback n02088094 Afghan hound, Afghan n02088238 basset, basset hound n02088364 beagle n02088466 bloodhound, sleuthhound n02088632 bluetick n02089078 black-and-tan coonhound n02089867 Walker hound, Walker foxhound n02089973 English foxhound n02090379 redbone n02090622 borzoi, Russian wolfhound n02090721 Irish wolfhound n02091032 Italian greyhound n02091134 whippet n02091244 Ibizan hound, Ibizan Podenco n02091467 Norwegian elkhound, elkhound n02091635 otterhound, otter hound n02091831 Saluki, gazelle hound n02092002 Scottish deerhound, deerhound n02092339 Weimaraner n02093256 Staffordshire bullterrier, Staffordshire bull terrier n02093428 American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier n02093647 Bedlington terrier n02093754 Border terrier n02093859 Kerry blue terrier n02093991 Irish terrier n02094114 Norfolk terrier n02094258 Norwich terrier n02094433 Yorkshire terrier n02095314 wire-haired fox terrier n02095570 Lakeland terrier n02095889 Sealyham terrier, 
Sealyham n02096051 Airedale, Airedale terrier n02096177 cairn, cairn terrier n02096294 Australian terrier n02096437 Dandie Dinmont, Dandie Dinmont terrier n02096585 Boston bull, Boston terrier n02097047 miniature schnauzer n02097130 giant schnauzer n02097209 standard schnauzer n02097298 Scotch terrier, Scottish terrier, Scottie n02097474 Tibetan terrier, chrysanthemum dog n02097658 silky terrier, Sydney silky n02098105 soft-coated wheaten terrier n02098286 West Highland white terrier n02098413 Lhasa, Lhasa apso n02099267 flat-coated retriever n02099429 curly-coated retriever n02099601 golden retriever n02099712 Labrador retriever n02099849 Chesapeake Bay retriever n02100236 German short-haired pointer n02100583 vizsla, Hungarian pointer n02100735 English setter n02100877 Irish setter, red setter n02101006 Gordon setter n02101388 Brittany spaniel n02101556 clumber, clumber spaniel n02102040 English springer, English springer spaniel n02102177 Welsh springer spaniel n02102318 cocker spaniel, English cocker spaniel, cocker n02102480 Sussex spaniel n02102973 Irish water spaniel n02104029 kuvasz n02104365 schipperke n02105056 groenendael n02105162 malinois n02105251 briard n02105412 kelpie n02105505 komondor n02105641 Old English sheepdog, bobtail n02105855 Shetland sheepdog, Shetland sheep dog, Shetland n02106030 collie n02106166 Border collie n02106382 Bouvier des Flandres, Bouviers des Flandres n02106550 Rottweiler n02106662 German shepherd, German shepherd dog, German police dog, alsatian n02107142 Doberman, Doberman pinscher n02107312 miniature pinscher n02107574 Greater Swiss Mountain dog n02107683 Bernese mountain dog n02107908 Appenzeller n02108000 EntleBucher n02108089 boxer n02108422 bull mastiff n02108551 Tibetan mastiff n02108915 French bulldog n02109047 Great Dane n02109525 Saint Bernard, St Bernard n02109961 Eskimo dog, husky n02110063 malamute, malemute, Alaskan malamute n02110185 Siberian husky n02110341 dalmatian, coach dog, carriage dog n02110627 affenpinscher, monkey pinscher, monkey dog n02110806 basenji n02110958 pug, pug-dog n02111129 Leonberg n02111277 Newfoundland, Newfoundland dog n02111500 Great Pyrenees n02111889 Samoyed, Samoyede n02112018 Pomeranian n02112137 chow, chow chow n02112350 keeshond n02112706 Brabancon griffon n02113023 Pembroke, Pembroke Welsh corgi n02113186 Cardigan, Cardigan Welsh corgi n02113624 toy poodle n02113712 miniature poodle n02113799 standard poodle n02113978 Mexican hairless n02114367 timber wolf, grey wolf, gray wolf, Canis lupus n02114548 white wolf, Arctic wolf, Canis lupus tundrarum n02114712 red wolf, maned wolf, Canis rufus, Canis niger n02114855 coyote, prairie wolf, brush wolf, Canis latrans n02115641 dingo, warrigal, warragal, Canis dingo n02115913 dhole, Cuon alpinus n02116738 African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus n02117135 hyena, hyaena n02119022 red fox, Vulpes vulpes n02119789 kit fox, Vulpes macrotis n02120079 Arctic fox, white fox, Alopex lagopus n02120505 grey fox, gray fox, Urocyon cinereoargenteus n02123045 tabby, tabby cat n02123159 tiger cat n02123394 Persian cat n02123597 Siamese cat, Siamese n02124075 Egyptian cat n02125311 cougar, puma, catamount, mountain lion, painter, panther, Felis concolor n02127052 lynx, catamount n02128385 leopard, Panthera pardus n02128757 snow leopard, ounce, Panthera uncia n02128925 jaguar, panther, Panthera onca, Felis onca n02129165 lion, king of beasts, Panthera leo n02129604 tiger, Panthera tigris n02130308 cheetah, chetah, Acinonyx jubatus n02132136 brown bear, 
bruin, Ursus arctos n02133161 American black bear, black bear, Ursus americanus, Euarctos americanus n02134084 ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus n02134418 sloth bear, Melursus ursinus, Ursus ursinus n02137549 mongoose n02138441 meerkat, mierkat n02165105 tiger beetle n02165456 ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle n02167151 ground beetle, carabid beetle n02168699 long-horned beetle, longicorn, longicorn beetle n02169497 leaf beetle, chrysomelid n02172182 dung beetle n02174001 rhinoceros beetle n02177972 weevil n02190166 fly n02206856 bee n02219486 ant, emmet, pismire n02226429 grasshopper, hopper n02229544 cricket n02231487 walking stick, walkingstick, stick insect n02233338 cockroach, roach n02236044 mantis, mantid n02256656 cicada, cicala n02259212 leafhopper n02264363 lacewing, lacewing fly n02268443 dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk n02268853 damselfly n02276258 admiral n02277742 ringlet, ringlet butterfly n02279972 monarch, monarch butterfly, milkweed butterfly, Danaus plexippus n02280649 cabbage butterfly n02281406 sulphur butterfly, sulfur butterfly n02281787 lycaenid, lycaenid butterfly n02317335 starfish, sea star n02319095 sea urchin n02321529 sea cucumber, holothurian n02325366 wood rabbit, cottontail, cottontail rabbit n02326432 hare n02328150 Angora, Angora rabbit n02342885 hamster n02346627 porcupine, hedgehog n02356798 fox squirrel, eastern fox squirrel, Sciurus niger n02361337 marmot n02363005 beaver n02364673 guinea pig, Cavia cobaya n02389026 sorrel n02391049 zebra n02395406 hog, pig, grunter, squealer, Sus scrofa n02396427 wild boar, boar, Sus scrofa n02397096 warthog n02398521 hippopotamus, hippo, river horse, Hippopotamus amphibius n02403003 ox n02408429 water buffalo, water ox, Asiatic buffalo, Bubalus bubalis n02410509 bison n02412080 ram, tup n02415577 bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis n02417914 ibex, Capra ibex n02422106 hartebeest n02422699 impala, Aepyceros melampus n02423022 gazelle n02437312 Arabian camel, dromedary, Camelus dromedarius n02437616 llama n02441942 weasel n02442845 mink n02443114 polecat, fitch, foulmart, foumart, Mustela putorius n02443484 black-footed ferret, ferret, Mustela nigripes n02444819 otter n02445715 skunk, polecat, wood pussy n02447366 badger n02454379 armadillo n02457408 three-toed sloth, ai, Bradypus tridactylus n02480495 orangutan, orang, orangutang, Pongo pygmaeus n02480855 gorilla, Gorilla gorilla n02481823 chimpanzee, chimp, Pan troglodytes n02483362 gibbon, Hylobates lar n02483708 siamang, Hylobates syndactylus, Symphalangus syndactylus n02484975 guenon, guenon monkey n02486261 patas, hussar monkey, Erythrocebus patas n02486410 baboon n02487347 macaque n02488291 langur n02488702 colobus, colobus monkey n02489166 proboscis monkey, Nasalis larvatus n02490219 marmoset n02492035 capuchin, ringtail, Cebus capucinus n02492660 howler monkey, howler n02493509 titi, titi monkey n02493793 spider monkey, Ateles geoffroyi n02494079 squirrel monkey, Saimiri sciureus n02497673 Madagascar cat, ring-tailed lemur, Lemur catta n02500267 indri, indris, Indri indri, Indri brevicaudatus n02504013 Indian elephant, Elephas maximus n02504458 African elephant, Loxodonta africana n02509815 lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens n02510455 giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca n02514041 barracouta, snoek 
n02526121 eel n02536864 coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch n02606052 rock beauty, Holocanthus tricolor n02607072 anemone fish n02640242 sturgeon n02641379 gar, garfish, garpike, billfish, Lepisosteus osseus n02643566 lionfish n02655020 puffer, pufferfish, blowfish, globefish n02666196 abacus n02667093 abaya n02669723 academic gown, academic robe, judge's robe n02672831 accordion, piano accordion, squeeze box n02676566 acoustic guitar n02687172 aircraft carrier, carrier, flattop, attack aircraft carrier n02690373 airliner n02692877 airship, dirigible n02699494 altar n02701002 ambulance n02704792 amphibian, amphibious vehicle n02708093 analog clock n02727426 apiary, bee house n02730930 apron n02747177 ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin n02749479 assault rifle, assault gun n02769748 backpack, back pack, knapsack, packsack, rucksack, haversack n02776631 bakery, bakeshop, bakehouse n02777292 balance beam, beam n02782093 balloon n02783161 ballpoint, ballpoint pen, ballpen, Biro n02786058 Band Aid n02787622 banjo n02788148 bannister, banister, balustrade, balusters, handrail n02790996 barbell n02791124 barber chair n02791270 barbershop n02793495 barn n02794156 barometer n02795169 barrel, cask n02797295 barrow, garden cart, lawn cart, wheelbarrow n02799071 baseball n02802426 basketball n02804414 bassinet n02804610 bassoon n02807133 bathing cap, swimming cap n02808304 bath towel n02808440 bathtub, bathing tub, bath, tub n02814533 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon n02814860 beacon, lighthouse, beacon light, pharos n02815834 beaker n02817516 bearskin, busby, shako n02823428 beer bottle n02823750 beer glass n02825657 bell cote, bell cot n02834397 bib n02835271 bicycle-built-for-two, tandem bicycle, tandem n02837789 bikini, two-piece n02840245 binder, ring-binder n02841315 binoculars, field glasses, opera glasses n02843684 birdhouse n02859443 boathouse n02860847 bobsled, bobsleigh, bob n02865351 bolo tie, bolo, bola tie, bola n02869837 bonnet, poke bonnet n02870880 bookcase n02871525 bookshop, bookstore, bookstall n02877765 bottlecap n02879718 bow n02883205 bow tie, bow-tie, bowtie n02892201 brass, memorial tablet, plaque n02892767 brassiere, bra, bandeau n02894605 breakwater, groin, groyne, mole, bulwark, seawall, jetty n02895154 breastplate, aegis, egis n02906734 broom n02909870 bucket, pail n02910353 buckle n02916936 bulletproof vest n02917067 bullet train, bullet n02927161 butcher shop, meat market n02930766 cab, hack, taxi, taxicab n02939185 caldron, cauldron n02948072 candle, taper, wax light n02950826 cannon n02951358 canoe n02951585 can opener, tin opener n02963159 cardigan n02965783 car mirror n02966193 carousel, carrousel, merry-go-round, roundabout, whirligig n02966687 carpenter's kit, tool kit n02971356 carton n02974003 car wheel n02977058 cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM n02978881 cassette n02979186 cassette player n02980441 castle n02981792 catamaran n02988304 CD player n02992211 cello, violoncello n02992529 cellular telephone, cellular phone, cellphone, cell, mobile phone n02999410 chain n03000134 chainlink fence n03000247 chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour n03000684 chain saw, chainsaw n03014705 chest n03016953 chiffonier, commode n03017168 chime, bell, gong n03018349 china cabinet, china closet 
n03026506 Christmas stocking n03028079 church, church building n03032252 cinema, movie theater, movie theatre, movie house, picture palace n03041632 cleaver, meat cleaver, chopper n03042490 cliff dwelling n03045698 cloak n03047690 clog, geta, patten, sabot n03062245 cocktail shaker n03063599 coffee mug n03063689 coffeepot n03065424 coil, spiral, volute, whorl, helix n03075370 combination lock n03085013 computer keyboard, keypad n03089624 confectionery, confectionary, candy store n03095699 container ship, containership, container vessel n03100240 convertible n03109150 corkscrew, bottle screw n03110669 cornet, horn, trumpet, trump n03124043 cowboy boot n03124170 cowboy hat, ten-gallon hat n03125729 cradle n03126707 crane n03127747 crash helmet n03127925 crate n03131574 crib, cot n03133878 Crock Pot n03134739 croquet ball n03141823 crutch n03146219 cuirass n03160309 dam, dike, dyke n03179701 desk n03180011 desktop computer n03187595 dial telephone, dial phone n03188531 diaper, nappy, napkin n03196217 digital clock n03197337 digital watch n03201208 dining table, board n03207743 dishrag, dishcloth n03207941 dishwasher, dish washer, dishwashing machine n03208938 disk brake, disc brake n03216828 dock, dockage, docking facility n03218198 dogsled, dog sled, dog sleigh n03220513 dome n03223299 doormat, welcome mat n03240683 drilling platform, offshore rig n03249569 drum, membranophone, tympan n03250847 drumstick n03255030 dumbbell n03259280 Dutch oven n03271574 electric fan, blower n03272010 electric guitar n03272562 electric locomotive n03290653 entertainment center n03291819 envelope n03297495 espresso maker n03314780 face powder n03325584 feather boa, boa n03337140 file, file cabinet, filing cabinet n03344393 fireboat n03345487 fire engine, fire truck n03347037 fire screen, fireguard n03355925 flagpole, flagstaff n03372029 flute, transverse flute n03376595 folding chair n03379051 football helmet n03384352 forklift n03388043 fountain n03388183 fountain pen n03388549 four-poster n03393912 freight car n03394916 French horn, horn n03400231 frying pan, frypan, skillet n03404251 fur coat n03417042 garbage truck, dustcart n03424325 gasmask, respirator, gas helmet n03425413 gas pump, gasoline pump, petrol pump, island dispenser n03443371 goblet n03444034 go-kart n03445777 golf ball n03445924 golfcart, golf cart n03447447 gondola n03447721 gong, tam-tam n03450230 gown n03452741 grand piano, grand n03457902 greenhouse, nursery, glasshouse n03459775 grille, radiator grille n03461385 grocery store, grocery, food market, market n03467068 guillotine n03476684 hair slide n03476991 hair spray n03478589 half track n03481172 hammer n03482405 hamper n03483316 hand blower, blow dryer, blow drier, hair dryer, hair drier n03485407 hand-held computer, hand-held microcomputer n03485794 handkerchief, hankie, hanky, hankey n03492542 hard disc, hard disk, fixed disk n03494278 harmonica, mouth organ, harp, mouth harp n03495258 harp n03496892 harvester, reaper n03498962 hatchet n03527444 holster n03529860 home theater, home theatre n03530642 honeycomb n03532672 hook, claw n03534580 hoopskirt, crinoline n03535780 horizontal bar, high bar n03538406 horse cart, horse-cart n03544143 hourglass n03584254 iPod n03584829 iron, smoothing iron n03590841 jack-o'-lantern n03594734 jean, blue jean, denim n03594945 jeep, landrover n03595614 jersey, T-shirt, tee shirt n03598930 jigsaw puzzle n03599486 jinrikisha, ricksha, rickshaw n03602883 joystick n03617480 kimono n03623198 knee pad n03627232 knot n03630383 lab coat, laboratory coat 
n03633091 ladle n03637318 lampshade, lamp shade n03642806 laptop, laptop computer n03649909 lawn mower, mower n03657121 lens cap, lens cover n03658185 letter opener, paper knife, paperknife n03661043 library n03662601 lifeboat n03666591 lighter, light, igniter, ignitor n03670208 limousine, limo n03673027 liner, ocean liner n03676483 lipstick, lip rouge n03680355 Loafer n03690938 lotion n03691459 loudspeaker, speaker, speaker unit, loudspeaker system, speaker system n03692522 loupe, jeweler's loupe n03697007 lumbermill, sawmill n03706229 magnetic compass n03709823 mailbag, postbag n03710193 mailbox, letter box n03710637 maillot n03710721 maillot, tank suit n03717622 manhole cover n03720891 maraca n03721384 marimba, xylophone n03724870 mask n03729826 matchstick n03733131 maypole n03733281 maze, labyrinth n03733805 measuring cup n03742115 medicine chest, medicine cabinet n03743016 megalith, megalithic structure n03759954 microphone, mike n03761084 microwave, microwave oven n03763968 military uniform n03764736 milk can n03769881 minibus n03770439 miniskirt, mini n03770679 minivan n03773504 missile n03775071 mitten n03775546 mixing bowl n03776460 mobile home, manufactured home n03777568 Model T n03777754 modem n03781244 monastery n03782006 monitor n03785016 moped n03786901 mortar n03787032 mortarboard n03788195 mosque n03788365 mosquito net n03791053 motor scooter, scooter n03792782 mountain bike, all-terrain bike, off-roader n03792972 mountain tent n03793489 mouse, computer mouse n03794056 mousetrap n03796401 moving van n03803284 muzzle n03804744 nail n03814639 neck brace n03814906 necklace n03825788 nipple n03832673 notebook, notebook computer n03837869 obelisk n03838899 oboe, hautboy, hautbois n03840681 ocarina, sweet potato n03841143 odometer, hodometer, mileometer, milometer n03843555 oil filter n03854065 organ, pipe organ n03857828 oscilloscope, scope, cathode-ray oscilloscope, CRO n03866082 overskirt n03868242 oxcart n03868863 oxygen mask n03871628 packet n03873416 paddle, boat paddle n03874293 paddlewheel, paddle wheel n03874599 padlock n03876231 paintbrush n03877472 pajama, pyjama, pj's, jammies n03877845 palace n03884397 panpipe, pandean pipe, syrinx n03887697 paper towel n03888257 parachute, chute n03888605 parallel bars, bars n03891251 park bench n03891332 parking meter n03895866 passenger car, coach, carriage n03899768 patio, terrace n03902125 pay-phone, pay-station n03903868 pedestal, plinth, footstall n03908618 pencil box, pencil case n03908714 pencil sharpener n03916031 perfume, essence n03920288 Petri dish n03924679 photocopier n03929660 pick, plectrum, plectron n03929855 pickelhaube n03930313 picket fence, paling n03930630 pickup, pickup truck n03933933 pier n03935335 piggy bank, penny bank n03937543 pill bottle n03938244 pillow n03942813 ping-pong ball n03944341 pinwheel n03947888 pirate, pirate ship n03950228 pitcher, ewer n03954731 plane, carpenter's plane, woodworking plane n03956157 planetarium n03958227 plastic bag n03961711 plate rack n03967562 plow, plough n03970156 plunger, plumber's helper n03976467 Polaroid camera, Polaroid Land camera n03976657 pole n03977966 police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria n03980874 poncho n03982430 pool table, billiard table, snooker table n03983396 pop bottle, soda bottle n03991062 pot, flowerpot n03992509 potter's wheel n03995372 power drill n03998194 prayer rug, prayer mat n04004767 printer n04005630 prison, prison house n04008634 projectile, missile n04009552 projector n04019541 puck, hockey puck 
n04023962 punching bag, punch bag, punching ball, punchball n04026417 purse n04033901 quill, quill pen n04033995 quilt, comforter, comfort, puff n04037443 racer, race car, racing car n04039381 racket, racquet n04040759 radiator n04041544 radio, wireless n04044716 radio telescope, radio reflector n04049303 rain barrel n04065272 recreational vehicle, RV, R.V. n04067472 reel n04069434 reflex camera n04070727 refrigerator, icebox n04074963 remote control, remote n04081281 restaurant, eating house, eating place, eatery n04086273 revolver, six-gun, six-shooter n04090263 rifle n04099969 rocking chair, rocker n04111531 rotisserie n04116512 rubber eraser, rubber, pencil eraser n04118538 rugby ball n04118776 rule, ruler n04120489 running shoe n04125021 safe n04127249 safety pin n04131690 saltshaker, salt shaker n04133789 sandal n04136333 sarong n04141076 sax, saxophone n04141327 scabbard n04141975 scale, weighing machine n04146614 school bus n04147183 schooner n04149813 scoreboard n04152593 screen, CRT screen n04153751 screw n04154565 screwdriver n04162706 seat belt, seatbelt n04179913 sewing machine n04192698 shield, buckler n04200800 shoe shop, shoe-shop, shoe store n04201297 shoji n04204238 shopping basket n04204347 shopping cart n04208210 shovel n04209133 shower cap n04209239 shower curtain n04228054 ski n04229816 ski mask n04235860 sleeping bag n04238763 slide rule, slipstick n04239074 sliding door n04243546 slot, one-armed bandit n04251144 snorkel n04252077 snowmobile n04252225 snowplow, snowplough n04254120 soap dispenser n04254680 soccer ball n04254777 sock n04258138 solar dish, solar collector, solar furnace n04259630 sombrero n04263257 soup bowl n04264628 space bar n04265275 space heater n04266014 space shuttle n04270147 spatula n04273569 speedboat n04275548 spider web, spider's web n04277352 spindle n04285008 sports car, sport car n04286575 spotlight, spot n04296562 stage n04310018 steam locomotive n04311004 steel arch bridge n04311174 steel drum n04317175 stethoscope n04325704 stole n04326547 stone wall n04328186 stopwatch, stop watch n04330267 stove n04332243 strainer n04335435 streetcar, tram, tramcar, trolley, trolley car n04336792 stretcher n04344873 studio couch, day bed n04346328 stupa, tope n04347754 submarine, pigboat, sub, U-boat n04350905 suit, suit of clothes n04355338 sundial n04355933 sunglass n04356056 sunglasses, dark glasses, shades n04357314 sunscreen, sunblock, sun blocker n04366367 suspension bridge n04367480 swab, swob, mop n04370456 sweatshirt n04371430 swimming trunks, bathing trunks n04371774 swing n04372370 switch, electric switch, electrical switch n04376876 syringe n04380533 table lamp n04389033 tank, army tank, armored combat vehicle, armoured combat vehicle n04392985 tape player n04398044 teapot n04399382 teddy, teddy bear n04404412 television, television system n04409515 tennis ball n04417672 thatch, thatched roof n04418357 theater curtain, theatre curtain n04423845 thimble n04428191 thresher, thrasher, threshing machine n04429376 throne n04435653 tile roof n04442312 toaster n04443257 tobacco shop, tobacconist shop, tobacconist n04447861 toilet seat n04456115 torch n04458633 totem pole n04461696 tow truck, tow car, wrecker n04462240 toyshop n04465501 tractor n04467665 trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi n04476259 tray n04479046 trench coat n04482393 tricycle, trike, velocipede n04483307 trimaran n04485082 tripod n04486054 triumphal arch n04487081 trolleybus, trolley coach, trackless trolley n04487394 trombone n04493381 
tub, vat n04501370 turnstile n04505470 typewriter keyboard n04507155 umbrella n04509417 unicycle, monocycle n04515003 upright, upright piano n04517823 vacuum, vacuum cleaner n04522168 vase n04523525 vault n04525038 velvet n04525305 vending machine n04532106 vestment n04532670 viaduct n04536866 violin, fiddle n04540053 volleyball n04542943 waffle iron n04548280 wall clock n04548362 wallet, billfold, notecase, pocketbook n04550184 wardrobe, closet, press n04552348 warplane, military plane n04553703 washbasin, handbasin, washbowl, lavabo, wash-hand basin n04554684 washer, automatic washer, washing machine n04557648 water bottle n04560804 water jug n04562935 water tower n04579145 whiskey jug n04579432 whistle n04584207 wig n04589890 window screen n04590129 window shade n04591157 Windsor tie n04591713 wine bottle n04592741 wing n04596742 wok n04597913 wooden spoon n04599235 wool, woolen, woollen n04604644 worm fence, snake fence, snake-rail fence, Virginia fence n04606251 wreck n04612504 yawl n04613696 yurt n06359193 web site, website, internet site, site n06596364 comic book n06785654 crossword puzzle, crossword n06794110 street sign n06874185 traffic light, traffic signal, stoplight n07248320 book jacket, dust cover, dust jacket, dust wrapper n07565083 menu n07579787 plate n07583066 guacamole n07584110 consomme n07590611 hot pot, hotpot n07613480 trifle n07614500 ice cream, icecream n07615774 ice lolly, lolly, lollipop, popsicle n07684084 French loaf n07693725 bagel, beigel n07695742 pretzel n07697313 cheeseburger n07697537 hotdog, hot dog, red hot n07711569 mashed potato n07714571 head cabbage n07714990 broccoli n07715103 cauliflower n07716358 zucchini, courgette n07716906 spaghetti squash n07717410 acorn squash n07717556 butternut squash n07718472 cucumber, cuke n07718747 artichoke, globe artichoke n07720875 bell pepper n07730033 cardoon n07734744 mushroom n07742313 Granny Smith n07745940 strawberry n07747607 orange n07749582 lemon n07753113 fig n07753275 pineapple, ananas n07753592 banana n07754684 jackfruit, jak, jack n07760859 custard apple n07768694 pomegranate n07802026 hay n07831146 carbonara n07836838 chocolate sauce, chocolate syrup n07860988 dough n07871810 meat loaf, meatloaf n07873807 pizza, pizza pie n07875152 potpie n07880968 burrito n07892512 red wine n07920052 espresso n07930864 cup n07932039 eggnog n09193705 alp n09229709 bubble n09246464 cliff, drop, drop-off n09256479 coral reef n09288635 geyser n09332890 lakeside, lakeshore n09399592 promontory, headland, head, foreland n09421951 sandbar, sand bar n09428293 seashore, coast, seacoast, sea-coast n09468604 valley, vale n09472597 volcano n09835506 ballplayer, baseball player n10148035 groom, bridegroom n10565667 scuba diver n11879895 rapeseed n11939491 daisy n12057211 yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum n12144580 corn n12267677 acorn n12620546 hip, rose hip, rosehip n12768682 buckeye, horse chestnut, conker n12985857 coral fungus n12998815 agaric n13037406 gyromitra n13040303 stinkhorn, carrion fungus n13044778 earthstar n13052670 hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa n13054560 bolete n13133613 ear, spike, capitulum n15075141 toilet tissue, toilet paper, bathroom tissue ```
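A small sketch for turning the mapping above into a `wnid -> [names]` lookup; it assumes the mapping is stored one `wnid names, ...` entry per line, as in the original synset file (`mapping.txt` is a hypothetical file name):

```python
def parse_mapping(text: str) -> dict[str, list[str]]:
    # Each line looks like: "n01440764 tench, Tinca tinca"
    mapping = {}
    for line in text.strip().splitlines():
        wnid, _, names = line.partition(" ")
        mapping[wnid] = [name.strip() for name in names.split(",")]
    return mapping

labels = parse_mapping(open("mapping.txt").read())
print(labels["n01440764"])  # ['tench', 'Tinca tinca']
```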
31,691
[ [ -0.0684814453125, -0.0155181884765625, 0.0204010009765625, 0.02587890625, -0.00916290283203125, 0.019317626953125, 0.01061248779296875, -0.0304718017578125, 0.05218505859375, -0.018829345703125, -0.0153045654296875, -0.03082275390625, -0.0601806640625, 0.036...
jamescalam/world-cities-geo
2022-04-29T18:34:46.000Z
[ "region:us" ]
jamescalam
null
null
4
9
2022-04-29T16:54:48
Dataset containing city, country, region, and continent alongside their longitude and latitude coordinates. Cartesian coordinates are provided as x, y, z features.
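As an illustration of the lat/lon-to-Cartesian relationship, here is a sketch assuming a unit sphere (the card does not state which radius or ellipsoid the x, y, z features actually use):

```python
import math

def to_cartesian(lat_deg: float, lon_deg: float, radius: float = 1.0):
    # Standard spherical-to-Cartesian conversion.
    lat, lon = math.radians(lat_deg), math.radians(lon_deg)
    return (
        radius * math.cos(lat) * math.cos(lon),  # x
        radius * math.cos(lat) * math.sin(lon),  # y
        radius * math.sin(lat),                  # z
    )
```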
165
[ [ -0.047943115234375, -0.00023555755615234375, 0.04620361328125, -0.006866455078125, -0.0228729248046875, 0.014404296875, 0.020660400390625, -0.0108642578125, 0.0404052734375, 0.0831298828125, -0.054107666015625, -0.07965087890625, -0.0021266937255859375, -0.0...
bigscience-data/roots_fr_wikipedia
2022-12-12T10:39:08.000Z
[ "language:fr", "license:cc-by-sa-3.0", "region:us" ]
bigscience-data
null
null
0
9
2022-05-18T09:13:52
--- language: fr license: cc-by-sa-3.0 extra_gated_prompt: 'By accessing this dataset, you agree to abide by the BigScience Ethical Charter. The charter can be found at: https://hf.co/spaces/bigscience/ethical-charter' extra_gated_fields: I have read and agree to abide by the BigScience Ethical Charter: checkbox --- ROOTS Subset: roots_fr_wikipedia # wikipedia - Dataset uid: `wikipedia` ### Description ### Homepage ### Licensing ### Speaker Locations ### Sizes - 3.2299 % of total - 4.2071 % of en - 5.6773 % of ar - 3.3416 % of fr - 5.2815 % of es - 12.4852 % of ca - 0.4288 % of zh - 0.4286 % of zh - 5.4743 % of indic-bn - 8.9062 % of indic-ta - 21.3313 % of indic-te - 4.4845 % of pt - 4.0493 % of indic-hi - 11.3163 % of indic-ml - 22.5300 % of indic-ur - 4.4902 % of vi - 16.9916 % of indic-kn - 24.7820 % of eu - 11.6241 % of indic-mr - 9.8749 % of id - 9.3489 % of indic-pa - 9.4767 % of indic-gu - 24.1132 % of indic-as - 5.3309 % of indic-or ### BigScience processing steps #### Filters applied to: en - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_1024 #### Filters applied to: ar - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: fr - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_1024 #### Filters applied to: es - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_1024 #### Filters applied to: ca - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_1024 #### Filters applied to: zh #### Filters applied to: zh #### Filters applied to: indic-bn - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-ta - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-te - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: pt - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-hi - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-ml - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-ur - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: vi - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-kn - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: eu - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs #### Filters applied to: indic-mr - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: id - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-pa - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-gu - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs - filter_small_docs_bytes_300 #### Filters applied to: indic-as - filter_wiki_user_titles - dedup_document - filter_remove_empty_docs #### Filters applied to: indic-or - filter_wiki_user_titles - dedup_document - 
filter_remove_empty_docs
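The filter names are fairly self-describing; a hypothetical reading of the two simplest ones is sketched below (the actual implementations live in the BigScience data tooling, so treat these as assumptions):

```python
def filter_remove_empty_docs(doc: str) -> bool:
    # Keep only documents with non-whitespace content.
    return len(doc.strip()) > 0

def filter_small_docs_bytes_1024(doc: str) -> bool:
    # Keep only documents whose UTF-8 encoding is at least 1024 bytes.
    return len(doc.encode("utf-8")) >= 1024
```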
3,635
[ [ -0.04522705078125, -0.03790283203125, 0.022308349609375, 0.01226806640625, -0.01349639892578125, -0.00566864013671875, -0.01465606689453125, -0.01087188720703125, 0.04541015625, 0.0214691162109375, -0.0526123046875, -0.05902099609375, -0.0460205078125, 0.032...
statworx/haiku
2022-07-02T13:25:45.000Z
[ "task_categories:text-generation", "task_ids:language-modeling", "multilinguality:monolingual", "size_categories:10K<n<100K", "language:en", "region:us" ]
statworx
null
null
1
9
2022-05-19T09:40:41
--- annotations_creators: [] language_creators: [] language: - en license: [] multilinguality: - monolingual pretty_name: Haiku size_categories: - 10K<n<100K source_datasets: [] task_categories: - text-generation task_ids: - language-modeling --- # Dataset Card for Haiku Data
279
[ [ -0.0029449462890625, 0.004306793212890625, -0.0041351318359375, 0.0113372802734375, -0.05230712890625, -0.000553131103515625, -0.0123138427734375, -0.006877899169921875, 0.026885986328125, 0.028076171875, -0.0194244384765625, -0.066650390625, -0.0308074951171875...
arize-ai/movie_reviews_with_context_drift
2022-07-01T17:26:12.000Z
[ "task_categories:text-classification", "task_ids:sentiment-classification", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:extended|imdb", "language:en", "license:mit", "region:us" ]
arize-ai
null
null
0
9
2022-05-20T23:25:49
--- annotations_creators: - expert-generated language_creators: - expert-generated language: - en license: - mit multilinguality: - monolingual pretty_name: sentiment-classification-reviews-with-drift size_categories: - 10K<n<100K source_datasets: - extended|imdb task_categories: - text-classification task_ids: - sentiment-classification --- # Dataset Card for `reviews_with_drift` ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description ### Dataset Summary This dataset was crafted to be used in our tutorial [Link to the tutorial when ready]. It consists of a large Movie Review Dataset mixed with some reviews from a Hotel Review Dataset. The training/validation sets are obtained purely from the Movie Review Dataset, while the production set is mixed. Some other features have been added (`age`, `gender`, `context`) as well as a made-up timestamp `prediction_ts` of when the inference took place. ### Supported Tasks and Leaderboards `text-classification`, `sentiment-classification`: The dataset is mainly used for text classification: given the text, predict the sentiment (positive or negative). ### Languages Text is mainly written in English. ## Dataset Structure ### Data Instances #### default An example of `training` looks as follows: ```json { 'prediction_ts': 1650092416.0, 'age': 44, 'gender': 'female', 'context': 'movies', 'text': "An interesting premise, and Billy Drago is always good as a dangerous nut-bag (side note: I'd love to see Drago, Stephen McHattie and Lance Hendrikson in a flick together; talk about raging cheekbones!). The soundtrack wasn't terrible, either.<br /><br />But the acting--even that of such professionals as Drago and Debbie Rochon--was terrible, the directing worse (perhaps contributory to the former), the dialog chimp-like, and the camera work, barely tolerable. Still, it was the SETS that got a big 10 on my oy-vey scale. I don't know where this was filmed, but were I to hazard a guess, it would be either an open-air museum, or one of those re-enactment villages, where everything is just a bit too well-kept to do more than suggest the real Old West. Okay, so it was shot on a college kid's budget. That said, I could have forgiven one or two of the aforementioned faults. But taken all together, and being generous, I could not see giving it more than three stars.", 'label': 0 } ``` ### Data Fields #### default The data fields are the same among all splits: - `prediction_ts`: a `float` feature. - `age`: an `int` feature. - `gender`: a `string` feature. - `context`: a `string` feature. - `text`: a `string` feature. - `label`: a `ClassLabel` feature, with possible values including negative (0) and positive (1). ### Data Splits | name |training|validation|production | |----------|-------:|---------:|----------:| | default | 9916 | 2479 | 40079 | ## Dataset Creation ### Curation Rationale [More Information Needed] ### Contributions Thanks to [@fjcasti1](https://github.com/fjcasti1) for adding this dataset.
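A minimal loading sketch, assuming the standard `datasets` API and the split names from the table above:

```python
from datasets import load_dataset

ds = load_dataset("arize-ai/movie_reviews_with_context_drift")
print(ds)                                       # training / validation / production
print(ds["training"].features["label"].names)   # ['negative', 'positive']
```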
4,123
[ [ -0.048828125, -0.03692626953125, 0.0184326171875, -0.01099395751953125, -0.025238037109375, 0.003063201904296875, -0.0216827392578125, -0.019256591796875, 0.0418701171875, 0.018768310546875, -0.07684326171875, -0.045013427734375, -0.0460205078125, 5.96046447...
blinoff/healthcare_facilities_reviews
2022-10-23T16:50:31.000Z
[ "task_categories:text-classification", "task_ids:sentiment-classification", "multilinguality:monolingual", "size_categories:10K<n<100K", "language:ru", "region:us" ]
blinoff
null
null
2
9
2022-05-25T10:48:13
--- language: - ru multilinguality: - monolingual size_categories: - 10K<n<100K task_categories: - text-classification task_ids: - sentiment-classification --- ### Dataset Summary The dataset contains user reviews about medical facilities. In total, it contains 70,597 reviews. The detailed distribution on the sentiment scale is: - 41,419 positive reviews; - 29,178 negative reviews. ### Data Fields Each sample contains the following fields: - **review_id**; - **category**: category of the medical facility (one of 48); - **title**: review title; - **content**: review text; - **sentiment**: sentiment (<em>positive</em> or <em>negative</em>); - **source_url**. ### Python ```python3 import pandas as pd df = pd.read_json('healthcare_facilities_reviews.jsonl', lines=True) df.sample(5) ```
784
[ [ -0.0295257568359375, -0.021636962890625, 0.032318115234375, 0.04132080078125, -0.0312347412109375, -0.0304412841796875, 0.006282806396484375, -0.00029087066650390625, 0.04046630859375, 0.050048828125, -0.0207672119140625, -0.0869140625, -0.040496826171875, 0...
khalidalt/ultimate_arabic_news
2022-06-15T14:46:10.000Z
[ "region:us" ]
khalidalt
null
null
1
9
2022-06-11T06:06:25
# Dataset Card for Ultimate Arabic News ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary The Ultimate Arabic News Dataset is a collection of single-label modern Arabic texts that are used in news websites and press articles. The Arabic news data was collected with web scraping techniques from many well-known news sites such as Al-Arabiya and Al-Youm Al-Sabea (Youm7), from news surfaced by the Google search engine, and from various other sources. ### Supported Tasks and Leaderboards [More Information Needed] ### Languages The dataset is in Modern Standard Arabic (`ar`). ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information license: cc-by-4.0 ### Citation Information ``` @misc{aldulaimi2022ultimate, author = {Al-Dulaimi, Ahmed Hashim}, title = {Ultimate Arabic News Dataset}, year = {2022}, month = {05}, publisher = {Mendeley Data, V1}, doi = {10.17632/jz56k5wxz7.1} } ``` ### Contributions [More Information Needed]
2,903
[ [ -0.033447265625, -0.036712646484375, 0.0005154609680175781, 0.0153656005859375, -0.029083251953125, 0.0135040283203125, -0.0091094970703125, -0.023193359375, 0.031402587890625, 0.029144287109375, -0.053009033203125, -0.09210205078125, -0.056610107421875, 0.0...
mounikaiiith/Telugu-Sarcasm
2022-07-04T15:06:49.000Z
[ "license:cc-by-4.0", "region:us" ]
mounikaiiith
null
null
0
9
2022-06-19T12:15:20
--- license: cc-by-4.0 --- Please cite the references below when using this dataset:

@article{marreddy2022resource,
  title={Am I a Resource-Poor Language? Data Sets, Embeddings, Models and Analysis for four different NLP tasks in Telugu Language},
  author={Marreddy, Mounika and Oota, Subba Reddy and Vakada, Lakshmi Sireesha and Chinni, Venkata Charan and Mamidi, Radhika},
  journal={Transactions on Asian and Low-Resource Language Information Processing},
  publisher={ACM New York, NY}
}

@article{marreddy2022multi,
  title={Multi-Task Text Classification using Graph Convolutional Networks for Large-Scale Low Resource Language},
  author={Marreddy, Mounika and Oota, Subba Reddy and Vakada, Lakshmi Sireesha and Chinni, Venkata Charan and Mamidi, Radhika},
  journal={arXiv preprint arXiv:2205.01204},
  year={2022}
}
816
[ [ -0.006435394287109375, -0.0261077880859375, 0.01239776611328125, 0.0222320556640625, -0.00684356689453125, -0.0212554931640625, -0.0182647705078125, -0.0086822509765625, 0.005008697509765625, 0.038055419921875, -0.012725830078125, -0.01117706298828125, -0.029571...
imvladikon/hebrew_news
2022-07-09T19:53:05.000Z
[ "task_categories:summarization", "task_ids:news-articles-summarization", "annotations_creators:no-annotation", "language_creators:other", "multilinguality:monolingual", "size_categories:100K<n<1M", "source_datasets:original", "language:he", "license:other", "region:us" ]
imvladikon
1
9
2022-06-19T16:19:53
--- annotations_creators: - no-annotation language_creators: - other language: - he license: - other multilinguality: - monolingual size_categories: - 100K<n<1M source_datasets: - original task_categories: - summarization task_ids: - news-articles-summarization --- ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description ``` id - article id articleBody - article main content description - short version of the article, description of the article headline - headline of the article title - title of the article ```
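A minimal sketch of building (document, summary) pairs for the summarization task, assuming the standard `datasets` API, a `train` split, and the field names listed above:

```python
from datasets import load_dataset

ds = load_dataset("imvladikon/hebrew_news")
sample = ds["train"][0]
document, summary = sample["articleBody"], sample["description"]
```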
1,542
[ [ -0.041656494140625, -0.0318603515625, 0.01605224609375, 0.023101806640625, -0.01320648193359375, 0.0209808349609375, -0.01334381103515625, -0.01522064208984375, 0.041961669921875, 0.0450439453125, -0.07073974609375, -0.08416748046875, -0.049224853515625, 0.0...
openclimatefix/gfs-surface-pressure-2.0deg
2022-06-28T18:38:27.000Z
[ "region:us" ]
openclimatefix
null
null
0
9
2022-06-22T22:11:25
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
smangrul/MuDoConv
2022-06-29T06:39:30.000Z
[ "license:cc-by-nc-4.0", "region:us" ]
smangrul
null
null
1
9
2022-06-24T06:05:04
--- license: cc-by-nc-4.0 --- Collated datasets from 10 sources and preprocessed them to have ["texts", "labels"] columns for training/finetuning sequence-to-sequence models such as T5, Blenderbot, etc. Below are the 10 datasets: 1. blended_skill_talk 2. conv_ai_2 3. empathetic_dialogues 4. wizard_of_wikipedia 5. meta_woz 6. multi_woz 7. spolin 8. dailydialog 9. cornell_movie_dialogues 10. taskmaster The data access and preprocessing code is [here](https://github.com/pacman100/accelerate-deepspeed-test/blob/main/src/data_preprocessing/DataPreprocessing.ipynb)
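A minimal sketch of consuming the ["texts", "labels"] columns for seq2seq fine-tuning; the tokenizer id and length limits below are assumptions for illustration, not part of the dataset:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("t5-small")  # assumed model choice

def preprocess(batch):
    # Encode the dialogue context; encode the target response as labels.
    enc = tok(batch["texts"], truncation=True, max_length=512)
    enc["labels"] = tok(
        text_target=batch["labels"], truncation=True, max_length=128
    )["input_ids"]
    return enc

# e.g. dataset.map(preprocess, batched=True) with a datasets.Dataset
```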
561
[ [ -0.04974365234375, -0.0458984375, 0.019989013671875, 0.01244354248046875, 0.0034008026123046875, 0.0220794677734375, 0.002414703369140625, -0.0148773193359375, -0.005634307861328125, 0.057281494140625, -0.07318115234375, -0.03564453125, -0.0380859375, 0.0308...
paren8esis/S4A
2023-10-24T08:15:34.000Z
[ "arxiv:2204.00951", "region:us" ]
paren8esis
Sen4AgriNet is a Sentinel-2 based time series multi-country benchmark dataset, tailored for agricultural monitoring applications with Machine and Deep Learning. It is annotated from farmer declarations collected via the Land Parcel Identification System (LPIS) for harmonizing country-wide labels. These declarations have only recently been made available as open data, allowing for the first time the labelling of satellite imagery from ground truth data. We proceed to propose and standardise a new crop type taxonomy across Europe that addresses Common Agriculture Policy (CAP) needs, based on the Food and Agriculture Organization (FAO) Indicative Crop Classification scheme. Sen4AgriNet is the only multi-country, multi-year dataset that includes all spectral information. It is constructed to cover the period 2016-2020 for Catalonia and France, while it can be extended to include additional countries.
@ARTICLE{ 9749916, author={Sykas, Dimitrios and Sdraka, Maria and Zografakis, Dimitrios and Papoutsis, Ioannis}, journal={IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, title={A Sentinel-2 multi-year, multi-country benchmark dataset for crop classification and segmentation with deep learning}, year={2022}, doi={10.1109/JSTARS.2022.3164771} }
7
9
2022-07-01T16:26:54
## Dataset Description - **Homepage:** [www.sen4agrinet.space.noa.gr](https://www.sen4agrinet.space.noa.gr/) - **Repository:** [github.com/Orion-AI-Lab/S4A](https://github.com/Orion-AI-Lab/S4A) - **Paper:** ["A Sentinel-2 multi-year, multi-country benchmark dataset for crop classification and segmentation with deep learning" (D. Sykas, M. Sdraka, D. Zografakis, I. Papoutsis)](https://arxiv.org/abs/2204.00951) ### Dataset Summary Sen4AgriNet is a Sentinel-2 based time series multi-country benchmark dataset, tailored for agricultural monitoring applications with Machine and Deep Learning. It is annotated from farmer declarations collected via the Land Parcel Identification System (LPIS) for harmonizing country-wide labels. These declarations have only recently been made available as open data, allowing for the first time the labelling of satellite imagery from ground truth data. We proceed to propose and standardise a new crop type taxonomy across Europe that addresses Common Agriculture Policy (CAP) needs, based on the Food and Agriculture Organization (FAO) Indicative Crop Classification scheme. Sen4AgriNet is the only multi-country, multi-year dataset that includes all spectral information. The current version covers the period 2019-2020 for Catalonia and France, while it can be extended to include additional countries. ### Languages All information in the dataset is in English (`en_GB`). ## Dataset Structure ### Data Instances A typical sample in Sen4AgriNet consists of the following fields: ``` { 'patch_full_name': '2019_31TCF_patch_10_14', 'patch_year': '2019', 'patch_name': 'patch_10_14', 'patch_country_code': 'ES', 'patch_tile': '31TCF', 'B01': array([...]), 'B02': array([...]), 'B03': array([...]), 'B04': array([...]), 'B05': array([...]), 'B06': array([...]), 'B07': array([...]), 'B08': array([...]), 'B09': array([...]), 'B10': array([...]), 'B11': array([...]), 'B12': array([...]), 'B8A': array([...]), 'parcels': array([...]), 'labels': array([...]), 'timestamp': [...] } ``` ### Data Fields Below we provide a brief explanation of each field: - `patch_full_name`: The full name of the patch. - `patch_year`: The year of the observations included in the patch. - `patch_name`: The name of the patch. It is of the form: `patch_xx_yy` where `xx` and `yy` are the indices of the patch inside the tile. - `patch_country_code`: The country code of the observations included in the patch. Currently it is either `ES` for Catalonia or `FR` for France. - `patch_tile`: The Sentinel-2 tile the patch belongs to. - `B01`, ..., `B8A`: Each one is an array containing the observations of the corresponding Sentinel-2 band. The shape of each array is (T, H, W) where T is the number of observations, H the height of the image and W the width of the image. - `parcels`: A mask containing the parcel code numbers. - `labels`: A mask containing the class codes for each crop in the taxonomy. - `timestamp`: The timestamps of the observations. ### Data Splits In this version of the dataset there are no predefined train/val/test splits, so that users can define their own. ### Data configurations The current version of Sen4AgriNet provides the following configurations: - `complete`: The complete Sen4AgriNet dataset. - `cat_2019`: Only Catalonia data for 2019. - `cat_2020`: Only Catalonia data for 2020. - `fr_2019`: Only France data for 2019. ## Dataset Creation ### Curation Rationale One of the major problems faced by researchers in the fields of Remote Sensing and AI is the absence of country-wide labelled data that are harmonized along space and time.
Specifically in the EU, the Common Agriculture Policy (CAP) has laid a stepping stone to overcome this issue by legally establishing Paying Agencies in each EU country which are responsible for distributing subsidies to farmers. In order to fulfill their objectives, Paying Agencies systematically collect the cultivated crop type and parcel geometries for every farmer and record them via the Land Parcel Identification System (LPIS) in a standardized way for each country. Unfortunately, public access to these farmer declarations has been restricted for several years, making it almost impossible to get country-wide ground truth data. However, since 2019 these datasets have for the first time gradually become open (e.g. in France, Catalonia, Estonia, Croatia, Slovenia, Slovakia and Luxembourg). This change offers a significant opportunity for the Earth Observation (EO) community to explore novel and innovative data-driven agricultural applications by exploiting this abundance of new LPIS information. In principle, the fusion of these LPIS data sources has tremendous potential, but there are still some barriers to overcome. First of all, the LPIS of each country is custom-configured to use the local language for crop types and a crop taxonomy that matches the local subsidy policy. This non-standardization of the labels prohibits the spatial generalization of Deep Learning (DL) models and thus needs to be carefully handled to achieve a common representation consistent among countries. On top of these contextual/semantic barriers, parcels are mapped in the corresponding national cartographic projection, which in all cases differs from the projection of the satellite images, posing an additional challenge for the preparation of a consistent, proper and at-scale DL-ready dataset. Aiming to overcome the above limitations, in this repository we offer Sen4AgriNet, a unique benchmark EO dataset for agricultural monitoring with the following key characteristics: - it is **pixel based** to capture spatial parcel variability - it is **multi-temporal** to capture the crop phenology phases - it is **multi-annual** to model the seasonal variability - it is **multi-country** to model the geographic spatial variability - it is **object-aggregated** to further incorporate ground truth data (parcel geometries) in the process - it is **modular** since it can be enlarged with parcels from more EU countries or expanded in a straightforward way to include additional sensor and non-EO data (e.g. meteorological data) ### Source Data 1) The LPIS data for the region of Catalonia for 2019–2020, provided by the "Agricultura, Ramaderia, Pesca i Alimentacio" under an Open Data Commons Attribution License. 2) The France LPIS data for 2019, provided by the French Paying Agency under an Open Data Commons Attribution License. 3) All Sentinel-2 L1C images with less than 10% cloud coverage for the above tiles. #### Initial Data Collection and Normalization The Sentinel-2 L1C images were downloaded from Copernicus and each image was split into 900 non-overlapping patches. A single patch contains 366x366 pixels for the 10-meter bands, 183x183 for the 20-meter bands and 61x61 for the 60-meter bands. The patch size was chosen so that the tile size is evenly divisible at all 3 spatial resolutions of Sentinel-2.
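As a quick sanity check of this geometry, the following sketch assumes the standard Sentinel-2 L1C tile dimensions of 10980x10980 pixels at 10 m, 5490x5490 at 20 m and 1830x1830 at 60 m:

```python
# Verify that the chosen patch sizes tile a Sentinel-2 granule exactly.
tile_px = {10: 10980, 20: 5490, 60: 1830}   # tile width in pixels per resolution (m)
patch_px = {10: 366, 20: 183, 60: 61}       # patch width in pixels per resolution (m)

for res, tile in tile_px.items():
    per_side, remainder = divmod(tile, patch_px[res])
    assert remainder == 0                   # integer division holds at every resolution
    print(f"{res} m bands: {per_side} x {per_side} = {per_side ** 2} patches")
# Every resolution yields a 30 x 30 grid, i.e. the 900 non-overlapping patches above.
```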
#### Annotation process The Indicative Crop Classification (ICC) scheme was developed by the United Nations FAO. It is an approach to produce a harmonized vocabulary and taxonomy for crops and plants that are used in food production. Sen4AgriNet adopts and customises an extended version of the FAO ICC in order to create a universally applicable crop label nomenclature for the collected LPIS data, with the following benefits: - A single language (English) is used for naming all classes across all participating countries. - Classes are normalized among different datasets. - A hierarchical class structure is adopted. Depending on the application, different levels of classes can be used. - Additional non-agricultural classes are used (e.g. "fallow land", "barren land", etc.) to model Remote Sensing spectral signatures, since agricultural parcels co-exist with other unrelated classes in satellite images. The presented custom FAO/CLC classification scheme has a total of 9 groups and 168 classes and sub-classes. Of these, 161 classes/sub-classes are crop related, 4 are major CLC classes (as sub-classes in this hierarchy), 2 are the fallow and barren lands, and 1 is the no-data sub-class. This crop taxonomy was used to create the `labels` mask. In addition, a second annotation mask is provided (`parcels`) where each parcel obtains a unique identifier, regardless of the crops cultivated in it. ### Personal and Sensitive Information None. ## Considerations for Using the Data ### Social Impact of Dataset We believe that Sen4AgriNet can be regarded as a labelled benchmark dataset, tailored for the CAP and the use of Sentinel-2 imagery that comes at no cost, and can spur numerous DL-based applications for crop type classification, parcel extraction, parcel counting and semantic segmentation. More importantly, the dataset can be extended to include other input data sources, including Sentinel-1 Synthetic Aperture Radar data and meteorological data, allowing a new family of applications on early warning risk assessment and agricultural insurance. ## Additional Information ### Licensing Information MIT License. ### Citation Information ``` @ARTICLE{ 9749916, author={Sykas, Dimitrios and Sdraka, Maria and Zografakis, Dimitrios and Papoutsis, Ioannis}, journal={IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, title={A Sentinel-2 multi-year, multi-country benchmark dataset for crop classification and segmentation with deep learning}, year={2022}, doi={10.1109/JSTARS.2022.3164771} } ```
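As a minimal loading sketch with the 🤗 `datasets` library, assuming the dataset is hosted on the Hugging Face Hub (the repo id below is a placeholder; see the Homepage/Repository links at the top of this card for the authoritative source):

```python
from datasets import load_dataset

# Hypothetical repo id -- substitute the actual Hub id of Sen4AgriNet.
repo_id = "org-name/sen4agrinet"

# "cat_2019" is one of the configurations listed under "Data configurations";
# streaming avoids downloading the full archive up front.
ds = load_dataset(repo_id, "cat_2019", split="train", streaming=True)

sample = next(iter(ds))
print(sample["patch_full_name"])   # e.g. '2019_31TCF_patch_10_14'
print(len(sample["timestamp"]))    # number of observations T for this patch
# Each band, e.g. sample["B02"], is a (T, H, W) array of observations.
```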
9,657
[ [ -0.03863525390625, -0.0156707763671875, 0.03741455078125, -0.00995635986328125, 0.0032291412353515625, -0.0092010498046875, -0.01450347900390625, -0.043792724609375, 0.004016876220703125, 0.026641845703125, -0.03338623046875, -0.0672607421875, -0.048187255859375...
MicPie/unpredictable_dummies-com
2022-08-04T20:04:46.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-03T09:42:45
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-dummies-com size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-dummies-com" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': URL of the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts.
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
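To make the task format described under "Data Instances" above concrete, a single record in a task's JSON Lines file might look like the following; all field values here are invented for illustration and do not come from the actual dummies-com subset:

```json
{
  "task": "dummies-com_example_task",
  "input": "[Key] F5 [Action]",
  "options": ["Refresh the page", "Open a new tab", "Print the document"],
  "output": "Refresh the page",
  "pageTitle": "Keyboard Shortcuts Cheat Sheet - dummies",
  "outputColName": "Action",
  "url": "https://www.dummies.com/example",
  "wdcFile": "example.warc"
}
```

Several such records extracted from the same table share a "task" value and can be concatenated into a single few-shot prompt.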
14,801
[ [ -0.04083251953125, -0.040985107421875, 0.032012939453125, 0.0224151611328125, 0.00665283203125, 0.01154327392578125, -0.009063720703125, -0.04058837890625, 0.037109375, 0.021820068359375, -0.0738525390625, -0.047821044921875, -0.04681396484375, 0.01565551757...
MicPie/unpredictable_studystack-com
2022-08-04T20:15:01.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-03T10:23:52
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-studystack-com size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-studystack-com" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': URL of the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts.
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
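To make the task format described under "Data Instances" above concrete, a single record in a task's JSON Lines file might look like the following; all field values here are invented for illustration and do not come from the actual studystack-com subset:

```json
{
  "task": "studystack-com_example_task",
  "input": "[Term] Mitochondrion [Definition]",
  "options": ["Site of protein synthesis", "Produces the cell's ATP", "Stores genetic material"],
  "output": "Produces the cell's ATP",
  "pageTitle": "Cell Biology Flashcards - StudyStack",
  "outputColName": "Definition",
  "url": "https://www.studystack.com/example",
  "wdcFile": "example.warc"
}
```

Several such records extracted from the same table share a "task" value and can be concatenated into a single few-shot prompt.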
14,807
[ [ -0.0390625, -0.040802001953125, 0.031768798828125, 0.0218048095703125, 0.005725860595703125, 0.01131439208984375, -0.00933074951171875, -0.042816162109375, 0.0390625, 0.0210723876953125, -0.07281494140625, -0.046051025390625, -0.045867919921875, 0.0154190063...
MicPie/unpredictable_sittercity-com
2022-08-04T20:13:09.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-03T10:37:38
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-sittercity-com size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-sittercity-com" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': URL of the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts.
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
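To make the task format described under "Data Instances" above concrete, a single record in a task's JSON Lines file might look like the following; all field values here are invented for illustration and do not come from the actual sittercity-com subset:

```json
{
  "task": "sittercity-com_example_task",
  "input": "[City] Chicago [State]",
  "options": ["IL", "NY", "TX"],
  "output": "IL",
  "pageTitle": "Find Babysitters Near You - Sittercity",
  "outputColName": "State",
  "url": "https://www.sittercity.com/example",
  "wdcFile": "example.warc"
}
```

Several such records extracted from the same table share a "task" value and can be concatenated into a single few-shot prompt.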
14,807
[ [ -0.039398193359375, -0.038970947265625, 0.03338623046875, 0.02386474609375, 0.0057830810546875, 0.0106964111328125, -0.0101776123046875, -0.044097900390625, 0.0389404296875, 0.0206451416015625, -0.0731201171875, -0.04681396484375, -0.0447998046875, 0.0146942...
embedding-data/coco_captions_quintets
2022-08-02T02:18:54.000Z
[ "task_categories:sentence-similarity", "task_ids:semantic-similarity-classification", "language:en", "license:mit", "arxiv:1405.0312", "region:us" ]
embedding-data
null
null
3
9
2022-07-07T23:12:19
--- license: mit language: - en paperswithcode_id: embedding-data/coco_captions pretty_name: coco_captions task_categories: - sentence-similarity - paraphrase-mining task_ids: - semantic-similarity-classification --- # Dataset Card for "coco_captions" ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [https://cocodataset.org/#home](https://cocodataset.org/#home) - **Repository:** [https://github.com/cocodataset/cocodataset.github.io](https://github.com/cocodataset/cocodataset.github.io) - **Paper:** [Microsoft COCO: Common Objects in Context](https://arxiv.org/abs/1405.0312) - **Point of Contact:** [info@cocodataset.org](info@cocodataset.org) - **Size of downloaded dataset files:** - **Size of the generated dataset:** - **Total amount of disk used:** 6.32 MB ### Dataset Summary COCO is a large-scale object detection, segmentation, and captioning dataset. This repo contains five captions per image, which is useful for sentence similarity tasks. Disclaimer: The team releasing COCO did not upload the dataset to the Hub and did not write a dataset card. These steps were done by the Hugging Face team. ### Supported Tasks - [Sentence Transformers](https://huggingface.co/sentence-transformers) training; useful for semantic search and sentence similarity. ### Languages - English. ## Dataset Structure Each example in the dataset contains a quintet of similar sentences and is formatted as a dictionary with the key "set" and a list of the sentences as the value: ``` {"set": [sentence_1, sentence_2, sentence_3, sentence_4, sentence_5]} {"set": [sentence_1, sentence_2, sentence_3, sentence_4, sentence_5]} ... {"set": [sentence_1, sentence_2, sentence_3, sentence_4, sentence_5]} ``` This dataset is useful for training Sentence Transformers models. Refer to the following post on how to train models using similar pairs of sentences.
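For instance, a minimal fine-tuning sketch with the Sentence Transformers library; the base model and the pairing strategy here are illustrative assumptions, not a prescribed recipe:

```python
from datasets import load_dataset
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

dataset = load_dataset("embedding-data/coco_captions_quintets", split="train")

# Every caption in a quintet describes the same image, so any two of them
# can serve as a positive (anchor, positive) pair for contrastive training.
train_examples = [
    InputExample(texts=[quintet[0], quintet[1]])
    for quintet in dataset["set"]
]

model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed base model
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=32)
train_loss = losses.MultipleNegativesRankingLoss(model)

model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1)
```

MultipleNegativesRankingLoss treats the other pairs in a batch as negatives, which is why positive pairs alone are sufficient here.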
### Usage Example Install the 🤗 Datasets library with `pip install datasets` and load the dataset from the Hub with: ```python from datasets import load_dataset dataset = load_dataset("embedding-data/coco_captions_quintets") ``` The dataset is loaded as a `DatasetDict` and has the format: ```python DatasetDict({ train: Dataset({ features: ['set'], num_rows: 82783 }) }) ``` Review an example `i` with: ```python dataset["train"][i]["set"] ``` ### Data Instances [More Information Needed](https://cocodataset.org/#format-data) ### Data Splits [More Information Needed](https://cocodataset.org/#format-data) ## Dataset Creation ### Curation Rationale [More Information Needed](https://cocodataset.org/#home) ### Source Data #### Initial Data Collection and Normalization [More Information Needed](https://cocodataset.org/#home) #### Who are the source language producers? [More Information Needed](https://cocodataset.org/#home) ### Annotations #### Annotation process [More Information Needed](https://cocodataset.org/#home) #### Who are the annotators? [More Information Needed](https://cocodataset.org/#home) ### Personal and Sensitive Information [More Information Needed](https://cocodataset.org/#home) ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed](https://cocodataset.org/#home) ### Discussion of Biases [More Information Needed](https://cocodataset.org/#home) ### Other Known Limitations [More Information Needed](https://cocodataset.org/#home) ## Additional Information ### Dataset Curators [More Information Needed](https://cocodataset.org/#home) ### Licensing Information The annotations in this dataset along with this website belong to the COCO Consortium and are licensed under a [Creative Commons Attribution 4.0 License](https://creativecommons.org/licenses/by/4.0/legalcode) ### Citation Information [More Information Needed](https://cocodataset.org/#home) ### Contributions Thanks to: - Tsung-Yi Lin - Google Brain - Genevieve Patterson - MSR, Trash TV - Matteo R. Ronchi - Caltech - Yin Cui - Google - Michael Maire - TTI-Chicago - Serge Belongie - Cornell Tech - Lubomir Bourdev - WaveOne, Inc. - Ross Girshick - FAIR - James Hays - Georgia Tech - Pietro Perona - Caltech - Deva Ramanan - CMU - Larry Zitnick - FAIR - Piotr Dollár - FAIR for adding this dataset.
5,262
[ [ -0.033447265625, -0.04327392578125, 0.004100799560546875, 0.0386962890625, -0.0195159912109375, 0.0216064453125, -0.027069091796875, -0.024932861328125, 0.039764404296875, 0.042388916015625, -0.052825927734375, -0.055755615234375, -0.04937744140625, 0.022720...
MicPie/unpredictable_cluster-noise
2022-08-04T19:42:06.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T17:15:02
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster-noise size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster-noise" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** [Few-shot Adaptation Works with UnpredicTable Data](https://arxiv.org/abs/2208.01009) - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples, compared to most current NLP datasets, which are very deep, i.e., tens of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields - 'task': task identifier - 'input': column elements of a specific row in the table. - 'options': for multiple choice classification, it provides the options to choose from. 
- 'output': target column element of the same row as the input. - 'pageTitle': the title of the page containing the table. - 'outputColName': the name of the output column. - 'url': the URL of the website containing the table. - 'wdcFile': the source file in the WDC Web Table Corpus. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
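As a brief usage sketch for the card above: the snippet below loads this subset and inspects the fields the card describes. It assumes the subset exposes a single `train` split and the top-level field names listed under Data Fields; both are assumptions based on the card, not verified properties of the loader.

```python
from datasets import load_dataset

# Load the cluster-noise subset from the Hub.
dataset = load_dataset("MicPie/unpredictable_cluster-noise", split="train")

example = dataset[0]
print(example["task"])     # task identifier
print(example["input"])    # column elements of one table row
print(example["options"])  # candidate classes (multiple-choice tasks only)
print(example["output"])   # target column element of the same row
```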
14,805
[ [ -0.041961669921875, -0.041748046875, 0.033050537109375, 0.0233001708984375, 0.00588226318359375, 0.01039886474609375, -0.0104522705078125, -0.044281005859375, 0.037017822265625, 0.0200042724609375, -0.07281494140625, -0.047515869140625, -0.045806884765625, 0...
MicPie/unpredictable_cluster01
2022-08-04T19:43:16.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T17:17:31
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster01 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster01" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** [Few-shot Adaptation Works with UnpredicTable Data](https://arxiv.org/abs/2208.01009) - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples, compared to most current NLP datasets, which are very deep, i.e., tens of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields - 'task': task identifier - 'input': column elements of a specific row in the table. - 'options': for multiple choice classification, it provides the options to choose from. 
- 'output': target column element of the same row as the input. - 'pageTitle': the title of the page containing the table. - 'outputColName': the name of the output column. - 'url': the URL of the website containing the table. - 'wdcFile': the source file in the WDC Web Table Corpus. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
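The card above notes that a task's examples "can be concatenated as a few-shot task". A minimal sketch of such concatenation is shown below; the "Answer:" delimiter and the blank-line separator are illustrative choices, not the exact prompt format used in the paper, and `task_examples` is a hypothetical list of card-style dicts.

```python
def build_few_shot_prompt(demonstrations, query_input):
    """Concatenate (input, output) demonstrations followed by a query.

    `demonstrations` is an iterable of dicts with 'input' and 'output'
    keys, mirroring the fields described in the dataset card.
    """
    parts = [f"{ex['input']}\nAnswer: {ex['output']}" for ex in demonstrations]
    parts.append(f"{query_input}\nAnswer:")
    return "\n\n".join(parts)

# Hypothetical usage with three demonstrations drawn from one task:
# prompt = build_few_shot_prompt(task_examples[:3], task_examples[3]["input"])
```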
14,797
[ [ -0.0413818359375, -0.040740966796875, 0.033447265625, 0.0232086181640625, 0.00592803955078125, 0.01047515869140625, -0.009521484375, -0.0428466796875, 0.0384521484375, 0.0198822021484375, -0.072998046875, -0.048187255859375, -0.046478271484375, 0.01364898681...
MicPie/unpredictable_cluster11
2022-08-04T19:50:50.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T17:19:16
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster11 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster11" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** [Few-shot Adaptation Works with UnpredicTable Data](https://arxiv.org/abs/2208.01009) - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples, compared to most current NLP datasets, which are very deep, i.e., tens of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields - 'task': task identifier - 'input': column elements of a specific row in the table. - 'options': for multiple choice classification, it provides the options to choose from. 
- 'output': target column element of the same row as the input. - 'pageTitle': the title of the page containing the table. - 'outputColName': the name of the output column. - 'url': the URL of the website containing the table. - 'wdcFile': the source file in the WDC Web Table Corpus. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
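For the multiple-choice tasks described in the card above, one common way to use the 'options' field is to score each option by the log-probability a causal language model assigns to it after the prompt, then pick the highest-scoring one. The sketch below uses `gpt2` purely as a small stand-in model; it also tokenizes the prompt and the full string separately, which can shift token boundaries for some tokenizers, so treat it as a simplification rather than the paper's evaluation code.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # small stand-in model
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.eval()

def score_option(prompt: str, option: str) -> float:
    """Total log-probability of `option` following `prompt` (higher is better)."""
    prompt_ids = tokenizer(prompt, return_tensors="pt").input_ids
    full_ids = tokenizer(prompt + " " + option, return_tensors="pt").input_ids
    with torch.no_grad():
        logits = model(full_ids).logits
    # Row i of log_probs is the distribution over the token at position i + 1.
    log_probs = torch.log_softmax(logits[0, :-1], dim=-1)
    option_ids = full_ids[0, prompt_ids.shape[1]:]
    positions = range(prompt_ids.shape[1] - 1, full_ids.shape[1] - 1)
    return sum(log_probs[pos, tok].item() for pos, tok in zip(positions, option_ids))

# Hypothetical usage, given a few-shot `prompt` and a card-style `example`:
# prediction = max(example["options"], key=lambda o: score_option(prompt, o))
```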
14,797
[ [ -0.0416259765625, -0.039398193359375, 0.03369140625, 0.0223388671875, 0.006549835205078125, 0.0118408203125, -0.0104217529296875, -0.043243408203125, 0.037841796875, 0.0200653076171875, -0.07196044921875, -0.0484619140625, -0.047332763671875, 0.0140838623046...
MicPie/unpredictable_cluster13
2022-08-04T19:52:42.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T17:23:44
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster13 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster13" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** [Few-shot Adaptation Works with UnpredicTable Data](https://arxiv.org/abs/2208.01009) - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples, compared to most current NLP datasets, which are very deep, i.e., tens of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields - 'task': task identifier - 'input': column elements of a specific row in the table. - 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
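To make the record format described in the card above concrete, here is a minimal sketch of a single few-shot example, assuming the field layout from the Data Instances and Data Fields sections; every value below is invented purely for illustration:

```
import json

# Hypothetical UnpredicTable example following the card's field description.
# All values are made up for illustration; only the field names come from the card.
line = json.dumps({
    "task": "prediction_Genre_example-com.jsonl",  # task identifier (invented)
    "input": "[Year] 2010 [Title] Inception",      # column elements of one table row
    "options": ["Action", "Drama", "Sci-Fi"],      # candidate classes (multiple choice)
    "output": "Sci-Fi",                            # target column element of the same row
    "pageTitle": "List of films",
    "outputColName": "Genre",
    "url": "https://example.com/films",
    "wdcFile": "1438042986423.35.warc.gz",         # WDC corpus source file (invented)
})

example = json.loads(line)                         # one line of a task's jsonl file
print(example["input"], "->", example["output"])
```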
14,797
[ [ -0.0413818359375, -0.04022216796875, 0.033660888671875, 0.023834228515625, 0.005889892578125, 0.010833740234375, -0.0105743408203125, -0.042816162109375, 0.0372314453125, 0.01934814453125, -0.073486328125, -0.0484619140625, -0.046844482421875, 0.014335632324...
MicPie/unpredictable_cluster14
2022-08-04T19:53:18.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T17:29:56
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster14 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster14" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. Our dataset is very wide: it contains thousands of tasks, each with only a few examples, whereas most current NLP datasets are deep, with tens of tasks and many examples per task. This means our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is stored as a JSON Lines (jsonl) file and consists of several few-shot examples. Each example is a dictionary containing a 'task' field, which identifies the task, followed by 'input', 'options', and 'output' fields. The 'input' field contains several column elements of the same table row, while the 'output' field is the target, representing an individual column of that row. Each task contains several such examples, which can be concatenated into a few-shot prompt. In the case of multiple-choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', and 'wdcFile'. ### Data Fields 'task': the task identifier. 'input': the column elements of a specific row in the table. 'options': for multiple-choice classification, the options to choose from. 
'output': the target column element of the same row as the input. 'pageTitle': the title of the page containing the table. 'outputColName': the name of the output column. 'url': the URL of the web page containing the table. 'wdcFile': the source file in the WDC Web Table Corpus. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis of the biases present in our dataset, nor have we explicitly filtered the content. A model trained on our dataset may therefore reflect the harmful biases and toxic text that exist in it. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
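As a sketch of the intended fine-tuning/few-shot use described in the card above, the snippet below loads this subset with the Hugging Face `datasets` library and concatenates examples from one task into a few-shot prompt. The `train` split name and the exact per-example fields are assumptions based on the card, not verified against the repository:

```
from datasets import load_dataset

# Load the cluster14 subset (split name "train" is an assumption).
ds = load_dataset("MicPie/unpredictable_cluster14", split="train")

# Gather a handful of examples belonging to the same task.
task_id = ds[0]["task"]
shots = [ex for ex in ds if ex["task"] == task_id][:4]

# Concatenate all but the last example as in-context demonstrations.
prompt = "\n\n".join(
    f"Input: {ex['input']}\nOutput: {ex['output']}" for ex in shots[:-1]
)
prompt += f"\n\nInput: {shots[-1]['input']}\nOutput:"
print(prompt)
```

Keeping all demonstrations from a single task mirrors the per-task few-shot setup the card describes, where one jsonl file corresponds to one task.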
14,797
[ [ -0.041748046875, -0.03961181640625, 0.033447265625, 0.0233612060546875, 0.005970001220703125, 0.0107574462890625, -0.01044464111328125, -0.042510986328125, 0.036773681640625, 0.0201263427734375, -0.07318115234375, -0.049041748046875, -0.04644775390625, 0.014...
MicPie/unpredictable_cluster15
2022-08-04T19:54:04.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T17:31:11
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster15 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster15" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. Our dataset is very wide: it contains thousands of tasks, each with only a few examples, whereas most current NLP datasets are deep, with tens of tasks and many examples per task. This means our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is stored as a JSON Lines (jsonl) file and consists of several few-shot examples. Each example is a dictionary containing a 'task' field, which identifies the task, followed by 'input', 'options', and 'output' fields. The 'input' field contains several column elements of the same table row, while the 'output' field is the target, representing an individual column of that row. Each task contains several such examples, which can be concatenated into a few-shot prompt. In the case of multiple-choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', and 'wdcFile'. ### Data Fields 'task': the task identifier. 'input': the column elements of a specific row in the table. 'options': for multiple-choice classification, the options to choose from. 
'output': the target column element of the same row as the input. 'pageTitle': the title of the page containing the table. 'outputColName': the name of the output column. 'url': the URL of the web page containing the table. 'wdcFile': the source file in the WDC Web Table Corpus. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis of the biases present in our dataset, nor have we explicitly filtered the content. A model trained on our dataset may therefore reflect the harmful biases and toxic text that exist in it. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
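The tables-to-tasks conversion itself is only summarized in these cards (the publication has the details), but the general idea can be illustrated with a toy sketch: hold out one column of a relational table as the prediction target and serialize the remaining columns as the input. This is a simplification for illustration, not the authors' actual pipeline:

```
# Toy illustration of turning a relational table into few-shot examples.
# The real UnpredicTable pipeline (see the publication) involves more steps,
# e.g. table filtering and task quality heuristics.
table = {
    "Year":  ["2010", "1999"],
    "Title": ["Inception", "The Matrix"],
    "Genre": ["Sci-Fi", "Sci-Fi"],
}

def rows_to_examples(table, output_col):
    n_rows = len(next(iter(table.values())))
    examples = []
    for i in range(n_rows):
        input_parts = [
            f"[{col}] {values[i]}"
            for col, values in table.items()
            if col != output_col
        ]
        examples.append({
            "input": " ".join(input_parts),
            "output": table[output_col][i],
            "outputColName": output_col,
        })
    return examples

print(rows_to_examples(table, "Genre"))
```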
14,797
[ [ -0.0421142578125, -0.040985107421875, 0.033233642578125, 0.0234222412109375, 0.006420135498046875, 0.0115814208984375, -0.01027679443359375, -0.043121337890625, 0.03680419921875, 0.0198516845703125, -0.07305908203125, -0.048797607421875, -0.0455322265625, 0....
MicPie/unpredictable_cluster16
2022-08-04T19:54:44.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T17:32:41
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster16 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster16" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. Our dataset is very wide: it contains thousands of tasks, each with only a few examples, whereas most current NLP datasets are deep, with tens of tasks and many examples per task. This means our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is stored as a JSON Lines (jsonl) file and consists of several few-shot examples. Each example is a dictionary containing a 'task' field, which identifies the task, followed by 'input', 'options', and 'output' fields. The 'input' field contains several column elements of the same table row, while the 'output' field is the target, representing an individual column of that row. Each task contains several such examples, which can be concatenated into a few-shot prompt. In the case of multiple-choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', and 'wdcFile'. ### Data Fields 'task': the task identifier. 'input': the column elements of a specific row in the table. 'options': for multiple-choice classification, the options to choose from. 
'output': the target column element of the same row as the input. 'pageTitle': the title of the page containing the table. 'outputColName': the name of the output column. 'url': the URL of the web page containing the table. 'wdcFile': the source file in the WDC Web Table Corpus. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis of the biases present in our dataset, nor have we explicitly filtered the content. A model trained on our dataset may therefore reflect the harmful biases and toxic text that exist in it. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.04107666015625, -0.040283203125, 0.03350830078125, 0.0216827392578125, 0.0058746337890625, 0.011749267578125, -0.01031494140625, -0.0435791015625, 0.037139892578125, 0.0195159912109375, -0.0728759765625, -0.04876708984375, -0.0458984375, 0.013824462890625...
MicPie/unpredictable_cluster18
2022-08-04T19:55:58.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T17:51:30
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster18 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster18" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., tens of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines (jsonl) file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. (An illustrative record and prompt-assembly sketch follows this card.) ### Data Fields 'task': task identifier. 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name. 'url': URL of the website containing the table. 'wdcFile': WDC Web Table Corpus file. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was carried out only for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
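To make the 'Data Instances' and 'Data Fields' descriptions in the card above concrete, here is a minimal sketch of how a task file could be written and turned into a few-shot prompt. The field names follow the card, but the task identifier, table values, file name, and prompt template are all invented for illustration; the concrete values in the actual dataset files will differ.

```
import json

# Two hypothetical examples from one task, following the schema described
# in the card ('task', 'input', 'options', 'output', plus metadata fields).
# All values here are invented for illustration.
records = [
    {
        "task": "example_task_id",
        "input": "[City] Berlin [Country] Germany",
        "options": ["Europe", "Asia", "Africa"],
        "output": "Europe",
        "pageTitle": "Example page",
        "title": "Example table",
        "outputColName": "Continent",
        "url": "https://example.com/table",
        "wdcFile": "example.json.gz",
    },
    {
        "task": "example_task_id",
        "input": "[City] Tokyo [Country] Japan",
        "options": ["Europe", "Asia", "Africa"],
        "output": "Asia",
        "pageTitle": "Example page",
        "title": "Example table",
        "outputColName": "Continent",
        "url": "https://example.com/table",
        "wdcFile": "example.json.gz",
    },
]

# Each task is a JSON Lines file: one example per line.
with open("example_task.jsonl", "w", encoding="utf-8") as f:
    for rec in records:
        f.write(json.dumps(rec) + "\n")

# Read the task back and concatenate its examples into a few-shot prompt,
# holding out the last example's 'output' as the target to predict.
with open("example_task.jsonl", encoding="utf-8") as f:
    examples = [json.loads(line) for line in f]

demonstrations = [
    f"Input: {ex['input']}\nOutput: {ex['output']}" for ex in examples[:-1]
]
query = f"Input: {examples[-1]['input']}\nOutput:"
prompt = "\n\n".join(demonstrations + [query])

print(prompt)                             # few-shot prompt for a language model
print("target:", examples[-1]["output"])  # held-out answer
```

Holding out the final example's 'output' is what turns the concatenated examples into a few-shot prompt, matching the card's note that the examples of a task can be concatenated as a few-shot task.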
14,797
[ [ -0.0411376953125, -0.0404052734375, 0.03314208984375, 0.022613525390625, 0.005878448486328125, 0.01129913330078125, -0.01027679443359375, -0.0435791015625, 0.037200927734375, 0.020477294921875, -0.0728759765625, -0.048431396484375, -0.04638671875, 0.01425933...
MicPie/unpredictable_cluster19
2022-08-04T19:56:35.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T18:23:12
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster19 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster19" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., tens of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines (jsonl) file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier. 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name. 'url': URL of the website containing the table. 'wdcFile': WDC Web Table Corpus file. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was carried out only for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.041046142578125, -0.04095458984375, 0.03289794921875, 0.02325439453125, 0.00589752197265625, 0.0117034912109375, -0.01021575927734375, -0.042999267578125, 0.03753662109375, 0.019775390625, -0.0731201171875, -0.04730224609375, -0.046051025390625, 0.0135421...
MicPie/unpredictable_cluster02
2022-08-04T19:44:14.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T18:25:06
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster02 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster02" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., tens of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines (jsonl) file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier. 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name. 'url': URL of the website containing the table. 'wdcFile': WDC Web Table Corpus file. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was carried out only for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.039398193359375, -0.04010009765625, 0.03338623046875, 0.0232086181640625, 0.0061798095703125, 0.01079559326171875, -0.00994873046875, -0.043701171875, 0.0364990234375, 0.0199737548828125, -0.07183837890625, -0.046417236328125, -0.0474853515625, 0.01228332...
MicPie/unpredictable_cluster20
2022-08-04T19:57:20.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T18:26:06
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster20 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster20" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., tens of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines (jsonl) file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier. 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name. 'url': URL of the website containing the table. 'wdcFile': WDC Web Table Corpus file. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was carried out only for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.04193115234375, -0.040069580078125, 0.033660888671875, 0.0236968994140625, 0.00681304931640625, 0.01143646240234375, -0.01006317138671875, -0.04302978515625, 0.03631591796875, 0.0208892822265625, -0.07281494140625, -0.0484619140625, -0.046600341796875, 0....
MicPie/unpredictable_cluster21
2022-08-04T19:57:54.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T18:27:34
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster21 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster21" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples; most current NLP datasets are, in contrast, very deep, i.e., tens of tasks with many examples each. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines (jsonl) file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by 'input', 'options', and 'output' fields. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target representing an individual column of that row. Each task contains several such examples, which can be concatenated into a few-shot task (see the usage sketch after the citation at the end of this card). In the case of multiple-choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields * 'task': task identifier * 'input': column elements of a specific row in the table. * 'options': for multiple-choice classification, the possible options to choose from. 
* 'output': target column element of the same row as the input. * 'pageTitle': the title of the page containing the table. * 'outputColName': the name of the output column. * 'url': the URL of the website containing the table. * 'wdcFile': the source file in the WDC Web Table Corpus. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was carried out only for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may reflect the harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
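As referenced in the "Data Instances" section above, here is a minimal sketch of loading this subset and inspecting the fields with the Hugging Face `datasets` library. This is an illustrative sketch, not part of the original card: the split name "train" is an assumption, and the comments simply restate the field descriptions from "Data Fields".

```python
from datasets import load_dataset

# Load the cluster21 subset (split name assumed to be "train").
ds = load_dataset("MicPie/unpredictable_cluster21", split="train")

# Inspect one few-shot example; field names follow the "Data Fields" section.
example = ds[0]
print(example["task"])     # task identifier
print(example["input"])    # column elements of one table row
print(example["options"])  # possible classes (multiple-choice tasks only)
print(example["output"])   # target column element of the same row
```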
14,797
[ [ -0.04156494140625, -0.04046630859375, 0.033203125, 0.0243072509765625, 0.00634002685546875, 0.01158905029296875, -0.01032257080078125, -0.043243408203125, 0.036895751953125, 0.0215911865234375, -0.07305908203125, -0.04840087890625, -0.04656982421875, 0.01392...
MicPie/unpredictable_cluster22
2022-08-04T19:58:29.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T18:28:51
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster22 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster22" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples; most current NLP datasets are, in contrast, very deep, i.e., tens of tasks with many examples each. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines (jsonl) file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by 'input', 'options', and 'output' fields. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target representing an individual column of that row. Each task contains several such examples, which can be concatenated into a few-shot task (see the usage sketch after the citation at the end of this card). In the case of multiple-choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields * 'task': task identifier * 'input': column elements of a specific row in the table. * 'options': for multiple-choice classification, the possible options to choose from. 
* 'output': target column element of the same row as the input. * 'pageTitle': the title of the page containing the table. * 'outputColName': the name of the output column. * 'url': the URL of the website containing the table. * 'wdcFile': the source file in the WDC Web Table Corpus. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was carried out only for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may reflect the harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
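As referenced in the "Data Instances" section above, the following sketch shows how examples sharing a 'task' value could be concatenated into a single few-shot prompt. The prompt template and the split name "train" are our assumptions, not prescribed by the dataset.

```python
from datasets import load_dataset

ds = load_dataset("MicPie/unpredictable_cluster22", split="train")

# Gather the examples belonging to one table-derived task.
task_id = ds[0]["task"]
shots = list(ds.filter(lambda ex: ex["task"] == task_id))

# Use all but the last example as in-context demonstrations,
# then pose the last input as the query to complete.
prompt = ""
for ex in shots[:-1]:
    prompt += f"Input: {ex['input']}\nOutput: {ex['output']}\n\n"
prompt += f"Input: {shots[-1]['input']}\nOutput:"
print(prompt)
```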
14,797
[ [ -0.041015625, -0.04046630859375, 0.033111572265625, 0.0235748291015625, 0.0062255859375, 0.01172637939453125, -0.0103759765625, -0.042510986328125, 0.036956787109375, 0.0207061767578125, -0.0726318359375, -0.04827880859375, -0.046661376953125, 0.013160705566...
MicPie/unpredictable_cluster23
2022-08-04T19:58:59.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T18:29:41
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster23 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster23" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples; most current NLP datasets are, in contrast, very deep, i.e., tens of tasks with many examples each. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines (jsonl) file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by 'input', 'options', and 'output' fields. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target representing an individual column of that row. Each task contains several such examples, which can be concatenated into a few-shot task (see the usage sketch after the citation at the end of this card). In the case of multiple-choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields * 'task': task identifier * 'input': column elements of a specific row in the table. * 'options': for multiple-choice classification, the possible options to choose from. 
* 'output': target column element of the same row as the input. * 'pageTitle': the title of the page containing the table. * 'outputColName': the name of the output column. * 'url': the URL of the website containing the table. * 'wdcFile': the source file in the WDC Web Table Corpus. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was carried out only for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may reflect the harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
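As referenced in the "Data Instances" section above, a short sketch that separates multiple-choice examples from the rest. The assumption that non-multiple-choice examples carry an empty 'options' value is ours, inferred from the field description; the split name "train" is likewise assumed.

```python
from datasets import load_dataset

ds = load_dataset("MicPie/unpredictable_cluster23", split="train")

# Keep only examples that provide candidate classes to choose from.
multiple_choice = ds.filter(lambda ex: bool(ex["options"]))
print(f"{len(multiple_choice)} of {len(ds)} examples are multiple-choice")
```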
14,797
[ [ -0.04095458984375, -0.03997802734375, 0.033477783203125, 0.0237884521484375, 0.006320953369140625, 0.012664794921875, -0.010650634765625, -0.04254150390625, 0.03753662109375, 0.020599365234375, -0.07275390625, -0.048370361328125, -0.04644775390625, 0.0137481...
MicPie/unpredictable_cluster05
2022-08-04T19:45:58.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T19:10:16
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster05 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster05" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples; most current NLP datasets are, in contrast, very deep, i.e., tens of tasks with many examples each. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines (jsonl) file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by 'input', 'options', and 'output' fields. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target representing an individual column of that row. Each task contains several such examples, which can be concatenated into a few-shot task (see the usage sketch after the citation at the end of this card). In the case of multiple-choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields * 'task': task identifier * 'input': column elements of a specific row in the table. * 'options': for multiple-choice classification, the possible options to choose from. 
* 'output': target column element of the same row as the input. * 'pageTitle': the title of the page containing the table. * 'outputColName': the name of the output column. * 'url': the URL of the website containing the table. * 'wdcFile': the source file in the WDC Web Table Corpus. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was carried out only for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
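To make the instance format described under "Data Instances" above concrete, here is a minimal, hypothetical sketch of one such task and of how its examples could be concatenated into a few-shot prompt. Every field value, the task identifier, and the prompt layout below are invented for illustration and do not come from the actual dataset files.

```python
# Hypothetical UnpredicTable-style task: several examples sharing one 'task'
# identifier, each with 'input', 'options', 'output', and metadata fields.
# All values here are invented for illustration only.
task_examples = [
    {
        "task": "example-com_missions_outcome",      # invented task identifier
        "input": "[Mission] Apollo 11 [Year] 1969",  # row cells of one table row
        "options": ["Success", "Failure"],           # candidate classes (multiple choice)
        "output": "Success",                         # target column element of the same row
        "pageTitle": "List of lunar missions",       # invented metadata
        "outputColName": "Outcome",
        "url": "https://example.com/missions",
        "wdcFile": "00.json.gz",
    },
    {
        "task": "example-com_missions_outcome",
        "input": "[Mission] Apollo 13 [Year] 1970",
        "options": ["Success", "Failure"],
        "output": "Failure",
        "pageTitle": "List of lunar missions",
        "outputColName": "Outcome",
        "url": "https://example.com/missions",
        "wdcFile": "00.json.gz",
    },
]

def to_few_shot_prompt(examples):
    """Concatenate a task's examples into one few-shot prompt, holding out
    the last example's 'output' as the completion target."""
    demos = [f"{ex['input']}\nAnswer: {ex['output']}" for ex in examples[:-1]]
    demos.append(f"{examples[-1]['input']}\nAnswer:")
    return "\n\n".join(demos)

print(to_few_shot_prompt(task_examples))
```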
14,797
[ [ -0.04168701171875, -0.038604736328125, 0.033935546875, 0.0230560302734375, 0.006259918212890625, 0.0106353759765625, -0.0095977783203125, -0.043487548828125, 0.036346435546875, 0.020233154296875, -0.0728759765625, -0.049652099609375, -0.04644775390625, 0.014...
MicPie/unpredictable_cluster06
2022-08-04T19:46:44.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T19:11:07
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster06 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster06" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples, compared to most current NLP datasets, which are very deep, i.e., tens of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier. 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name. 'url': URL of the website containing the table. 'wdcFile': WDC Web Table Corpus file. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. The detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.040435791015625, -0.03887939453125, 0.03375244140625, 0.0221099853515625, 0.006679534912109375, 0.00921630859375, -0.009490966796875, -0.04327392578125, 0.037353515625, 0.019989013671875, -0.072998046875, -0.048187255859375, -0.0462646484375, 0.0139846801...
MicPie/unpredictable_cluster07
2022-08-04T19:47:24.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-08T19:13:12
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster07 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster07" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples, compared to most current NLP datasets, which are very deep, i.e., tens of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier. 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name. 'url': URL of the website containing the table. 'wdcFile': WDC Web Table Corpus file. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. The detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.04071044921875, -0.03912353515625, 0.03369140625, 0.0227203369140625, 0.00527191162109375, 0.01018524169921875, -0.0100555419921875, -0.0428466796875, 0.037322998046875, 0.0208587646484375, -0.0716552734375, -0.048828125, -0.046905517578125, 0.01438140869...
MicPie/unpredictable_rated-low
2022-08-04T20:12:07.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
9
2022-07-09T08:47:52
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-rated-low size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-rated-low" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * 
[UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have thousands of tasks, while each task has only a few examples, compared to most current NLP datasets, which are very deep, i.e., tens of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a JSON Lines file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier. 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 
'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name. 'url': URL of the website containing the table. 'wdcFile': WDC Web Table Corpus file. ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. The detailed annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.04022216796875, -0.0406494140625, 0.0308990478515625, 0.0226287841796875, 0.00616455078125, 0.0087738037109375, -0.0087890625, -0.0433349609375, 0.035797119140625, 0.021484375, -0.07281494140625, -0.048004150390625, -0.04632568359375, 0.01476287841796875,...
tner/btc
2022-11-27T19:07:36.000Z
[ "task_categories:token-classification", "task_ids:named-entity-recognition", "multilinguality:monolingual", "size_categories:1k<10K", "language:en", "license:other", "region:us" ]
tner
[BTC](https://aclanthology.org/C16-1111/)
@inproceedings{derczynski-etal-2016-broad, title = "Broad {T}witter Corpus: A Diverse Named Entity Recognition Resource", author = "Derczynski, Leon and Bontcheva, Kalina and Roberts, Ian", booktitle = "Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers", month = dec, year = "2016", address = "Osaka, Japan", publisher = "The COLING 2016 Organizing Committee", url = "https://aclanthology.org/C16-1111", pages = "1169--1179", abstract = "One of the main obstacles, hampering method development and comparative evaluation of named entity recognition in social media, is the lack of a sizeable, diverse, high quality annotated corpus, analogous to the CoNLL{'}2003 news dataset. For instance, the biggest Ritter tweet corpus is only 45,000 tokens {--} a mere 15{\%} the size of CoNLL{'}2003. Another major shortcoming is the lack of temporal, geographic, and author diversity. This paper introduces the Broad Twitter Corpus (BTC), which is not only significantly bigger, but sampled across different regions, temporal periods, and types of Twitter users. The gold-standard named entity annotations are made by a combination of NLP experts and crowd workers, which enables us to harness crowd recall while maintaining high quality. We also measure the entity drift observed in our dataset (i.e. how entity representation varies over time), and compare to newswire. The corpus is released openly, including source text and intermediate annotations.", }
1
9
2022-07-18T10:38:50
--- language: - en license: - other multilinguality: - monolingual size_categories: - 1k<10K task_categories: - token-classification task_ids: - named-entity-recognition pretty_name: BTC --- # Dataset Card for "tner/btc" ## Dataset Description - **Repository:** [T-NER](https://github.com/asahi417/tner) - **Paper:** [https://aclanthology.org/C16-1111/](https://aclanthology.org/C16-1111/) - **Dataset:** Broad Twitter Corpus - **Domain:** Twitter - **Number of Entity Types:** 3 ### Dataset Summary Broad Twitter Corpus NER dataset formatted as part of the [TNER](https://github.com/asahi417/tner) project. - Entity Types: `LOC`, `ORG`, `PER` ## Dataset Structure ### Data Instances An example of `train` looks as follows. ``` { 'tokens': ['I', 'hate', 'the', 'words', 'chunder', ',', 'vomit', 'and', 'puke', '.', 'BUUH', '.'], 'tags': [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6] } ``` ### Label ID The label2id dictionary can be found [here](https://huggingface.co/datasets/tner/btc/raw/main/dataset/label.json). ```python { "B-LOC": 0, "B-ORG": 1, "B-PER": 2, "I-LOC": 3, "I-ORG": 4, "I-PER": 5, "O": 6 } ``` ### Data Splits | name |train|validation|test| |---------|----:|---------:|---:| |btc | 6338| 1001|2000| ### Citation Information ``` @inproceedings{derczynski-etal-2016-broad, title = "Broad {T}witter Corpus: A Diverse Named Entity Recognition Resource", author = "Derczynski, Leon and Bontcheva, Kalina and Roberts, Ian", booktitle = "Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers", month = dec, year = "2016", address = "Osaka, Japan", publisher = "The COLING 2016 Organizing Committee", url = "https://aclanthology.org/C16-1111", pages = "1169--1179", abstract = "One of the main obstacles, hampering method development and comparative evaluation of named entity recognition in social media, is the lack of a sizeable, diverse, high quality annotated corpus, analogous to the CoNLL{'}2003 news dataset. For instance, the biggest Ritter tweet corpus is only 45,000 tokens {--} a mere 15{\%} the size of CoNLL{'}2003. Another major shortcoming is the lack of temporal, geographic, and author diversity. This paper introduces the Broad Twitter Corpus (BTC), which is not only significantly bigger, but sampled across different regions, temporal periods, and types of Twitter users. The gold-standard named entity annotations are made by a combination of NLP experts and crowd workers, which enables us to harness crowd recall while maintaining high quality. We also measure the entity drift observed in our dataset (i.e. how entity representation varies over time), and compare to newswire. The corpus is released openly, including source text and intermediate annotations.", } ```
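As a usage illustration, the sketch below decodes the integer `tags` of an instance back into the BIO labels listed above. It assumes the Hugging Face `datasets` library is installed and that the dataset loads under the repository name `tner/btc` shown in this card; treat it as a sketch rather than official usage.

```python
from datasets import load_dataset

# id -> label mapping, copied from the label2id dictionary shown above.
id2label = {0: "B-LOC", 1: "B-ORG", 2: "B-PER",
            3: "I-LOC", 4: "I-ORG", 5: "I-PER", 6: "O"}

dataset = load_dataset("tner/btc")  # splits: train / validation / test
example = dataset["train"][0]

# Print each token next to its decoded BIO tag.
for token, tag_id in zip(example["tokens"], example["tags"]):
    print(f"{token}\t{id2label[tag_id]}")
```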
2,863
[ [ -0.04022216796875, -0.042449951171875, 0.01367950439453125, 0.0030307769775390625, -0.0305023193359375, 0.0264129638671875, -0.038726806640625, -0.0374755859375, 0.05157470703125, 0.0085906982421875, -0.028900146484375, -0.0643310546875, -0.05401611328125, 0...
biglam/hansard_speech
2022-07-27T12:30:30.000Z
[ "task_categories:text-classification", "task_categories:text-generation", "task_ids:multi-class-classification", "task_ids:language-modeling", "task_ids:masked-language-modeling", "annotations_creators:no-annotation", "language_creators:expert-generated", "multilinguality:monolingual", "size_categor...
biglam
A dataset containing every speech in the House of Commons from May 1979-July 2020.
@misc{odell, evan_2021, title={Hansard Speeches 1979-2021: Version 3.1.0}, DOI={10.5281/zenodo.4843485}, abstractNote={<p>Full details are available at <a href="https://evanodell.com/projects/datasets/hansard-data">https://evanodell.com/projects/datasets/hansard-data</a></p> <p><strong>Version 3.1.0 contains the following changes:</strong></p> <p>- Coverage up to the end of April 2021</p>}, note={This release is an update of previously released datasets. See full documentation for details.}, publisher={Zenodo}, author={Odell, Evan}, year={2021}, month={May} }
2
9
2022-07-22T21:57:59
--- annotations_creators: - no-annotation language: - 'en' language_creators: - expert-generated license: - cc-by-4.0 multilinguality: - monolingual pretty_name: Hansard Speeches size_categories: - 1M<n<10M source_datasets: - original tags: - speeches - politics - parliament - British task_categories: - text-classification - text-generation task_ids: - multi-class-classification - language-modeling - masked-language-modeling --- # Dataset Card for Hansard speech ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://evanodell.com/projects/datasets/hansard-data/ - **Repository:** https://github.com/evanodell/hansard-data3 - **Paper:** [Needs More Information] - **Leaderboard:** [Needs More Information] - **Point of Contact:** [Evan Odell](https://github.com/evanodell) ### Dataset Summary A dataset containing every speech in the House of Commons from May 1979 to July 2020. Quoted from the dataset homepage > Please contact me if you find any errors in the dataset. The integrity of the public Hansard record is questionable at times, and while I have improved it, the data is presented "as is". ### Supported Tasks and Leaderboards - `text-classification`: This dataset can be used to classify speech transcripts by time period or by speech type. - `language-modeling`: This dataset can contribute to the training or evaluation of language models for historical texts. ### Languages `en-GB` ## Dataset Structure ### Data Instances ``` { 'id': 'uk.org.publicwhip/debate/1979-05-17a.390.0', 'speech': "Since the Minister for Consumer Affairs said earlier that the bread price rise would be allowed, in view of developing unemployment in the baking industry, and since the Mother's Pride bakery in my constituency is about to close, will the right hon. Gentleman give us a firm assurance that there will be an early debate on the future of the industry, so that the Government may announce that, thanks to the price rise, those workers will not now be put out of work?", 'display_as': 'Eric Heffer', 'party': 'Labour', 'constituency': 'Liverpool, Walton', 'mnis_id': '725', 'date': '1979-05-17', 'time': '', 'colnum': '390', 'speech_class': 'Speech', 'major_heading': 'BUSINESS OF THE HOUSE', 'minor_heading': '', 'oral_heading': '', 'year': '1979', 'hansard_membership_id': '5612', 'speakerid': 'uk.org.publicwhip/member/11615', 'person_id': '', 'speakername': 'Mr. 
'url': '', 'government_posts': [], 'opposition_posts': [], 'parliamentary_posts': ['Member, Labour Party National Executive Committee'] } ``` ### Data Fields |Variable|Description| |---|---| |id|The ID as assigned by mysociety| |speech|The text of the speech| |display_as|The standardised name of the MP.| |party|The party the MP was a member of at the time of the speech| |constituency|Constituency represented by the MP at the time of the speech| |mnis_id|The MP's Members Name Information Service number| |date|Date of speech| |time|Time of speech| |colnum|Column number in the Hansard record| |speech_class|Type of speech| |major_heading|Major debate heading| |minor_heading|Minor debate heading| |oral_heading|Oral debate heading| |year|Year of speech| |hansard_membership_id|ID used by mysociety| |speakerid|ID used by mysociety| |person_id|ID used by mysociety| |speakername|MP name as it appeared in the Hansard record for the speech| |url|Link to speech| |government_posts|Government posts held by the MP (list)| |opposition_posts|Opposition posts held by the MP (list)| |parliamentary_posts|Parliamentary posts held by the MP (list)| ### Data Splits Train: 2694375 ## Dataset Creation ### Curation Rationale This dataset contains all the speeches made in the House of Commons and can be used for a number of deep learning tasks, such as detecting how language and societal views have changed over the more than 40 years it covers. The dataset also provides language closer to the spoken language used in an elite British institution. ### Source Data #### Initial Data Collection and Normalization The dataset was created by retrieving the data from [data.parliament.uk](http://data.parliament.uk/membersdataplatform/memberquery.aspx). There is no normalization. #### Who are the source language producers? [N/A] ### Annotations #### Annotation process None #### Who are the annotators? [N/A] ### Personal and Sensitive Information This is public information, so there should not be any personal or sensitive information. ## Considerations for Using the Data ### Social Impact of Dataset The purpose of this dataset is to understand how language use and society's views have changed over time. ### Discussion of Biases Because of the long time period this dataset spans, it might contain language and opinions that are unacceptable in modern society. ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators This dataset was built on top of [parlparse](https://github.com/mysociety/parlparse) by [Evan Odell](https://github.com/evanodell) ### Licensing Information Creative Commons Attribution 4.0 International License ### Citation Information ``` @misc{odell, evan_2021, title={Hansard Speeches 1979-2021: Version 3.1.0}, DOI={10.5281/zenodo.4843485}, abstractNote={<p>Full details are available at <a href="https://evanodell.com/projects/datasets/hansard-data">https://evanodell.com/projects/datasets/hansard-data</a></p> <p><strong>Version 3.1.0 contains the following changes:</strong></p> <p>- Coverage up to the end of April 2021</p>}, note={This release is an update of previously released datasets. See full documentation for details.}, publisher={Zenodo}, author={Odell, Evan}, year={2021}, month={May} } ``` Thanks to [@shamikbose](https://github.com/shamikbose) for adding this dataset.
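To make the supported tasks concrete, here is a minimal sketch of loading and slicing the corpus with the `datasets` library; the hub id `hansard_speech` is an assumption, so substitute the actual repository id if it differs.

```python
from datasets import load_dataset

# Hub id assumed from the card title; adjust if the dataset lives elsewhere.
dataset = load_dataset("hansard_speech", split="train")

# Example: keep only substantive speeches from a single decade,
# using the `speech_class` and `year` fields documented above.
speeches_1980s = dataset.filter(
    lambda ex: ex["speech_class"] == "Speech" and ex["year"].startswith("198")
)
print(len(speeches_1980s))
```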
6,831
[ [ -0.024993896484375, -0.04107666015625, 0.0182647705078125, -0.004852294921875, -0.0174102783203125, -0.0157012939453125, -0.037506103515625, -0.010650634765625, 0.027313232421875, 0.038116455078125, -0.031707763671875, -0.057342529296875, -0.050537109375, -0...
spacemanidol/ESCI-product-dataset-corpus-us
2022-08-04T13:59:21.000Z
[ "region:us" ]
spacemanidol
null
@misc{reddy2022shopping, title={Shopping Queries Dataset: A Large-Scale {ESCI} Benchmark for Improving Product Search}, author={Chandan K. Reddy and Lluís Màrquez and Fran Valero and Nikhil Rao and Hugo Zaragoza and Sambaran Bandyopadhyay and Arnab Biswas and Anlu Xing and Karthik Subbian}, year={2022}, eprint={2206.06588}, archivePrefix={arXiv} }
1
9
2022-07-26T15:45:55
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
joelniklaus/lextreme
2023-04-29T07:02:17.000Z
[ "task_categories:text-classification", "task_categories:token-classification", "task_ids:multi-class-classification", "task_ids:multi-label-classification", "task_ids:topic-classification", "task_ids:named-entity-recognition", "annotations_creators:other", "language_creators:found", "multilinguality...
joelniklaus
The LEXTREME Benchmark is a collection of multilingual datasets for evaluating model performance across a diverse set of legal NLU tasks.
@misc{niklaus2023lextreme, title={LEXTREME: A Multi-Lingual and Multi-Task Benchmark for the Legal Domain}, author={Joel Niklaus and Veton Matoshi and Pooja Rani and Andrea Galassi and Matthias Stürmer and Ilias Chalkidis}, year={2023}, eprint={2301.13126}, archivePrefix={arXiv}, primaryClass={cs.CL} }
15
9
2022-08-01T08:41:55
--- annotations_creators: - other language_creators: - found language: - bg - cs - da - de - el - en - es - et - fi - fr - ga - hr - hu - it - lt - lv - mt - nl - pl - pt - ro - sk - sl - sv license: - cc-by-4.0 multilinguality: - multilingual paperswithcode_id: null pretty_name: "LEXTREME: A Multilingual Legal Benchmark for Natural Language Understanding" size_categories: - 10K<n<100K source_datasets: - extended task_categories: - text-classification - token-classification task_ids: - multi-class-classification - multi-label-classification - topic-classification - named-entity-recognition --- # Dataset Card for LEXTREME: A Multilingual Legal Benchmark for Natural Language Understanding ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** [Joel Niklaus](mailto:joel.niklaus.2@bfh.ch) ### Dataset Summary The dataset consists of 11 diverse multilingual legal NLU datasets. 6 datasets have one single configuration and 5 datasets have two or three configurations. This leads to a total of 18 tasks (8 single-label text classification tasks, 5 multi-label text classification tasks and 5 token-classification tasks). Use the dataset like this: ```python from datasets import load_dataset dataset = load_dataset("joelito/lextreme", "swiss_judgment_prediction") ``` ### Supported Tasks and Leaderboards The dataset supports the tasks of text classification and token classification. 
In detail, we support the following tasks and configurations: | task | task type | configurations | link | |---|---|---|---| | Brazilian Court Decisions | Judgment Prediction | (judgment, unanimity) | [joelito/brazilian_court_decisions](https://huggingface.co/datasets/joelito/brazilian_court_decisions) | | Swiss Judgment Prediction | Judgment Prediction | default | [joelito/swiss_judgment_prediction](https://huggingface.co/datasets/swiss_judgment_prediction) | | German Argument Mining | Argument Mining | default | [joelito/german_argument_mining](https://huggingface.co/datasets/joelito/german_argument_mining) | | Greek Legal Code | Topic Classification | (volume, chapter, subject) | [greek_legal_code](https://huggingface.co/datasets/greek_legal_code) | | Online Terms of Service | Unfairness Classification | (unfairness level, clause topic) | [online_terms_of_service](https://huggingface.co/datasets/joelito/online_terms_of_service) | | Covid 19 Emergency Event | Event Classification | default | [covid19_emergency_event](https://huggingface.co/datasets/joelito/covid19_emergency_event) | | MultiEURLEX | Topic Classification | (level 1, level 2, level 3) | [multi_eurlex](https://huggingface.co/datasets/multi_eurlex) | | LeNER BR | Named Entity Recognition | default | [lener_br](https://huggingface.co/datasets/lener_br) | | LegalNERo | Named Entity Recognition | default | [legalnero](https://huggingface.co/datasets/joelito/legalnero) | | Greek Legal NER | Named Entity Recognition | default | [greek_legal_ner](https://huggingface.co/datasets/joelito/greek_legal_ner) | | MAPA | Named Entity Recognition | (coarse, fine) | [mapa](https://huggingface.co/datasets/joelito/mapa) | ### Languages The following languages are supported: bg, cs, da, de, el, en, es, et, fi, fr, ga, hr, hu, it, lt, lv, mt, nl, pl, pt, ro, sk, sl, sv ## Dataset Structure ### Data Instances The file format is JSONL and three data splits are present for each configuration (train, validation and test). ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information How can I contribute a dataset to lextreme? Please follow these steps: 1. Make sure your dataset is available on the Hugging Face Hub and has a train, validation and test split. 2. Create a pull request to the lextreme repository by adding the following to the lextreme.py file: - Create a dict _{YOUR_DATASET_NAME} (similar to _BRAZILIAN_COURT_DECISIONS_JUDGMENT) containing all the necessary information about your dataset (task_type, input_col, label_col, etc.)
- Add your dataset to the BUILDER_CONFIGS list: `LextremeConfig(name="{your_dataset_name}", **_{YOUR_DATASET_NAME})` - Test that it works correctly by loading your subset with `load_dataset("lextreme", "{your_dataset_name}")` and inspecting a few examples. ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` @misc{niklaus2023lextreme, title={LEXTREME: A Multi-Lingual and Multi-Task Benchmark for the Legal Domain}, author={Joel Niklaus and Veton Matoshi and Pooja Rani and Andrea Galassi and Matthias Stürmer and Ilias Chalkidis}, year={2023}, eprint={2301.13126}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ### Contributions Thanks to [@JoelNiklaus](https://github.com/joelniklaus) for adding this dataset.
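For readers who want to go beyond the single example above, this is a minimal sketch of inspecting the three splits of one configuration; `swiss_judgment_prediction` is the configuration already used in this card, and any name from the task table should work the same way.

```python
from datasets import load_dataset

# Load one LEXTREME configuration and report the size of each split.
dataset = load_dataset("joelito/lextreme", "swiss_judgment_prediction")
for split in ("train", "validation", "test"):
    print(split, dataset[split].num_rows)
```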
7,624
[ [ -0.0291900634765625, -0.032623291015625, 0.0270538330078125, 0.0272369384765625, -0.0201568603515625, -0.0037059783935546875, -0.0340576171875, -0.0276336669921875, 0.02532958984375, 0.029052734375, -0.0307159423828125, -0.0819091796875, -0.04779052734375, 0...
DingZhaohai/emotion
2022-08-04T13:43:16.000Z
[ "region:us" ]
DingZhaohai
null
null
1
9
2022-08-04T13:43:01
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
sepidmnorozy/German_sentiment
2022-08-16T09:01:24.000Z
[ "region:us" ]
sepidmnorozy
null
null
0
9
2022-08-16T09:00:26
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
gotchab/Spa-Eng-Finetuning
2022-08-19T14:26:18.000Z
[ "region:us" ]
gotchab
null
null
0
9
2022-08-19T14:17:37
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
ShapeNet/shapenetcore-glb
2023-09-20T15:04:40.000Z
[ "language:en", "license:other", "3D shapes", "region:us" ]
ShapeNet
null
null
1
9
2022-08-31T08:04:51
--- language: - en pretty_name: ShapeNetCore tags: - 3D shapes license: other extra_gated_heading: Acknowledge license to accept the repository extra_gated_prompt: >- To request access to this ShapeNet repo, you will need to provide your **full name** (please provide both your first and last name), the name of your **advisor or the principal investigator (PI)** of your lab (in the PI/Advisor) fields, and the **school or company** that you are affiliated with (the **Affiliation** field). After requesting access to this ShapeNet repo, you will be considered for access approval. After access approval, you (the "Researcher") receive permission to use the ShapeNet database (the "Database") at Princeton University and Stanford University. In exchange for being able to join the ShapeNet community and receive such permission, Researcher hereby agrees to the following terms and conditions: Researcher shall use the Database only for non-commercial research and educational purposes. Princeton University and Stanford University make no representations or warranties regarding the Database, including but not limited to warranties of non-infringement or fitness for a particular purpose. Researcher accepts full responsibility for his or her use of the Database and shall defend and indemnify Princeton University and Stanford University, including their employees, Trustees, officers and agents, against any and all claims arising from Researcher's use of the Database, including but not limited to Researcher's use of any copies of copyrighted 3D models that he or she may create from the Database. Researcher may provide research associates and colleagues with access to the Database provided that they first agree to be bound by these terms and conditions. Princeton University and Stanford University reserve the right to terminate Researcher's access to the Database at any time. If Researcher is employed by a for-profit, commercial entity, Researcher's employer shall also be bound by these terms and conditions, and Researcher hereby represents that he or she is fully authorized to enter into this agreement on behalf of such employer. The law of the State of New Jersey shall apply to all disputes under this agreement. For access to the data, please fill in your **full name** (both first and last name), the name of your **advisor or principal investigator (PI)**, and the name of the **school or company** you are affiliated with. Please actually fill out the fields (DO NOT put the word "Advisor" for PI/Advisor and the word "School" for "Affiliation", please specify the name of your advisor and the name of your school). extra_gated_fields: Name: text PI/Advisor: text Affiliation: text Purpose: text Country: text I agree to use this dataset for non-commercial use ONLY: checkbox --- This repository contains ShapeNetCore (v2) in [GLB](https://en.wikipedia.org/wiki/GlTF#GLB) format, a subset of [ShapeNet](https://shapenet.org). ShapeNetCore is a densely annotated subset of ShapeNet covering 55 common object categories with ~51,300 unique 3D models. Each model in ShapeNetCore is linked to an appropriate synset in [WordNet 3.0](https://wordnet.princeton.edu/). If you use ShapeNet data, you agree to abide by the [ShapeNet terms of use](https://shapenet.org/terms). You are only allowed to redistribute the data to your research associates and colleagues provided that they first agree to be bound by these terms and conditions. If you use this data, please cite the main ShapeNet technical report.
``` @techreport{shapenet2015, title = {{ShapeNet: An Information-Rich 3D Model Repository}}, author = {Chang, Angel X. and Funkhouser, Thomas and Guibas, Leonidas and Hanrahan, Pat and Huang, Qixing and Li, Zimo and Savarese, Silvio and Savva, Manolis and Song, Shuran and Su, Hao and Xiao, Jianxiong and Yi, Li and Yu, Fisher}, number = {arXiv:1512.03012 [cs.GR]}, institution = {Stanford University --- Princeton University --- Toyota Technological Institute at Chicago}, year = {2015} } ``` For more information, please contact us at shapenetwebmaster@gmail.com and indicate ShapeNetCore v2 in the title of your email.
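As a quick sanity check after downloading, a GLB model can be opened with a general-purpose mesh library. The snippet below is a minimal sketch using `trimesh`, which is an assumption of convenience rather than anything the repository prescribes; the file path is a placeholder.

```python
import trimesh

# Load one downloaded model (path is a placeholder).
loaded = trimesh.load("path/to/model.glb")

# GLB files frequently load as a scene containing several geometries.
if isinstance(loaded, trimesh.Scene):
    print(f"{len(loaded.geometry)} geometries in scene")
else:
    print(f"{len(loaded.vertices)} vertices, {len(loaded.faces)} faces")
```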
4,231
[ [ -0.0026416778564453125, -0.02294921875, 0.02447509765625, 0.004413604736328125, -0.0145111083984375, -0.03570556640625, 0.0132293701171875, -0.049041748046875, 0.024200439453125, 0.045013427734375, -0.026458740234375, -0.042205810546875, -0.029266357421875, ...
Kirili4ik/yandex_jobs
2022-09-03T17:55:00.000Z
[ "task_categories:text-generation", "task_categories:summarization", "task_categories:multiple-choice", "task_ids:language-modeling", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:ru",...
Kirili4ik
null
null
4
9
2022-09-03T17:22:02
--- annotations_creators: - expert-generated language: - ru language_creators: - found license: - unknown multilinguality: - monolingual pretty_name: yandex_jobs size_categories: - n<1K source_datasets: - original tags: - vacancies - jobs - ru - yandex task_categories: - text-generation - summarization - multiple-choice task_ids: - language-modeling --- # Dataset Card for Yandex_Jobs ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Contributions](#contributions) ## Dataset Description ### Dataset Summary This is a dataset of more than 600 IT vacancies in Russian, parsed from the Telegram channel https://t.me/ya_jobs. All the texts are fully structured, with no missing values. ### Supported Tasks and Leaderboards - `text-generation` with the 'Raw text' column. - `summarization`: generating the header from all the other information. - `multiple-choice`: for the hashtags, choosing multiple from all those available in the dataset. ### Languages The text in the dataset is only in Russian. The associated BCP-47 code is `ru`. ## Dataset Structure ### Data Instances Each instance is a parsed vacancy of the Russian IT company [Yandex](https://ya.ru/). An example from the set looks as follows: ``` { 'Header': 'Разработчик интерфейсов в группу разработки спецпроектов', 'Emoji': '🎳', 'Description': 'Конструктор лендингов — это инструмент Яндекса, который позволяет пользователям создавать лендинги и турбо-лендинги для Яндекс.Директа. Турбо — режим ускоренной загрузки страниц для показа на мобильных. У нас современный стек, смелые планы и высокая динамика.\nМы ищем опытного и открытого новому фронтенд-разработчика.', 'Requirements': '• отлично знаете JavaScript • разрабатывали на Node.js, применяли фреймворк Express • умеете создавать веб-приложения на React + Redux • знаете HTML и CSS, особенности их отображения в браузерах', 'Tasks': '• разрабатывать интерфейсы', 'Pluses': '• писали интеграционные, модульные, функциональные или браузерные тесты • умеете разворачивать и администрировать веб-сервисы: собирать Docker-образы, настраивать мониторинги, выкладывать в облачные системы, отлаживать в продакшене • работали с реляционными БД PostgreSQL', 'Hashtags': '#фронтенд #турбо #JS', 'Link': 'https://ya.cc/t/t7E3UsmVSKs6L', 'Raw text': 'Разработчик интерфейсов в группу разработки спецпроектов🎳 Конструктор лендингов — это инструмент Яндекса, который позволяет пользователям создавать лендинги и турбо-лендинги для Яндекс.Директа. Турбо — режим ускоренной загрузки страниц для показа на мобильных. У нас современный стек, смелые планы и высокая динамика. Мы ищем опытного и открытого новому фронтенд-разработчика.
Мы ждем, что вы: • отлично знаете JavaScript • разрабатывали на Node.js, применяли фреймворк Express • умеете создавать веб-приложения на React + Redux • знаете HTML и CSS, особенности их отображения в браузерах Что нужно делать: • разрабатывать интерфейсы Будет плюсом, если вы: • писали интеграционные, модульные, функциональные или браузерные тесты • умеете разворачивать и администрировать веб-сервисы: собирать Docker-образы, настраивать мониторинги, выкладывать в облачные системы, отлаживать в продакшене • работали с реляционными БД PostgreSQL https://ya.cc/t/t7E3UsmVSKs6L #фронтенд #турбо #JS' } ``` ### Data Fields - `Header`: A string with the position title (str) - `Emoji`: The emoji used at the end of the position title (usually associated with the position) (str) - `Description`: A short description of the vacancy (str) - `Requirements`: A few required technologies/programming languages/kinds of experience (str) - `Tasks`: Examples of the tasks of the job position (str) - `Pluses`: A few points that are great for the applicant to have (technologies/experience/etc.) (str) - `Hashtags`: A list of hashtags associated with the job (usually programming languages) (str) - `Link`: A link to the job description (there may be more information there, but it is not checked) (str) - `Raw text`: The raw text with all the formatting from the channel, created from the other fields (str) ### Data Splits There are not enough examples yet to split them into train/test/val, in my opinion. ## Dataset Creation The dataset was downloaded and parsed from the Telegram channel https://t.me/ya_jobs on 03.09.2022. All unparsed examples and those missing any field were deleted (reducing roughly 1600 vacancies to about 600 with no missing fields such as emojis or links). ## Considerations for Using the Data These vacancies are from only one IT company (Yandex). This means they can be quite specific and probably cannot be generalized to all vacancies, or even all IT vacancies. ## Contributions - **Point of Contact and Author:** [Kirill Gelvan](telegram: @kirili4ik)
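As a sketch of how these fields map onto the supported tasks, the snippet below builds (document, summary) pairs for summarization. It assumes the dataset loads from the Hub under this repository id with a single `train` split, which may differ since the card defines no official split.

```python
from datasets import load_dataset

# Repository id taken from this card; the split name is an assumption.
dataset = load_dataset("Kirili4ik/yandex_jobs", split="train")

# Pair the full posting ("Raw text") with its title ("Header").
pairs = [(example["Raw text"], example["Header"]) for example in dataset]
print(pairs[0][1])
```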
5,125
[ [ -0.037567138671875, -0.04583740234375, 0.0275726318359375, 0.0080718994140625, -0.0323486328125, 0.01319122314453125, -0.0052642822265625, -0.0162811279296875, 0.06268310546875, 0.008148193359375, -0.05206298828125, -0.07379150390625, -0.0206298828125, -0.00...
nateraw/avocado-prices
2022-09-08T20:43:27.000Z
[ "license:odbl", "region:us" ]
nateraw
null
null
1
9
2022-09-08T20:35:54
--- license: - odbl converted_from: kaggle kaggle_id: neuromusic/avocado-prices --- # Dataset Card for Avocado Prices ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://kaggle.com/datasets/neuromusic/avocado-prices - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary ### Context It is a well-known fact that Millennials LOVE Avocado Toast. It's also a well-known fact that all Millennials live in their parents' basements. Clearly, they aren't buying homes because they are buying too much Avocado Toast! But maybe there's hope... if a Millennial could find a city with cheap avocados, they could live out the Millennial American Dream. ### Content This data was downloaded from the Hass Avocado Board website in May of 2018 & compiled into a single CSV. Here's how the [Hass Avocado Board describes the data on their website][1]: > The table below represents weekly 2018 retail scan data for National retail volume (units) and price. Retail scan data comes directly from retailers’ cash registers based on actual retail sales of Hass avocados. Starting in 2013, the table below reflects an expanded, multi-outlet retail data set. Multi-outlet reporting includes an aggregation of the following channels: grocery, mass, club, drug, dollar and military. The Average Price (of avocados) in the table reflects a per unit (per avocado) cost, even when multiple units (avocados) are sold in bags. The Product Lookup codes (PLU’s) in the table are only for Hass avocados. Other varieties of avocados (e.g. greenskins) are not included in this table. Some relevant columns in the dataset: - `Date` - The date of the observation - `AveragePrice` - the average price of a single avocado - `type` - conventional or organic - `year` - the year - `Region` - the city or region of the observation - `Total Volume` - Total number of avocados sold - `4046` - Total number of avocados with PLU 4046 sold - `4225` - Total number of avocados with PLU 4225 sold - `4770` - Total number of avocados with PLU 4770 sold ### Acknowledgements Many thanks to the Hass Avocado Board for sharing this data!! http://www.hassavocadoboard.com/retail/volume-and-price-data ### Inspiration In which cities can Millennials have their avocado toast AND buy a home? Was the Avocadopocalypse of 2017 real?
[1]: http://www.hassavocadoboard.com/retail/volume-and-price-data ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators This dataset was shared by [@neuromusic](https://kaggle.com/neuromusic) ### Licensing Information The license for this dataset is odbl ### Citation Information ```bibtex [More Information Needed] ``` ### Contributions [More Information Needed]
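Once the CSV is downloaded, the columns described above lend themselves to simple pandas queries. The snippet below is a minimal sketch; the filename and the exact column casing (`region` vs. `Region`) are assumptions, so adjust them to the actual file.

```python
import pandas as pd

# Filename is a placeholder for the downloaded CSV.
df = pd.read_csv("avocado.csv", parse_dates=["Date"])

# Cheapest regions for organic avocados, averaged over all observations.
organic = df[df["type"] == "organic"]
print(organic.groupby("region")["AveragePrice"].mean().sort_values().head(10))
```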
4,683
[ [ -0.032867431640625, -0.06976318359375, 0.0143585205078125, 0.017425537109375, -0.009307861328125, -0.012908935546875, -0.0167083740234375, -0.053253173828125, 0.05548095703125, 0.04962158203125, -0.04425048828125, -0.0599365234375, -0.03143310546875, 0.00494...
NimaBoscarino/butterflies
2022-09-13T16:52:33.000Z
[ "region:us" ]
NimaBoscarino
null
null
1
9
2022-09-11T21:33:35
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
StonyBrookNLP/tellmewhy
2022-09-29T13:05:59.000Z
[ "task_categories:text2text-generation", "annotations_creators:crowdsourced", "language_creators:found", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:unknown", "region:us" ]
StonyBrookNLP
null
null
0
9
2022-09-21T16:11:29
--- annotations_creators: - crowdsourced language_creators: - found language: - en license: - unknown multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - original task_categories: - text2text-generation task_ids: [] paperswithcode_id: null pretty_name: TellMeWhy --- # Dataset Card for TellMeWhy ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://stonybrooknlp.github.io/tellmewhy/ - **Repository:** https://github.com/StonyBrookNLP/tellmewhy - **Paper:** https://aclanthology.org/2021.findings-acl.53/ - **Leaderboard:** None - **Point of Contact:** [Yash Kumar Lal](mailto:ylal@cs.stonybrook.edu) ### Dataset Summary TellMeWhy is a large-scale crowdsourced dataset made up of more than 30k questions and free-form answers concerning why characters in short narratives perform the actions described. ### Supported Tasks and Leaderboards The dataset is designed to test the why-question answering abilities of models when bound by local context. ### Languages English ## Dataset Structure ### Data Instances A typical data point consists of a story, a question and a crowdsourced answer to that question. Additionally, the instance also indicates whether the question's answer would be implicit or if it is explicitly stated in text. If applicable, it also contains Likert scores (-2 to 2) about the answer's grammaticality and validity in the given context. ``` { "narrative":"Cam ordered a pizza and took it home. He opened the box to take out a slice. Cam discovered that the store did not cut the pizza for him. He looked for his pizza cutter but did not find it. He had to use his chef knife to cut a slice.", "question":"Why did Cam order a pizza?", "original_sentence_for_question":"Cam ordered a pizza and took it home.", "narrative_lexical_overlap":0.3333333333, "is_ques_answerable":"Not Answerable", "answer":"Cam was hungry.", "is_ques_answerable_annotator":"Not Answerable", "original_narrative_form":[ "Cam ordered a pizza and took it home.", "He opened the box to take out a slice.", "Cam discovered that the store did not cut the pizza for him.", "He looked for his pizza cutter but did not find it.", "He had to use his chef knife to cut a slice." ], "question_meta":"rocstories_narrative_41270_sentence_0_question_0", "helpful_sentences":[ ], "human_eval":false, "val_ann":[ ], "gram_ann":[ ] } ``` ### Data Fields - `question_meta` - Unique meta for each question in the corpus - `narrative` - Full narrative from ROCStories.
Used as the context with which the question and answer are associated - `question` - Why question about an action or event in the narrative - `answer` - Crowdsourced answer to the question - `original_sentence_for_question` - Sentence in the narrative from which the question was generated - `narrative_lexical_overlap` - Unigram overlap of the answer with the narrative - `is_ques_answerable` - Majority judgment by annotators on whether an answer to this question is explicitly stated in the narrative. If "Not Answerable", it is part of the Implicit-Answer questions subset, which is harder for models. - `is_ques_answerable_annotator` - Individual annotator judgment on whether an answer to this question is explicitly stated in the narrative. - `original_narrative_form` - ROCStories narrative as an array of its sentences - `human_eval` - Indicates whether a question is a specific part of the test set. Models should be evaluated for their answers on these questions using the human evaluation suite released by the authors. They advocate for this human evaluation to be the correct way to track progress on this dataset. - `val_ann` - Array of Likert scores (possible sizes are 0 and 3) about whether an answer is valid given the question and context. Empty arrays exist for cases where the human_eval flag is False. - `gram_ann` - Array of Likert scores (possible sizes are 0 and 3) about whether an answer is grammatical. Empty arrays exist for cases where the human_eval flag is False. ### Data Splits The data is split into training, validation, and test sets. | Train | Valid | Test | | ------ | ----- | ----- | | 23964 | 2992 | 3563 | ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data ROCStories corpus (Mostafazadeh et al., 2016) #### Initial Data Collection and Normalization ROCStories was used to create why-questions related to actions and events in the stories. #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process Amazon Mechanical Turk workers were provided a story and an associated why-question, and asked to answer it. Three answers were collected for each question. For a small subset of questions, the quality of answers was also validated in a second round of annotation. This smaller subset should be used to perform human evaluation of any new models built for this dataset. #### Who are the annotators? Amazon Mechanical Turk workers ### Personal and Sensitive Information None ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Evaluation To evaluate progress on this dataset, the authors advocate for human evaluation and release a suite with the required settings [here](https://github.com/StonyBrookNLP/tellmewhy). Once inference on the test set has been completed, please select the answers on which human evaluation needs to be performed by choosing the questions (one answer per question; deduplication might be needed) in the test set where the `human_eval` flag is set to `True`. This subset can then be used to complete the requisite evaluation on TellMeWhy.
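A minimal sketch of selecting that human-evaluation subset is shown below, assuming the dataset loads from the Hub under this repository id; the field names are the ones documented in this card.

```python
from datasets import load_dataset

# Keep one answer per question where the human_eval flag is set.
dataset = load_dataset("StonyBrookNLP/tellmewhy", split="test")

seen = set()
human_eval_subset = []
for example in dataset:
    if example["human_eval"] and example["question_meta"] not in seen:
        seen.add(example["question_meta"])
        human_eval_subset.append(example)
print(len(human_eval_subset))
```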
### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` @inproceedings{lal-etal-2021-tellmewhy, title = "{T}ell{M}e{W}hy: A Dataset for Answering Why-Questions in Narratives", author = "Lal, Yash Kumar and Chambers, Nathanael and Mooney, Raymond and Balasubramanian, Niranjan", booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", month = aug, year = "2021", address = "Online", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2021.findings-acl.53", doi = "10.18653/v1/2021.findings-acl.53", pages = "596--610", } ``` ### Contributions Thanks to [@yklal95](https://github.com/ykl7) for adding this dataset.
7,765
[ [ -0.0380859375, -0.0638427734375, 0.03411865234375, 0.0020751953125, -0.0025081634521484375, -0.00638580322265625, -0.021331787109375, -0.0357666015625, 0.0237579345703125, 0.039215087890625, -0.06500244140625, -0.055755615234375, -0.036376953125, 0.026321411...
gfhayworth/wiki_mini
2023-01-28T23:28:54.000Z
[ "region:us" ]
gfhayworth
null
null
2
9
2022-09-26T20:42:59
Simple English Wikipedia has only about 170k articles. We split these articles into paragraphs.

```python
import os

from sentence_transformers import util

# Download the Simple English Wikipedia dump if it is not already present.
wikipedia_filepath = 'simplewiki-2020-11-01.jsonl.gz'
if not os.path.exists(wikipedia_filepath):
    util.http_get('http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz', wikipedia_filepath)
```
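Once the file is downloaded, the articles can be split into paragraph-level passages. The sketch below assumes each JSONL line is one article with a `paragraphs` list, which is an assumption about the schema; adjust the key names to the actual file.

```python
import gzip
import json

passages = []
with gzip.open("simplewiki-2020-11-01.jsonl.gz", "rt", encoding="utf8") as f:
    for line in f:
        article = json.loads(line)
        # Assumed schema: each line is one article with a "paragraphs" list.
        passages.extend(article.get("paragraphs", []))
print(len(passages), "passages")
```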
294
[ [ -0.050506591796875, -0.06610107421875, 0.0350341796875, 0.0281524658203125, -0.037841796875, -0.01299285888671875, -0.0242156982421875, -0.040435791015625, 0.0347900390625, 0.0242919921875, -0.058349609375, -0.00931549072265625, -0.0208740234375, 0.060455322...
KevinSpaghetti/cadec
2022-10-06T13:09:46.000Z
[ "region:us" ]
KevinSpaghetti
null
null
1
9
2022-10-01T11:21:40
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...