id
stringlengths
2
115
lastModified
stringlengths
24
24
tags
list
author
stringlengths
2
42
description
stringlengths
0
6.67k
citation
stringlengths
0
10.7k
likes
int64
0
3.66k
downloads
int64
0
8.89M
created
timestamp[us]
card
stringlengths
11
977k
card_len
int64
11
977k
embeddings
list
sayan1101/final_summarize
2023-10-17T17:55:29.000Z
[ "region:us" ]
sayan1101
null
null
0
9
2023-10-17T17:51:17
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: prompt dtype: string splits: - name: train num_bytes: 869663385 num_examples: 200000 download_size: 254181054 dataset_size: 869663385 --- # Dataset Card for "final_summarize" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
453
[ [ -0.0504150390625, -0.0113677978515625, 0.01824951171875, 0.00962066650390625, -0.02020263671875, 0.00011014938354492188, 0.00510406494140625, -0.0011949539184570312, 0.06475830078125, 0.041717529296875, -0.053192138671875, -0.053955078125, -0.042816162109375, ...
hf-internal-testing/fixtures-captioning
2023-11-02T10:43:47.000Z
[ "region:us" ]
hf-internal-testing
\\n
\\n
0
9
2023-10-18T09:37:25
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
kjappelbaum/chemnlp-opv
2023-10-18T14:09:11.000Z
[ "region:us" ]
kjappelbaum
null
null
0
9
2023-10-18T10:38:24
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Santp98/processed_licibert_dataset
2023-10-20T00:27:07.000Z
[ "region:us" ]
Santp98
null
null
0
9
2023-10-18T20:06:06
--- dataset_info: features: - name: input_ids sequence: int32 - name: token_type_ids sequence: int8 - name: attention_mask sequence: int8 - name: special_tokens_mask sequence: int8 splits: - name: train num_bytes: 777380400.0 num_examples: 215939 download_size: 200499848 dataset_size: 777380400.0 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "processed_licibert_dataset" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
610
[ [ -0.0338134765625, -0.0208587646484375, 0.023651123046875, 0.01090240478515625, -0.015594482421875, -0.0023670196533203125, 0.00736236572265625, -0.011322021484375, 0.0611572265625, 0.05224609375, -0.0589599609375, -0.053466796875, -0.03814697265625, -0.02497...
NoahBSchwartz/RLHF_with_Open_Ended_and_Multiple_Choice
2023-10-18T21:27:19.000Z
[ "region:us" ]
NoahBSchwartz
null
null
0
9
2023-10-18T21:27:07
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
phanvancongthanh/enamine_diversity_standardized
2023-10-19T06:22:56.000Z
[ "region:us" ]
phanvancongthanh
null
null
0
9
2023-10-19T02:55:08
--- dataset_info: features: - name: standardized_smiles dtype: string splits: - name: train num_bytes: 2422193889.0 num_examples: 47999999 download_size: 955961392 dataset_size: 2422193889.0 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "enamine_diversity_standardized" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
489
[ [ -0.050079345703125, -0.00644683837890625, -0.0018873214721679688, 0.0255889892578125, -0.013397216796875, -0.007579803466796875, -0.0023651123046875, -0.0253448486328125, 0.0635986328125, 0.0250701904296875, -0.044647216796875, -0.0694580078125, -0.0456237792968...
jamestalentium/cnn_dailymail_10_finetune
2023-10-21T02:10:01.000Z
[ "region:us" ]
jamestalentium
null
null
0
9
2023-10-21T01:42:07
--- dataset_info: features: - name: input_text dtype: string - name: output_text dtype: string - name: id dtype: string splits: - name: train num_bytes: 43944.50216465294 num_examples: 10 download_size: 25357 dataset_size: 43944.50216465294 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "cnn_dailymail_10_finetune" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
546
[ [ -0.039154052734375, -0.025848388671875, 0.0009918212890625, 0.0278167724609375, -0.0296478271484375, -0.01464080810546875, 0.0006866455078125, -0.006008148193359375, 0.048065185546875, 0.034820556640625, -0.05657958984375, -0.058624267578125, -0.05047607421875, ...
jamestalentium/cnn_dailymail_10_rm
2023-10-21T02:10:03.000Z
[ "region:us" ]
jamestalentium
null
null
0
9
2023-10-21T01:42:09
--- dataset_info: features: - name: input_text dtype: string - name: output_text dtype: string - name: id dtype: string splits: - name: train num_bytes: 43944.50216465294 num_examples: 10 download_size: 22784 dataset_size: 43944.50216465294 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "cnn_dailymail_10_rm" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
540
[ [ -0.033111572265625, -0.0203857421875, 0.0008592605590820312, 0.031585693359375, -0.0307769775390625, -0.00406646728515625, 0.0081024169921875, -0.002277374267578125, 0.044281005859375, 0.033111572265625, -0.06451416015625, -0.062255859375, -0.05499267578125, ...
jamestalentium/cnn_dailymail_10_test
2023-10-21T02:10:03.000Z
[ "region:us" ]
jamestalentium
null
null
0
9
2023-10-21T01:42:10
--- dataset_info: features: - name: input_text dtype: string - name: output_text dtype: string - name: id dtype: string splits: - name: test num_bytes: 8255778.137510879 num_examples: 1900 download_size: 0 dataset_size: 8255778.137510879 configs: - config_name: default data_files: - split: test path: data/test-* --- # Dataset Card for "cnn_dailymail_10_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
537
[ [ -0.03900146484375, -0.0297393798828125, -0.0014944076538085938, 0.031524658203125, -0.0231781005859375, -0.0026912689208984375, 0.006313323974609375, -0.0037555694580078125, 0.04412841796875, 0.0268096923828125, -0.06005859375, -0.059234619140625, -0.04754638671...
jamestalentium/dialogsum_10_finetune
2023-10-21T02:10:10.000Z
[ "region:us" ]
jamestalentium
null
null
0
9
2023-10-21T01:42:20
--- dataset_info: features: - name: id dtype: string - name: input_text dtype: string - name: output_text dtype: string - name: topic dtype: string splits: - name: train num_bytes: 9181.081861958266 num_examples: 10 download_size: 11980 dataset_size: 9181.081861958266 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "dialogsum_10_finetune" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
576
[ [ -0.048492431640625, -0.030242919921875, 0.0062103271484375, 0.00963592529296875, -0.01055908203125, -0.024627685546875, 0.00921630859375, -0.003192901611328125, 0.054351806640625, 0.049163818359375, -0.055572509765625, -0.045654296875, -0.0294647216796875, -...
jamestalentium/dialogsum_10_rm
2023-10-21T02:10:11.000Z
[ "region:us" ]
jamestalentium
null
null
0
9
2023-10-21T01:42:22
--- dataset_info: features: - name: id dtype: string - name: input_text dtype: string - name: output_text dtype: string - name: topic dtype: string splits: - name: train num_bytes: 9181.081861958266 num_examples: 10 download_size: 14579 dataset_size: 9181.081861958266 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "dialogsum_10_rm" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
570
[ [ -0.03814697265625, -0.026580810546875, 0.004184722900390625, 0.01029205322265625, -0.00826263427734375, -0.01338958740234375, 0.0198822021484375, -0.0002715587615966797, 0.04833984375, 0.051055908203125, -0.061676025390625, -0.04254150390625, -0.033966064453125,...
jamestalentium/dialogsum_10_test
2023-10-21T02:10:12.000Z
[ "region:us" ]
jamestalentium
null
null
0
9
2023-10-21T01:42:23
--- dataset_info: features: - name: id dtype: string - name: input_text dtype: string - name: output_text dtype: string - name: topic dtype: string splits: - name: test num_bytes: 1353776.49 num_examples: 1485 download_size: 0 dataset_size: 1353776.49 configs: - config_name: default data_files: - split: test path: data/test-* --- # Dataset Card for "dialogsum_10_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
553
[ [ -0.046844482421875, -0.0340576171875, 0.0035381317138671875, 0.01374053955078125, 0.0006136894226074219, -0.01297760009765625, 0.0190582275390625, -0.0027313232421875, 0.050628662109375, 0.04144287109375, -0.059783935546875, -0.041534423828125, -0.02828979492187...
jamestalentium/xsum_10_finetune
2023-10-21T02:10:19.000Z
[ "region:us" ]
jamestalentium
null
null
0
9
2023-10-21T01:42:30
--- dataset_info: features: - name: input_text dtype: string - name: output_text dtype: string - name: id dtype: string splits: - name: train num_bytes: 23485.327403268886 num_examples: 10 download_size: 19146 dataset_size: 23485.327403268886 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "xsum_10_finetune" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
539
[ [ -0.04736328125, -0.00473785400390625, 0.0114593505859375, 0.006252288818359375, -0.01555633544921875, -0.016448974609375, 0.007427215576171875, -0.006969451904296875, 0.06878662109375, 0.03973388671875, -0.050079345703125, -0.044281005859375, -0.0419921875, ...
jamestalentium/xsum_10_rm
2023-10-21T02:10:21.000Z
[ "region:us" ]
jamestalentium
null
null
0
9
2023-10-21T01:42:31
--- dataset_info: features: - name: input_text dtype: string - name: output_text dtype: string - name: id dtype: string splits: - name: train num_bytes: 23485.327403268886 num_examples: 10 download_size: 19056 dataset_size: 23485.327403268886 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "xsum_10_rm" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
533
[ [ -0.03656005859375, 0.004894256591796875, 0.01033782958984375, 0.0105133056640625, -0.01265716552734375, -0.004558563232421875, 0.0207672119140625, -0.0054931640625, 0.0662841796875, 0.040069580078125, -0.05694580078125, -0.045013427734375, -0.04656982421875, ...
jamestalentium/xsum_10_test
2023-10-21T02:10:21.000Z
[ "region:us" ]
jamestalentium
null
null
0
9
2023-10-21T01:42:33
--- dataset_info: features: - name: input_text dtype: string - name: output_text dtype: string - name: id dtype: string splits: - name: test num_bytes: 15613650.659431798 num_examples: 6614 download_size: 0 dataset_size: 15613650.659431798 configs: - config_name: default data_files: - split: test path: data/test-* --- # Dataset Card for "xsum_10_test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
530
[ [ -0.043609619140625, -0.0032806396484375, 0.00844573974609375, 0.01396942138671875, -0.004642486572265625, -0.0030670166015625, 0.019805908203125, -0.0083160400390625, 0.06903076171875, 0.032135009765625, -0.0518798828125, -0.040985107421875, -0.040924072265625, ...
adityarra07/test_GPT_1000
2023-10-22T13:35:16.000Z
[ "region:us" ]
adityarra07
null
null
0
9
2023-10-22T13:35:11
--- dataset_info: features: - name: audio dtype: audio: sampling_rate: 16000 - name: transcription dtype: string - name: id dtype: string splits: - name: train num_bytes: 133411975.96105696 num_examples: 1001 download_size: 128698472 dataset_size: 133411975.96105696 --- # Dataset Card for "test_GPT_1000" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
488
[ [ -0.04656982421875, -0.032562255859375, 0.01739501953125, 0.0122222900390625, -0.0093231201171875, -0.01092529296875, 0.027923583984375, 0.00714874267578125, 0.04339599609375, 0.01476287841796875, -0.053619384765625, -0.04022216796875, -0.039642333984375, -0....
datkai/final_news_vnexpress
2023-10-22T14:13:15.000Z
[ "region:us" ]
datkai
null
null
0
9
2023-10-22T14:11:38
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
AdapterOcean/physics_dataset_standardized_cluster_1
2023-10-23T01:52:03.000Z
[ "region:us" ]
AdapterOcean
null
null
0
9
2023-10-22T18:30:48
--- dataset_info: features: - name: text dtype: string - name: conversation_id dtype: int64 - name: embedding sequence: float64 - name: cluster dtype: int64 splits: - name: train num_bytes: 48679635 num_examples: 4357 download_size: 0 dataset_size: 48679635 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "physics_dataset_standardized_cluster_1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
582
[ [ -0.03277587890625, -0.01611328125, 0.0251312255859375, 0.0292510986328125, -0.02239990234375, -0.003520965576171875, 0.02508544921875, 0.0013360977172851562, 0.06103515625, 0.010498046875, -0.055023193359375, -0.060455322265625, -0.03680419921875, -0.0197296...
AdapterOcean/biology_dataset_standardized_cluster_1
2023-10-23T14:45:01.000Z
[ "region:us" ]
AdapterOcean
null
null
0
9
2023-10-22T18:46:54
--- dataset_info: features: - name: text dtype: string - name: conversation_id dtype: int64 - name: embedding sequence: float64 - name: cluster dtype: int64 splits: - name: train num_bytes: 10392472 num_examples: 957 download_size: 0 dataset_size: 10392472 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "biology_dataset_standardized_cluster_1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
581
[ [ -0.0272216796875, -0.021392822265625, 0.0213470458984375, 0.0192718505859375, -0.0267791748046875, -0.0025577545166015625, 0.0204315185546875, -0.0058746337890625, 0.07611083984375, 0.0189666748046875, -0.04974365234375, -0.0743408203125, -0.04473876953125, ...
sminchoi/guanaco-llama2-test
2023-10-23T07:26:25.000Z
[ "region:us" ]
sminchoi
null
null
0
9
2023-10-23T07:25:57
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
StivenLancheros/christmas-edits_dataset2
2023-10-23T10:35:18.000Z
[ "region:us" ]
StivenLancheros
null
null
0
9
2023-10-23T09:35:24
--- dataset_info: features: - name: input_image dtype: image - name: edit_prompt dtype: string - name: edited_image dtype: image splits: - name: train num_bytes: 33235736614.672 num_examples: 84776 download_size: 32967759861 dataset_size: 33235736614.672 --- # Dataset Card for "christmas-edits_dataset2" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
473
[ [ -0.0255279541015625, -0.0165863037109375, 0.0006923675537109375, 0.0197296142578125, -0.019287109375, -0.0002741813659667969, 0.003650665283203125, -0.030059814453125, 0.051055908203125, 0.018890380859375, -0.061126708984375, -0.037750244140625, -0.0326538085937...
Alamerton/pangolin-synthetic-sycophancy
2023-10-23T10:58:46.000Z
[ "region:us" ]
Alamerton
null
null
0
9
2023-10-23T10:57:59
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
mstz/victorian_authorship
2023-10-26T14:10:56.000Z
[ "task_categories:text-classification", "size_categories:10K<n<100K", "language:en", "license:cc", "victorian", "text-classification", "region:us" ]
mstz
null
@phdthesis{gungor2018benchmarking, title={Benchmarking authorship attribution techniques using over a thousand books by fifty victorian era novelists}, author={Gungor, Abdulmecit}, year={2018}, school={Purdue University} }
0
9
2023-10-23T12:27:28
--- language: - en tags: - victorian - text-classification pretty_name: Victorian authorship size_categories: - 10K<n<100K task_categories: - text-classification license: cc --- # Victorian authorship The [Victorian authorship dataset](https://scholarworks.iupui.edu/server/api/core/bitstreams/708a9870-915e-4d59-b54d-938af563c196/content). Which Victorian author wrote the given text? # Configurations and tasks | **Configuration** | **Task** | Description | |-------------------|---------------------------|---------------------------------------------------------------| | authorship | Classification | Which Victorian author wrote the given text?| # Usage ```python from datasets import load_dataset dataset = load_dataset("mstz/victorian_authorship", "authorship")["train"] ``` # Features |**Feature** |**Type** | |-------------------|---------------| | text | `[string]` | # Citation Cite this dataset as ``` @phdthesis{gungor2018benchmarking, title={Benchmarking authorship attribution techniques using over a thousand books by fifty victorian era novelists}, author={Gungor, Abdulmecit}, year={2018}, school={Purdue University} } ```
1,273
[ [ 0.0123291015625, -0.01282501220703125, 0.035247802734375, 0.01470947265625, 0.0009055137634277344, -0.0189971923828125, -0.006313323974609375, -0.013671875, 0.0191497802734375, 0.050933837890625, -0.02630615234375, -0.049591064453125, -0.049713134765625, 0.0...
am96149/first
2023-11-01T10:21:47.000Z
[ "region:us" ]
am96149
null
null
0
9
2023-10-23T13:39:20
This dataset is a subset of the Open Assistant dataset, which you can find here: https://huggingface.co/datasets/OpenAssistant/oasst1/tree/main This subset of the data only contains the highest-rated paths in the conversation tree, with a total of 9,846 samples. This dataset was used to train Guanaco with QLoRA. For further information, please see the original dataset. License: Apache 2.0
395
[ [ -0.01934814453125, -0.039215087890625, 0.021820068359375, 0.0085906982421875, -0.005558013916015625, -0.0062408447265625, 0.0078125, -0.03729248046875, 0.0233612060546875, 0.037811279296875, -0.06939697265625, -0.05303955078125, -0.032623291015625, -0.012321...
royzhong/cve-2023-llama2
2023-10-24T00:20:03.000Z
[ "region:us" ]
royzhong
null
null
0
9
2023-10-24T00:19:40
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
lzy337/attack_data_hf
2023-10-24T12:24:10.000Z
[ "region:us" ]
lzy337
null
null
0
9
2023-10-24T11:35:51
--- configs: - config_name: default data_files: - split: train path: - toxicity/toxic.jsonl.gpt3.n=25.out1.split.annotated.jsonl.filtered_train.jsonl - split: test path: - toxicity/toxic.jsonl.gpt3.n=25.out1.split.annotated.jsonl.filtered_test.jsonl - split: dev path: - toxicity/toxic.jsonl.gpt3.n=25.out1.split.annotated.jsonl.filtered_dev.jsonl --- Toxicity contail three types of data. 1. from realtoxicty prompt .2 response from gpt3.5 generation as prompt 3. same as 2 but it comes from gpt4
534
[ [ -0.01263427734375, -0.049285888671875, 0.0770263671875, 0.03814697265625, 0.0035247802734375, -0.0206146240234375, 0.0482177734375, -0.032501220703125, -0.004978179931640625, 0.047607421875, -0.058135986328125, -0.01500701904296875, -0.009368896484375, 0.025...
sunjun/pubmedqa_sj
2023-10-24T12:51:58.000Z
[ "region:us" ]
sunjun
null
null
0
9
2023-10-24T12:51:52
--- configs: - config_name: default data_files: - split: validation path: data/validation-* - split: test path: data/test-* dataset_info: features: - name: QUESTION dtype: string - name: CONTEXT dtype: string - name: final_decision dtype: string - name: question dtype: string - name: context dtype: string - name: choices sequence: string - name: answer_index dtype: int64 splits: - name: validation num_bytes: 1454799 num_examples: 500 - name: test num_bytes: 1477607 num_examples: 500 download_size: 1667194 dataset_size: 2932406 --- # Dataset Card for "pubmedqa_sj" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
786
[ [ -0.0223236083984375, -0.002079010009765625, 0.03802490234375, 0.00853729248046875, -0.021392822265625, 0.00014269351959228516, 0.0210723876953125, 0.01016998291015625, 0.060546875, 0.041412353515625, -0.042816162109375, -0.0538330078125, -0.05267333984375, 0...
quyanh/dataset-helm
2023-10-25T08:45:57.000Z
[ "region:us" ]
quyanh
null
null
0
9
2023-10-24T15:31:31
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Intuit-GenSRF/combined_toxicity_profanity_v2_eval_only
2023-10-24T17:44:29.000Z
[ "region:us" ]
Intuit-GenSRF
null
null
0
9
2023-10-24T17:44:15
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: text dtype: string - name: labels sequence: string - name: encoded_labels sequence: int64 splits: - name: train num_bytes: 313551093 num_examples: 710497 download_size: 161511342 dataset_size: 313551093 --- # Dataset Card for "combined_toxicity_profanity_v2_eval_only" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
559
[ [ -0.0020599365234375, -0.0258331298828125, 0.0198211669921875, 0.0302734375, -0.0239715576171875, 0.0033721923828125, -0.0005016326904296875, -0.01202392578125, 0.0321044921875, 0.042999267578125, -0.0390625, -0.0714111328125, -0.047637939453125, -0.019393920...
M-A-D/DarEn-space-test
2023-10-25T11:15:19.000Z
[ "region:us" ]
M-A-D
null
null
0
9
2023-10-25T10:30:55
--- dataset_info: features: - name: sentence dtype: string - name: translation dtype: string - name: translated dtype: bool - name: corrected dtype: bool - name: correction dtype: float64 splits: - name: train num_bytes: 725711 num_examples: 5583 download_size: 0 dataset_size: 725711 configs: - config_name: default data_files: - split: train path: data/train-* --- # Dataset Card for "DarEn-space-test" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
593
[ [ -0.03997802734375, -0.023040771484375, 0.0250701904296875, 0.016357421875, -0.00559234619140625, -0.0236053466796875, 0.01067352294921875, 0.004802703857421875, 0.04931640625, 0.0240020751953125, -0.0679931640625, -0.054168701171875, -0.023529052734375, -0.0...
absanjay/spam-test-data
2023-10-25T16:59:11.000Z
[ "region:us" ]
absanjay
null
null
0
9
2023-10-25T16:56:34
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
GHOFRANEE/SOPHIAE
2023-10-26T09:04:38.000Z
[ "region:us" ]
GHOFRANEE
null
null
0
9
2023-10-26T07:13:58
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* dataset_info: features: - name: image dtype: image - name: ground_truth dtype: string splits: - name: train num_bytes: 6853714.0 num_examples: 94 - name: validation num_bytes: 6853714.0 num_examples: 94 - name: test num_bytes: 6853714.0 num_examples: 94 download_size: 5009808 dataset_size: 20561142.0 --- # Dataset Card for "SOPHIAE" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
695
[ [ -0.0311279296875, -0.0131378173828125, 0.0194091796875, 0.00568389892578125, -0.01020050048828125, -0.01378631591796875, 0.03326416015625, -0.0230255126953125, 0.06573486328125, 0.028717041015625, -0.06451416015625, -0.055084228515625, -0.0428466796875, -0.0...
H4438/multi-choices-text
2023-10-28T07:11:04.000Z
[ "region:us" ]
H4438
null
null
0
9
2023-10-27T06:35:45
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: question dtype: string - name: options list: - name: answer dtype: string - name: key dtype: string - name: answer struct: - name: answer dtype: string - name: key dtype: string - name: solution dtype: string - name: type dtype: string - name: prompt dtype: string - name: response dtype: string - name: grade dtype: string - name: subject dtype: string splits: - name: train num_bytes: 118528289 num_examples: 68953 download_size: 61685616 dataset_size: 118528289 --- # Dataset Card for "multi-choices-text" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
875
[ [ -0.039764404296875, -0.0255279541015625, 0.0227203369140625, 0.0215301513671875, -0.013153076171875, 0.0082550048828125, 0.0001857280731201172, -0.0177459716796875, 0.0577392578125, 0.04638671875, -0.058502197265625, -0.042266845703125, -0.04742431640625, -0...
toilaluan/t2i_reward
2023-10-28T07:27:46.000Z
[ "region:us" ]
toilaluan
null
null
0
9
2023-10-27T07:35:19
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: model_type dtype: string - name: request_id dtype: int64 - name: topic dtype: string - name: reward dtype: float64 - name: individual_rewards struct: - name: image_rewarder dtype: float64 - name: hps_v2_rewarder dtype: float64 splits: - name: train num_bytes: 154200 num_examples: 1800 download_size: 36440 dataset_size: 154200 --- # Dataset Card for "t2i_reward" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
686
[ [ -0.00998687744140625, 0.00899505615234375, 0.010711669921875, 0.0198211669921875, -0.016265869140625, 0.01064300537109375, 0.0236053466796875, -0.0094757080078125, 0.050811767578125, 0.01021575927734375, -0.053924560546875, -0.042083740234375, -0.05120849609375,...
M0hammed87/DictionaryTrain
2023-10-31T01:19:58.000Z
[ "region:us" ]
M0hammed87
null
null
0
9
2023-10-28T10:33:23
--- dataset_info: features: - name: text dtype: string splits: - name: train num_bytes: 1654448 num_examples: 1000 download_size: 966693 dataset_size: 1654448 configs: - config_name: default --- # Guanaco-1k: Lazy Llama 2 Formatting This is a subset (1000 samples) of the excellent [`timdettmers/openassistant-guanaco`](https://huggingface.co/datasets/timdettmers/openassistant-guanaco) dataset, processed to match Llama 2's prompt format as described [in this article](https://huggingface.co/blog/llama2#how-to-prompt-llama-2). It was created using the following [colab notebook](https://colab.research.google.com/drive/1Ad7a9zMmkxuXTOh1Z7-rNSICA4dybpM2?usp=sharing). Useful if you don't want to reformat it by yourself (e.g., using a script). It was designed for [this article](https://mlabonne.github.io/blog/posts/Fine_Tune_Your_Own_Llama_2_Model_in_a_Colab_Notebook.html) about fine-tuning a Llama 2 (chat) model in a Google Colab.
965
[ [ -0.00318145751953125, -0.06561279296875, 0.027099609375, 0.06365966796875, -0.040191650390625, 0.0033473968505859375, -0.01104736328125, -0.0240936279296875, 0.0404052734375, 0.026214599609375, -0.06646728515625, -0.0426025390625, -0.0269927978515625, 0.0102...
marcus2000/sentiment2to1
2023-10-29T20:52:55.000Z
[ "region:us" ]
marcus2000
null
null
0
9
2023-10-29T20:50:15
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* dataset_info: features: - name: text dtype: string - name: label dtype: int64 splits: - name: train num_bytes: 4281800 num_examples: 3350 - name: test num_bytes: 441642 num_examples: 373 download_size: 2338740 dataset_size: 4723442 --- # Dataset Card for "sentiment2to1" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
571
[ [ -0.04095458984375, -0.0127105712890625, 0.015594482421875, 0.03875732421875, -0.0197601318359375, -0.00844573974609375, 0.01461029052734375, -0.0057220458984375, 0.059783935546875, 0.0264129638671875, -0.06414794921875, -0.055450439453125, -0.0533447265625, ...
toilaluan/t2i_reward_v4
2023-10-30T09:19:31.000Z
[ "region:us" ]
toilaluan
null
null
0
9
2023-10-30T03:45:50
--- configs: - config_name: default data_files: - split: train path: data/train-* dataset_info: features: - name: model_type dtype: string - name: request_id dtype: int64 - name: topic dtype: string - name: reward dtype: float64 - name: individual_rewards struct: - name: clip_aesthetic_rewarder dtype: float64 - name: pick_rewarder dtype: float64 - name: image_rewarder dtype: float64 - name: hps_v2_rewarder dtype: float64 splits: - name: train num_bytes: 115800 num_examples: 1125 download_size: 43681 dataset_size: 115800 --- # Dataset Card for "t2i_reward_v4" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
793
[ [ -0.01171875, 0.0179443359375, 0.01512908935546875, 0.017303466796875, -0.017791748046875, 0.007781982421875, 0.0355224609375, -0.0121307373046875, 0.050445556640625, 0.0171966552734375, -0.058380126953125, -0.04241943359375, -0.044952392578125, -0.0204620361...
kejian/SciReviewGen
2023-11-01T02:29:09.000Z
[ "region:us" ]
kejian
null
null
0
9
2023-10-31T03:15:07
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* dataset_info: features: - name: reference dtype: string - name: target dtype: string splits: - name: train num_bytes: 1017206768 num_examples: 84705 - name: validation num_bytes: 52660512 num_examples: 4410 - name: test num_bytes: 54202617 num_examples: 4457 download_size: 507188880 dataset_size: 1124069897 --- # Dataset Card for "SciReviewGen" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
707
[ [ -0.035247802734375, 0.0088653564453125, 0.020355224609375, 0.020233154296875, -0.01174163818359375, 0.0032176971435546875, 0.025054931640625, -0.0226898193359375, 0.06378173828125, 0.0264739990234375, -0.0587158203125, -0.046600341796875, -0.043792724609375, ...
Neurogpt/autotrain-data-stroke-classifier
2023-10-31T08:25:11.000Z
[ "task_categories:image-classification", "region:us" ]
Neurogpt
null
null
0
9
2023-10-31T07:55:36
--- task_categories: - image-classification --- # AutoTrain Dataset for project: stroke-classifier ## Dataset Description This dataset has been automatically processed by AutoTrain for project stroke-classifier. ### Languages The BCP-47 code for the dataset's language is unk. ## Dataset Structure ### Data Instances A sample from this dataset looks as follows: ```json [ { "image": "<233x197 L PIL image>", "target": 0 }, { "image": "<233x197 L PIL image>", "target": 0 } ] ``` ### Dataset Fields The dataset has the following fields (also called "features"): ```json { "image": "Image(decode=True, id=None)", "target": "ClassLabel(names=['notStroke', 'stroke'], id=None)" } ``` ### Dataset Splits This dataset is split into a train and validation split. The split sizes are as follow: | Split name | Num samples | | ------------ | ------------------- | | train | 1600 | | valid | 945 |
955
[ [ -0.0294036865234375, 0.0023097991943359375, 0.0010929107666015625, 0.00989532470703125, -0.0343017578125, 0.01788330078125, -0.0011043548583984375, -0.036651611328125, 0.004520416259765625, 0.031463623046875, -0.0439453125, -0.046539306640625, -0.046630859375, ...
minoosh/shEMO_transcripts
2023-10-31T20:20:42.000Z
[ "region:us" ]
minoosh
null
null
0
9
2023-10-31T20:20:26
--- dataset_info: features: - name: transcription dtype: string - name: emotion dtype: class_label: names: '0': A '1': H '2': N '3': S '4': W '5': F splits: - name: train num_bytes: 255721.6 num_examples: 2400 - name: test num_bytes: 31965.2 num_examples: 300 - name: valid num_bytes: 31965.2 num_examples: 300 download_size: 173563 dataset_size: 319652.0 --- # Dataset Card for "shEMO_transcripts" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
655
[ [ -0.0205841064453125, -0.004913330078125, 0.01322174072265625, 0.00283050537109375, -0.01412200927734375, 0.00771331787109375, 0.01082611083984375, 0.01025390625, 0.037811279296875, 0.03143310546875, -0.058380126953125, -0.0623779296875, -0.061920166015625, -...
research-dump/mcqa_hoax_1h10r_def_bigbench
2023-10-31T21:53:23.000Z
[ "region:us" ]
research-dump
null
null
0
9
2023-10-31T21:52:46
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
derekiya/bigquery-v1
2023-11-01T07:12:47.000Z
[ "region:us" ]
derekiya
null
null
0
9
2023-11-01T07:11:09
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Arabic-Clip/mscoco_captions_en_ar_ViT_B_16_plus_240_1st_caption
2023-11-01T14:11:40.000Z
[ "region:us" ]
Arabic-Clip
null
null
0
9
2023-11-01T14:10:51
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
tyzhu/squad_train50000_eval1000_dec
2023-11-02T10:04:58.000Z
[ "region:us" ]
tyzhu
null
null
0
9
2023-11-02T10:04:45
--- configs: - config_name: default data_files: - split: validation path: data/validation-* - split: train path: data/train-* dataset_info: features: - name: id dtype: string - name: title dtype: string - name: context dtype: string - name: question dtype: string - name: answers sequence: - name: text dtype: string - name: answer_start dtype: int32 - name: answer dtype: string - name: text dtype: string - name: inputs dtype: string - name: targets dtype: string splits: - name: validation num_bytes: 3184837 num_examples: 1000 - name: train num_bytes: 169722340 num_examples: 50000 download_size: 35308668 dataset_size: 172907177 --- # Dataset Card for "squad_train50000_eval1000_dec" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
937
[ [ -0.040924072265625, -0.00817108154296875, 0.00746917724609375, 0.03326416015625, 0.0010232925415039062, 0.0225372314453125, 0.0226898193359375, 0.0015134811401367188, 0.051513671875, 0.0263214111328125, -0.0806884765625, -0.04254150390625, -0.028900146484375, ...
midas/nus
2022-03-05T03:35:59.000Z
[ "region:us" ]
midas
\
@InProceedings{10.1007/978-3-540-77094-7_41, author="Nguyen, Thuy Dung and Kan, Min-Yen", editor="Goh, Dion Hoe-Lian and Cao, Tru Hoang and Solvberg, Ingeborg Torvik and Rasmussen, Edie", title="Keyphrase Extraction in Scientific Publications", booktitle="Asian Digital Libraries. Looking Back 10 Years and Forging New Frontiers", year="2007", publisher="Springer Berlin Heidelberg", address="Berlin, Heidelberg", pages="317--326", isbn="978-3-540-77094-7" }
0
8
2022-03-02T23:29:22
## Dataset Summary A dataset for benchmarking keyphrase extraction and generation techniques from long document english scientific papers. For more details about the dataset please refer the original paper - [https://www.comp.nus.edu.sg/~kanmy/papers/icadl2007.pdf](https://www.comp.nus.edu.sg/~kanmy/papers/icadl2007.pdf) Original source of the data - []() ## Dataset Structure ### Data Fields - **id**: unique identifier of the document. - **document**: Whitespace separated list of words in the document. - **doc_bio_tags**: BIO tags for each word in the document. B stands for the beginning of a keyphrase and I stands for inside the keyphrase. O stands for outside the keyphrase and represents the word that isn't a part of the keyphrase at all. - **extractive_keyphrases**: List of all the present keyphrases. - **abstractive_keyphrase**: List of all the absent keyphrases. ### Data Splits |Split| #datapoints | |--|--| | Test | 211 | - Percentage of keyphrases that are named entities: 67.95% (named entities detected using scispacy - en-core-sci-lg model) - Percentage of keyphrases that are noun phrases: 82.16% (noun phrases detected using spacy en-core-web-lg after removing determiners) ## Usage ### Full Dataset ```python from datasets import load_dataset # get entire dataset dataset = load_dataset("midas/nus", "raw") # sample from the test split print("Sample from test dataset split") test_sample = dataset["test"][0] print("Fields in the sample: ", [key for key in test_sample.keys()]) print("Tokenized Document: ", test_sample["document"]) print("Document BIO Tags: ", test_sample["doc_bio_tags"]) print("Extractive/present Keyphrases: ", test_sample["extractive_keyphrases"]) print("Abstractive/absent Keyphrases: ", test_sample["abstractive_keyphrases"]) print("\n-----------\n") ``` **Output** ```bash Sample from test data split Fields in the sample: ['id', 'document', 'doc_bio_tags', 'extractive_keyphrases', 'abstractive_keyphrases', 'other_metadata'] Tokenized Document: 
['Learning', 'Spatially', 'Variant', 'Dissimilarity', '-LRB-', 'Svad', '-RRB-', 'Measures', 'Clustering', 'algorithms', 'typically', 'operate', 'on', 'a', 'feature', 'vector', 'representation', 'of', 'the', 'data', 'and', 'find', 'clusters', 'that', 'are', 'compact', 'with', 'respect', 'to', 'an', 'assumed', '-LRB-', 'dis', '-RRB-', 'similarity', 'measure', 'between', 'the', 'data', 'points', 'in', 'feature', 'space', '.', 'This', 'makes', 'the', 'type', 'of', 'clusters', 'identified', 'highly', 'dependent', 'on', 'the', 'assumed', 'similarity', 'measure', '.', 'Building', 'on', 'recent', 'work', 'in', 'this', 'area', ',', 'we', 'formally', 'define', 'a', 'class', 'of', 'spatially', 'varying', 'dissimilarity', 'measures', 'and', 'propose', 'algorithms', 'to', 'learn', 'the', 'dissimilarity', 'measure', 'automatically', 'from', 'the', 'data', '.', 'The', 'idea', 'is', 'to', 'identify', 'clusters', 'that', 'are', 'compact', 'with', 'respect', 'to', 'the', 'unknown', 'spatially', 'varying', 'dissimilarity', 'measure', '.', 'Our', 'experiments', 'show', 'that', 'the', 'proposed', 'algorithms', 'are', 'more', 'stable', 'and', 'achieve', 'better', 'accuracy', 'on', 'various', 'textual', 'data', 'sets', 'when', 'compared', 'with', 'similar', 'algorithms', 'proposed', 'in', 'the', 'literature', '.', 'H.', '2.8', '-LSB-', 'Database', 'Management', '-RSB-', ':', 'Database', 'Applications-Data', 'Mining', 'Algorithms', 'Clustering', 'plays', 'a', 'major', 'role', 'in', 'data', 'mining', 'as', 'a', 'tool', 'to', 'discover', 'structure', 'in', 'data', '.', 'Object', 'clustering', 'algorithms', 'operate', 'on', 'a', 'feature', 'vector', 'representation', 'of', 'the', 'data', 'and', 'find', 'clusters', 'that', 'are', 'compact', 'with', 'respect', 'to', 'an', 'assumed', '-LRB-', 'dis', '-RRB-', 'similarity', 'measure', 'between', 'the', 'data', 'points', 'in', 'feature', 'space', '.', 'As', 'a', 'consequence', ',', 'the', 'nature', 'of', 'clusters', 'identified', 'by', 'a', 
'clustering', 'algorithm', 'is', 'highly', 'dependent', 'on', 'the', 'assumed', 'similarity', 'measure', '.', 'The', 'most', 'commonly', 'used', 'dissimilarity', 'measure', ',', 'namely', 'the', 'Euclidean', 'metric', ',', 'assumes', 'that', 'the', 'dissimilarity', 'measure', 'is', 'isotropic', 'and', 'spatially', 'invariant', ',', 'and', 'Permission', 'to', 'make', 'digital', 'or', 'hard', 'copies', 'of', 'all', 'or', 'part', 'of', 'this', 'work', 'for', 'personal', 'or', 'classroom', 'use', 'is', 'granted', 'without', 'fee', 'provided', 'that', 'copies', 'are', 'not', 'made', 'or', 'distributed', 'for', 'profit', 'or', 'commercial', 'advantage', 'and', 'that', 'copies', 'bear', 'this', 'notice', 'and', 'the', 'full', 'citation', 'on', 'the', 'first', 'page', '.', 'To', 'copy', 'otherwise', ',', 'to', 'republish', ',', 'to', 'post', 'on', 'servers', 'or', 'to', 'redistribute', 'to', 'lists', ',', 'requires', 'prior', 'specific', 'permission', 'and/or', 'a', 'fee', '.', 'KDD', "'", '04', ',', 'August', '22', '25', ',', '2004', ',', 'Seattle', ',', 'Washington', ',', 'USA', '.', 'Copyright', '2004', 'ACM', '1-58113-888-1', '/', '04/0008', '...', '$', '5.00', '.', 'it', 'is', 'effective', 'only', 'when', 'the', 'clusters', 'are', 'roughly', 'spherical', 'and', 'all', 'of', 'them', 'have', 'approximately', 'the', 'same', 'size', ',', 'which', 'is', 'rarely', 'the', 'case', 'in', 'practice', '-LSB-', '8', '-RSB-', '.', 'The', 'problem', 'of', 'finding', 'non-spherical', 'clusters', 'is', 'often', 'addressed', 'by', 'utilizing', 'a', 'feature', 'weighting', 'technique', '.', 'These', 'techniques', 'discover', 'a', 'single', 'set', 'of', 'weights', 'such', 'that', 'relevant', 'features', 'are', 'given', 'more', 'importance', 'than', 'irrelevant', 'features', '.', 'However', ',', 'in', 'practice', ',', 'each', 'cluster', 'may', 'have', 'a', 'different', 'set', 'of', 'relevant', 'features', '.', 'We', 'consider', 'Spatially', 'Varying', 'Dissimilarity', '-LRB-', 'SVaD', 
'-RRB-', 'measures', 'to', 'address', 'this', 'problem', '.', 'Diday', 'et', '.', 'al.', '-LSB-', '4', '-RSB-', 'proposed', 'the', 'adaptive', 'distance', 'dynamic', 'clusters', '-LRB-', 'ADDC', '-RRB-', 'algorithm', 'in', 'this', 'vain', '.', 'A', 'fuzzified', 'version', 'of', 'ADDC', ',', 'popularly', 'known', 'as', 'the', 'Gustafson-Kessel', '-LRB-', 'GK', '-RRB-', 'algorithm', '-LSB-', '7', '-RSB-', 'uses', 'a', 'dynamically', 'updated', 'covariance', 'matrix', 'so', 'that', 'each', 'cluster', 'can', 'have', 'its', 'own', 'norm', 'matrix', '.', 'These', 'algorithms', 'can', 'deal', 'with', 'hyperelliposoidal', 'clusters', 'of', 'various', 'sizes', 'and', 'orientations', '.', 'The', 'EM', 'algorithm', '-LSB-', '2', '-RSB-', 'with', 'Gaussian', 'probability', 'distributions', 'can', 'also', 'be', 'used', 'to', 'achieve', 'similar', 'results', '.', 'However', ',', 'the', 'above', 'algorithms', 'are', 'computationally', 'expensive', 'for', 'high-dimensional', 'data', 'since', 'they', 'invert', 'covariance', 'matrices', 'in', 'every', 'iteration', '.', 'Moreover', ',', 'matrix', 'inversion', 'can', 'be', 'unstable', 'when', 'the', 'data', 'is', 'sparse', 'in', 'relation', 'to', 'the', 'dimensionality', '.', 'One', 'possible', 'solution', 'to', 'the', 'problems', 'of', 'high', 'computation', 'and', 'instability', 'arising', 'out', 'of', 'using', 'covariance', 'matrices', 'is', 'to', 'force', 'the', 'matrices', 'to', 'be', 'diagonal', ',', 'which', 'amounts', 'to', 'weighting', 'each', 'feature', 'differently', 'in', 'different', 'clusters', '.', 'While', 'this', 'restricts', 'the', 'dissimilarity', 'measures', 'to', 'have', 'axis', 'parallel', 'isometry', ',', 'the', 'weights', 'also', 'provide', 'a', 'simple', 'interpretation', 'of', 'the', 'clusters', 'in', 'terms', 'of', 'relevant', 'features', ',', 'which', 'is', 'important', 'in', 'knowledge', 'discovery', '.', 'Examples', 'of', 'such', 'algorithms', 'are', 'SCAD', 'and', 'Fuzzy-SKWIC', '-LSB-', '5', ',', '6', 
'-RSB-', ',', 'which', 'perform', 'fuzzy', 'clustering', 'of', 'data', 'while', 'simultaneously', 'finding', 'feature', 'weights', 'in', 'individual', 'clusters', '.', 'In', 'this', 'paper', ',', 'we', 'generalize', 'the', 'idea', 'of', 'the', 'feature', 'weighting', 'approach', 'to', 'define', 'a', 'class', 'of', 'spatially', 'varying', 'dissimilarity', 'measures', 'and', 'propose', 'algorithms', 'that', 'learn', 'the', 'dissimilarity', 'measure', 'automatically', 'from', 'the', 'given', 'data', 'while', 'performing', 'the', 'clustering', '.', 'The', 'idea', 'is', 'to', 'identify', 'clusters', 'inherent', 'in', 'the', 'data', 'that', 'are', 'compact', 'with', 'respect', 'to', 'the', 'unknown', 'spatially', 'varying', 'dissimilarity', 'measure', '.', 'We', 'compare', 'the', 'proposed', 'algorithms', 'with', 'a', 'diagonal', 'version', 'of', 'GK', '-LRB-', 'DGK', '-RRB-', 'and', 'a', 'crisp', 'version', 'of', 'SCAD', '-LRB-', 'CSCAD', '-RRB-', 'on', 'a', 'variety', 'of', 'data', 'sets', '.', 'Our', 'algorithms', 'perform', 'better', 'than', 'DGK', 'and', 'CSCAD', ',', 'and', 'use', 'more', 'stable', 'update', 'equations', 'for', 'weights', 'than', 'CSCAD', '.', 'The', 'rest', 'of', 'the', 'paper', 'is', 'organized', 'as', 'follows', '.', 'In', 'the', 'next', 'section', ',', 'we', 'define', 'a', 'general', 'class', 'of', 'dissimilarity', 'measures', '611', 'Research', 'Track', 'Poster', 'and', 'formulate', 'two', 'objective', 'functions', 'based', 'on', 'them', '.', 'In', 'Section', '3', ',', 'we', 'derive', 'learning', 'algorithms', 'that', 'optimize', 'the', 'objective', 'functions', '.', 'We', 'present', 'an', 'experimental', 'study', 'of', 'the', 'proposed', 'algorithms', 'in', 'Section', '4', '.', 'We', 'compare', 'the', 'performance', 'of', 'the', 'proposed', 'algorithms', 'with', 'that', 'of', 'DGK', 'and', 'CSCAD', '.', 'These', 'two', 'algorithms', 'are', 'explained', 'in', 'Appendix', 'A.', 'Finally', ',', 'we', 'summarize', 'our', 'contributions', 'and', 
'conclude', 'with', 'some', 'future', 'directions', 'in', 'Section', '5', '.', 'We', 'first', 'define', 'a', 'general', 'class', 'of', 'dissimilarity', 'measures', 'and', 'formulate', 'a', 'few', 'objective', 'functions', 'in', 'terms', 'of', 'the', 'given', 'data', 'set', '.', 'Optimization', 'of', 'the', 'objective', 'functions', 'would', 'result', 'in', 'learning', 'the', 'underlying', 'dissimilarity', 'measure', '.', '2.1', 'SVaD', 'Measures', 'In', 'the', 'following', 'definition', ',', 'we', 'generalize', 'the', 'concept', 'of', 'dissimilarity', 'measures', 'in', 'which', 'the', 'weights', 'associated', 'with', 'features', 'change', 'over', 'feature', 'space', '.', 'Definition', '2.1', 'We', 'define', 'the', 'measure', 'of', 'dissimilarity', 'of', 'x', 'from', 'y', '1', 'to', 'be', 'a', 'weighted', 'sum', 'of', 'M', 'dissimilarity', 'measures', 'between', 'x', 'and', 'y', 'where', 'the', 'values', 'of', 'the', 'weights', 'depend', 'on', 'the', 'region', 'from', 'which', 'the', 'dissimilarity', 'is', 'being', 'measured', '.', 'Let', 'P', '=', '-LCB-', 'R', '1', ',', '...', ',', 'R', 'K', '-RCB-', 'be', 'a', 'collection', 'of', 'K', 'regions', 'that', 'partition', 'the', 'feature', 'space', ',', 'and', 'w', '1', ',', 'w', '2', ',', '...', ',', 'and', 'w', 'K', 'be', 'the', 'weights', 'associated', 'with', 'R', '1', ',', 'R', '2', ',', '...', ',', 'and', 'R', 'K', ',', 'respectively', '.', 'Let', 'g', '1', ',', 'g', '2', ',', '...', ',', 'and', 'g', 'M', 'be', 'M', 'dissimilarity', 'measures', '.', 'Then', ',', 'each', 'w', 'j', ',', 'j', '=', '1', ',', '...', ',', 'K', ',', 'is', 'an', 'M', '-', 'dimensional', 'vector', 'where', 'its', 'l-th', 'component', ',', 'w', 'jl', 'is', 'associated', 'with', 'g', 'l', '.', 'Let', 'W', 'denote', 'the', 'K-tuple', '-LRB-', 'w', '1', ',', '...', ',', 'w', 'K', '-RRB-', 'and', 'let', 'r', 'be', 'a', 'real', 'number', '.', 'Then', ',', 'the', 'dissimilarity', 'of', 'x', 'from', 'y', 'is', 'given', 'by', ':', 'f', 'W', 
'-LRB-', 'x', ',', 'y', '-RRB-', '=', 'M', 'l', '=', '1', 'w', 'r', 'jl', 'g', 'l', '-LRB-', 'x', ',', 'y', '-RRB-', ',', 'if', 'y', 'R', 'j', '.', '-LRB-', '1', '-RRB-', 'We', 'refer', 'to', 'f', 'W', 'as', 'a', 'Spatially', 'Variant', 'Dissimilarity', '-LRB-', 'SVaD', '-RRB-', 'measure', '.', 'Note', 'that', 'f', 'W', 'need', 'not', 'be', 'symmetric', 'even', 'if', 'g', 'i', 'are', 'symmetric', '.', 'Hence', ',', 'f', 'W', 'is', 'not', 'a', 'metric', '.', 'Moreover', ',', 'the', 'behavior', 'of', 'f', 'W', 'depends', 'on', 'the', 'behavior', 'of', 'g', 'i', '.', 'There', 'are', 'many', 'ways', 'to', 'define', 'g', 'i', '.', 'We', 'list', 'two', 'instances', 'of', 'f', 'W', '.', 'Example', '2.1', '-LRB-', 'Minkowski', '-RRB-', 'Let', 'd', 'be', 'the', 'feature', 'space', 'and', 'M', '=', 'd.', 'Let', 'a', 'point', 'x', 'd', 'be', 'represented', 'as', '-LRB-', 'x', '1', ',', '...', ',', 'x', 'd', '-RRB-', '.', 'Then', ',', 'when', 'g', 'i', '-LRB-', 'x', ',', 'y', '-RRB-', '=', '|', 'x', 'i', '-', 'y', 'i', '|', 'p', 'for', 'i', '=', '1', ',', '...', ',', 'd', ',', 'and', 'p', '1', ',', 'the', 'resulting', 'SVaD', 'measure', ',', 'f', 'M', 'W', 'is', 'called', 'Minkowski', 'SVaD', '-LRB-', 'MSVaD', '-RRB-', 'measure', '.', 'That', 'is', ',', 'f', 'M', 'W', '-LRB-', 'x', ',', 'y', '-RRB-', '=', 'd', 'l', '=', '1', 'w', 'r', 'jl', '|', 'x', 'l', '-', 'y', 'l', '|', 'p', ',', 'if', 'y', 'R', 'j', '.', '-LRB-', '2', '-RRB-', 'One', 'may', 'note', 'that', 'when', 'w', '1', '=', '=', 'w', 'K', 'and', 'p', '=', '2', ',', 'f', 'M', 'W', 'is', 'the', 'weighted', 'Euclidean', 'distance', '.', 'When', 'p', '=', '2', ',', 'we', 'call', 'f', 'M', 'W', 'a', 'Euclidean', 'SVaD', '-LRB-', 'ESVaD', '-RRB-', 'measure', 'and', 'denote', 'it', 'by', 'f', 'E', 'W', '.', '1', 'We', 'use', 'the', 'phrase', '``', 'dissimilarity', 'of', 'x', 'from', 'y', "''", 'rather', 'than', '``', 'dissimilarity', 'between', 'x', 'and', 'y', "''", 'because', 'we', 'consider', 'a', 'general', 
'situation', 'where', 'the', 'dissimilarity', 'measure', 'depends', 'on', 'the', 'location', 'of', 'y', '.', 'As', 'an', 'example', 'of', 'this', 'situation', 'in', 'text', 'mining', ',', 'when', 'the', 'dissimilarity', 'is', 'measured', 'from', 'a', 'document', 'on', '`', 'terrorism', "'", 'to', 'a', 'document', 'x', ',', 'a', 'particular', 'set', 'of', 'keywords', 'may', 'be', 'weighted', 'heavily', 'whereas', 'when', 'the', 'dissimilarity', 'is', 'measured', 'from', 'a', 'document', 'on', '`', 'football', "'", 'to', 'x', ',', 'a', 'different', 'set', 'of', 'keywords', 'may', 'be', 'weighted', 'heavily', '.', 'Example', '2.2', '-LRB-', 'Cosine', '-RRB-', 'Let', 'the', 'feature', 'space', 'be', 'the', 'set', 'of', 'points', 'with', 'l', '2', 'norm', 'equal', 'to', 'one', '.', 'That', 'is', ',', 'x', '2', '=', '1', 'for', 'all', 'points', 'x', 'in', 'feature', 'space', '.', 'Then', ',', 'when', 'g', 'l', '-LRB-', 'x', ',', 'y', '-RRB-', '=', '-LRB-', '1/d', '-', 'x', 'l', 'y', 'l', '-RRB-', 'for', 'l', '=', '1', ',', '...', ',', 'd', ',', 'the', 'resulting', 'SVaD', 'measure', 'f', 'C', 'W', 'is', 'called', 'a', 'Cosine', 'SVaD', '-LRB-', 'CSVaD', '-RRB-', 'measure', ':', 'f', 'C', 'W', '-LRB-', 'x', ',', 'y', '-RRB-', '=', 'd', 'i', '=', '1', 'w', 'r', 'jl', '-LRB-', '1/d', '-', 'x', 'l', 'y', 'l', '-RRB-', ',', 'if', 'y', 'R', 'j', '.', '-LRB-', '3', '-RRB-', 'In', 'the', 'formulation', 'of', 'the', 'objective', 'function', 'below', ',', 'we', 'use', 'a', 'set', 'of', 'parameters', 'to', 'represent', 'the', 'regions', 'R', '1', ',', 'R', '2', ',', '...', ',', 'and', 'R', 'K', '.', 'Let', 'c', '1', ',', 'c', '2', ',', '...', ',', 'and', 'c', 'K', 'be', 'K', 'points', 'in', 'feature', 'space', '.', 'Then', 'y', 'R', 'j', 'iff', 'f', 'W', '-LRB-', 'y', ',', 'c', 'j', '-RRB-', '<', 'f', 'W', '-LRB-', 'y', ',', 'c', 'i', '-RRB-', 'for', 'i', '=', 'j.', '-LRB-', '4', '-RRB-', 'In', 'the', 'case', 'of', 'ties', ',', 'y', 'is', 'assigned', 'to', 'the', 'region', 'with', 
'the', 'lowest', 'index', '.', 'Thus', ',', 'the', 'K-tuple', 'of', 'points', 'C', '=', '-LRB-', 'c', '1', ',', 'c', '2', ',', '...', ',', 'c', 'K', '-RRB-', 'defines', 'a', 'partition', 'in', 'feature', 'space', '.', 'The', 'partition', 'induced', 'by', 'the', 'points', 'in', 'C', 'is', 'similar', 'in', 'nature', 'to', 'a', 'Voronoi', 'tessellation', '.', 'We', 'use', 'the', 'notation', 'f', 'W', ',', 'C', 'whenever', 'we', 'use', 'the', 'set', 'C', 'to', 'parameterize', 'the', 'regions', 'used', 'in', 'the', 'dissimilarity', 'measure', '.', '2.2', 'Objective', 'Function', 'for', 'Clustering', 'The', 'goal', 'of', 'the', 'present', 'work', 'is', 'to', 'identify', 'the', 'spatially', 'varying', 'dissimilarity', 'measure', 'and', 'the', 'associated', 'compact', 'clusters', 'simultaneously', '.', 'It', 'is', 'worth', 'mentioning', 'here', 'that', ',', 'as', 'in', 'the', 'case', 'of', 'any', 'clustering', 'algorithm', ',', 'the', 'underlying', 'assumption', 'in', 'this', 'paper', 'is', 'the', 'existence', 'of', 'such', 'a', 'dissimilarity', 'measure', 'and', 'clusters', 'for', 'a', 'given', 'data', 'set', '.', 'Let', 'x', '1', ',', 'x', '2', ',', '...', ',', 'and', 'x', 'n', 'be', 'n', 'given', 'data', 'points', '.', 'Let', 'K', 'be', 'a', 'given', 'positive', 'integer', '.', 'Assuming', 'that', 'C', 'represents', 'the', 'cluster', 'centers', ',', 'let', 'us', 'assign', 'each', 'data', 'point', 'x', 'i', 'to', 'a', 'cluster', 'R', 'j', 'with', 'the', 'closest', 'c', 'j', 'as', 'the', 'cluster', 'center', '2', ',', 'i.e.', ',', 'j', '=', 'arg', 'min', 'l', 'f', 'W', ',', 'C', '-LRB-', 'x', 'i', ',', 'c', 'l', '-RRB-', '.', '-LRB-', '5', '-RRB-', 'Then', ',', 'the', 'within-cluster', 'dissimilarity', 'is', 'given', 'by', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', '=', 'K', 'j', '=', '1', 'x', 'i', 'R', 'j', 'M', 'l', '=', '1', 'w', 'r', 'jl', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '.', '-LRB-', '6', '-RRB-', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', 'represents', 
'the', 'sum', 'of', 'the', 'dissimilarity', 'measures', 'of', 'all', 'the', 'data', 'points', 'from', 'their', 'closest', 'centroids', '.', 'The', 'objective', 'is', 'to', 'find', 'W', 'and', 'C', 'that', 'minimize', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', '.', 'To', 'avoid', 'the', 'trivial', 'solution', 'to', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', ',', 'we', 'consider', 'a', 'normalization', 'condition', 'on', 'w', 'j', ',', 'viz.', ',', 'M', 'l', '=', '1', 'w', 'jl', '=', '1', '.', '-LRB-', '7', '-RRB-', 'Note', 'that', 'even', 'with', 'this', 'condition', ',', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', 'has', 'a', 'trivial', 'solution', ':', 'w', 'jp', '=', '1', 'where', 'p', '=', 'arg', 'min', 'l', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', ',', 'and', 'the', 'remaining', 'weights', 'are', 'zero', '.', 'One', 'way', 'to', 'avoid', 'convergence', 'of', 'w', 'j', 'to', 'unit', 'vectors', 'is', 'to', 'impose', 'a', 'regularization', 'condition', 'on', 'w', 'j', '.', 'We', 'consider', 'the', 'following', 'two', 'regularization', 'measures', 'in', 'this', 'paper', ':', '-LRB-', '1', '-RRB-', 'Entropy', 'measure', ':', 'M', 'l', '=', '1', 'w', 'jl', 'log', '-LRB-', 'w', 'jl', '-RRB-', 'and', '-LRB-', '2', '-RRB-', 'Gini', 'measure', ':', 'M', 'l', '=', '1', 'w', '2', 'jl', '.', '2', 'We', 'use', 'P', '=', '-LCB-', 'R', '1', ',', 'R', '2', ',', '...', ',', 'R', 'K', '-RCB-', 'to', 'represent', 'the', 'corresponding', 'partition', 'of', 'the', 'data', 'set', 'as', 'well', '.', 'The', 'intended', 'interpretation', '-LRB-', 'cluster', 'or', 'region', '-RRB-', 'would', 'be', 'evident', 'from', 'the', 'context', '.', '612', 'Research', 'Track', 'Poster', 'The', 'problem', 'of', 'determining', 'the', 'optimal', 'W', 'and', 'C', 'is', 'similar', 'to', 'the', 'traditional', 'clustering', 'problem', 'that', 'is', 'solved', 'by', 'the', 'K-Means', 'Algorithm', '-LRB-', 'KMA', '-RRB-', 'except', 'for', 'the', 'additional', 'W', 'matrix', '.', 'We', 
'propose', 'a', 'class', 'of', 'iterative', 'algorithms', 'similar', 'to', 'KMA', '.', 'These', 'algorithms', 'start', 'with', 'a', 'random', 'partition', 'of', 'the', 'data', 'set', 'and', 'iteratively', 'update', 'C', ',', 'W', 'and', 'P', 'so', 'that', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', 'is', 'minimized', '.', 'These', 'iterative', 'algorithms', 'are', 'instances', 'of', 'Alternating', 'Optimization', '-LRB-', 'AO', '-RRB-', 'algorithms', '.', 'In', '-LSB-', '1', '-RSB-', ',', 'it', 'is', 'shown', 'that', 'AO', 'algorithms', 'converge', 'to', 'a', 'local', 'optimum', 'under', 'some', 'conditions', '.', 'We', 'outline', 'the', 'algorithm', 'below', 'before', 'actually', 'describing', 'how', 'to', 'update', 'C', ',', 'W', 'and', 'P', 'in', 'every', 'iteration', '.', 'Randomly', 'assign', 'the', 'data', 'points', 'to', 'K', 'clusters', '.', 'REPEAT', 'Update', 'C', ':', 'Compute', 'the', 'centroid', 'of', 'each', 'cluster', 'c', 'j', '.', 'Update', 'W', ':', 'Compute', 'the', 'w', 'jl', 'j', ',', 'l.', 'Update', 'P', ':', 'Reassign', 'the', 'data', 'points', 'to', 'the', 'clusters', '.', 'UNTIL', '-LRB-', 'termination', 'condition', 'is', 'reached', '-RRB-', '.', 'In', 'the', 'above', 'algorithm', ',', 'the', 'update', 'of', 'C', 'depends', 'on', 'the', 'definition', 'of', 'g', 'i', ',', 'and', 'the', 'update', 'of', 'W', 'on', 'the', 'regularization', 'terms', '.', 'The', 'update', 'of', 'P', 'is', 'done', 'by', 'reassigning', 'the', 'data', 'points', 'according', 'to', '-LRB-', '5', '-RRB-', '.', 'Before', 'explaining', 'the', 'computation', 'of', 'C', 'in', 'every', 'iteration', 'for', 'various', 'g', 'i', ',', 'we', 'first', 'derive', 'update', 'equations', 'for', 'W', 'for', 'various', 'regularization', 'measures', '.', '3.1', 'Update', 'of', 'Weights', 'While', 'updating', 'weights', ',', 'we', 'need', 'to', 'find', 'the', 'values', 'of', 'weights', 'that', 'minimize', 'the', 'objective', 'function', 'for', 'a', 'given', 'C', 'and', 'P', '.', 'As', 
'mentioned', 'above', ',', 'we', 'consider', 'the', 'two', 'regularization', 'measures', 'for', 'w', 'jl', 'and', 'derive', 'update', 'equations', '.', 'If', 'we', 'consider', 'the', 'entropy', 'regularization', 'with', 'r', '=', '1', ',', 'the', 'objective', 'function', 'becomes', ':', 'J', 'EN', 'T', '-LRB-', 'W', ',', 'C', '-RRB-', '=', 'K', 'j', '=', '1', 'x', 'i', 'R', 'j', 'M', 'l', '=', '1', 'w', 'jl', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '+', 'K', 'j', '=', '1', 'j', 'M', 'l', '=', '1', 'w', 'jl', 'log', '-LRB-', 'w', 'jl', '-RRB-', '+', 'K', 'j', '=', '1', 'j', 'M', 'l', '=', '1', 'w', 'jl', '-', '1', '.', '-LRB-', '8', '-RRB-', 'Note', 'that', 'j', 'are', 'the', 'Lagrange', 'multipliers', 'corresponding', 'to', 'the', 'normalization', 'constraints', 'in', '-LRB-', '7', '-RRB-', ',', 'and', 'j', 'represent', 'the', 'relative', 'importance', 'given', 'to', 'the', 'regularization', 'term', 'relative', 'to', 'the', 'within-cluster', 'dissimilarity', '.', 'Differentiating', 'J', 'EN', 'T', '-LRB-', 'W', ',', 'C', '-RRB-', 'with', 'respect', 'to', 'w', 'jl', 'and', 'equating', 'it', 'to', 'zero', ',', 'we', 'obtain', 'w', 'jl', '=', 'exp', '-', '-LRB-', 'j', '+', 'x', 'i', 'Rj', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '-RRB-', 'j', '-', '1', '.', 'Solving', 'for', 'j', 'by', 'substituting', 'the', 'above', 'value', 'of', 'w', 'jl', 'in', '-LRB-', '7', '-RRB-', 'and', 'substituting', 'the', 'value', 'of', 'j', 'back', 'in', 'the', 'above', 'equation', ',', 'we', 'obtain', 'w', 'jl', '=', 'exp', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '/', 'j', 'M', 'n', '=', '1', 'exp', 'x', 'i', 'R', 'j', 'g', 'n', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '/', 'j', '.', '-LRB-', '9', '-RRB-', 'If', 'we', 'consider', 'the', 'Gini', 'measure', 'for', 'regularization', 'with', 'r', '=', '2', ',', 'the', 'corresponding', 'w', 'jl', 'that', 'minimizes', 'the', 'objective', 'function', 'can', 'be', 'shown', 'to', 'be', 
'w', 'jl', '=', '1', '/', '-LRB-', 'j', '+', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '-RRB-', 'M', 'n', '=', '1', '-LRB-', '1', '/', '-LRB-', 'j', '+', 'x', 'i', 'R', 'j', 'g', 'n', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '-RRB-', '-RRB-', '.', '-LRB-', '10', '-RRB-', 'In', 'both', 'cases', ',', 'the', 'updated', 'value', 'of', 'w', 'jl', 'is', 'inversely', 'related', 'Algorithm', 'Update', 'Equations', 'Acronyms', 'P', 'C', 'W', 'EEnt', '-LRB-', '5', '-RRB-', '-LRB-', '11', '-RRB-', '-LRB-', '9', '-RRB-', 'EsGini', '-LRB-', '5', '-RRB-', '-LRB-', '11', '-RRB-', '-LRB-', '10', '-RRB-', 'CEnt', '-LRB-', '5', '-RRB-', '-LRB-', '12', '-RRB-', '-LRB-', '9', '-RRB-', 'CsGini', '-LRB-', '5', '-RRB-', '-LRB-', '12', '-RRB-', '-LRB-', '10', '-RRB-', 'Table', '1', ':', 'Summary', 'of', 'algorithms', '.', 'to', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '.', 'This', 'has', 'various', 'interpretations', 'based', 'on', 'the', 'nature', 'of', 'g', 'l', '.', 'For', 'example', ',', 'when', 'we', 'consider', 'the', 'ESVaD', 'measure', ',', 'w', 'jl', 'is', 'inversely', 'related', 'to', 'the', 'variance', 'of', 'l-th', 'element', 'of', 'the', 'data', 'vectors', 'in', 'the', 'j-th', 'cluster', '.', 'In', 'other', 'words', ',', 'when', 'the', 'variance', 'along', 'a', 'particular', 'dimension', 'is', 'high', 'in', 'a', 'cluster', ',', 'then', 'the', 'dimension', 'is', 'less', 'important', 'to', 'the', 'cluster', '.', 'This', 'popular', 'heuristic', 'has', 'been', 'used', 'in', 'various', 'contexts', '-LRB-', 'such', 'as', 'relevance', 'feedback', '-RRB-', 'in', 'the', 'literature', '-LSB-', '9', '-RSB-', '.', 'Similarly', ',', 'when', 'we', 'consider', 'the', 'CSVaD', 'measure', ',', 'w', 'jl', 'is', 'directly', 'proportional', 'to', 'the', 'correlation', 'of', 'the', 'j-th', 'dimension', 'in', 'the', 'l-th', 'cluster', '.', '3.2', 'Update', 'of', 'Centroids', 'Learning', 'ESVaD', 'Measures', ':', 'Substituting', 
'the', 'ESVaD', 'measure', 'in', 'the', 'objective', 'function', 'and', 'solving', 'the', 'first', 'order', 'necessary', 'conditions', ',', 'we', 'observe', 'that', 'c', 'jl', '=', '1', '|', 'R', 'j', '|', 'x', 'i', 'R', 'j', 'x', 'il', '-LRB-', '11', '-RRB-', 'minimizes', 'J', 'ESV', 'AD', '-LRB-', 'W', ',', 'C', '-RRB-', '.', 'Learning', 'CSVaD', 'Measures', ':', 'Let', 'x', 'il', '=', 'w', 'jl', 'x', 'il', ',', 'then', 'using', 'the', 'Cauchy-Swartz', 'inequality', ',', 'it', 'can', 'be', 'shown', 'that', 'c', 'jl', '=', '1', '|', 'R', 'j', '|', 'x', 'i', 'R', 'j', 'x', 'il', '-LRB-', '12', '-RRB-', 'maximizes', 'x', 'i', 'R', 'j', 'd', 'l', '=', '1', 'w', 'jl', 'x', 'il', 'c', 'jl', '.', 'Hence', ',', '-LRB-', '12', '-RRB-', 'also', 'minimizes', 'the', 'objective', 'function', 'when', 'CSVaD', 'is', 'used', 'as', 'the', 'dissimilarity', 'measure', '.', 'Table', '1', 'summarizes', 'the', 'update', 'equations', 'used', 'in', 'various', 'algorithms', '.', 'We', 'refer', 'to', 'this', 'set', 'of', 'algorithms', 'as', 'SVaD', 'learning', 'algorithms', '.', 'In', 'this', 'section', ',', 'we', 'present', 'an', 'experimental', 'study', 'of', 'the', 'algorithms', 'described', 'in', 'the', 'previous', 'sections', '.', 'We', 'applied', 'the', 'proposed', 'algorithms', 'on', 'various', 'text', 'data', 'sets', 'and', 'compared', 'the', 'performance', 'of', 'EEnt', 'and', 'EsGini', 'with', 'that', 'of', 'K-Means', ',', 'CSCAD', 'and', 'DGK', 'algorithms', '.', 'The', 'reason', 'for', 'choosing', 'the', 'K-Means', 'algorithm', '-LRB-', 'KMA', '-RRB-', 'apart', 'from', 'CSCAD', 'and', 'DGK', 'is', 'that', 'it', 'provides', 'a', 'baseline', 'for', 'assessing', 'the', 'advantages', 'of', 'feature', 'weighting', '.', 'KMA', 'is', 'also', 'a', 'popular', 'algorithm', 'for', 'text', 'clustering', '.', 'We', 'have', 'included', 'a', 'brief', 'description', 'of', 'CSCAD', 'and', 'DGK', 'algorithms', 'in', 'Appendix', 'A.', 'Text', 'data', 'sets', 'are', 'sparse', 'and', 'high', 
'dimensional', '.', 'We', 'consider', 'standard', 'labeled', 'document', 'collections', 'and', 'test', 'the', 'proposed', 'algorithms', 'for', 'their', 'ability', 'to', 'discover', 'dissimilarity', 'measures', 'that', 'distinguish', 'one', 'class', 'from', 'another', 'without', 'actually', 'considering', 'the', 'class', 'labels', 'of', 'the', 'documents', '.', 'We', 'measure', 'the', 'success', 'of', 'the', 'algorithms', 'by', 'the', 'purity', 'of', 'the', 'regions', 'that', 'they', 'discover', '.', '613', 'Research', 'Track', 'Poster', '4.1', 'Data', 'Sets', 'We', 'performed', 'our', 'experiments', 'on', 'three', 'standard', 'data', 'sets', ':', '20', 'News', 'Group', ',', 'Yahoo', 'K1', ',', 'and', 'Classic', '3', '.', 'These', 'data', 'sets', 'are', 'described', 'below', '.', '20', 'News', 'Group', '3', ':', 'We', 'considered', 'different', 'subsets', 'of', '20', 'News', 'Group', 'data', 'that', 'are', 'known', 'to', 'contain', 'clusters', 'of', 'varying', 'degrees', 'of', 'separation', '-LSB-', '10', '-RSB-', '.', 'As', 'in', '-LSB-', '10', '-RSB-', ',', 'we', 'considered', 'three', 'random', 'samples', 'of', 'three', 'subsets', 'of', 'the', '20', 'News', 'Group', 'data', '.', 'The', 'subsets', 'denoted', 'by', 'Binary', 'has', '250', 'documents', 'each', 'from', 'talk.politics.mideast', 'and', 'talk.politics.misc', '.', 'Multi5', 'has', '100', 'documents', 'each', 'from', 'comp.graphics', ',', 'rec.motorcycles', ',', 'rec.sport.baseball', ',', 'sci.space', ',', 'and', 'talk.politics.mideast', '.', 'Finally', ',', 'Multi10', 'has', '50', 'documents', 'each', 'from', 'alt.atheism', ',', 'comp', '.', 'sys.mac.hardware', ',', 'misc.forsale', ',', 'rec.autos', ',', 'rec.sport.hockey', ',', 'sci.crypt', ',', 'sci.electronics', ',', 'sci.med', ',', 'sci.space', ',', 'and', 'talk.politics', '.', 'gun', '.', 'It', 'may', 'be', 'noted', 'that', 'Binary', 'data', 'sets', 'have', 'two', 'highly', 'overlapping', 'classes', '.', 'Each', 'of', 'Multi5', 'data', 'sets', 
'has', 'samples', 'from', '5', 'distinct', 'classes', ',', 'whereas', 'Multi10', 'data', 'sets', 'have', 'only', 'a', 'few', 'samples', 'from', '10', 'different', 'classes', '.', 'The', 'size', 'of', 'the', 'vocabulary', 'used', 'to', 'represent', 'the', 'documents', 'in', 'Binary', 'data', 'set', 'is', 'about', '4000', ',', 'Multi5', 'about', '3200', 'and', 'Multi10', 'about', '2800', '.', 'We', 'observed', 'that', 'the', 'relative', 'performance', 'of', 'the', 'algorithms', 'on', 'various', 'samples', 'of', 'Binary', ',', 'Multi5', 'and', 'Multi10', 'data', 'sets', 'was', 'similar', '.', 'Hence', ',', 'we', 'report', 'results', 'on', 'only', 'one', 'of', 'them', '.', 'Yahoo', 'K1', '4', ':', 'This', 'data', 'set', 'contains', '2340', 'Reuters', 'news', 'articles', 'downloaded', 'from', 'Yahoo', 'in', '1997', '.', 'There', 'are', '494', 'from', 'Health', ',', '1389', 'from', 'Entertainment', ',', '141', 'from', 'Sports', ',', '114', 'from', 'Politics', ',', '60', 'from', 'Technology', 'and', '142', 'from', 'Business', '.', 'After', 'preprocessing', ',', 'the', 'documents', 'from', 'this', 'data', 'set', 'are', 'represented', 'using', '12015', 'words', '.', 'Note', 'that', 'this', 'data', 'set', 'has', 'samples', 'from', '6', 'different', 'classes', '.', 'Here', ',', 'the', 'distribution', 'of', 'data', 'points', 'across', 'the', 'class', 'is', 'uneven', ',', 'ranging', 'from', '60', 'to', '1389', '.', 'Classic', '3', '5', ':', 'Classic', '3', 'data', 'set', 'contains', '1400', 'aerospace', 'systems', 'abstracts', 'from', 'the', 'Cranfield', 'collection', ',', '1033', 'medical', 'abstracts', 'from', 'the', 'Medline', 'collection', 'and', '1460', 'information', 'retrieval', 'abstracts', 'from', 'the', 'Cisi', 'collection', ',', 'making', 'up', '3893', 'documents', 'in', 'all', '.', 'After', 'preprocessing', ',', 'this', 'data', 'set', 'has', '4301', 'words', '.', 'The', 'points', 'are', 'almost', 'equally', 'distributed', 'among', 'the', 'three', 'distinct', 
'classes', '.', 'The', 'data', 'sets', 'were', 'preprocessed', 'using', 'two', 'major', 'steps', '.', 'First', ',', 'a', 'set', 'of', 'words', '-LRB-', 'vocabulary', '-RRB-', 'is', 'extracted', 'and', 'then', 'each', 'document', 'is', 'represented', 'with', 'respect', 'to', 'this', 'vocabulary', '.', 'Finding', 'the', 'vocabulary', 'includes', ':', '-LRB-', '1', '-RRB-', 'elimination', 'of', 'the', 'standard', 'list', 'of', 'stop', 'words', 'from', 'the', 'documents', ',', '-LRB-', '2', '-RRB-', 'application', 'of', 'Porter', 'stemming', '6', 'for', 'term', 'normalization', ',', 'and', '-LRB-', '3', '-RRB-', 'keeping', 'only', 'the', 'words', 'which', 'appear', 'in', 'at', 'least', '3', 'documents', '.', 'We', 'represent', 'each', 'document', 'by', 'the', 'unitized', 'frequency', 'vector', '.', '4.2', 'Evaluation', 'of', 'Algorithms', 'We', 'use', 'the', 'accuracy', 'measure', 'to', 'compare', 'the', 'performance', 'of', 'various', 'algorithms', '.', 'Let', 'a', 'ij', 'represent', 'the', 'number', 'of', 'data', 'points', 'from', 'class', 'i', 'that', 'are', 'in', 'cluster', 'j', '.', 'Then', 'the', 'accuracy', 'of', 'the', 'partition', 'is', 'given', 'by', 'j', 'max', 'i', 'a', 'ij', '/', 'n', 'where', 'n', 'is', 'the', 'total', 'number', 'of', 'data', 'points', '.', 'It', 'is', 'to', 'be', 'noted', 'that', 'points', 'coming', 'from', 'a', 'single', 'class', 'need', 'not', 'form', 'a', 'single', 'cluster', '.', 'There', 'could', 'be', 'multiple', '3', 'http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20', '.', 'tar.gz', '4', 'ftp://ftp.cs.umn.edu/dept/users/boley/PDDPdata/doc-K', '5', 'ftp://ftp.cs.cornell.edu/pub/smart', '6', 'http://www.tartarus.org/~martin/PorterStemmer/', 'Iteration', '0', '1', '2', '3', '4', '5', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', '334.7', '329.5', '328.3', '328.1', '327.8', 'Accuracy', '73.8', '80.2', '81.4', '81.6', '82', '82', 'Table', '2', ':', 'Evolution', 'of', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', 'and', 
'Accuracies', 'with', 'iterations', 'when', 'EEnt', 'applied', 'on', 'a', 'Multi5', 'data', '.', 'clusters', 'in', 'a', 'class', 'that', 'represent', 'sub-classes', '.', 'We', 'study', 'the', 'performance', 'of', 'SVaD', 'learning', 'algorithms', 'for', 'various', 'values', 'of', 'K', ',', 'i.e.', ',', 'the', 'number', 'of', 'clusters', '.', '4.3', 'Experimental', 'Setup', 'In', 'our', 'implementations', ',', 'we', 'have', 'observed', 'that', 'the', 'proposed', 'algorithms', ',', 'if', 'applied', 'on', 'randomly', 'initialized', 'centroids', ',', 'show', 'unstable', 'behavior', '.', 'One', 'reason', 'for', 'this', 'behavior', 'is', 'that', 'the', 'number', 'of', 'parameters', 'that', 'are', 'estimated', 'in', 'feature-weighting', 'clustering', 'algorithms', 'is', 'twice', 'as', 'large', 'as', 'that', 'estimated', 'by', 'the', 'traditional', 'KMA', '.', 'We', ',', 'therefore', ',', 'first', 'estimate', 'the', 'cluster', 'centers', 'giving', 'equal', 'weights', 'to', 'all', 'the', 'dimensions', 'using', 'KMA', 'and', 'then', 'fine-tune', 'the', 'cluster', 'centers', 'and', 'the', 'weights', 'using', 'the', 'feature-weighting', 'clustering', 'algorithms', '.', 'In', 'every', 'iteration', ',', 'the', 'new', 'sets', 'of', 'weights', 'are', 'updated', 'as', 'follows', '.', 'Let', 'w', 'n', '-LRB-', 't', '+1', '-RRB-', 'represent', 'the', 'weights', 'com-puted', 'using', 'one', 'of', '-LRB-', '9', '-RRB-', ',', '-LRB-', '10', '-RRB-', ',', '-LRB-', '14', '-RRB-', 'or', '-LRB-', '15', '-RRB-', 'in', 'iteration', '-LRB-', 't', '+', '1', '-RRB-', 'and', 'w', '-LRB-', 't', '-RRB-', 'the', 'weights', 'in', 'iteration', 't.', 'Then', ',', 'the', 'weights', 'in', 'iteration', '-LRB-', 't', '+', '1', '-RRB-', 'are', 'w', '-LRB-', 't', '+', '1', '-RRB-', '=', '-LRB-', '1', '-', '-LRB-', 't', '-RRB-', '-RRB-', 'w', '-LRB-', 't', '-RRB-', '+', '-LRB-', 't', '-RRB-', 'w', 'n', '-LRB-', 't', '+', '1', '-RRB-', ',', '-LRB-', '13', '-RRB-', 'where', '-LRB-', 't', '-RRB-', '-LSB-', '0', 
',', '1', '-RSB-', 'decreases', 'with', 't', '.', 'That', 'is', ',', '-LRB-', 't', '-RRB-', '=', '-LRB-', 't', '1', '-RRB-', ',', 'for', 'a', 'given', 'constant', '-LSB-', '0', ',', '1', '-RSB-', '.', 'In', 'our', 'experiments', ',', 'we', 'observed', 'that', 'the', 'variance', 'of', 'purity', 'values', 'for', 'different', 'initial', 'values', 'of', '-LRB-', '0', '-RRB-', 'and', 'above', '0.5', 'is', 'very', 'small', '.', 'Hence', ',', 'we', 'report', 'the', 'results', 'for', '-LRB-', '0', '-RRB-', '=', '0.5', 'and', '=', '0.5', '.', 'We', 'set', 'the', 'value', 'of', 'j', '=', '1', '.', 'It', 'may', 'be', 'noted', 'that', 'when', 'the', 'documents', 'are', 'represented', 'as', 'unit', 'vectors', ',', 'KMA', 'with', 'the', 'cosine', 'dissimilarity', 'measure', 'and', 'Euclidean', 'distance', 'measure', 'would', 'yield', 'the', 'same', 'clusters', '.', 'This', 'is', 'essentially', 'the', 'same', 'as', 'Spherical', 'K-Means', 'algorithms', 'described', 'in', '-LSB-', '3', '-RSB-', '.', 'Therefore', ',', 'we', 'consider', 'only', 'the', 'weighted', 'Euclidean', 'measure', 'and', 'restrict', 'our', 'comparisons', 'to', 'EEnt', 'and', 'EsGini', 'in', 'the', 'experiments', '.', 'Since', 'the', 'clusters', 'obtained', 'by', 'KMA', 'are', 'used', 'to', 'initialize', 'all', 'other', 'algorithms', 'considered', 'here', ',', 'and', 'since', 'the', 'results', 'of', 'KMA', 'are', 'sensitive', 'to', 'initialization', ',', 'the', 'accuracy', 'numbers', 'reported', 'in', 'this', 'section', 'are', 'averages', 'over', '10', 'random', 'initializations', 'of', 'KMA', '.', '4.4', 'Results', 'and', 'Observations', '4.4.1', 'Effect', 'of', 'SVaD', 'Measures', 'on', 'Accuracies', 'In', 'Table', '2', ',', 'we', 'show', 'a', 'sample', 'run', 'of', 'EEnt', 'algorithm', 'on', 'one', 'of', 'the', 'Multi5', 'data', 'sets', '.', 'This', 'table', 'shows', 'the', 'evolution', 'of', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', 'and', 'the', 'corresponding', 'accuracies', 'of', 'the', 'clusters', 'with', 
'the', 'iterations', '.', 'The', 'accuracy', ',', 'shown', 'at', 'iteration', '0', ',', 'is', 'that', 'of', 'the', 'clusters', 'obtained', 'by', 'KMA', '.', 'The', 'purity', 'of', 'clusters', 'increases', 'with', 'decrease', 'in', 'the', 'value', 'of', 'the', 'objective', 'function', 'defined', 'using', 'SVaD', 'measures', '.', 'We', 'have', 'observed', 'a', 'similar', 'behavior', 'of', 'EEnt', 'and', 'EsGini', 'on', 'other', 'data', 'sets', 'also', '.', 'This', 'validates', 'our', 'hypothesis', 'that', 'SVaD', 'measures', 'capture', 'the', 'underlying', 'structure', 'in', 'the', 'data', 'sets', 'more', 'accurately', '.', '614', 'Research', 'Track', 'Poster', '4.4.2', 'Comparison', 'with', 'Other', 'Algorithms', 'Figure', '1', 'to', 'Figure', '5', 'show', 'average', 'accuracies', 'of', 'various', 'algorithms', 'on', 'the', '5', 'data', 'sets', 'for', 'various', 'number', 'of', 'clusters', '.', 'The', 'accuracies', 'of', 'KMA', 'and', 'DGK', 'are', 'very', 'close', 'to', 'each', 'other', 'and', 'hence', ',', 'in', 'the', 'figures', ',', 'the', 'lines', 'corresponding', 'to', 'these', 'algorithms', 'are', 'indistinguishable', '.', 'The', 'lines', 'corresponding', 'to', 'CSCAD', 'are', 'also', 'close', 'to', 'that', 'of', 'KMA', 'in', 'all', 'the', 'cases', 'except', 'Class', '3', '.', 'General', 'observations', ':', 'The', 'accuracies', 'of', 'SVaD', 'algorithms', 'follow', 'the', 'trend', 'of', 'the', 'accuracies', 'of', 'other', 'algorithms', '.', 'In', 'all', 'our', 'experiments', ',', 'both', 'SVaD', 'learning', 'algorithms', 'improve', 'the', 'accuracies', 'of', 'clusters', 'obtained', 'by', 'KMA', '.', 'It', 'is', 'observed', 'in', 'our', 'experiments', 'that', 'the', 'improvement', 'could', 'be', 'as', 'large', 'as', '8', '%', 'in', 'some', 'instances', '.', 'EEnt', 'and', 'EsGini', 'consis-tently', 'perform', 'better', 'than', 'DGK', 'on', 'all', 'data', 'sets', 'and', 'for', 'all', 'values', 'of', 'K.', 'EEnt', 'and', 'EsGini', 'perform', 'better', 'than', 
'CSCAD', 'on', 'all', 'data', 'sets', 'excepts', 'in', 'the', 'case', 'of', 'Classic', '3', 'and', 'for', 'a', 'few', 'values', 'of', 'K.', 'Note', 'that', 'the', 'weight', 'update', 'equation', 'of', 'CSCAD', '-LRB-', '15', '-RRB-', 'may', 'result', 'in', 'negative', 'values', 'of', 'w', 'jl', '.', 'Our', 'experience', 'with', 'CSCAD', 'shows', 'that', 'it', 'is', 'quite', 'sensitive', 'to', 'initialization', 'and', 'it', 'may', 'have', 'convergence', 'problems', '.', 'In', 'contrast', ',', 'it', 'may', 'be', 'observed', 'that', 'w', 'jl', 'in', '-LRB-', '9', '-RRB-', 'and', '-LRB-', '10', '-RRB-', 'are', 'always', 'positive', '.', 'Moreover', ',', 'in', 'our', 'experience', ',', 'these', 'two', 'versions', 'are', 'much', 'less', 'sensitive', 'to', 'the', 'choice', 'of', 'j', '.', 'Data', 'specific', 'observations', ':', 'When', 'K', '=', '2', ',', 'EEnt', 'and', 'EsGini', 'could', 'not', 'further', 'improve', 'the', 'results', 'of', 'KMA', 'on', 'the', 'Binary', 'data', 'set', '.', 'The', 'reason', 'is', 'that', 'the', 'data', 'set', 'contains', 'two', 'highly', 'overlapping', 'classes', '.', 'However', ',', 'for', 'other', 'values', 'of', 'K', ',', 'they', 'marginally', 'improve', 'the', 'accuracies', '.', 'In', 'the', 'case', 'of', 'Multi5', ',', 'the', 'accuracies', 'of', 'the', 'algorithms', 'are', 'non-monotonic', 'with', 'K', '.', 'The', 'improvement', 'of', 'accuracies', 'is', 'large', 'for', 'intermediate', 'values', 'of', 'K', 'and', 'small', 'for', 'extreme', 'values', 'of', 'K', '.', 'When', 'K', '=', '5', ',', 'KMA', 'finds', 'relatively', 'stable', 'clusters', '.', 'Hence', ',', 'SVaD', 'algorithms', 'are', 'unable', 'to', 'improve', 'the', 'accuracies', 'as', 'much', 'as', 'they', 'did', 'for', 'intermediate', 'values', 'of', 'K.', 'For', 'larger', 'values', 'of', 'K', ',', 'the', 'clusters', 'are', 'closely', 'spaced', 'and', 'hence', 'there', 'is', 'little', 'scope', 'for', 'improvement', 'by', 'the', 'SVaD', 'algorithms', '.', 'Multi10', 'data', 
'sets', 'are', 'the', 'toughest', 'to', 'cluster', 'because', 'of', 'the', 'large', 'number', 'of', 'classes', 'present', 'in', 'the', 'data', '.', 'In', 'this', 'case', ',', 'the', 'accuracies', 'of', 'the', 'algorithms', 'are', 'monotonically', 'increasing', 'with', 'the', 'number', 'of', 'clusters', '.', 'The', 'extent', 'of', 'improvement', 'of', 'accuracies', 'of', 'SVaD', 'algorithms', 'over', 'KMA', 'is', 'almost', 'constant', 'over', 'the', 'entire', 'range', 'of', 'K', '.', 'This', 'reflects', 'the', 'fact', 'that', 'the', 'documents', 'in', 'Multi10', 'data', 'set', 'are', 'uniformly', 'distributed', 'over', 'feature', 'space', '.', 'The', 'distribution', 'of', 'documents', 'in', 'Yahoo', 'K1', 'data', 'set', 'is', 'highly', 'skewed', '.', 'The', 'extent', 'of', 'improvements', 'that', 'the', 'SVaD', 'algorithms', 'could', 'achieve', 'decrease', 'with', 'K.', 'For', 'higher', 'values', 'of', 'K', ',', 'KMA', 'is', 'able', 'to', 'find', 'almost', 'pure', 'sub-clusters', ',', 'resulting', 'in', 'accuracies', 'of', 'about', '90', '%', '.', 'This', 'leaves', 'little', 'scope', 'for', 'improvement', '.', 'The', 'performance', 'of', 'CSCAD', 'differs', 'noticeably', 'in', 'the', 'case', 'of', 'Classic', '3', '.', 'It', 'performs', 'better', 'than', 'the', 'SVaD', 'algorithms', 'for', 'K', '=', '3', 'and', 'better', 'than', 'EEnt', 'for', 'K', '=', '9', '.', 'However', ',', 'for', 'larger', 'values', 'of', 'K', ',', 'the', 'SVaD', 'algorithms', 'perform', 'better', 'than', 'the', 'rest', '.', 'As', 'in', 'the', 'case', 'of', 'Multi5', ',', 'the', 'improvements', 'of', 'SVaD', 'algorithms', 'over', 'others', 'are', 'significant', 'and', 'consistent', '.', 'One', 'may', 'recall', 'that', 'Multi5', 'and', 'Classic', '3', 'consist', 'of', 'documents', 'from', 'distinct', 'classes', '.', 'Therefore', ',', 'this', 'observation', 'implies', 'that', 'when', 'there', 'are', 'distinct', 'clusters', 'in', 'the', 'data', 'set', ',', 'KMA', 'yields', 'confusing', 'clusters', 
'when', 'the', 'number', 'of', 'clusters', 'is', 'over-Figure', '1', ':', 'Accuracy', 'results', 'on', 'Binary', 'data', '.', 'Figure', '2', ':', 'Accuracy', 'results', 'on', 'Multi5', 'data', '.', 'specified', '.', 'In', 'this', 'scenario', ',', 'EEnt', 'and', 'EsGini', 'can', 'fine-tune', 'the', 'clusters', 'to', 'improve', 'their', 'purity', '.', 'We', 'have', 'defined', 'a', 'general', 'class', 'of', 'spatially', 'variant', 'dissimilarity', 'measures', 'and', 'proposed', 'algorithms', 'to', 'learn', 'the', 'measure', 'underlying', 'a', 'given', 'data', 'set', 'in', 'an', 'unsupervised', 'learning', 'framework', '.', 'Through', 'our', 'experiments', 'on', 'various', 'textual', 'data', 'sets', ',', 'we', 'have', 'shown', 'that', 'such', 'a', 'formulation', 'of', 'dissimilarity', 'measure', 'can', 'more', 'accurately', 'capture', 'the', 'hidden', 'structure', 'in', 'the', 'data', 'than', 'a', 'standard', 'Euclidean', 'measure', 'that', 'does', 'not', 'vary', 'over', 'feature', 'space', '.', 'We', 'have', 'also', 'shown', 'that', 'the', 'proposed', 'learning', 'algorithms', 'perform', 'better', 'than', 'other', 'similar', 'algorithms', 'in', 'the', 'literature', ',', 'and', 'have', 'better', 'stability', 'properties', '.', 'Even', 'though', 'we', 'have', 'applied', 'these', 'algorithms', 'only', 'to', 'text', 'data', 'sets', ',', 'the', 'algorithms', 'derived', 'here', 'do', 'not', 'assume', 'any', 'specific', 'characteristics', 'of', 'textual', 'data', 'sets', '.', 'Hence', ',', 'they', 'Figure', '3', ':', 'Accuracy', 'results', 'on', 'Multi10', 'data', '.', '615', 'Research', 'Track', 'Poster', 'Figure', '4', ':', 'Accuracy', 'results', 'on', 'Yahoo', 'K1', 'data', '.', 'Figure', '5', ':', 'Accuracy', 'results', 'on', 'Classic', '3', 'data', '.', 'are', 'applicable', 'to', 'general', 'data', 'sets', '.', 'Since', 'the', 'algorithms', 'perform', 'better', 'for', 'larger', 'K', ',', 'it', 'would', 'be', 'interesting', 'to', 'investigate', 'whether', 'they', 'can', 
'be', 'used', 'to', 'find', 'subtopics', 'of', 'a', 'topic', '.', 'Finally', ',', 'it', 'will', 'be', 'interesting', 'to', 'learn', 'SVaD', 'measures', 'for', 'labeled', 'data', 'sets', '.', '-LSB-', '1', '-RSB-', 'J.', 'C.', 'Bezdek', 'and', 'R.', 'J.', 'Hathaway', '.', 'Some', 'notes', 'on', 'alternating', 'optimization', '.', 'In', 'Proceedings', 'of', 'the', '2002', 'AFSS', 'International', 'Conference', 'on', 'Fuzzy', 'Systems', '.', 'Calcutta', ',', 'pages', '288', '300', '.', 'Springer-Verlag', ',', '2002', '.', '-LSB-', '2', '-RSB-', 'A.', 'P.', 'Dempster', ',', 'N.', 'M.', 'Laird', ',', 'and', 'Rubin', '.', 'Maximum', 'likelihood', 'from', 'incomplete', 'data', 'via', 'the', 'EM', 'algorithm', '.', 'Journal', 'Royal', 'Statistical', 'Society', 'B', ',', '39', '-LRB-', '2', '-RRB-', ':', '1', '38', ',', '1977', '.', '-LSB-', '3', '-RSB-', 'I.', 'S.', 'Dhillon', 'and', 'D.', 'S.', 'Modha', '.', 'Concept', 'decompositions', 'for', 'large', 'sparse', 'text', 'data', 'using', 'clustering', '.', 'Machine', 'Learning', ',', '42', '-LRB-', '1', '-RRB-', ':', '143', '175', ',', 'January', '2001', '.', '-LSB-', '4', '-RSB-', 'E.', 'Diday', 'and', 'J.', 'C.', 'Simon', '.', 'Cluster', 'analysis', '.', 'In', 'K.', 'S.', 'Fu', ',', 'editor', ',', 'Pattern', 'Recognition', ',', 'pages', '47', '94', '.', 'Springer-Verlag', ',', '1976', '.', '-LSB-', '5', '-RSB-', 'H.', 'Frigui', 'and', 'O.', 'Nasraoui', '.', 'Simultaneous', 'clustering', 'and', 'attribute', 'discrimination', '.', 'In', 'Proceedings', 'of', 'FUZZIEEE', ',', 'pages', '158', '163', ',', 'San', 'Antonio', ',', '2000', '.', '-LSB-', '6', '-RSB-', 'H.', 'Frigui', 'and', 'O.', 'Nasraoui', '.', 'Simultaneous', 'categorization', 'of', 'text', 'documents', 'and', 'identification', 'of', 'cluster-dependent', 'keywords', '.', 'In', 'Proceedings', 'of', 'FUZZIEEE', ',', 'pages', '158', '163', ',', 'Honolulu', ',', 'Hawaii', ',', '2001', '.', '-LSB-', '7', '-RSB-', 'D.', 'E.', 'Gustafson', 'and', 'W.', 'C.', 'Kessel', 
'.', 'Fuzzy', 'clustering', 'with', 'the', 'fuzzy', 'covariance', 'matrix', '.', 'In', 'Proccedings', 'of', 'IEEE', 'CDC', ',', 'pages', '761', '766', ',', 'San', 'Diego', ',', 'California', ',', '1979', '.', '-LSB-', '8', '-RSB-', 'R.', 'Krishnapuram', 'and', 'J.', 'Kim', '.', 'A', 'note', 'on', 'fuzzy', 'clustering', 'algorithms', 'for', 'Gaussian', 'clusters', '.', 'IEEE', 'Transactions', 'on', 'Fuzzy', 'Systems', ',', '7', '-LRB-', '4', '-RRB-', ':', '453', '461', ',', 'Aug', '1999', '.', '-LSB-', '9', '-RSB-', 'Y.', 'Rui', ',', 'T.', 'S.', 'Huang', ',', 'and', 'S.', 'Mehrotra', '.', 'Relevance', 'feedback', 'techniques', 'in', 'interactive', 'content-based', 'image', 'retrieval', '.', 'In', 'Storage', 'and', 'Retrieval', 'for', 'Image', 'and', 'Video', 'Databases', '-LRB-', 'SPIE', '-RRB-', ',', 'pages', '25', '36', ',', '1998', '.', '-LSB-', '10', '-RSB-', 'N.', 'Slonim', 'and', 'N.', 'Tishby', '.', 'Document', 'clustering', 'using', 'word', 'clusters', 'via', 'the', 'information', 'bottleneck', 'method', '.', 'In', 'Proceedings', 'of', 'SIGIR', ',', 'pages', '208', '215', ',', '2000', '.', 'APPENDIX', 'A', '.', 'OTHER', 'FEATURE', 'WEIGHTING', 'CLUSTERING', 'TECHNIQUES', 'A.', '1', 'Diagonal', 'Gustafson-Kessel', '-LRB-', 'DGK', '-RRB-', 'Gustafson', 'and', 'Kessel', '-LSB-', '7', '-RSB-', 'associate', 'each', 'cluster', 'with', 'a', 'different', 'norm', 'matrix', '.', 'Let', 'A', '=', '-LRB-', 'A', '1', ',', '...', ',', 'A', 'k', '-RRB-', 'be', 'the', 'set', 'of', 'k', 'norm', 'matrices', 'associated', 'with', 'k', 'clusters', '.', 'Let', 'u', 'ji', 'is', 'the', 'fuzzy', 'membership', 'of', 'x', 'i', 'in', 'cluster', 'j', 'and', 'U', '=', '-LSB-', 'u', 'ji', '-RSB-', '.', 'By', 'restricting', 'A', 'j', 's', 'to', 'be', 'diagonal', 'and', 'u', 'ji', '-LCB-', '0', ',', '1', '-RCB-', ',', 'we', 'can', 'reformulate', 'the', 'original', 'optimization', 'problem', 'in', 'terms', 'of', 'SVaD', 'measures', 'as', 'follows', ':', 'min', 'C', ',', 'W', 'J', 'DGK', 
'-LRB-', 'C', ',', 'W', '-RRB-', '=', 'k', 'j', '=', '1', 'x', 'i', 'R', 'j', 'M', 'l', '=', '1', 'w', 'jl', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', ',', 'subject', 'to', 'l', 'w', 'jl', '=', 'j', '.', 'Note', 'that', 'this', 'problem', 'can', 'be', 'solved', 'using', 'the', 'same', 'AO', 'algorithms', 'described', 'in', 'Section', '3', '.', 'Here', ',', 'the', 'update', 'for', 'C', 'and', 'P', 'would', 'remain', 'the', 'same', 'as', 'that', 'discussed', 'in', 'Section', '3', '.', 'It', 'can', 'be', 'easily', 'shown', 'that', 'when', 'j', '=', '1', ',', 'j', ',', 'w', 'jl', '=', 'M', 'm', '=', '1', 'x', 'i', 'R', 'j', 'g', 'm', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '1/M', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '-LRB-', '14', '-RRB-', 'minimize', 'J', 'DGK', 'for', 'a', 'given', 'C.', 'A.', '2', 'Crisp', 'Simultaneous', 'Clustering', 'and', 'Attribute', 'Discrimination', '-LRB-', 'CSCAD', '-RRB-', 'Frigui', 'et', '.', 'al.', 'in', '-LSB-', '5', ',', '6', '-RSB-', ',', 'considered', 'a', 'fuzzy', 'version', 'of', 'the', 'feature-weighting', 'based', 'clustering', 'problem', '-LRB-', 'SCAD', '-RRB-', '.', 'To', 'make', 'a', 'fair', 'comparison', 'of', 'our', 'algorithms', 'with', 'SCAD', ',', 'we', 'derive', 'its', 'crisp', 'version', 'and', 'refer', 'to', 'it', 'as', 'Crisp', 'SCAD', '-LRB-', 'CSCAD', '-RRB-', '.', 'In', '-LSB-', '5', ',', '6', '-RSB-', ',', 'the', 'Gini', 'measure', 'is', 'used', 'for', 'regularization', '.', 'If', 'the', 'Gini', 'measure', 'is', 'considered', 'with', 'r', '=', '1', ',', 'the', 'weights', 'w', 'jl', 'that', 'minimize', 'the', 'corresponding', 'objective', 'function', 'for', 'a', 'given', 'C', 'and', 'P', ',', 'are', 'given', 'by', 'w', 'jl', '=', '1', 'M', '+', '1', '2', 'j', '1', 'M', 'M', 'n', '=', '1', 'x', 'i', 'R', 'j', 'g', 'n', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '.', '-LRB-', '15', '-RRB-', 
'Since', 'SCAD', 'uses', 'the', 'weighted', 'Euclidean', 'measure', ',', 'the', 'update', 'equations', 'of', 'centroids', 'in', 'CSCAD', 'remain', 'the', 'same', 'as', 'in', '-LRB-', '11', '-RRB-', '.', 'The', 'update', 'equation', 'for', 'w', 'jl', 'in', 'SCAD', 'is', 'quite', 'similar', 'to', '-LRB-', '15', '-RRB-', '.', 'One', 'may', 'note', 'that', ',', 'in', '-LRB-', '15', '-RRB-', ',', 'the', 'value', 'of', 'w', 'jl', 'can', 'become', 'negative', '.', 'In', '-LSB-', '5', '-RSB-', ',', 'a', 'heuristic', 'is', 'used', 'to', 'estimate', 'the', 'value', 'j', 'in', 'every', 'iteration', 'and', 'set', 'the', 'negative', 'values', 'of', 'w', 'jl', 'to', 'zero', 'before', 'normalizing', 'the', 'weights', '.', '616', 'Research', 'Track', 'Poster'] Document BIO Tags: ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O'] 
Extractive/present Keyphrases: ['dissimilarity measure', 'clustering', 'feature weighting'] Abstractive/absent Keyphrases: ['spatially varying dissimilarity (svad)', 'learning dissimilarity measures'] ----------- ``` ### Keyphrase Extraction ```python from datasets import load_dataset # get the dataset only for keyphrase extraction dataset = load_dataset("midas/nus", "extraction") print("Samples for Keyphrase Extraction") # sample from the test split print("Sample from test data split") test_sample = dataset["test"][0] print("Fields in the sample: ", [key for key in test_sample.keys()]) print("Tokenized Document: ", test_sample["document"]) print("Document BIO Tags: ", test_sample["doc_bio_tags"]) print("\n-----------\n") ``` ### Keyphrase Generation ```python # get the dataset only for keyphrase generation dataset = load_dataset("midas/nus", "generation") print("Samples for Keyphrase Generation") # sample from the test split print("Sample from test data split") test_sample = dataset["test"][0] print("Fields in the sample: ", [key for key in test_sample.keys()]) print("Tokenized Document: ", test_sample["document"]) print("Extractive/present Keyphrases: ", test_sample["extractive_keyphrases"]) print("Abstractive/absent Keyphrases: ", test_sample["abstractive_keyphrases"]) print("\n-----------\n") ``` ## Citation Information ``` @InProceedings{10.1007/978-3-540-77094-7_41, author="Nguyen, Thuy Dung and Kan, Min-Yen", editor="Goh, Dion Hoe-Lian and Cao, Tru Hoang and Solvberg, Ingeborg Torvik and Rasmussen, Edie", title="Keyphrase Extraction in Scientific Publications", booktitle="Asian Digital Libraries. 
Looking Back 10 Years and Forging New Frontiers", year="2007", publisher="Springer Berlin Heidelberg", address="Berlin, Heidelberg", pages="317--326", isbn="978-3-540-77094-7" } ``` ## Contributions Thanks to [@debanjanbhucs](https://github.com/debanjanbhucs), [@dibyaaaaax](https://github.com/dibyaaaaax) and [@ad6398](https://github.com/ad6398) for adding this dataset
85,916
[ [ -0.049224853515625, -0.0384521484375, 0.05584716796875, 0.031982421875, 0.00318145751953125, 0.007083892822265625, -0.0120849609375, -0.01495361328125, 0.049163818359375, -0.0033969879150390625, -0.0302581787109375, -0.07244873046875, -0.03216552734375, 0.02...
philschmid/test_german_squad
2021-10-25T13:55:14.000Z
[ "region:us" ]
philschmid
null
null
2
8
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
piEsposito/br_quad_20
2021-02-05T16:05:55.000Z
[ "region:us" ]
piEsposito
Translates SQuAD 2.0 from english to portuguese using Google Cloud API
@article{2020braquad, author = {{Esposito}, Wladimir and {Esposito}, Piero and {Tamais}, Ana Laura and {Gatti}, Daniel}, title = "{BrQuAD - Brazilian Question-Answering Dataset: Dataset para benchmark de modelos de Machine Learning para question-answering em Portugu^es brasileiro traduzindo o SQuAD com Google Cloud API}", year = 2020, }
0
8
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
poperson1205/mrtydi-v1.1-korean-fixed
2022-01-02T08:18:25.000Z
[ "region:us" ]
poperson1205
null
null
1
8
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
yuvalkirstain/contract_nli_t5
2022-01-09T06:16:30.000Z
[ "region:us" ]
yuvalkirstain
null
null
0
8
2022-03-02T23:29:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
blo05/cleaned_wiki_en
2022-03-30T10:12:38.000Z
[ "region:us" ]
blo05
null
null
0
8
2022-03-21T15:55:39
Cleaned wikipedia dataset
25
[ [ -0.038543701171875, -0.0208892822265625, -0.0107574462890625, -0.047088623046875, -0.0248870849609375, -0.0269622802734375, -0.042327880859375, -0.00838470458984375, 0.0361328125, 0.06427001953125, -0.036529541015625, -0.021331787109375, -0.01103973388671875, ...
huggan/few-shot-cat
2022-04-12T14:06:50.000Z
[ "arxiv:2101.04775", "region:us" ]
huggan
null
null
1
8
2022-04-01T11:40:37
# Citation ``` @article{DBLP:journals/corr/abs-2101-04775, author = {Bingchen Liu and Yizhe Zhu and Kunpeng Song and Ahmed Elgammal}, title = {Towards Faster and Stabilized {GAN} Training for High-fidelity Few-shot Image Synthesis}, journal = {CoRR}, volume = {abs/2101.04775}, year = {2021}, url = {https://arxiv.org/abs/2101.04775}, eprinttype = {arXiv}, eprint = {2101.04775}, timestamp = {Fri, 22 Jan 2021 15:16:00 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2101-04775.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
676
[ [ -0.038360595703125, -0.055694580078125, 0.0012750625610351562, 0.0233154296875, -0.0065155029296875, -0.012420654296875, -0.00557708740234375, -0.0200042724609375, 0.00550079345703125, -0.0029239654541015625, -0.024627685546875, -0.0238189697265625, -0.02734375,...
enoriega/biocreative_gene_mention
2022-04-20T00:15:20.000Z
[ "region:us" ]
enoriega
Training and validation datasets for the BioCreative II gene mention task. The data has been tokenized with [processors](https://github.com/clulab/processors) ## Features: - __tokens__: Input token sequence - __folded_tokens__: Same as tokens, but case-folded - __tags__: POS tags of the input sequence tokens - __labels__: BIO sequence tags
@article{cite-key, Abstract = {Nineteen teams presented results for the Gene Mention Task at the BioCreative II Workshop. In this task participants designed systems to identify substrings in sentences corresponding to gene name mentions. A variety of different methods were used and the results varied with a highest achieved F1 score of 0.8721. Here we present brief descriptions of all the methods used and a statistical analysis of the results. We also demonstrate that, by combining the results from all submissions, an F score of 0.9066 is feasible, and furthermore that the best result makes use of the lowest scoring submissions.}, Author = {Smith, Larry and Tanabe, Lorraine K. and Ando, Rie Johnson nee and Kuo, Cheng-Ju and Chung, I-Fang and Hsu, Chun-Nan and Lin, Yu-Shi and Klinger, Roman and Friedrich, Christoph M. and Ganchev, Kuzman and Torii, Manabu and Liu, Hongfang and Haddow, Barry and Struble, Craig A. and Povinelli, Richard J. and Vlachos, Andreas and Baumgartner, William A. and Hunter, Lawrence and Carpenter, Bob and Tsai, Richard Tzong-Han and Dai, Hong-Jie and Liu, Feng and Chen, Yifei and Sun, Chengjie and Katrenko, Sophia and Adriaans, Pieter and Blaschke, Christian and Torres, Rafael and Neves, Mariana and Nakov, Preslav and Divoli, Anna and Ma{\~n}a-L{\'o}pez, Manuel and Mata, Jacinto and Wilbur, W. John}, Da = {2008/09/01}, Date-Added = {2022-04-15 17:35:45 -0700}, Date-Modified = {2022-04-15 17:35:45 -0700}, Doi = {10.1186/gb-2008-9-s2-s2}, Id = {Smith2008}, Isbn = {1474-760X}, Journal = {Genome Biology}, Number = {2}, Pages = {S2}, Title = {Overview of BioCreative II gene mention recognition}, Ty = {JOUR}, Url = {https://doi.org/10.1186/gb-2008-9-s2-s2}, Volume = {9}, Year = {2008}, Bdsk-Url-1 = {https://doi.org/10.1186/gb-2008-9-s2-s2}}
0
8
2022-04-20T00:08:28
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Goud/Goud-sum
2022-07-04T16:02:36.000Z
[ "task_categories:summarization", "task_ids:news-articles-headline-generation", "annotations_creators:no-annotation", "language_creators:machine-generated", "size_categories:100K<n<1M", "source_datasets:original", "region:us" ]
Goud
null
null
2
8
2022-04-21T15:25:00
--- annotations_creators: - no-annotation language_creators: - machine-generated language: [] license: [] multilinguality: [] pretty_name: Goud-sum size_categories: - 100K<n<1M source_datasets: - original task_categories: - summarization task_ids: - news-articles-headline-generation --- # Dataset Card for Goud summarization dataset ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:**[Needs More Information] - **Repository:**[Needs More Information] - **Paper:**[Goud.ma: a News Article Dataset for Summarization in Moroccan Darija](https://openreview.net/forum?id=BMVq5MELb9) - **Leaderboard:**[Needs More Information] - **Point of Contact:**[Needs More Information] ### Dataset Summary Goud-sum contains 158k articles and their headlines extracted from [Goud.ma](https://www.goud.ma/) news website. The articles are written in the Arabic script. 
All headlines are in Moroccan Darija, while articles may be in Moroccan Darija, in Modern Standard Arabic, or a mix of both (code-switched Moroccan Darija). ### Supported Tasks and Leaderboards Text Summarization ### Languages * Moroccan Arabic (Darija) * Modern Standard Arabic ## Dataset Structure ### Data Instances The dataset consists of article-headline pairs in string format. ### Data Fields * article: a string containing the body of the news article * headline: a string containing the article's headline * categories: a list of string of article categories ### Data Splits Goud-sum dataset has 3 splits: _train_, _validation_, and _test_. Below are the number of instances in each split. | Dataset Split | Number of Instances in Split | | ------------- | ------------------------------------------- | | Train | 139,288 | | Validation | 9,497 | | Test | 9,497 | ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? The text was written by journalists at [Goud](https://www.goud.ma/). ### Annotations The dataset does not contain any additional annotations. #### Annotation process [N/A] #### Who are the annotators? 
[N/A] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information ``` @inproceedings{issam2022goudma, title={Goud.ma: a News Article Dataset for Summarization in Moroccan Darija}, author={Abderrahmane Issam and Khalil Mrini}, booktitle={3rd Workshop on African Natural Language Processing}, year={2022}, url={https://openreview.net/forum?id=BMVq5MELb9} } ``` ### Contributions Thanks to [@issam9](https://github.com/issam9) and [@KhalilMrini](https://github.com/KhalilMrini) for adding this dataset.
4,254
[ [ -0.041046142578125, -0.03765869140625, -0.0034198760986328125, 0.0139007568359375, -0.041412353515625, 0.006008148193359375, -0.0191192626953125, -0.0155487060546875, 0.04815673828125, 0.03955078125, -0.0400390625, -0.06866455078125, -0.0562744140625, 0.0075...
h4iku/coconut_python2010_preprocessed
2022-04-21T20:41:12.000Z
[ "region:us" ]
h4iku
null
null
0
8
2022-04-21T20:34:45
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
pietrolesci/fracas
2022-04-25T08:40:07.000Z
[ "region:us" ]
pietrolesci
null
null
0
8
2022-04-22T08:35:48
## Overview Original dataset [here](https://github.com/felipessalvatore/NLI_datasets). Below the original description reported for convenience. ```latex @MISC{Fracas96, author = {{The Fracas Consortium} and Robin Cooper and Dick Crouch and Jan Van Eijck and Chris Fox and Josef Van Genabith and Jan Jaspars and Hans Kamp and David Milward and Manfred Pinkal and Massimo Poesio and Steve Pulman and Ted Briscoe and Holger Maier and Karsten Konrad}, title = {Using the Framework}, year = {1996} } ``` Adapted from [https://nlp.stanford.edu/~wcmac/downloads/fracas.xml](https://nlp.stanford.edu/~wcmac/downloads/fracas.xml). We took `P1, ..., Pn` as premise and H as hypothesis. Labels have been mapped as follows `{'yes': "entailment", 'no': 'contradiction', 'undef': "neutral", 'unknown': "neutral"}`. And we randomly split 80/20 for train/dev. ## Dataset curation One hypothesis in the dev set and three hypotheses in the train set are empty and have been filled in with the empty string `""`. Labels are encoded with custom NLI mapping, that is ``` {"entailment": 0, "neutral": 1, "contradiction": 2} ``` ## Code to create the dataset ```python import pandas as pd from datasets import Features, Value, ClassLabel, Dataset, DatasetDict, load_dataset from pathlib import Path # load datasets path = Path("<path to folder>/nli_datasets") datasets = {} for dataset_path in path.iterdir(): datasets[dataset_path.name] = {} for name in dataset_path.iterdir(): df = pd.read_csv(name) datasets[dataset_path.name][name.name.split(".")[0]] = df ds = {} for name, df_ in datasets["fracas"].items(): df = df_.copy() assert df["label"].isna().sum() == 0 # fill-in empty hypothesis df = df.fillna("") # encode labels df["label"] = df["label"].map({"entailment": 0, "neutral": 1, "contradiction": 2}) # cast to dataset features = Features({ "premise": Value(dtype="string", id=None), "hypothesis": Value(dtype="string", id=None), "label": ClassLabel(num_classes=3, names=["entailment", "neutral", 
"contradiction"]), }) ds[name] = Dataset.from_pandas(df, features=features) dataset = DatasetDict(ds) dataset.push_to_hub("fracas", token="<token>") # check overlap between splits from itertools import combinations for i, j in combinations(ds.keys(), 2): print( f"{i} - {j}: ", pd.merge( ds[i].to_pandas(), ds[j].to_pandas(), on=["label", "premise", "hypothesis"], how="inner", ).shape[0], ) #> train - dev: 0 ```
2,601
[ [ -0.04071044921875, -0.030029296875, 0.015106201171875, 0.02984619140625, -0.0036067962646484375, -0.0221099853515625, -0.0194854736328125, -0.0159912109375, 0.039642333984375, 0.0213470458984375, -0.039031982421875, -0.049957275390625, -0.038055419921875, 0....
pietrolesci/add_one_rte
2022-04-25T08:48:42.000Z
[ "region:us" ]
pietrolesci
null
null
0
8
2022-04-22T13:56:41
## Overview Original data available [here](http://www.seas.upenn.edu/~nlp/resources/AN-composition.tgz). ## Dataset curation `premise` and `hypothesis` columns have been cleaned following common practices ([1](https://github.com/rabeehk/robust-nli/blob/c32ff958d4df68ac2fad9bf990f70d30eab9f297/data/scripts/add_one_rte.py#L51-L52), [2](https://github.com/azpoliak/hypothesis-only-NLI/blob/b045230437b5ba74b9928ca2bac5e21ae57876b9/data/convert_add_1_rte.py#L31-L32)), that is - remove HTML tags `<b>`, `<u>`, `</b>`, `</u>` - normalize repeated white spaces - strip `mean_human_score` has been transformed into class labels following common practices ([1](https://github.com/rabeehk/robust-nli/blob/c32ff958d4df68ac2fad9bf990f70d30eab9f297/data/scripts/add_one_rte.py#L20-L35), [2](https://github.com/azpoliak/hypothesis-only-NLI/blob/b045230437b5ba74b9928ca2bac5e21ae57876b9/data/convert_add_1_rte.py#L6-L17)), that is - for test set: `mean_human_score <= 3 -> "not-entailed"` and `mean_human_score >= 4 -> "entailed"` (anything between 3 and 4 has been removed) - for all other splits: `mean_human_score < 3.5 -> "not-entailed"` else `"entailed"` more details below. 
## Code to generate the dataset ```python import pandas as pd from datasets import Features, Value, ClassLabel, Dataset, DatasetDict def convert_label(score, is_test): if is_test: if score <= 3: return "not-entailed" elif score >= 4: return "entailed" return "REMOVE" if score < 3.5: return "not-entailed" return "entailed" ds = {} for split in ("dev", "test", "train"): # read data df = pd.read_csv(f"<path to folder>/AN-composition/addone-entailment/splits/data.{split}", sep="\t", header=None) df.columns = ["mean_human_score", "binary_label", "sentence_id", "adjective", "noun", "premise", "hypothesis"] # clean text from html tags and useless spaces for col in ("premise", "hypothesis"): df[col] = ( df[col] .str.replace("(<b>)|(<u>)|(</b>)|(</u>)", " ", regex=True) .str.replace(" {2,}", " ", regex=True) .str.strip() ) # encode labels if split == "test": df["label"] = df["mean_human_score"].map(lambda x: convert_label(x, True)) df = df.loc[df["label"] != "REMOVE"] else: df["label"] = df["mean_human_score"].map(lambda x: convert_label(x, False)) assert df["label"].isna().sum() == 0 df["label"] = df["label"].map({"not-entailed": 0, "entailed": 1}) # cast to dataset features = Features({ "mean_human_score": Value(dtype="float32"), "binary_label": Value(dtype="string"), "sentence_id": Value(dtype="string"), "adjective": Value(dtype="string"), "noun": Value(dtype="string"), "premise": Value(dtype="string"), "hypothesis": Value(dtype="string"), "label": ClassLabel(num_classes=2, names=["not-entailed", "entailed"]), }) ds[split] = Dataset.from_pandas(df, features=features) ds = DatasetDict(ds) ds.push_to_hub("add_one_rte", token="<token>") # check overlap between splits from itertools import combinations for i, j in combinations(ds.keys(), 2): print( f"{i} - {j}: ", pd.merge( ds[i].to_pandas(), ds[j].to_pandas(), on=["premise", "hypothesis", "label"], how="inner", ).shape[0], ) #> dev - test: 0 #> dev - train: 0 #> test - train: 0 ```
3,487
[ [ -0.0350341796875, -0.06201171875, 0.01079559326171875, 0.0181121826171875, -0.01166534423828125, -0.012725830078125, -0.0292205810546875, -0.01482391357421875, 0.042724609375, 0.03253173828125, -0.0290679931640625, -0.0640869140625, -0.032379150390625, 0.023...
pietrolesci/robust_nli_li_ts
2022-04-25T11:49:51.000Z
[ "region:us" ]
pietrolesci
null
null
0
8
2022-04-25T11:48:57
This is part of `robust_NLI`, but since there seems to be a bug when loading and downloading `DatasetDict` containing datasets with different configurations, I loaded the datasets with the differing configs as standalone datasets. Issue here: [https://github.com/huggingface/datasets/issues/4211](https://github.com/huggingface/datasets/issues/4211)
348
[ [ -0.047882080078125, -0.033843994140625, -0.0026988983154296875, 0.036041259765625, -0.0122833251953125, 0.00124359130859375, -0.0230865478515625, -0.04986572265625, 0.049774169921875, 0.024169921875, -0.068603515625, -0.0308685302734375, -0.0419921875, 0.039...
pietrolesci/robust_nli_is_sd
2022-04-25T13:07:25.000Z
[ "region:us" ]
pietrolesci
null
null
0
8
2022-04-25T11:49:03
This is part of `robust_NLI`, but since there seems to be a bug when loading and downloading `DatasetDict` containing datasets with different configurations, I loaded the datasets with the differing configs as standalone datasets. Issue here: [https://github.com/huggingface/datasets/issues/4211](https://github.com/huggingface/datasets/issues/4211)
348
[ [ -0.0478515625, -0.0338134765625, -0.0027065277099609375, 0.0361328125, -0.01226043701171875, 0.0012559890747070312, -0.023101806640625, -0.04986572265625, 0.049835205078125, 0.024169921875, -0.068603515625, -0.0308685302734375, -0.042022705078125, 0.03970336...
pietrolesci/breaking_nli
2022-04-25T13:37:23.000Z
[ "region:us" ]
pietrolesci
null
null
0
8
2022-04-25T13:36:48
## Overview Proposed by ```latex @InProceedings{glockner_acl18, author = {Glockner, Max and Shwartz, Vered and Goldberg, Yoav}, title = {Breaking NLI Systems with Sentences that Require Simple Lexical Inferences}, booktitle = {The 56th Annual Meeting of the Association for Computational Linguistics (ACL)}, month = {July}, year = {2018}, address = {Melbourne, Australia} } ``` Original dataset available [here](https://github.com/BIU-NLP/Breaking_NLI). ## Dataset curation Labels encoded with the following mapping `{"entailment": 0, "neutral": 1, "contradiction": 2}` and made available in the `label` column. ## Code to create the dataset ```python import pandas as pd from datasets import Features, Value, ClassLabel, Dataset, Sequence # load data with open("<path to folder>/dataset.jsonl", "r") as fl: data = fl.read().split("\n") df = pd.DataFrame([eval(i) for i in data if len(i) > 0]) # encode labels df["label"] = df["gold_label"].map({"entailment": 0, "neutral": 1, "contradiction": 2}) # cast to dataset features = Features({ "sentence1": Value(dtype="string", id=None), "category": Value(dtype="string", id=None), "gold_label": Value(dtype="string", id=None), "annotator_labels": Sequence(feature=Value(dtype="string", id=None), length=3), "pairID": Value(dtype="int32", id=None), "sentence2": Value(dtype="string", id=None), "label": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]), }) ds = Dataset.from_pandas(df, features=features) ds.push_to_hub("breaking_nli", token="<token>", split="all") ```
1,612
[ [ -0.025909423828125, -0.059051513671875, 0.023681640625, 0.016815185546875, -0.005329132080078125, -0.00970458984375, -0.019287109375, -0.029571533203125, 0.02215576171875, 0.042510986328125, -0.03594970703125, -0.0433349609375, -0.046966552734375, 0.03662109...
BigScienceBiasEval/bias-shades
2022-10-03T13:49:04.000Z
[ "license:cc-by-sa-4.0", "region:us" ]
BigScienceBiasEval
This is a preliminary version of the bias SHADES dataset for evaluating LMs for social biases.
""" # TODO: Add description of the dataset here # You can copy an official description _DESCRIPTION =
1
8
2022-04-28T16:46:11
--- license: cc-by-sa-4.0 --- Possibly a placeholder dataset for the original here: https://huggingface.co/datasets/bigscience-catalogue-data/bias-shades # Data Statement for SHADES > **How to use this document:** > Fill in each section according to the instructions. Give as much detail as you can, but there's no need to extrapolate. The goal is to help people understand your data when they approach it. This could be someone looking at it in ten years, or it could be you yourself looking back at the data in two years. > For full details, the best source is the original Data Statements paper, here: https://www.aclweb.org/anthology/Q18-1041/ . > Instruction fields are given as blockquotes; delete the instructions when you're done, and provide the file with your data, for example as "DATASTATEMENT.md". The lists in some blocks are designed to be filled in, but it's good to also leave a written description of what's happening, as well as the list. It's fine to skip some fields if the information isn't known. > Only blockquoted content should be deleted; the final about statement should be left intact. Data set name: Bias-Shades Citation (if available): TODO. Data set developer(s): This dataset was compiled by dozens of research scientists through the BigScience open science collaboration. Collaborators, representing numerous cultures and languages, joined the project of their own volition. Data statement author(s): Shayne Longpre, Aurélie Névéol, Shanya Sharma[Add name here if you add/edit the data statement :)]. Others who contributed to this document: N/A License: Creative Commons Attribution-ShareAlike 4.0 (CC BY-SA 4.0). ## A. CURATION RATIONALE > *Explanation.* Which texts were included and what were the goals in selecting texts, both in the original collection and in any further sub-selection? This can be especially important in datasets too large to thoroughly inspect by hand. 
An explicit statement of the curation rationale can help dataset users make inferences about what other kinds of texts systems trained with them could conceivably generalize to. This dataset was curated by hand-crafting stereotype sentences by native speakers from the culture which is being targeted. An initial set of sentences was inferred from stereotypes expressed in the crowS-pairs data set(Nangia et al.). Native speakers first crafted templates for sentences expressing a stereotype. These templates are marked for gender and plurality of the target nouns, so the template can be reused by substituting different targets. Next, the template-target noun pair combinations were annotated for the veracity/reliability of the expressed stereotype. The resulting sentences express common and less common stereotypes in a variety of cultures and languages. ## B. LANGUAGE VARIETY/VARIETIES > *Explanation.* Languages differ from each other in structural ways that can interact with NLP algorithms. Within a language, regional or social dialects can also show great variation (Chambers and Trudgill, 1998). The language and language variety should be described with a language tag from BCP-47 identifying the language variety (e.g., en-US or yue-Hant-HK), and a prose description of the language variety, glossing the BCP-47 tag and also providing further information (e.g., "English as spoken in Palo Alto, California", or "Cantonese written with traditional characters by speakers in Hong Kong who are bilingual in Mandarin"). * BCP-47 language tags: en-US, fr-FR, hi-IN, es-DO, ar-LY, ru-RU, de-DE, nl-NL, ta-IN. * Language variety description: English spoken by native speakers of the United States, native French people from metropolitan France, native Hindi and Tamil speakers from India, Spanish speakers from the Dominican Republic, Arabic speakers from Libya, Russian speakers from Russia, German speakers from Germany, and Dutch speakers from the Netherlands. ## C. 
CONTRIBUTOR DEMOGRAPHIC > ## C. SPEAKER DEMOGRAPHIC > *Explanation.* Sociolinguistics has found that variation (in pronunciation, prosody, word choice, and grammar) correlates with speaker demographic characteristics (Labov, 1966), as speakers use linguistic variation to construct and project identities (Eckert and Rickford, 2001). Transfer from native languages (L1) can affect the language produced by non-native (L2) speakers (Ellis, 1994, Ch. 8). A further important type of variation is disordered speech (e.g., dysarthria). Specifications include: Participants to the collection project were recruited through the HuggingFace BigScience project, and specifically the Bias and Fairness Evaluation group. Listed below. Speakers: * [ADD YOURSELF!] * Shayne Longpre: English-speaking, male, 28 years old, culturally Canadian. * Aurélie Névéol: French (native), English and Spanish speaking, female, 44 years old, culturally French (also familiar with American culture) * Shanya Sharma: Hindi(native), English speaking, female, 24 years old, culturally Indian * Margaret Mitchell: English, female, mid-30s, U.S.A. * Maraim Masoud: Arabic, English Speaking female. ## D. ANNOTATOR DEMOGRAPHIC > *Explanation.* What are the demographic characteristics of the annotators and annotation guideline developers? Their own “social address” influences their experience with language and thus their perception of what they are annotating. Specifications include: Participants to the collection project were recruited through the HuggingFace BigScience project, and specifically the Bias and Fairness Evaluation group. Speaker and annotator contributors listed in section C. ## E. SPEECH SITUATION N/A ## F. TEXT CHARACTERISTICS > *Explanation.* Both genre and topic influence the vocabulary and structural characteristics of texts (Biber, 1995), and should be specified. Collected data is a collection of offensive stereotyped statements in numerous languages and cultures. 
They might be upsetting and/or offensive. Along with these stereotyped statements are annotation judgements of how prevalent/real the expressed stereotypes are in the real world. Some statements were created from templates with substituted target nouns, and therefore may express an uncommon or unlikely stereotype. ## G. RECORDING QUALITY N/A ## H. OTHER > *Explanation.* There may be other information of relevance as well. Please use this space to develop any further categories that are relevant for your dataset. ## I. PROVENANCE APPENDIX This initiative is part of the BigScience Workshop: https://bigscience.huggingface.co/. ## About this document A data statement is a characterization of a dataset that provides context to allow developers and users to better understand how experimental results might generalize, how software might be appropriately deployed, and what biases might be reflected in systems built on the software. Data Statements are from the University of Washington. Contact: [datastatements@uw.edu](mailto:datastatements@uw.edu). This document template is licensed as [CC0](https://creativecommons.org/share-your-work/public-domain/cc0/). This version of the markdown Data Statement is from June 4th 2020. The Data Statement template is based on worksheets distributed at the [2020 LREC workshop on Data Statements](https://sites.google.com/uw.edu/data-statements-for-nlp/), by Emily M. Bender, Batya Friedman, and Angelina McMillan-Major. Adapted to community Markdown template by Leon Dercyznski.
7,432
[ [ -0.036102294921875, -0.042755126953125, 0.0183868408203125, 0.036956787109375, -0.004886627197265625, -0.00255584716796875, -0.02496337890625, -0.05865478515625, 0.03973388671875, 0.04400634765625, -0.02374267578125, -0.0574951171875, -0.044036865234375, 0.0...
taln-ls2n/kptimes
2022-09-23T07:38:28.000Z
[ "task_categories:text-generation", "annotations_creators:unknown", "language_creators:unknown", "multilinguality:monolingual", "size_categories:100K<n<1M", "language:en", "license:cc-by-4.0", "region:us" ]
taln-ls2n
KPTimes benchmark dataset for keyphrase extraction and generation.
@inproceedings{gallina-etal-2019-kptimes, title = "{KPT}imes: A Large-Scale Dataset for Keyphrase Generation on News Documents", author = "Gallina, Ygor and Boudin, Florian and Daille, Beatrice", booktitle = "Proceedings of the 12th International Conference on Natural Language Generation", month = oct # "{--}" # nov, year = "2019", address = "Tokyo, Japan", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W19-8617", doi = "10.18653/v1/W19-8617", pages = "130--135", abstract = "Keyphrase generation is the task of predicting a set of lexical units that conveys the main content of a source text. Existing datasets for keyphrase generation are only readily available for the scholarly domain and include non-expert annotations. In this paper we present KPTimes, a large-scale dataset of news texts paired with editor-curated keyphrases. Exploring the dataset, we show how editors tag documents, and how their annotations differ from those found in existing datasets. We also train and evaluate state-of-the-art neural keyphrase generation models on KPTimes to gain insights on how well they perform on the news domain. The dataset is available online at https:// github.com/ygorg/KPTimes.", }
1
8
2022-05-06T09:34:40
--- annotations_creators: - unknown language_creators: - unknown language: - en license: - cc-by-4.0 multilinguality: - monolingual task_categories: - text-mining - text-generation task_ids: - keyphrase-generation - keyphrase-extraction size_categories: - 100K<n<1M pretty_name: KPTimes --- # KPTimes Benchmark Dataset for Keyphrase Generation ## About KPTimes is a dataset for benchmarking keyphrase extraction and generation models. The dataset is composed of 290K news articles in English collected from the [New York Times](https://www.nytimes.com/) and the [Japan Times](https://www.japantimes.co.jp/). Keyphrases were annotated by editors in a semi-automated manner (that is, editors revise a set of keyphrases proposed by an algorithm and provide additional keyphrases). Details about the dataset can be found in the original paper [(Gallina et al., 2019)][gallina-2019]. Reference (indexer-assigned) keyphrases are also categorized under the PRMU (<u>P</u>resent-<u>R</u>eordered-<u>M</u>ixed-<u>U</u>nseen) scheme as proposed in [(Boudin and Gallina, 2021)][boudin-2021]. Text pre-processing (tokenization) is carried out using `spacy` (`en_core_web_sm` model) with a special rule to avoid splitting words with hyphens (e.g. graph-based is kept as one token). Stemming (Porter's stemmer implementation provided in `nltk`) is applied before reference keyphrases are matched against the source text. Details about the process can be found in `prmu.py`. <u>P</u>resent keyphrases are ordered according to their first occurrence position in the text. 
## Content and statistics The dataset contains the following splits: | Split | # documents | #words | # keyphrases | % Present | % Reordered | % Mixed | % Unseen | | :--------- | ----------: | -----: | -----------: | --------: | ----------: | ------: | -------: | | Train | 259,923 | 921 | 5.03 | 45.61 | 15.57 | 29.63 | 9.19 | | Validation | 10,000 | 921 | 5.02 | 45.22 | 15.78 | 29.60 | 9.41 | | Test | 20,000 | 648 | 5.03 | 60.64 | 8.90 | 18.95 | 11.51 | The following data fields are available: - **id**: unique identifier of the document. - **title**: title of the document. - **abstract**: abstract of the document. - **keyphrases**: list of reference keyphrases. - **prmu**: list of <u>P</u>resent-<u>R</u>eordered-<u>M</u>ixed-<u>U</u>nseen categories for reference keyphrases. - **date**: publishing date (YYYY/MM/DD) - **categories**: categories of the article (1 or 2 categories) ## References - (Gallina et al., 2019) Ygor Gallina, Florian Boudin, and Beatrice Daille. 2019. [KPTimes: A Large-Scale Dataset for Keyphrase Generation on News Documents][gallina-2019]. In Proceedings of the 12th International Conference on Natural Language Generation, pages 130–135, Tokyo, Japan. Association for Computational Linguistics. - (Boudin and Gallina, 2021) Florian Boudin and Ygor Gallina. 2021. [Redefining Absent Keyphrases and their Effect on Retrieval Effectiveness][boudin-2021]. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4185–4193, Online. Association for Computational Linguistics. [gallina-2019]: https://aclanthology.org/W19-8617/ [boudin-2021]: https://aclanthology.org/2021.naacl-main.330/
3,407
[ [ -0.01421356201171875, -0.037811279296875, 0.0296630859375, 0.0168609619140625, -0.034576416015625, 0.0145416259765625, -0.01049041748046875, -0.006076812744140625, 0.007061004638671875, 0.0193023681640625, -0.046295166015625, -0.057281494140625, -0.0321960449218...
strombergnlp/nlpcc-stance
2022-10-25T21:47:26.000Z
[ "task_categories:text-classification", "task_ids:sentiment-analysis", "annotations_creators:expert-generated", "language_creators:found", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:zh", "license:cc-by-4.0", "stance-detection", "region:us" ]
strombergnlp
This is a stance prediction dataset in Chinese. The data is that from a shared task, stance detection in Chinese microblogs, in NLPCC-ICCPOL 2016. It covers Task A, a mandatory supervised task which detects stance towards five targets of interest with given labeled data.
@incollection{xu2016overview, title={Overview of nlpcc shared task 4: Stance detection in chinese microblogs}, author={Xu, Ruifeng and Zhou, Yu and Wu, Dongyin and Gui, Lin and Du, Jiachen and Xue, Yun}, booktitle={Natural language understanding and intelligent applications}, pages={907--916}, year={2016}, publisher={Springer} }
4
8
2022-05-19T11:19:12
--- annotations_creators: - expert-generated language_creators: - found language: - zh license: - cc-by-4.0 multilinguality: - monolingual size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - sentiment-analysis pretty_name: NLPCC Stance tags: - stance-detection --- # Dataset Card for "NLPCC 2016: Stance Detection in Chinese Microblogs" ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** [http://tcci.ccf.org.cn/conference/2016/pages/page05_evadata.html](http://tcci.ccf.org.cn/conference/2016/pages/page05_evadata.html) - **Repository:** - **Paper:** [https://link.springer.com/chapter/10.1007/978-3-319-50496-4_85](https://link.springer.com/chapter/10.1007/978-3-319-50496-4_85) - **Point of Contact:** [Mads Kongsback](https://github.com/mkonxd) - **Size of downloaded dataset files:** - **Size of the generated dataset:** - **Total amount of disk used:** ### Dataset Summary This is a stance prediction dataset in Chinese. 
The data is that from a shared task, stance detection in Chinese microblogs, in NLPCC-ICCPOL 2016. It covers Task A, a mandatory supervised task which detects stance towards five targets of interest with given labeled data. Some instances of the dataset have been removed, as they were without label. ### Supported Tasks and Leaderboards * Stance Detection in Chinese Microblogs ### Languages Chinese, as spoken on the Weibo website (`bcp47:zh`) ## Dataset Structure ### Data Instances Example instance: ``` { 'id': '0', 'target': 'IphoneSE', 'text': '3月31日,苹果iPhone SE正式开卖,然而这款小屏新机并未出现人们预想的疯抢局面。根据市场分析机构Localytics周一公布的数据,iPhone SE正式上市的这个周末,销量成绩并不算太好。', 'stance': 2 } ``` ### Data Fields * id: a `string` field with a unique id for the instance * target: a `string` representing the target of the stance * text: a `string` of the stance-bearing text * stance: an `int` representing class label -- `0`: AGAINST; `1`: FAVOR; `2`: NONE. ### Data Splits The training split has 2986 instances ## Dataset Creation ### Curation Rationale The goal was to create a dataset of microblog text annotated for stance. Six stance targets were selected and data was collected from Sina Weibo for annotation. ### Source Data #### Initial Data Collection and Normalization Not specified #### Who are the source language producers? Sina Weibo users ### Annotations #### Annotation process The stance of each target-microblog pair is duplicated annotated by two students individually. If these two students provide the same annotation, the stance of this microblog-target pair is then labeled. If the different annotation is detected, the third student will be assigned to annotate this pair. Their annotation results will be voted to obtain the final label. #### Who are the annotators? 
Students in China ### Personal and Sensitive Information No reflections ## Considerations for Using the Data ### Social Impact of Dataset The data preserves social media utterances verbatim and so has obviated any right to be forgotten, though usernames and post IDs are not explicitly included in the data. ### Discussion of Biases There'll be at least a temporal and regional bias to this data, as well as it only representing expressions of stance on six topics. ### Other Known Limitations ## Additional Information ### Dataset Curators The dataset is curated by the paper's authors. ### Licensing Information The authors distribute this data under Creative Commons attribution license, CC-BY 4.0. ### Citation Information ``` @incollection{xu2016overview, title={Overview of nlpcc shared task 4: Stance detection in chinese microblogs}, author={Xu, Ruifeng and Zhou, Yu and Wu, Dongyin and Gui, Lin and Du, Jiachen and Xue, Yun}, booktitle={Natural language understanding and intelligent applications}, pages={907--916}, year={2016}, publisher={Springer} } ``` ### Contributions Added by [@mkonxd](https://github.com/mkonxd), [@leondz](https://github.com/leondz)
5,041
[ [ -0.01885986328125, -0.041351318359375, 0.01934814453125, 0.0213623046875, -0.045745849609375, -0.003406524658203125, -0.0192718505859375, -0.01172637939453125, 0.04852294921875, 0.00873565673828125, -0.04437255859375, -0.07867431640625, -0.04547119140625, -0...
Lehrig/GTZAN-Collection
2022-06-13T13:54:08.000Z
[ "license:apache-2.0", "region:us" ]
Lehrig
The dataset consists of 1000 audio tracks each 30 seconds long. It contains 10 genres, each represented by 100 tracks. The tracks are all 22050Hz Mono 16-bit audio files in .wav format. The genres are: * blues * classical * country * disco * hiphop * jazz * metal * pop * reggae * rock This collection includes the following GTZAN variants: * raw (original WAV files) * melspectrograms (from each WAV file, contiguous 2-second windows at 4 random locations are sampled and transformed to Mel Spectrograms, resulting in 8000 Mel Spectrograms)
@ARTICLE{1021072, author={Tzanetakis, G. and Cook, P.}, journal={IEEE Transactions on Speech and Audio Processing}, title={Musical genre classification of audio signals}, year={2002}, volume={10}, number={5}, pages={293-302}, doi={10.1109/TSA.2002.800560}}
1
8
2022-05-25T20:16:44
--- license: apache-2.0 --- # Dataset Card for GTZAN Collection ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://github.com/derekahuang/Music-Classification - **Repository:** https://github.com/derekahuang/Music-Classification - **Paper:** [Musical genre classification of audio signals](https://ieeexplore.ieee.org/document/1021072) - **Leaderboard:** [Needs More Information] - **Point of Contact:** [Needs More Information] ### Dataset Summary The dataset consists of 1000 audio tracks each 30 seconds long. It contains 10 genres, each represented by 100 tracks. The tracks are all 22050Hz Mono 16-bit audio files in .wav format. 
The genres are: * blues * classical * country * disco * hiphop * jazz * metal * pop * reggae * rock This collection includes the following GTZAN variants: * raw (original WAV files) * melspectrograms (from each WAV file, contiguous 2-second windows at 4 random locations are sampled and transformed to Mel Spectrograms, resulting in 8000 Mel Spectrograms) ### Supported Tasks and Leaderboards [Needs More Information] ### Languages [Needs More Information] ## Dataset Structure ### Data Instances [Needs More Information] ### Data Fields [Needs More Information] ### Data Splits [Needs More Information] ## Dataset Creation ### Curation Rationale [Needs More Information] ### Source Data #### Initial Data Collection and Normalization [Needs More Information] #### Who are the source language producers? [Needs More Information] ### Annotations #### Annotation process [Needs More Information] #### Who are the annotators? [Needs More Information] ### Personal and Sensitive Information [Needs More Information] ## Considerations for Using the Data ### Social Impact of Dataset [Needs More Information] ### Discussion of Biases [Needs More Information] ### Other Known Limitations [Needs More Information] ## Additional Information ### Dataset Curators [Needs More Information] ### Licensing Information [Needs More Information] ### Citation Information [Needs More Information]
3,060
[ [ -0.04302978515625, -0.01136016845703125, 0.0181121826171875, 0.00738525390625, -0.028106689453125, 0.01360321044921875, -0.040374755859375, -0.038299560546875, 0.040496826171875, 0.04193115234375, -0.0792236328125, -0.08856201171875, -0.03216552734375, -0.01...
pysentimiento/spanish-targeted-sentiment-headlines
2022-06-17T21:28:01.000Z
[ "region:us" ]
pysentimiento
null
null
1
8
2022-06-10T21:21:22
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
AlekseyKorshuk/mystery-crime-books
2022-06-11T10:54:38.000Z
[ "region:us" ]
AlekseyKorshuk
null
null
1
8
2022-06-11T10:54:36
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
nateraw/rendered-sst2
2022-10-25T10:32:21.000Z
[ "task_categories:image-classification", "task_ids:multi-class-image-classification", "annotations_creators:machine-generated", "language_creators:crowdsourced", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:extended|sst2", "language:en", "license:unknown", "region:us"...
nateraw
null
null
0
8
2022-06-15T05:32:09
--- annotations_creators: - machine-generated language_creators: - crowdsourced language: - en license: - unknown multilinguality: - monolingual pretty_name: Rendered SST-2 size_categories: - 1K<n<10K source_datasets: - extended|sst2 task_categories: - image-classification task_ids: - multi-class-image-classification --- # Rendered SST-2 The [Rendered SST-2 Dataset](https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md) from Open AI. Rendered SST2 is an image classification dataset used to evaluate the models capability on optical character recognition. This dataset was generated by rendering sentences in the Standford Sentiment Treebank v2 dataset. This dataset contains two classes (positive and negative) and is divided in three splits: a train split containing 6920 images (3610 positive and 3310 negative), a validation split containing 872 images (444 positive and 428 negative), and a test split containing 1821 images (909 positive and 912 negative).
979
[ [ -0.0197906494140625, -0.019317626953125, 0.02716064453125, -0.0142669677734375, -0.042236328125, 0.01273345947265625, 0.005710601806640625, -0.0237274169921875, 0.00029659271240234375, 0.03851318359375, -0.025543212890625, -0.02593994140625, -0.03424072265625, ...
EddieChen372/javascript-small
2022-06-18T09:37:52.000Z
[ "region:us" ]
EddieChen372
null
null
2
8
2022-06-18T09:09:25
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
s3prl/mini_voxceleb1
2022-06-19T18:49:50.000Z
[ "region:us" ]
s3prl
null
null
0
8
2022-06-19T12:06:16
Entry not found
15
[ [ -0.0213775634765625, -0.014984130859375, 0.05718994140625, 0.0288543701171875, -0.0350341796875, 0.046478271484375, 0.052520751953125, 0.005062103271484375, 0.051361083984375, 0.016998291015625, -0.0521240234375, -0.01496124267578125, -0.0604248046875, 0.037...
scikit-learn/breast-cancer-wisconsin
2022-06-20T14:28:58.000Z
[ "license:cc-by-sa-4.0", "region:us" ]
scikit-learn
null
null
0
8
2022-06-20T14:22:00
--- license: cc-by-sa-4.0 --- ## Breast Cancer Wisconsin Diagnostic Dataset Following description was retrieved from [breast cancer dataset on UCI machine learning repository](https://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+(diagnostic)). Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. A few of the images can be found at [here](https://pages.cs.wisc.edu/~street/images/). Separating plane described above was obtained using Multisurface Method-Tree (MSM-T), a classification method which uses linear programming to construct a decision tree. Relevant features were selected using an exhaustive search in the space of 1-4 features and 1-3 separating planes. The actual linear program used to obtain the separating plane in the 3-dimensional space is that described in: [K. P. Bennett and O. L. Mangasarian: "Robust Linear Programming Discrimination of Two Linearly Inseparable Sets", Optimization Methods and Software 1, 1992, 23-34]. Attribute Information: - ID number - Diagnosis (M = malignant, B = benign) Ten real-valued features are computed for each cell nucleus: - radius (mean of distances from center to points on the perimeter) - texture (standard deviation of gray-scale values) - perimeter - area - smoothness (local variation in radius lengths) - compactness (perimeter^2 / area - 1.0) - concavity (severity of concave portions of the contour) - concave points (number of concave portions of the contour) - symmetry - fractal dimension ("coastline approximation" - 1)
1,626
[ [ -0.03826904296875, -0.047576904296875, 0.042236328125, 0.01300048828125, -0.0261383056640625, -0.006198883056640625, 0.039947509765625, -0.00690460205078125, 0.0156097412109375, 0.05078125, -0.046630859375, -0.07000732421875, -0.039886474609375, -0.000806331...
knkarthick/samsum
2022-10-21T03:03:27.000Z
[ "task_categories:summarization", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:cc-by-nc-nd-4.0", "conversations-summarization", "arxiv:1911.12237", "r...
knkarthick
null
null
3
8
2022-06-29T08:24:34
--- annotations_creators: - expert-generated language_creators: - expert-generated language: - en license: - cc-by-nc-nd-4.0 multilinguality: - monolingual size_categories: - 10K<n<100K source_datasets: - original task_categories: - summarization task_ids: [] paperswithcode_id: samsum-corpus pretty_name: SAMSum Corpus tags: - conversations-summarization --- # Dataset Card for SAMSum Corpus ## Dataset Description ### Links - **Homepage:** hhttps://arxiv.org/abs/1911.12237v2 - **Repository:** https://arxiv.org/abs/1911.12237v2 - **Paper:** https://arxiv.org/abs/1911.12237v2 - **Point of Contact:** https://huggingface.co/knkarthick ### Dataset Summary The SAMSum dataset contains about 16k messenger-like conversations with summaries. Conversations were created and written down by linguists fluent in English. Linguists were asked to create conversations similar to those they write on a daily basis, reflecting the proportion of topics of their real-life messenger conversations. The style and register are diversified - conversations could be informal, semi-formal or formal, they may contain slang words, emoticons and typos. Then, the conversations were annotated with summaries. It was assumed that summaries should be a concise brief of what people talked about in the conversation in third person. The SAMSum dataset was prepared by Samsung R&D Institute Poland and is distributed for research purposes (non-commercial licence: CC BY-NC-ND 4.0). ### Languages English ## Dataset Structure ### Data Instances SAMSum dataset is made of 16369 conversations distributed uniformly into 4 groups based on the number of utterances in con- versations: 3-6, 7-12, 13-18 and 19-30. Each utterance contains the name of the speaker. 
Most conversations consist of dialogues between two interlocutors (about 75% of all conversations), the rest is between three or more people The first instance in the training set: {'id': '13818513', 'summary': 'Amanda baked cookies and will bring Jerry some tomorrow.', 'dialogue': "Amanda: I baked cookies. Do you want some?\r\nJerry: Sure!\r\nAmanda: I'll bring you tomorrow :-)"} ### Data Fields - dialogue: text of dialogue. - summary: human written summary of the dialogue. - id: unique file id of an example. ### Data Splits - train: 14732 - val: 818 - test: 819 ## Dataset Creation ### Curation Rationale In paper: In the first approach, we reviewed datasets from the following categories: chatbot dialogues, SMS corpora, IRC/chat data, movie dialogues, tweets, comments data (conversations formed by replies to comments), transcription of meetings, written discussions, phone dialogues and daily communication data. Unfortunately, they all differed in some respect from the conversations that are typically written in messenger apps, e.g. they were too technical (IRC data), too long (comments data, transcription of meetings), lacked context (movie dialogues) or they were more of a spoken type, such as a dialogue between a petrol station assistant and a client buying petrol. As a consequence, we decided to create a chat dialogue dataset by constructing such conversations that would epitomize the style of a messenger app. ### Who are the source language producers? linguists ### Who are the annotators? language experts ### Annotation process In paper: Each dialogue was created by one person. After collecting all of the conversations, we asked language experts to annotate them with summaries, assuming that they should (1) be rather short, (2) extract important pieces of information, (3) include names of interlocutors, (4) be written in the third person. Each dialogue contains only one reference summary. 
## Licensing Information non-commercial licence: CC BY-NC-ND 4.0 ## Citation Information ``` @inproceedings{gliwa-etal-2019-samsum, title = "{SAMS}um Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization", author = "Gliwa, Bogdan and Mochol, Iwona and Biesek, Maciej and Wawer, Aleksander", booktitle = "Proceedings of the 2nd Workshop on New Frontiers in Summarization", month = nov, year = "2019", address = "Hong Kong, China", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/D19-5409", doi = "10.18653/v1/D19-5409", pages = "70--79" } ``` ## Contributions
4,358
[ [ -0.026611328125, -0.060699462890625, 0.01464080810546875, 0.01113128662109375, -0.020660400390625, 0.00576019287109375, -0.0229339599609375, -0.037261962890625, 0.051605224609375, 0.0418701171875, -0.042388916015625, -0.047576904296875, -0.0270233154296875, ...
PolyAI/evi
2022-10-25T10:39:33.000Z
[ "annotations_creators:crowdsourced", "annotations_creators:machine-generated", "language_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:multilingual", "language:en", "language:fr", "language:pl", "license:cc-by-4.0", "arxiv:2204.13496", "region:us" ]
PolyAI
EVI is a challenging spoken multilingual dataset with 5,506 dialogues in English, Polish, and French that can be used for benchmarking and developing knowledge-based enrolment, identification, and identification for spoken dialogue systems.
@inproceedings{Spithourakis2022evi, author = {Georgios P. Spithourakis and Ivan Vuli\'{c} and Micha\l{} Lis and I\~{n}igo Casanueva and Pawe\l{} Budzianowski}, title = {{EVI}: Multilingual Spoken Dialogue Tasks and Dataset for Knowledge-Based Enrolment, Verification, and Identification}, year = {2022}, note = {Data available at https://github.com/PolyAI-LDN/evi-paper}, url = {https://arxiv.org/abs/2204.13496}, booktitle = {Findings of NAACL (publication pending)} }
2
8
2022-06-30T11:42:45
--- annotations_creators: - crowdsourced - machine-generated language_creators: - crowdsourced - expert-generated language: - en - fr - pl license: - cc-by-4.0 multilinguality: - multilingual paperswithcode_id: evi-multilingual-spoken-dialogue-tasks-and-1 language_bcp47: - en - en-GB - fr - fr-FR - pl --- # EVI ## Dataset Description - **Paper:** [EVI: Multilingual Spoken Dialogue Tasks and Dataset for Knowledge-Based Enrolment, Verification, and Identification](https://arxiv.org/abs/2204.13496) - **Repository:** [Github](https://github.com/PolyAI-LDN/evi-paper) EVI is a challenging spoken multilingual dataset with 5,506 dialogues in English, Polish, and French that can be used for benchmarking and developing knowledge-based enrolment, identification, and identification for spoken dialogue systems. ## Example EVI can be downloaded and used as follows: ```py from datasets import load_dataset evi = load_dataset("PolyAI/evi", "en-GB") # for British English # to download data from all locales use: # evi = load_dataset("PolyAI/evi", "all") # see structure print(evi) ``` ## Dataset Structure We show detailed information of the example for the `en-GB` configuration of the dataset. All other configurations have the same structure. 
### Data Instances An example of a data instance of the config `en-GB` looks as follows: ``` { "language": 0, "dialogue_id": "CA0007220161df7be23f4554704c8720f5", "speaker_id": "e80e9bdd33eda593f16a1b6f2fb228ff", "turn_id": 0, "target_profile_id": "en.GB.608", "asr_transcription": "w20 a b", "asr_nbest'": ["w20 a b", "w20 a bee", "w20 a baby"], "path": "audios/en/CA0007220161df7be23f4554704c8720f5/0.wav", "audio": { "path": "/home/georgios/.cache/huggingface/datasets/downloads/extracted/0335ebc25feace53243133b49ba17ba18e26f0f97cb083ffdf4e73dd7427b443/audios/en/CA0007220161df7be23f4554704c8720f5/0.wav", "array": array([ 0.00024414, 0.00024414, 0.00024414, ..., 0.00024414, -0.00024414, 0.00024414], dtype=float32), "sampling_rate": 8000, } } ``` ### Data Fields The data fields are the same among all splits. - **language** (int): ID of language - **dialogue_id** (str): the ID of the dialogue - **speaker_id** (str): the ID of the speaker - **turn_id** (int)": the ID of the turn - **target_profile_id** (str): the ID of the target profile - **asr_transcription** (str): ASR transcription of the audio file - **asr_nbest** (list): n-best ASR transcriptions of the audio file - **path** (str): Path to the audio file - **audio** (dict): Audio object including loaded audio array, sampling rate and path of audio ### Data Splits Every config only has the `"test"` split containing *ca.* 1,800 dialogues. 
## Dataset Creation [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Discussion of Biases [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Other Known Limitations [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ## Additional Information ### Dataset Curators [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) ### Licensing Information All datasets are licensed under the [Creative Commons license (CC-BY)](https://creativecommons.org/licenses/). ### Citation Information ``` @inproceedings{Spithourakis2022evi, author = {Georgios P. Spithourakis and Ivan Vuli\'{c} and Micha\l{} Lis and I\~{n}igo Casanueva and Pawe\l{} Budzianowski}, title = {{EVI}: Multilingual Spoken Dialogue Tasks and Dataset for Knowledge-Based Enrolment, Verification, and Identification}, year = {2022}, note = {Data available at https://github.com/PolyAI-LDN/evi-paper}, url = {https://arxiv.org/abs/2204.13496}, booktitle = {Findings of NAACL (publication pending)} } ``` ### Contributions Thanks to [@polinaeterna](https://github.com/polinaeterna) for helping with adding this dataset
4,502
[ [ -0.035400390625, -0.037139892578125, 0.0131988525390625, 0.0193023681640625, -0.005096435546875, -0.005153656005859375, -0.031402587890625, -0.0183868408203125, 0.037261962890625, 0.0261993408203125, -0.060516357421875, -0.06390380859375, -0.0352783203125, 0...
MicPie/unpredictable_full
2022-08-04T20:07:28.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
8
2022-07-02T20:22:21
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-full size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-full" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data - 
**Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * 
[UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * 
[UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. 
This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. 
As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. 
Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,787
[ [ -0.039459228515625, -0.04071044921875, 0.0330810546875, 0.0239410400390625, 0.005733489990234375, 0.00930023193359375, -0.00968170166015625, -0.043487548828125, 0.0382080078125, 0.022430419921875, -0.07342529296875, -0.047210693359375, -0.046630859375, 0.015...
shahidul034/text_generation_model_data
2022-07-03T09:53:36.000Z
[ "region:us" ]
shahidul034
null
null
1
8
2022-07-03T09:47:21
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
MicPie/unpredictable_w3-org
2022-08-04T20:16:53.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
8
2022-07-03T11:45:06
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-w3-org size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-w3-org" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data 
- **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * 
[UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * 
[UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. 
This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. 
As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. 
Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,791
[ [ -0.04058837890625, -0.0390625, 0.0328369140625, 0.0238494873046875, 0.006740570068359375, 0.01055908203125, -0.007568359375, -0.045989990234375, 0.03460693359375, 0.02008056640625, -0.07421875, -0.045928955078125, -0.04632568359375, 0.0165557861328125, -...
MicPie/unpredictable_sporcle-com
2022-08-04T20:13:59.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
8
2022-07-03T11:58:21
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-sporcle-com size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-sporcle-com" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with 
UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * 
[UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * 
[UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. 
This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. 
As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. 
Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,801
[ [ -0.04144287109375, -0.041900634765625, 0.031219482421875, 0.023712158203125, 0.005908966064453125, 0.01126861572265625, -0.0114288330078125, -0.0428466796875, 0.0391845703125, 0.021331787109375, -0.072265625, -0.04522705078125, -0.048919677734375, 0.01655578...
MicPie/unpredictable_wiki-openmoko-org
2022-08-04T20:17:59.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
8
2022-07-03T12:06:24
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-wiki-openmoko-org size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-wiki-openmoko-org" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works 
with UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * 
[UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * 
[UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. 
This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. 
As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. 
Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,813
[ [ -0.0413818359375, -0.04010009765625, 0.03125, 0.0208740234375, 0.005863189697265625, 0.00731658935546875, -0.0096435546875, -0.043670654296875, 0.036590576171875, 0.021942138671875, -0.073486328125, -0.0462646484375, -0.04571533203125, 0.0160980224609375, ...
MicPie/unpredictable_ensembl-org
2022-08-04T20:06:23.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
8
2022-07-03T12:19:43
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-ensembl-org size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-ensembl-org" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with 
UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * 
[UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * 
[UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. 
This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. 
As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. 
Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,801
[ [ -0.04046630859375, -0.040618896484375, 0.031829833984375, 0.023223876953125, 0.00659942626953125, 0.01143646240234375, -0.00839996337890625, -0.04425048828125, 0.038116455078125, 0.0212249755859375, -0.07257080078125, -0.04547119140625, -0.0458984375, 0.0166...
Paul/hatecheck-german
2022-07-05T10:38:52.000Z
[ "task_categories:text-classification", "task_ids:hate-speech-detection", "annotations_creators:crowdsourced", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:1K<n<10K", "source_datasets:original", "language:de", "license:cc-by-4.0", "arxiv:2206.09917", "regi...
Paul
null
null
0
8
2022-07-05T10:36:48
--- annotations_creators: - crowdsourced language_creators: - expert-generated language: - de license: - cc-by-4.0 multilinguality: - monolingual pretty_name: German HateCheck size_categories: - 1K<n<10K source_datasets: - original task_categories: - text-classification task_ids: - hate-speech-detection --- # Dataset Card for Multilingual HateCheck ## Dataset Description Multilingual HateCheck (MHC) is a suite of functional tests for hate speech detection models in 10 different languages: Arabic, Dutch, French, German, Hindi, Italian, Mandarin, Polish, Portuguese and Spanish. For each language, there are 25+ functional tests that correspond to distinct types of hate and challenging non-hate. This allows for targeted diagnostic insights into model performance. For more details, please refer to our paper about MHC, published at the 2022 Workshop on Online Abuse and Harms (WOAH) at NAACL 2022. If you are using MHC, please cite our work! - **Paper:** Röttger et al. (2022) - Multilingual HateCheck: Functional Tests for Multilingual Hate Speech Detection Models. https://arxiv.org/abs/2206.09917 - **Repository:** https://github.com/rewire-online/multilingual-hatecheck - **Point of Contact:** paul@rewire.online ## Dataset Structure The csv format mostly matches the original HateCheck data, with some adjustments for specific languages. **mhc_case_id** The test case ID that is unique to each test case across languages (e.g., "mandarin-1305") **functionality** The shorthand for the functionality tested by the test case (e.g, "target_obj_nh"). The same functionalities are tested in all languages, except for Mandarin and Arabic, where non-Latin script required adapting the tests for spelling variations. **test_case** The test case text. **label_gold** The gold standard label ("hateful" or "non-hateful") of the test case. All test cases within a given functionality have the same gold standard label. 
**target_ident** Where applicable, the protected group that is targeted or referenced in the test case. All HateChecks cover seven target groups, but their composition varies across languages. **ref_case_id** For hateful cases, where applicable, the ID of the hateful case which was perturbed to generate this test case. For non-hateful cases, where applicable, the ID of the hateful case which is contrasted by this test case. **ref_templ_id** The equivalent to ref_case_id, but for template IDs. **templ_id** The ID of the template from which the test case was generated. **case_templ** The template from which the test case was generated (where applicable). **gender_male** and **gender_female** For gender-inflected languages (French, Spanish, Portuguese, Hindi, Arabic, Italian, Polish, German), only for cases where gender inflection is relevant, separate entries for gender_male and gender_female replace case_templ. **label_annotated** A list of labels given by the three annotators who reviewed the test case (e.g., "['hateful', 'hateful', 'hateful']"). **label_annotated_maj** The majority vote of the three annotators (e.g., "hateful"). In some cases this differs from the gold label given by our language experts. **disagreement_in_case** True if label_annotated_maj does not match label_gold for the entry. **disagreement_in_template** True if the test case is generated from an IDENT template and there is at least one case with disagreement_in_case generated from the same template. This can be used to exclude entire templates from MHC.
3,489
[ [ -0.046661376953125, -0.05206298828125, -0.0040130615234375, 0.006702423095703125, -0.008392333984375, 0.00782012939453125, -0.0022068023681640625, -0.037109375, 0.0290679931640625, 0.023834228515625, -0.055206298828125, -0.056182861328125, -0.040863037109375, ...
MicPie/unpredictable_cluster10
2022-08-04T19:49:37.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
8
2022-07-08T17:18:25
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster10 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster10" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with 
UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * 
[UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * 
[UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. 
This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. 
As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. 
Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.041900634765625, -0.039947509765625, 0.03350830078125, 0.0234375, 0.00724029541015625, 0.01116180419921875, -0.01091766357421875, -0.043182373046875, 0.03759765625, 0.019378662109375, -0.07183837890625, -0.04730224609375, -0.046661376953125, 0.01455688476...
MicPie/unpredictable_cluster12
2022-08-04T19:52:07.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
8
2022-07-08T17:20:21
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster12 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster12" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with 
UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * 
[UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * 
[UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. 
This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. 
As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. 
Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.04083251953125, -0.03961181640625, 0.033447265625, 0.0233306884765625, 0.00632476806640625, 0.01149749755859375, -0.01045989990234375, -0.043182373046875, 0.037750244140625, 0.01885986328125, -0.0736083984375, -0.0484619140625, -0.04718017578125, 0.013175...
MicPie/unpredictable_cluster17
2022-08-04T19:55:23.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
8
2022-07-08T17:33:42
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster17 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster17" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with 
UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * 
[UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * 
[UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. 
This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. 
As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. 
Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.041046142578125, -0.04046630859375, 0.032928466796875, 0.0220794677734375, 0.005657196044921875, 0.01129913330078125, -0.0109710693359375, -0.043182373046875, 0.036712646484375, 0.0198822021484375, -0.0733642578125, -0.049163818359375, -0.04608154296875, ...
MicPie/unpredictable_cluster26
2022-08-04T20:00:43.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
8
2022-07-08T18:38:15
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster26 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster26" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with 
UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * 
[UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * 
[UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. 
This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. 
As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. 
Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.04107666015625, -0.03936767578125, 0.033477783203125, 0.023193359375, 0.0058746337890625, 0.01099395751953125, -0.01102447509765625, -0.042266845703125, 0.0372314453125, 0.019683837890625, -0.07366943359375, -0.048980712890625, -0.046478271484375, 0.01274...
MicPie/unpredictable_cluster09
2022-08-04T19:48:52.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
8
2022-07-08T19:15:21
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-cluster09 size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-cluster09" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with 
UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * 
[UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * 
[UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. 
This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. 
As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. 
Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,797
[ [ -0.040008544921875, -0.039642333984375, 0.03411865234375, 0.0228424072265625, 0.006374359130859375, 0.0113677978515625, -0.010650634765625, -0.042388916015625, 0.038238525390625, 0.0204010009765625, -0.07232666015625, -0.047393798828125, -0.046630859375, 0.0...
MicPie/unpredictable_rated-high
2022-08-04T20:11:37.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
MicPie
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
@misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} }
0
8
2022-07-09T08:56:24
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-rated-high size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-rated-high" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Homepage:** https://ethanperez.net/unpredictable - **Repository:** https://github.com/JunShern/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with 
UnpredicTable Data - **Point of Contact:** junshern@nyu.edu, perez@nyu.edu ### Dataset Summary The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset. 
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings): * [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low) * [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium) * [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com) * [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net) * [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com) * [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com) * [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com) * [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com) * [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org) * [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org) * [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com) * [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com) * [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com) * [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com) * [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com) * [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com) * 
[UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com) * [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com) * [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com) * [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org) * [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org) * [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org) * UnpredicTable data subsets based on clustering (for the clustering details please see our publication): * [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00) * [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01) * [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02) * [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03) * [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04) * [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05) * [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06) * [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07) * [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08) * [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09) * [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10) * [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11) * [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12) * [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13) * 
[UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14) * [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15) * [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16) * [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17) * [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18) * [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19) * [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20) * [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21) * [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22) * [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23) * [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24) * [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25) * [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26) * [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27) * [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28) * [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29) * [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. 
This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. ### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. 
As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. ### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Annotations #### Annotation process Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low), [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication. #### Who are the annotators? Annotations were carried out by a lab assistant. ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. 
Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Dataset Curators Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez ### Licensing Information Apache 2.0 ### Citation Information ``` @misc{chan2022few, author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan}, title = {Few-shot Adaptation Works with UnpredicTable Data}, publisher={arXiv}, year = {2022}, url = {https://arxiv.org/abs/2208.01009} } ```
14,799
[ [ -0.040191650390625, -0.041107177734375, 0.030517578125, 0.0246429443359375, 0.007228851318359375, 0.0096435546875, -0.0092010498046875, -0.04315185546875, 0.03582763671875, 0.0228424072265625, -0.0709228515625, -0.048095703125, -0.047027587890625, 0.01397705...
jonathanli/legal-advice-reddit
2023-02-23T16:39:28.000Z
[ "language:en", "reddit", "law", "region:us" ]
jonathanli
null
null
5
8
2022-07-27T20:19:25
--- language: - en tags: - reddit - law pretty_name: Legal Advice Reddit --- # Dataset Card for Legal Advice Reddit Dataset ## Dataset Description - **Paper: [Parameter-Efficient Legal Domain Adaptation](https://aclanthology.org/2022.nllp-1.10/)** - **Point of Contact: jxl@queensu.ca** ### Dataset Summary New dataset introduced in [Parameter-Efficient Legal Domain Adaptation](https://aclanthology.org/2022.nllp-1.10) (Li et al., NLLP 2022) from the Legal Advice Reddit community (known as "/r/legaldvice"), sourcing the Reddit posts from the Pushshift Reddit dataset. The dataset maps the text and title of each legal question posted into one of eleven classes, based on the original Reddit post's "flair" (i.e., tag). Questions are typically informal and use non-legal-specific language. Per the Legal Advice Reddit rules, posts must be about actual personal circumstances or situations. We limit the number of labels to the top eleven classes and remove the other samples from the dataset. ### Citation Information ``` @inproceedings{li-etal-2022-parameter, title = "Parameter-Efficient Legal Domain Adaptation", author = "Li, Jonathan and Bhambhoria, Rohan and Zhu, Xiaodan", booktitle = "Proceedings of the Natural Legal Language Processing Workshop 2022", month = dec, year = "2022", address = "Abu Dhabi, United Arab Emirates (Hybrid)", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2022.nllp-1.10", pages = "119--129", } ```
1,537
[ [ -0.0217742919921875, -0.059295654296875, 0.0164642333984375, 0.01837158203125, -0.055755615234375, -0.019805908203125, -0.0232391357421875, -0.020263671875, 0.0213470458984375, 0.044189453125, -0.05218505859375, -0.06585693359375, -0.040496826171875, 0.00788...
graphs-datasets/ZINC
2023-02-07T16:37:32.000Z
[ "task_categories:graph-ml", "license:unknown", "region:us" ]
graphs-datasets
null
null
0
8
2022-08-01T15:11:09
--- license: unknown dataset_info: features: - name: node_feat sequence: sequence: int64 - name: edge_index sequence: sequence: int64 - name: edge_attr sequence: sequence: int64 - name: 'y' sequence: float64 - name: num_nodes dtype: int64 splits: - name: train num_bytes: 376796456 num_examples: 220011 - name: test num_bytes: 8538528 num_examples: 5000 - name: validation num_bytes: 41819628 num_examples: 24445 download_size: 20636253 dataset_size: 427154612 task_categories: - graph-ml --- # Dataset Card for ZINC ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [External Use](#external-use) - [PyGeometric](#pygeometric) - [Dataset Structure](#dataset-structure) - [Data Properties](#data-properties) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Additional Information](#additional-information) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **[Homepage](https://zinc15.docking.org/)** - **[Repository](https://www.dropbox.com/s/feo9qle74kg48gy/molecules.zip?dl=1):**: - **Paper:**: ZINC 15 – Ligand Discovery for Everyone (see citation) - **Leaderboard:**: [Papers with code leaderboard](https://paperswithcode.com/sota/) ### Dataset Summary The `ZINC` dataset is a "curated collection of commercially available chemical compounds prepared especially for virtual screening" (Wikipedia). ### Supported Tasks and Leaderboards `ZINC` should be used for molecular property prediction (aiming to predict the constrained solubility of the molecules), a graph regression task. The score used is the MAE. The associated leaderboard is here: [Papers with code leaderboard](https://paperswithcode.com/sota/graph-regression-on-zinc). 
## External Use ### PyGeometric To load in PyGeometric, do the following: ```python from datasets import load_dataset from torch_geometric.data import Data from torch_geometric.loader import DataLoader dataset_hf = load_dataset("graphs-datasets/<mydataset>") # For the train set (replace by valid or test as needed) dataset_pg_list = [Data(graph) for graph in dataset_hf["train"]] dataset_pg = DataLoader(dataset_pg_list) ``` ## Dataset Structure ### Data Properties | property | value | |---|---| | scale | big | | #graphs | 220011 | | average #nodes | 23.15 | | average #edges | 49.81 | ### Data Fields Each row of a given file is a graph, with: - `node_feat` (list: #nodes x #node-features): nodes - `edge_index` (list: 2 x #edges): pairs of nodes constituting edges - `edge_attr` (list: #edges x #edge-features): for the aforementioned edges, contains their features - `y` (list: 1 x #labels): contains the number of labels available to predict (here 1, equal to zero or one) - `num_nodes` (int): number of nodes of the graph ### Data Splits This data comes from the PyGeometric version of the dataset, and follows the provided data splits. This information can be found back using ```python from torch_geometric.datasets import ZINC dataset = ZINC(root = '', split='train') # valid, test ``` ## Additional Information ### Licensing Information The dataset has been released under unknown license. Please open an issue if you know what is the license of this dataset. 
### Citation Information ```bibtex @article{doi:10.1021/acs.jcim.5b00559, author = {Sterling, Teague and Irwin, John J.}, title = {ZINC 15 – Ligand Discovery for Everyone}, journal = {Journal of Chemical Information and Modeling}, volume = {55}, number = {11}, pages = {2324-2337}, year = {2015}, doi = {10.1021/acs.jcim.5b00559}, note ={PMID: 26479676}, URL = { https://doi.org/10.1021/acs.jcim.5b00559 }, eprint = { https://doi.org/10.1021/acs.jcim.5b00559 } } ``` ### Contributions Thanks to [@clefourrier](https://github.com/clefourrier) for adding this dataset.
4,177
[ [ -0.0233001708984375, -0.0240325927734375, 0.035247802734375, 0.006870269775390625, -0.0051422119140625, -0.016082763671875, -0.01447296142578125, -0.01471710205078125, 0.0226898193359375, 0.00982666015625, -0.058929443359375, -0.08099365234375, -0.02339172363281...
cjvt/sentinews
2022-08-17T06:28:13.000Z
[ "task_categories:text-classification", "task_ids:sentiment-classification", "annotations_creators:crowdsourced", "language_creators:found", "multilinguality:monolingual", "source_datasets:original", "language:sl", "license:cc-by-sa-4.0", "slovenian sentiment", "news articles", "region:us" ]
cjvt
SentiNews is a Slovenian sentiment classification dataset, consisting of news articles manually annotated with their sentiment by between 2 and 6 annotators. The news articles contain political, business, economic and financial content from the Slovenian news portals 24ur, Dnevnik, Finance, Rtvslo, and Žurnal24. The texts were annotated using the five-level Lickert scale (1 – very negative, 2 – negative, 3 – neutral, 4 – positive, and 5 – very positive) on three levels of granularity, i.e. on the document, paragraph, and sentence level. The final sentiment is determined using the following criterion: negative (if average of scores ≤ 2.4); neutral (if average of scores is between 2.4 and 3.6); positive (average of annotated scores ≥ 3.6).
@article{buvcar2018annotated, title={Annotated news corpora and a lexicon for sentiment analysis in Slovene}, author={Bu{\v{c}}ar, Jo{\v{z}}e and {\v{Z}}nidar{\v{s}}i{\v{c}}, Martin and Povh, Janez}, journal={Language Resources and Evaluation}, volume={52}, number={3}, pages={895--919}, year={2018}, publisher={Springer} }
1
8
2022-08-15T08:32:30
--- annotations_creators: - crowdsourced language: - sl language_creators: - found license: - cc-by-sa-4.0 multilinguality: - monolingual pretty_name: SentiNews size_categories: [] source_datasets: - original tags: - slovenian sentiment - news articles task_categories: - text-classification task_ids: - sentiment-classification --- # Dataset Card for SentiNews ## Dataset Description - **Homepage:** https://github.com/19Joey85/Sentiment-annotated-news-corpus-and-sentiment-lexicon-in-Slovene - **Paper:** Bučar, J., Žnidaršič, M. & Povh, J. Annotated news corpora and a lexicon for sentiment analysis in Slovene. Lang Resources & Evaluation 52, 895–919 (2018). https://doi.org/10.1007/s10579-018-9413-3 ### Dataset Summary SentiNews is a Slovenian sentiment classification dataset, consisting of news articles manually annotated with their sentiment by between two and six annotators. It is annotated at three granularities: - document-level (config `document_level`, 10 427 documents), - paragraph-level (config `paragraph_level`, 89 999 paragraphs), and - sentence-level (config `sentence_level`, 168 899 sentences). ### Supported Tasks and Leaderboards Sentiment classification, three classes (negative, neutral, positive). ### Languages Slovenian. ## Dataset Structure ### Data Instances A sample instance from the sentence-level config: ``` { 'nid': 2, 'content': 'Vilo Prešeren je na dražbi ministrstva za obrambo kupilo nepremičninsko podjetje Condor Real s sedežem v Lescah.', 'sentiment': 'neutral', 'pid': 1, 'sid': 1 } ``` ### Data Fields The data fields are similar among all three configs, with the only difference being the IDs. - `nid`: a uint16 containing a unique ID of the news article (document). 
- `content`: a string containing the body of the news article - `sentiment`: the sentiment of the instance - `pid`: a uint8 containing the consecutive number of the paragraph inside the current news article, **not unique** (present in the configs `paragraph_level` and `sentence_level`) - `sid`: a uint8 containing the consecutive number of the sentence inside the current paragraph, **not unique** (present in the config `sentence_level`) ## Additional Information ### Dataset Curators Jože Bučar, Martin Žnidaršič, Janez Povh. ### Licensing Information CC BY-SA 4.0 ### Citation Information ``` @article{buvcar2018annotated, title={Annotated news corpora and a lexicon for sentiment analysis in Slovene}, author={Bu{\v{c}}ar, Jo{\v{z}}e and {\v{Z}}nidar{\v{s}}i{\v{c}}, Martin and Povh, Janez}, journal={Language Resources and Evaluation}, volume={52}, number={3}, pages={895--919}, year={2018}, publisher={Springer} } ``` ### Contributions Thanks to [@matejklemen](https://github.com/matejklemen) for adding this dataset.
2,812
[ [ -0.0263214111328125, -0.0257110595703125, 0.015655517578125, 0.041473388671875, -0.0313720703125, -0.0132598876953125, -0.0277252197265625, -0.0107421875, 0.01947021484375, 0.0257720947265625, -0.06292724609375, -0.0859375, -0.044342041015625, 0.018051147460...
allenai/multinews_sparse_oracle
2022-11-12T00:15:42.000Z
[ "task_categories:summarization", "task_ids:news-articles-summarization", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:10K<n<100K", "source_datasets:original", "language:en", "license:other", "region:us" ]
allenai
null
null
1
8
2022-08-17T22:44:40
--- annotations_creators: - expert-generated language_creators: - expert-generated language: - en license: - other multilinguality: - monolingual pretty_name: Multi-News size_categories: - 10K<n<100K source_datasets: - original task_categories: - summarization task_ids: - news-articles-summarization paperswithcode_id: multi-news train-eval-index: - config: default task: summarization task_id: summarization splits: train_split: train eval_split: test col_mapping: document: text summary: target metrics: - type: rouge name: Rouge --- This is a copy of the [Multi-News](https://huggingface.co/datasets/multi_news) dataset, except the input source documents of its `test` split have been replaced by a __sparse__ retriever. The retrieval pipeline used: - __query__: The `summary` field of each example - __corpus__: The union of all documents in the `train`, `validation` and `test` splits - __retriever__: BM25 via [PyTerrier](https://pyterrier.readthedocs.io/en/latest/) with default settings - __top-k strategy__: `"oracle"`, i.e. the number of documents retrieved, `k`, is set as the original number of input documents for each example Retrieval results on the `test` set: | Recall@100 | Rprec | Precision@k | Recall@k | | ----------- | ----------- | ----------- | ----------- | | 0.8775 | 0.7480 | 0.7480 | 0.7480 |
1,361
[ [ -0.0275115966796875, -0.02703857421875, 0.01751708984375, 0.0170440673828125, -0.0284881591796875, 0.0034160614013671875, -0.0177001953125, 0.00844573974609375, 0.04290771484375, 0.028656005859375, -0.048614501953125, -0.0390625, -0.050384521484375, 0.000894...
pinecone/movielens-recent-ratings
2022-08-23T10:00:17.000Z
[ "annotations_creators:machine-generated", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:100K<n<1M", "language:en", "movielens", "recommendation", "collaborative filtering", "region:us" ]
pinecone
This dataset streams recent user ratings from the MovieLens 25M dataset and adds poster URLs.
@InProceedings{huggingface:dataset, title = {MovieLens Ratings}, author={Ismail Ashraq, James Briggs}, year={2022} }
1
8
2022-08-22T16:42:11
--- annotations_creators: - machine-generated language: - en language_creators: - machine-generated license: [] multilinguality: - monolingual pretty_name: MovieLens User Ratings size_categories: - 100K<n<1M source_datasets: [] tags: - movielens - recommendation - collaborative filtering task_categories: [] task_ids: [] --- # MovieLens User Ratings This dataset contains ~1M user ratings, consisting of ~10k of the most recent movies from the MovieLens 25M dataset, for which over 30k unique users have rated. The dataset is streamed from the MovieLens 25M dataset, filters for the recent movies, and returns the user ratings for those. After a few joins and checks, we get this dataset. Included are the URLs of the respective movie posters. The dataset is part of an example on [building a movie recommendation engine](https://www.pinecone.io/docs/examples/movie-recommender-system/) with vector search.
910
[ [ -0.052642822265625, -0.0010385513305664062, 0.029052734375, -0.00012981891632080078, -0.04156494140625, 0.0014181137084960938, 0.01311492919921875, -0.008056640625, 0.0148773193359375, 0.039215087890625, -0.0628662109375, -0.058135986328125, -0.0277099609375, ...
unpredictable/unpredictable_support-google-com
2022-08-28T18:25:26.000Z
[ "task_categories:multiple-choice", "task_categories:question-answering", "task_categories:zero-shot-classification", "task_categories:text2text-generation", "task_categories:table-question-answering", "task_categories:text-generation", "task_categories:text-classification", "task_categories:tabular-cl...
unpredictable
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
null
0
8
2022-08-28T18:12:13
--- annotations_creators: - no-annotation language_creators: - found language: - en license: - apache-2.0 multilinguality: - monolingual pretty_name: UnpredicTable-support-google-com size_categories: - 100K<n<1M source_datasets: [] task_categories: - multiple-choice - question-answering - zero-shot-classification - text2text-generation - table-question-answering - text-generation - text-classification - tabular-classification task_ids: - multiple-choice-qa - extractive-qa - open-domain-qa - closed-domain-qa - closed-book-qa - open-book-qa - language-modeling - multi-class-classification - natural-language-inference - topic-classification - multi-label-classification - tabular-multi-class-classification - tabular-multi-label-classification --- # Dataset Card for "UnpredicTable-support-google-com" - Dataset of Few-shot Tasks from Tables ## Table of Contents - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-instances) - [Data Splits](#data-instances) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) ## Dataset Description - **Repository:** https://github.com/AnonCodeShare/few-shot-adaptation - **Paper:** Few-shot Adaptation Works with UnpredicTable Data ### Dataset Summary The 
UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. There are several dataset versions available: * [UnpredicTable-full](https://huggingface.co/datasets/unpredictable/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/unpredictable/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites. * [UnpredicTable-unique](https://huggingface.co/datasets/unpredictable/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/unpredictable/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/unpredictable/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites. * [UnpredicTable-5k](https://huggingface.co/datasets/unpredictable/unpredictable_5k): This dataset contains 5k random tables from the full dataset. * UnpredicTable data subsets based on the website of origin: * [UnpredicTable-support-google-com](https://huggingface.co/datasets/unpredictable/unpredictable_support-google-com) ### Supported Tasks and Leaderboards Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc. The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset. 
### Languages English ## Dataset Structure ### Data Instances Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from. There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'. ### Data Fields 'task': task identifier 'input': column elements of a specific row in the table. 'options': for multiple choice classification, it provides the options to choose from. 'output': target column element of the same row as input. 'pageTitle': the title of the page containing the table. 'outputColName': output column name 'url': url to the website containing the table 'wdcFile': WDC Web Table Corpus file ### Data Splits The UnpredicTable datasets do not come with additional data splits. ## Dataset Creation ### Curation Rationale Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning. 
### Source Data #### Initial Data Collection and Normalization We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline. #### Who are the source language producers? The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/). ### Personal and Sensitive Information The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset. ## Considerations for Using the Data ### Social Impact of Dataset This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations. ### Discussion of Biases Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. 
We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset. ### Other Known Limitations No additional known limitations. ## Additional Information ### Licensing Information Apache 2.0
8,248
[ [ -0.02862548828125, -0.0587158203125, 0.01416015625, -0.00112152099609375, -0.00611114501953125, -0.006015777587890625, -0.0208282470703125, -0.034423828125, 0.0032958984375, 0.024688720703125, -0.06378173828125, -0.0589599609375, -0.05230712890625, 0.0137329...
Osaleh/ArSAS
2022-09-05T07:09:56.000Z
[ "region:us" ]
Osaleh
null
null
0
8
2022-09-05T05:13:23
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
ashgorithm/movie_review_prediction_3000
2022-09-13T12:43:51.000Z
[ "region:us" ]
ashgorithm
null
null
0
8
2022-09-13T12:43:26
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
abidlabs/celeb-dataset
2022-10-02T20:23:09.000Z
[ "region:us" ]
abidlabs
null
null
0
8
2022-10-02T19:15:52
Entry not found
15
[ [ -0.02142333984375, -0.01495361328125, 0.05718994140625, 0.0288238525390625, -0.035064697265625, 0.046539306640625, 0.052520751953125, 0.005062103271484375, 0.0513916015625, 0.016998291015625, -0.052093505859375, -0.014984130859375, -0.060394287109375, 0.0379...
Drewd/lex_fridman_podcast_transcripts
2022-10-05T01:41:30.000Z
[ "annotations_creators:found", "language_creators:machine-generated", "multilinguality:monolingual", "size_categories:n<1K", "language:en", "podcast", "ai", "interviews", "region:us" ]
Drewd
This new dataset is meant to fine tune a model on how lex would talk. It's meant to support Q+A style models as well as encoders.
null
0
8
2022-10-04T03:42:55
--- annotations_creators: - found language: - en language_creators: - machine-generated license: [] multilinguality: - monolingual pretty_name: The transcripts from Lex Fridman podcast episodes on Youtube. size_categories: - n<1K source_datasets: [] tags: - podcast - ai - interviews task_categories: [] task_ids: [] --- # Dataset Card for Lex Fridman Podcast Transcripts ## Table of Contents - [Dataset Card for Lex Fridman Podcast Transcripts](#dataset-card-for-lex-fridman-podcast-transcripts) - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization) - [Who are the source language producers?](#who-are-the-source-language-producers) - [Annotations](#annotations) - [Annotation process](#annotation-process) - [Who are the annotators?](#who-are-the-annotators) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** https://karpathy.ai/lexicap/ - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** [@drewdresser](https://twitter.com/drewdresser) ### 
Dataset Summary These are transcripts from the Lex Fridman podcast. The podcast is hosted by Lex Fridman, a computer scientist at MIT. The podcast is a mix of interviews with researchers in AI and other fields, and discussions of current events in AI. The transcripts are generated using [OpenAI Whisper](https://github.com/openai/whisper), then made available on [Karpathy AI](https://karpathy.ai/lexicap/). ### Supported Tasks and Leaderboards [More Information Needed] ### Languages English ## Dataset Structure ### Data Instances ~325 ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
3,670
[ [ -0.036041259765625, -0.0275421142578125, 0.01531219482421875, 0.007396697998046875, -0.01035308837890625, 0.01141357421875, -0.03826904296875, -0.0264129638671875, 0.039306640625, 0.0291900634765625, -0.06005859375, -0.0753173828125, -0.043182373046875, 0.00...
YaYaB/onepiece-blip-captions
2022-10-05T10:08:34.000Z
[ "task_categories:text-to-image", "annotations_creators:machine-generated", "language_creators:other", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:YaYaB/onepiece-blip-captions", "language:en", "license:cc-by-nc-sa-4.0", "region:us" ]
YaYaB
null
null
6
8
2022-10-05T08:53:42
--- license: cc-by-nc-sa-4.0 annotations_creators: - machine-generated language: - en language_creators: - other multilinguality: - monolingual pretty_name: 'One Piece BLIP captions' size_categories: - n<1K source_datasets: - YaYaB/onepiece-blip-captions tags: [] task_categories: - text-to-image task_ids: [] --- # Disclaimer This was inspired from https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions # Dataset Card for One Piece BLIP captions _Dataset used to train [One Piece text to image model](https://github.com/LambdaLabsML/examples/tree/main/stable-diffusion-finetuning)_ BLIP generated captions for One piece images collected from the web. Original images were obtained from [Anime Characters](https://www.animecharactersdatabase.com) and captioned with the [pre-trained BLIP model](https://github.com/salesforce/BLIP). For each row the dataset contains `image` and `text` keys. `image` is a varying size PIL jpeg, and `text` is the accompanying text caption. Only a train split is provided. ## Examples ![pk1.jpg](https://ami.animecharactersdatabase.com/uploads/chars/11076-782139445.jpg) > a man in a straw hat ![pk10.jpg](https://www.animecharactersdatabase.com/uploads/chars/5457-1977266515.png) > a man in a green coat holding two swords ![pk100.jpg](https://ami.animecharactersdatabase.com/uploads/chars/12602-925960129.jpg) > a man with red hair and a black coat ## Citation If you use this dataset, please cite it as: ``` @misc{yayab2022onepiece, author = {YaYaB}, title = {One Piece BLIP captions}, year={2022}, howpublished= {\url{https://huggingface.co/datasets/YaYaB/onepiece-blip-captions/}} } ```
1,672
[ [ -0.029541015625, -0.0228271484375, 0.0166015625, 0.03277587890625, -0.04437255859375, 0.0029239654541015625, 0.0004794597625732422, -0.03240966796875, 0.05438232421875, 0.052032470703125, -0.07440185546875, -0.024566650390625, -0.0276336669921875, 0.02412414...
arbml/arabic_text_diacritization
2022-11-03T13:33:33.000Z
[ "region:us" ]
arbml
null
null
1
8
2022-10-05T13:05:16
Entry not found
15
[ [ -0.0214080810546875, -0.01497650146484375, 0.057098388671875, 0.028839111328125, -0.0350341796875, 0.046478271484375, 0.052520751953125, 0.005046844482421875, 0.051361083984375, 0.016998291015625, -0.05206298828125, -0.01497650146484375, -0.06036376953125, 0...
brendenc/celeb-identities
2022-10-09T02:33:12.000Z
[ "region:us" ]
brendenc
null
null
0
8
2022-10-09T02:31:19
This is a small dataset containing celebrity faces. This dataset was created for educational purposes and is far too small for any sort of model training. However, these images can be used for demo examples or other educational purposes.
237
[ [ -0.041778564453125, -0.036834716796875, -0.0150604248046875, -0.003631591796875, -0.004337310791015625, -0.007648468017578125, 0.01435089111328125, -0.0037364959716796875, 0.038604736328125, 0.048583984375, -0.06805419921875, -0.00653839111328125, -0.03102111816...
joey234/nan-nli
2022-10-13T23:18:18.000Z
[ "task_categories:text-classification", "task_ids:natural-language-inference", "annotations_creators:expert-generated", "language_creators:expert-generated", "multilinguality:monolingual", "size_categories:n<1K", "source_datasets:original", "language:en", "license:cc-by-sa-4.0", "negation", "regi...
joey234
null
null
0
8
2022-10-13T23:16:18
--- annotations_creators: - expert-generated language: - en language_creators: - expert-generated license: - cc-by-sa-4.0 multilinguality: - monolingual pretty_name: nan-nli size_categories: - n<1K source_datasets: - original tags: - negation task_categories: - text-classification task_ids: - natural-language-inference --- # Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards Natural Language Inference Text Classification ### Languages en ## Dataset Structure ### Data Instances ### Data Fields premise: hypothesis: label: ### Data Splits Evaluation: 258 samples ## Dataset Creation ### Curation Rationale Extracting samples corresponding to different linguistics constructions of negation. ### Source Data Geoffrey K. Pullum and Rodney Huddleston. 2002. 
Negation, chapter 9. Cambridge University Press. #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? The annotators are the authors of the papers, one of whom holds a graduate degree in linguistics. ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@joey234](https://github.com/joey234) for adding this dataset.
2,995
[ [ -0.037506103515625, -0.0506591796875, 0.00543975830078125, 0.0196075439453125, -0.006000518798828125, -0.00018489360809326172, -0.0272216796875, -0.02362060546875, 0.0465087890625, 0.0465087890625, -0.05908203125, -0.07891845703125, -0.053924560546875, 0.020...
TheoTsio/Health_Misinfo
2023-08-28T21:51:26.000Z
[ "task_categories:text-classification", "size_categories:1K<n<10K", "language:en", "health_misinformation, credibility", "region:us" ]
TheoTsio
null
null
0
8
2022-10-19T12:45:11
--- task_categories: - text-classification language: - en tags: - health_misinformation, credibility size_categories: - 1K<n<10K --- # Dataset Card for Dataset Name ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary The health misinfo dataset is an English Document dataset containing just over 6k unique articles related to health issues from web. This dataset was created in an effort to detect the misinformation in health documents. This dataset was created from the relevance judgment of the TREC health misinformation ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions [More Information Needed]
1,739
[ [ -0.01062774658203125, -0.0238800048828125, -0.0002315044403076172, 0.01326751708984375, -0.0157012939453125, -0.006862640380859375, 0.004947662353515625, -0.03607177734375, 0.03460693359375, 0.041595458984375, -0.053680419921875, -0.075439453125, -0.052520751953...
ChristophSchuhmann/aesthetic-logo-ratings
2022-11-06T15:48:48.000Z
[ "region:us" ]
ChristophSchuhmann
null
null
4
8
2022-11-06T15:42:12
~ 15k logo images from LAION-5B have been rated for aesthetic preference ( preference_average ) and for how professional the design look ( professionalism_average ). --- license: apache-2.0 ---
196
[ [ -0.0531005859375, -0.007488250732421875, 0.017822265625, 0.050323486328125, -0.07073974609375, -0.0266571044921875, 0.01087188720703125, -0.0621337890625, 0.0232696533203125, 0.029998779296875, -0.037506103515625, -0.052154541015625, -0.03369140625, 0.029617...