id stringlengths 2 115 | author stringlengths 2 42 ⌀ | last_modified timestamp[us, tz=UTC] | downloads int64 0 8.87M | likes int64 0 3.84k | paperswithcode_id stringlengths 2 45 ⌀ | tags list | lastModified timestamp[us, tz=UTC] | createdAt stringlengths 24 24 | key stringclasses 1 value | created timestamp[us] | card stringlengths 1 1.01M | embedding list | library_name stringclasses 21 values | pipeline_tag stringclasses 27 values | mask_token null | card_data null | widget_data null | model_index null | config null | transformers_info null | spaces null | safetensors null | transformersInfo null | modelId stringlengths 5 111 ⌀ | embeddings list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ikaankeskin/kk-platypus | ikaankeskin | 2023-11-24T12:20:10Z | 17 | 0 | null | [
"region:us"
] | 2023-11-24T12:20:10Z | 2023-11-21T15:06:01.000Z | 2023-11-21T15:06:01 | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 4201526
num_examples: 1000
download_size: 2247083
dataset_size: 4201526
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
| [
-0.12853367626667023,
-0.18616794049739838,
0.6529126763343811,
0.4943627417087555,
-0.19319313764572144,
0.23607443273067474,
0.36071979999542236,
0.05056338757276535,
0.5793654322624207,
0.7400138974189758,
-0.6508103013038635,
-0.23783987760543823,
-0.710224986076355,
-0.047825977206230... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
HamdanXI/arb-eng-parallel-10k-splitted | HamdanXI | 2023-11-22T14:47:19Z | 17 | 0 | null | [
"region:us"
] | 2023-11-22T14:47:19Z | 2023-11-22T14:47:15.000Z | 2023-11-22T14:47:15 | ---
dataset_info:
features:
- name: arabic
dtype: string
- name: english
dtype: string
splits:
- name: train
num_bytes: 3434606.738616423
num_examples: 8000
- name: validation
num_bytes: 429325.8423270529
num_examples: 1000
- name: test
num_bytes: 429325.8423270529
num_examples: 1000
download_size: 2377435
dataset_size: 4293258.423270529
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
| [
-0.12853367626667023,
-0.18616794049739838,
0.6529126763343811,
0.4943627417087555,
-0.19319313764572144,
0.23607443273067474,
0.36071979999542236,
0.05056338757276535,
0.5793654322624207,
0.7400138974189758,
-0.6508103013038635,
-0.23783987760543823,
-0.710224986076355,
-0.047825977206230... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
ibunescu/tos_court_opinions_filtered | ibunescu | 2023-11-23T15:38:04Z | 17 | 1 | null | [
"region:us"
] | 2023-11-23T15:38:04Z | 2023-11-23T13:39:54.000Z | 2023-11-23T13:39:54 | Entry not found | [
-0.3227649927139282,
-0.225684255361557,
0.862226128578186,
0.43461498618125916,
-0.5282987952232361,
0.7012963891029358,
0.7915717363357544,
0.07618629932403564,
0.7746025919914246,
0.2563219666481018,
-0.7852816581726074,
-0.2257382869720459,
-0.9104480743408203,
0.5715669393539429,
-0... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
topeomole/finan | topeomole | 2023-11-23T20:58:45Z | 17 | 0 | null | [
"license:apache-2.0",
"region:us"
] | 2023-11-23T20:58:45Z | 2023-11-23T20:58:21.000Z | 2023-11-23T20:58:21 | ---
license: apache-2.0
---
| [
-0.12853367626667023,
-0.18616794049739838,
0.6529126763343811,
0.4943627417087555,
-0.19319313764572144,
0.23607443273067474,
0.36071979999542236,
0.05056338757276535,
0.5793654322624207,
0.7400138974189758,
-0.6508103013038635,
-0.23783987760543823,
-0.710224986076355,
-0.047825977206230... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
mlabonne/medical_meadow_medqa | mlabonne | 2023-11-24T21:01:07Z | 17 | 0 | null | [
"region:us"
] | 2023-11-24T21:01:07Z | 2023-11-24T20:06:24.000Z | 2023-11-24T20:06:24 | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: output
dtype: string
splits:
- name: train
num_bytes: 9353196
num_examples: 10178
download_size: 5397984
dataset_size: 9353196
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
| [
-0.12853392958641052,
-0.18616779148578644,
0.6529127955436707,
0.49436280131340027,
-0.19319361448287964,
0.23607419431209564,
0.36072003841400146,
0.050563063472509384,
0.579365611076355,
0.7400140762329102,
-0.6508104205131531,
-0.23783954977989197,
-0.7102249264717102,
-0.0478260256350... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
automated-research-group/phi-winogrande_inverted_option | automated-research-group | 2023-11-25T15:26:25Z | 17 | 0 | null | [
"region:us"
] | 2023-11-25T15:26:25Z | 2023-11-25T11:57:38.000Z | 2023-11-25T11:57:38 | ---
dataset_info:
- config_name: default
features:
- name: id
dtype: string
- name: response
dtype: string
- name: request
dtype: string
- name: input_perplexity
dtype: float64
- name: input_likelihood
dtype: float64
- name: output_perplexity
dtype: float64
- name: output_likelihood
dtype: float64
splits:
- name: validation
num_bytes: 35815
num_examples: 127
download_size: 21040
dataset_size: 35815
- config_name: shard_0_0_10
features:
- name: id
dtype: string
- name: response
dtype: string
- name: request
dtype: string
- name: input_perplexity
dtype: float64
- name: input_likelihood
dtype: float64
- name: output_perplexity
dtype: float64
- name: output_likelihood
dtype: float64
splits:
- name: validation
num_bytes: 35815
num_examples: 127
download_size: 21040
dataset_size: 35815
configs:
- config_name: default
data_files:
- split: validation
path: data/validation-*
- config_name: shard_0_0_10
data_files:
- split: validation
path: shard_0_0_10/validation-*
---
# Dataset Card for "phi-winogrande_inverted_option"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.5134859085083008,
-0.2957207262516022,
0.04134400933980942,
0.06570586562156677,
-0.5055789351463318,
-0.07969984412193298,
0.2512952387332916,
-0.20543783903121948,
0.8478055596351624,
0.5248963236808777,
-0.8600331544876099,
-0.3974987864494324,
-0.6246089339256287,
-0.473000884056091... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
OrdalieTech/Ordalie-FR-STS-benchmark | OrdalieTech | 2023-11-27T17:32:55Z | 17 | 0 | null | [
"task_categories:feature-extraction",
"size_categories:10K<n<100K",
"language:fr",
"license:apache-2.0",
"region:us"
] | 2023-11-27T17:32:55Z | 2023-11-25T15:01:49.000Z | 2023-11-25T15:01:49 | ---
language:
- fr
license: apache-2.0
size_categories:
- 10K<n<100K
task_categories:
- feature-extraction
pretty_name: ordalie-fr-sts-benchmark
dataset_info:
features:
- name: sentence1
dtype: string
- name: sentence2
dtype: string
- name: score
dtype: int64
splits:
- name: test
num_bytes: 14934570
num_examples: 10000
download_size: 9328832
dataset_size: 14934570
configs:
- config_name: default
data_files:
- split: test
path: data/test-*
---
# Ordalie - French STS Benchmark
- 30k sentence pairs
- Score either 0 or 1 | [
-0.01975519210100174,
-0.36156538128852844,
0.47129449248313904,
0.5614303946495056,
-0.24527226388454437,
-0.10813966393470764,
0.2605604827404022,
-0.31260138750076294,
0.23741188645362854,
0.6392123699188232,
-0.4333018362522125,
-0.88922518491745,
-0.512066125869751,
-0.043801084160804... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
euisuh15/python-pis | euisuh15 | 2023-11-26T10:29:42Z | 17 | 0 | null | [
"region:us"
] | 2023-11-26T10:29:42Z | 2023-11-26T10:27:49.000Z | 2023-11-26T10:27:49 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
ctang/formatted_util_deontology_for_llama2 | ctang | 2023-11-26T21:06:07Z | 17 | 0 | null | [
"region:us"
] | 2023-11-26T21:06:07Z | 2023-11-26T21:05:40.000Z | 2023-11-26T21:05:40 | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 23535389
num_examples: 31902
download_size: 3489395
dataset_size: 23535389
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
dliu1/legal-llama-raw-text | dliu1 | 2023-11-27T15:31:59Z | 17 | 0 | null | [
"license:apache-2.0",
"region:us"
] | 2023-11-27T15:31:59Z | 2023-11-27T15:31:33.000Z | 2023-11-27T15:31:33 | ---
license: apache-2.0
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
jmamou/augmented-glue-sst2 | jmamou | 2022-07-17T12:25:34Z | 16 | 0 | null | [
"task_categories:text-classification",
"task_ids:sentiment-classification",
"annotations_creators:machine-generated",
"language_creators:machine-generated",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"license:unknown",
"region:us"
] | 2022-07-17T12:25:34Z | 2022-03-02T23:29:22.000Z | 2022-03-02T23:29:22 | ---
annotations_creators:
- machine-generated
extended:
- original
language_creators:
- machine-generated
language:
- en-US
license:
- unknown
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
source_datasets:
- original
task_categories:
- text-classification
task_ids:
- sentiment-classification
---
# Dataset Card for Augmented-GLUE-SST2
Automatically augmented data from train split of SST-2 dataset using conditional text generation approach.
Code used to generate this file will be soon available at https://github.com/IntelLabs/nlp-architect.
| [
0.017087550833821297,
-0.6977480053901672,
0.0898832157254219,
0.15311692655086517,
-0.23634828627109528,
0.20231971144676208,
-0.24717457592487335,
-0.4226396977901459,
0.6200062036514282,
0.44447702169418335,
-0.7813650369644165,
-0.2118198722600937,
-0.4481760561466217,
-0.0927179753780... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
midas/nus | midas | 2022-03-05T03:35:59Z | 16 | 0 | null | [
"region:us"
] | 2022-03-05T03:35:59Z | 2022-03-02T23:29:22.000Z | 2022-03-02T23:29:22 | ## Dataset Summary
A dataset for benchmarking keyphrase extraction and generation techniques from long document english scientific papers. For more details about the dataset please refer the original paper - [https://www.comp.nus.edu.sg/~kanmy/papers/icadl2007.pdf](https://www.comp.nus.edu.sg/~kanmy/papers/icadl2007.pdf)
Original source of the data - []()
## Dataset Structure
### Data Fields
- **id**: unique identifier of the document.
- **document**: Whitespace separated list of words in the document.
- **doc_bio_tags**: BIO tags for each word in the document. B stands for the beginning of a keyphrase and I stands for inside the keyphrase. O stands for outside the keyphrase and represents the word that isn't a part of the keyphrase at all.
- **extractive_keyphrases**: List of all the present keyphrases.
- **abstractive_keyphrase**: List of all the absent keyphrases.
### Data Splits
|Split| #datapoints |
|--|--|
| Test | 211 |
- Percentage of keyphrases that are named entities: 67.95% (named entities detected using scispacy - en-core-sci-lg model)
- Percentage of keyphrases that are noun phrases: 82.16% (noun phrases detected using spacy en-core-web-lg after removing determiners)
## Usage
### Full Dataset
```python
from datasets import load_dataset
# get entire dataset
dataset = load_dataset("midas/nus", "raw")
# sample from the test split
print("Sample from test dataset split")
test_sample = dataset["test"][0]
print("Fields in the sample: ", [key for key in test_sample.keys()])
print("Tokenized Document: ", test_sample["document"])
print("Document BIO Tags: ", test_sample["doc_bio_tags"])
print("Extractive/present Keyphrases: ", test_sample["extractive_keyphrases"])
print("Abstractive/absent Keyphrases: ", test_sample["abstractive_keyphrases"])
print("\n-----------\n")
```
**Output**
```bash
Sample from test data split
Fields in the sample: ['id', 'document', 'doc_bio_tags', 'extractive_keyphrases', 'abstractive_keyphrases', 'other_metadata']
Tokenized Document: ['Learning', 'Spatially', 'Variant', 'Dissimilarity', '-LRB-', 'Svad', '-RRB-', 'Measures', 'Clustering', 'algorithms', 'typically', 'operate', 'on', 'a', 'feature', 'vector', 'representation', 'of', 'the', 'data', 'and', 'find', 'clusters', 'that', 'are', 'compact', 'with', 'respect', 'to', 'an', 'assumed', '-LRB-', 'dis', '-RRB-', 'similarity', 'measure', 'between', 'the', 'data', 'points', 'in', 'feature', 'space', '.', 'This', 'makes', 'the', 'type', 'of', 'clusters', 'identified', 'highly', 'dependent', 'on', 'the', 'assumed', 'similarity', 'measure', '.', 'Building', 'on', 'recent', 'work', 'in', 'this', 'area', ',', 'we', 'formally', 'define', 'a', 'class', 'of', 'spatially', 'varying', 'dissimilarity', 'measures', 'and', 'propose', 'algorithms', 'to', 'learn', 'the', 'dissimilarity', 'measure', 'automatically', 'from', 'the', 'data', '.', 'The', 'idea', 'is', 'to', 'identify', 'clusters', 'that', 'are', 'compact', 'with', 'respect', 'to', 'the', 'unknown', 'spatially', 'varying', 'dissimilarity', 'measure', '.', 'Our', 'experiments', 'show', 'that', 'the', 'proposed', 'algorithms', 'are', 'more', 'stable', 'and', 'achieve', 'better', 'accuracy', 'on', 'various', 'textual', 'data', 'sets', 'when', 'compared', 'with', 'similar', 'algorithms', 'proposed', 'in', 'the', 'literature', '.', 'H.', '2.8', '-LSB-', 'Database', 'Management', '-RSB-', ':', 'Database', 'Applications-Data', 'Mining', 'Algorithms', 'Clustering', 'plays', 'a', 'major', 'role', 'in', 'data', 'mining', 'as', 'a', 'tool', 'to', 'discover', 'structure', 'in', 'data', '.', 'Object', 'clustering', 'algorithms', 'operate', 'on', 'a', 'feature', 'vector', 'representation', 'of', 'the', 'data', 'and', 'find', 'clusters', 'that', 'are', 'compact', 'with', 'respect', 'to', 'an', 'assumed', '-LRB-', 'dis', '-RRB-', 'similarity', 'measure', 'between', 'the', 'data', 'points', 'in', 'feature', 'space', '.', 'As', 'a', 'consequence', ',', 'the', 'nature', 'of', 'clusters', 
'identified', 'by', 'a', 'clustering', 'algorithm', 'is', 'highly', 'dependent', 'on', 'the', 'assumed', 'similarity', 'measure', '.', 'The', 'most', 'commonly', 'used', 'dissimilarity', 'measure', ',', 'namely', 'the', 'Euclidean', 'metric', ',', 'assumes', 'that', 'the', 'dissimilarity', 'measure', 'is', 'isotropic', 'and', 'spatially', 'invariant', ',', 'and', 'Permission', 'to', 'make', 'digital', 'or', 'hard', 'copies', 'of', 'all', 'or', 'part', 'of', 'this', 'work', 'for', 'personal', 'or', 'classroom', 'use', 'is', 'granted', 'without', 'fee', 'provided', 'that', 'copies', 'are', 'not', 'made', 'or', 'distributed', 'for', 'profit', 'or', 'commercial', 'advantage', 'and', 'that', 'copies', 'bear', 'this', 'notice', 'and', 'the', 'full', 'citation', 'on', 'the', 'first', 'page', '.', 'To', 'copy', 'otherwise', ',', 'to', 'republish', ',', 'to', 'post', 'on', 'servers', 'or', 'to', 'redistribute', 'to', 'lists', ',', 'requires', 'prior', 'specific', 'permission', 'and/or', 'a', 'fee', '.', 'KDD', "'", '04', ',', 'August', '22', '25', ',', '2004', ',', 'Seattle', ',', 'Washington', ',', 'USA', '.', 'Copyright', '2004', 'ACM', '1-58113-888-1', '/', '04/0008', '...', '$', '5.00', '.', 'it', 'is', 'effective', 'only', 'when', 'the', 'clusters', 'are', 'roughly', 'spherical', 'and', 'all', 'of', 'them', 'have', 'approximately', 'the', 'same', 'size', ',', 'which', 'is', 'rarely', 'the', 'case', 'in', 'practice', '-LSB-', '8', '-RSB-', '.', 'The', 'problem', 'of', 'finding', 'non-spherical', 'clusters', 'is', 'often', 'addressed', 'by', 'utilizing', 'a', 'feature', 'weighting', 'technique', '.', 'These', 'techniques', 'discover', 'a', 'single', 'set', 'of', 'weights', 'such', 'that', 'relevant', 'features', 'are', 'given', 'more', 'importance', 'than', 'irrelevant', 'features', '.', 'However', ',', 'in', 'practice', ',', 'each', 'cluster', 'may', 'have', 'a', 'different', 'set', 'of', 'relevant', 'features', '.', 'We', 'consider', 'Spatially', 'Varying', 
'Dissimilarity', '-LRB-', 'SVaD', '-RRB-', 'measures', 'to', 'address', 'this', 'problem', '.', 'Diday', 'et', '.', 'al.', '-LSB-', '4', '-RSB-', 'proposed', 'the', 'adaptive', 'distance', 'dynamic', 'clusters', '-LRB-', 'ADDC', '-RRB-', 'algorithm', 'in', 'this', 'vain', '.', 'A', 'fuzzified', 'version', 'of', 'ADDC', ',', 'popularly', 'known', 'as', 'the', 'Gustafson-Kessel', '-LRB-', 'GK', '-RRB-', 'algorithm', '-LSB-', '7', '-RSB-', 'uses', 'a', 'dynamically', 'updated', 'covariance', 'matrix', 'so', 'that', 'each', 'cluster', 'can', 'have', 'its', 'own', 'norm', 'matrix', '.', 'These', 'algorithms', 'can', 'deal', 'with', 'hyperelliposoidal', 'clusters', 'of', 'various', 'sizes', 'and', 'orientations', '.', 'The', 'EM', 'algorithm', '-LSB-', '2', '-RSB-', 'with', 'Gaussian', 'probability', 'distributions', 'can', 'also', 'be', 'used', 'to', 'achieve', 'similar', 'results', '.', 'However', ',', 'the', 'above', 'algorithms', 'are', 'computationally', 'expensive', 'for', 'high-dimensional', 'data', 'since', 'they', 'invert', 'covariance', 'matrices', 'in', 'every', 'iteration', '.', 'Moreover', ',', 'matrix', 'inversion', 'can', 'be', 'unstable', 'when', 'the', 'data', 'is', 'sparse', 'in', 'relation', 'to', 'the', 'dimensionality', '.', 'One', 'possible', 'solution', 'to', 'the', 'problems', 'of', 'high', 'computation', 'and', 'instability', 'arising', 'out', 'of', 'using', 'covariance', 'matrices', 'is', 'to', 'force', 'the', 'matrices', 'to', 'be', 'diagonal', ',', 'which', 'amounts', 'to', 'weighting', 'each', 'feature', 'differently', 'in', 'different', 'clusters', '.', 'While', 'this', 'restricts', 'the', 'dissimilarity', 'measures', 'to', 'have', 'axis', 'parallel', 'isometry', ',', 'the', 'weights', 'also', 'provide', 'a', 'simple', 'interpretation', 'of', 'the', 'clusters', 'in', 'terms', 'of', 'relevant', 'features', ',', 'which', 'is', 'important', 'in', 'knowledge', 'discovery', '.', 'Examples', 'of', 'such', 'algorithms', 'are', 'SCAD', 'and', 
'Fuzzy-SKWIC', '-LSB-', '5', ',', '6', '-RSB-', ',', 'which', 'perform', 'fuzzy', 'clustering', 'of', 'data', 'while', 'simultaneously', 'finding', 'feature', 'weights', 'in', 'individual', 'clusters', '.', 'In', 'this', 'paper', ',', 'we', 'generalize', 'the', 'idea', 'of', 'the', 'feature', 'weighting', 'approach', 'to', 'define', 'a', 'class', 'of', 'spatially', 'varying', 'dissimilarity', 'measures', 'and', 'propose', 'algorithms', 'that', 'learn', 'the', 'dissimilarity', 'measure', 'automatically', 'from', 'the', 'given', 'data', 'while', 'performing', 'the', 'clustering', '.', 'The', 'idea', 'is', 'to', 'identify', 'clusters', 'inherent', 'in', 'the', 'data', 'that', 'are', 'compact', 'with', 'respect', 'to', 'the', 'unknown', 'spatially', 'varying', 'dissimilarity', 'measure', '.', 'We', 'compare', 'the', 'proposed', 'algorithms', 'with', 'a', 'diagonal', 'version', 'of', 'GK', '-LRB-', 'DGK', '-RRB-', 'and', 'a', 'crisp', 'version', 'of', 'SCAD', '-LRB-', 'CSCAD', '-RRB-', 'on', 'a', 'variety', 'of', 'data', 'sets', '.', 'Our', 'algorithms', 'perform', 'better', 'than', 'DGK', 'and', 'CSCAD', ',', 'and', 'use', 'more', 'stable', 'update', 'equations', 'for', 'weights', 'than', 'CSCAD', '.', 'The', 'rest', 'of', 'the', 'paper', 'is', 'organized', 'as', 'follows', '.', 'In', 'the', 'next', 'section', ',', 'we', 'define', 'a', 'general', 'class', 'of', 'dissimilarity', 'measures', '611', 'Research', 'Track', 'Poster', 'and', 'formulate', 'two', 'objective', 'functions', 'based', 'on', 'them', '.', 'In', 'Section', '3', ',', 'we', 'derive', 'learning', 'algorithms', 'that', 'optimize', 'the', 'objective', 'functions', '.', 'We', 'present', 'an', 'experimental', 'study', 'of', 'the', 'proposed', 'algorithms', 'in', 'Section', '4', '.', 'We', 'compare', 'the', 'performance', 'of', 'the', 'proposed', 'algorithms', 'with', 'that', 'of', 'DGK', 'and', 'CSCAD', '.', 'These', 'two', 'algorithms', 'are', 'explained', 'in', 'Appendix', 'A.', 'Finally', ',', 'we', 
'summarize', 'our', 'contributions', 'and', 'conclude', 'with', 'some', 'future', 'directions', 'in', 'Section', '5', '.', 'We', 'first', 'define', 'a', 'general', 'class', 'of', 'dissimilarity', 'measures', 'and', 'formulate', 'a', 'few', 'objective', 'functions', 'in', 'terms', 'of', 'the', 'given', 'data', 'set', '.', 'Optimization', 'of', 'the', 'objective', 'functions', 'would', 'result', 'in', 'learning', 'the', 'underlying', 'dissimilarity', 'measure', '.', '2.1', 'SVaD', 'Measures', 'In', 'the', 'following', 'definition', ',', 'we', 'generalize', 'the', 'concept', 'of', 'dissimilarity', 'measures', 'in', 'which', 'the', 'weights', 'associated', 'with', 'features', 'change', 'over', 'feature', 'space', '.', 'Definition', '2.1', 'We', 'define', 'the', 'measure', 'of', 'dissimilarity', 'of', 'x', 'from', 'y', '1', 'to', 'be', 'a', 'weighted', 'sum', 'of', 'M', 'dissimilarity', 'measures', 'between', 'x', 'and', 'y', 'where', 'the', 'values', 'of', 'the', 'weights', 'depend', 'on', 'the', 'region', 'from', 'which', 'the', 'dissimilarity', 'is', 'being', 'measured', '.', 'Let', 'P', '=', '-LCB-', 'R', '1', ',', '...', ',', 'R', 'K', '-RCB-', 'be', 'a', 'collection', 'of', 'K', 'regions', 'that', 'partition', 'the', 'feature', 'space', ',', 'and', 'w', '1', ',', 'w', '2', ',', '...', ',', 'and', 'w', 'K', 'be', 'the', 'weights', 'associated', 'with', 'R', '1', ',', 'R', '2', ',', '...', ',', 'and', 'R', 'K', ',', 'respectively', '.', 'Let', 'g', '1', ',', 'g', '2', ',', '...', ',', 'and', 'g', 'M', 'be', 'M', 'dissimilarity', 'measures', '.', 'Then', ',', 'each', 'w', 'j', ',', 'j', '=', '1', ',', '...', ',', 'K', ',', 'is', 'an', 'M', '-', 'dimensional', 'vector', 'where', 'its', 'l-th', 'component', ',', 'w', 'jl', 'is', 'associated', 'with', 'g', 'l', '.', 'Let', 'W', 'denote', 'the', 'K-tuple', '-LRB-', 'w', '1', ',', '...', ',', 'w', 'K', '-RRB-', 'and', 'let', 'r', 'be', 'a', 'real', 'number', '.', 'Then', ',', 'the', 'dissimilarity', 'of', 'x', 'from', 
'y', 'is', 'given', 'by', ':', 'f', 'W', '-LRB-', 'x', ',', 'y', '-RRB-', '=', 'M', 'l', '=', '1', 'w', 'r', 'jl', 'g', 'l', '-LRB-', 'x', ',', 'y', '-RRB-', ',', 'if', 'y', 'R', 'j', '.', '-LRB-', '1', '-RRB-', 'We', 'refer', 'to', 'f', 'W', 'as', 'a', 'Spatially', 'Variant', 'Dissimilarity', '-LRB-', 'SVaD', '-RRB-', 'measure', '.', 'Note', 'that', 'f', 'W', 'need', 'not', 'be', 'symmetric', 'even', 'if', 'g', 'i', 'are', 'symmetric', '.', 'Hence', ',', 'f', 'W', 'is', 'not', 'a', 'metric', '.', 'Moreover', ',', 'the', 'behavior', 'of', 'f', 'W', 'depends', 'on', 'the', 'behavior', 'of', 'g', 'i', '.', 'There', 'are', 'many', 'ways', 'to', 'define', 'g', 'i', '.', 'We', 'list', 'two', 'instances', 'of', 'f', 'W', '.', 'Example', '2.1', '-LRB-', 'Minkowski', '-RRB-', 'Let', 'd', 'be', 'the', 'feature', 'space', 'and', 'M', '=', 'd.', 'Let', 'a', 'point', 'x', 'd', 'be', 'represented', 'as', '-LRB-', 'x', '1', ',', '...', ',', 'x', 'd', '-RRB-', '.', 'Then', ',', 'when', 'g', 'i', '-LRB-', 'x', ',', 'y', '-RRB-', '=', '|', 'x', 'i', '-', 'y', 'i', '|', 'p', 'for', 'i', '=', '1', ',', '...', ',', 'd', ',', 'and', 'p', '1', ',', 'the', 'resulting', 'SVaD', 'measure', ',', 'f', 'M', 'W', 'is', 'called', 'Minkowski', 'SVaD', '-LRB-', 'MSVaD', '-RRB-', 'measure', '.', 'That', 'is', ',', 'f', 'M', 'W', '-LRB-', 'x', ',', 'y', '-RRB-', '=', 'd', 'l', '=', '1', 'w', 'r', 'jl', '|', 'x', 'l', '-', 'y', 'l', '|', 'p', ',', 'if', 'y', 'R', 'j', '.', '-LRB-', '2', '-RRB-', 'One', 'may', 'note', 'that', 'when', 'w', '1', '=', '=', 'w', 'K', 'and', 'p', '=', '2', ',', 'f', 'M', 'W', 'is', 'the', 'weighted', 'Euclidean', 'distance', '.', 'When', 'p', '=', '2', ',', 'we', 'call', 'f', 'M', 'W', 'a', 'Euclidean', 'SVaD', '-LRB-', 'ESVaD', '-RRB-', 'measure', 'and', 'denote', 'it', 'by', 'f', 'E', 'W', '.', '1', 'We', 'use', 'the', 'phrase', '``', 'dissimilarity', 'of', 'x', 'from', 'y', "''", 'rather', 'than', '``', 'dissimilarity', 'between', 'x', 'and', 'y', "''", 'because', 
'we', 'consider', 'a', 'general', 'situation', 'where', 'the', 'dissimilarity', 'measure', 'depends', 'on', 'the', 'location', 'of', 'y', '.', 'As', 'an', 'example', 'of', 'this', 'situation', 'in', 'text', 'mining', ',', 'when', 'the', 'dissimilarity', 'is', 'measured', 'from', 'a', 'document', 'on', '`', 'terrorism', "'", 'to', 'a', 'document', 'x', ',', 'a', 'particular', 'set', 'of', 'keywords', 'may', 'be', 'weighted', 'heavily', 'whereas', 'when', 'the', 'dissimilarity', 'is', 'measured', 'from', 'a', 'document', 'on', '`', 'football', "'", 'to', 'x', ',', 'a', 'different', 'set', 'of', 'keywords', 'may', 'be', 'weighted', 'heavily', '.', 'Example', '2.2', '-LRB-', 'Cosine', '-RRB-', 'Let', 'the', 'feature', 'space', 'be', 'the', 'set', 'of', 'points', 'with', 'l', '2', 'norm', 'equal', 'to', 'one', '.', 'That', 'is', ',', 'x', '2', '=', '1', 'for', 'all', 'points', 'x', 'in', 'feature', 'space', '.', 'Then', ',', 'when', 'g', 'l', '-LRB-', 'x', ',', 'y', '-RRB-', '=', '-LRB-', '1/d', '-', 'x', 'l', 'y', 'l', '-RRB-', 'for', 'l', '=', '1', ',', '...', ',', 'd', ',', 'the', 'resulting', 'SVaD', 'measure', 'f', 'C', 'W', 'is', 'called', 'a', 'Cosine', 'SVaD', '-LRB-', 'CSVaD', '-RRB-', 'measure', ':', 'f', 'C', 'W', '-LRB-', 'x', ',', 'y', '-RRB-', '=', 'd', 'i', '=', '1', 'w', 'r', 'jl', '-LRB-', '1/d', '-', 'x', 'l', 'y', 'l', '-RRB-', ',', 'if', 'y', 'R', 'j', '.', '-LRB-', '3', '-RRB-', 'In', 'the', 'formulation', 'of', 'the', 'objective', 'function', 'below', ',', 'we', 'use', 'a', 'set', 'of', 'parameters', 'to', 'represent', 'the', 'regions', 'R', '1', ',', 'R', '2', ',', '...', ',', 'and', 'R', 'K', '.', 'Let', 'c', '1', ',', 'c', '2', ',', '...', ',', 'and', 'c', 'K', 'be', 'K', 'points', 'in', 'feature', 'space', '.', 'Then', 'y', 'R', 'j', 'iff', 'f', 'W', '-LRB-', 'y', ',', 'c', 'j', '-RRB-', '<', 'f', 'W', '-LRB-', 'y', ',', 'c', 'i', '-RRB-', 'for', 'i', '=', 'j.', '-LRB-', '4', '-RRB-', 'In', 'the', 'case', 'of', 'ties', ',', 'y', 'is', 
'assigned', 'to', 'the', 'region', 'with', 'the', 'lowest', 'index', '.', 'Thus', ',', 'the', 'K-tuple', 'of', 'points', 'C', '=', '-LRB-', 'c', '1', ',', 'c', '2', ',', '...', ',', 'c', 'K', '-RRB-', 'defines', 'a', 'partition', 'in', 'feature', 'space', '.', 'The', 'partition', 'induced', 'by', 'the', 'points', 'in', 'C', 'is', 'similar', 'in', 'nature', 'to', 'a', 'Voronoi', 'tessellation', '.', 'We', 'use', 'the', 'notation', 'f', 'W', ',', 'C', 'whenever', 'we', 'use', 'the', 'set', 'C', 'to', 'parameterize', 'the', 'regions', 'used', 'in', 'the', 'dissimilarity', 'measure', '.', '2.2', 'Objective', 'Function', 'for', 'Clustering', 'The', 'goal', 'of', 'the', 'present', 'work', 'is', 'to', 'identify', 'the', 'spatially', 'varying', 'dissimilarity', 'measure', 'and', 'the', 'associated', 'compact', 'clusters', 'simultaneously', '.', 'It', 'is', 'worth', 'mentioning', 'here', 'that', ',', 'as', 'in', 'the', 'case', 'of', 'any', 'clustering', 'algorithm', ',', 'the', 'underlying', 'assumption', 'in', 'this', 'paper', 'is', 'the', 'existence', 'of', 'such', 'a', 'dissimilarity', 'measure', 'and', 'clusters', 'for', 'a', 'given', 'data', 'set', '.', 'Let', 'x', '1', ',', 'x', '2', ',', '...', ',', 'and', 'x', 'n', 'be', 'n', 'given', 'data', 'points', '.', 'Let', 'K', 'be', 'a', 'given', 'positive', 'integer', '.', 'Assuming', 'that', 'C', 'represents', 'the', 'cluster', 'centers', ',', 'let', 'us', 'assign', 'each', 'data', 'point', 'x', 'i', 'to', 'a', 'cluster', 'R', 'j', 'with', 'the', 'closest', 'c', 'j', 'as', 'the', 'cluster', 'center', '2', ',', 'i.e.', ',', 'j', '=', 'arg', 'min', 'l', 'f', 'W', ',', 'C', '-LRB-', 'x', 'i', ',', 'c', 'l', '-RRB-', '.', '-LRB-', '5', '-RRB-', 'Then', ',', 'the', 'within-cluster', 'dissimilarity', 'is', 'given', 'by', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', '=', 'K', 'j', '=', '1', 'x', 'i', 'R', 'j', 'M', 'l', '=', '1', 'w', 'r', 'jl', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '.', '-LRB-', '6', '-RRB-', 'J', 
'-LRB-', 'W', ',', 'C', '-RRB-', 'represents', 'the', 'sum', 'of', 'the', 'dissimilarity', 'measures', 'of', 'all', 'the', 'data', 'points', 'from', 'their', 'closest', 'centroids', '.', 'The', 'objective', 'is', 'to', 'find', 'W', 'and', 'C', 'that', 'minimize', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', '.', 'To', 'avoid', 'the', 'trivial', 'solution', 'to', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', ',', 'we', 'consider', 'a', 'normalization', 'condition', 'on', 'w', 'j', ',', 'viz.', ',', 'M', 'l', '=', '1', 'w', 'jl', '=', '1', '.', '-LRB-', '7', '-RRB-', 'Note', 'that', 'even', 'with', 'this', 'condition', ',', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', 'has', 'a', 'trivial', 'solution', ':', 'w', 'jp', '=', '1', 'where', 'p', '=', 'arg', 'min', 'l', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', ',', 'and', 'the', 'remaining', 'weights', 'are', 'zero', '.', 'One', 'way', 'to', 'avoid', 'convergence', 'of', 'w', 'j', 'to', 'unit', 'vectors', 'is', 'to', 'impose', 'a', 'regularization', 'condition', 'on', 'w', 'j', '.', 'We', 'consider', 'the', 'following', 'two', 'regularization', 'measures', 'in', 'this', 'paper', ':', '-LRB-', '1', '-RRB-', 'Entropy', 'measure', ':', 'M', 'l', '=', '1', 'w', 'jl', 'log', '-LRB-', 'w', 'jl', '-RRB-', 'and', '-LRB-', '2', '-RRB-', 'Gini', 'measure', ':', 'M', 'l', '=', '1', 'w', '2', 'jl', '.', '2', 'We', 'use', 'P', '=', '-LCB-', 'R', '1', ',', 'R', '2', ',', '...', ',', 'R', 'K', '-RCB-', 'to', 'represent', 'the', 'corresponding', 'partition', 'of', 'the', 'data', 'set', 'as', 'well', '.', 'The', 'intended', 'interpretation', '-LRB-', 'cluster', 'or', 'region', '-RRB-', 'would', 'be', 'evident', 'from', 'the', 'context', '.', '612', 'Research', 'Track', 'Poster', 'The', 'problem', 'of', 'determining', 'the', 'optimal', 'W', 'and', 'C', 'is', 'similar', 'to', 'the', 'traditional', 'clustering', 'problem', 'that', 'is', 'solved', 'by', 'the', 'K-Means', 'Algorithm', '-LRB-', 'KMA', '-RRB-', 'except', 'for', 'the', 
'additional', 'W', 'matrix', '.', 'We', 'propose', 'a', 'class', 'of', 'iterative', 'algorithms', 'similar', 'to', 'KMA', '.', 'These', 'algorithms', 'start', 'with', 'a', 'random', 'partition', 'of', 'the', 'data', 'set', 'and', 'iteratively', 'update', 'C', ',', 'W', 'and', 'P', 'so', 'that', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', 'is', 'minimized', '.', 'These', 'iterative', 'algorithms', 'are', 'instances', 'of', 'Alternating', 'Optimization', '-LRB-', 'AO', '-RRB-', 'algorithms', '.', 'In', '-LSB-', '1', '-RSB-', ',', 'it', 'is', 'shown', 'that', 'AO', 'algorithms', 'converge', 'to', 'a', 'local', 'optimum', 'under', 'some', 'conditions', '.', 'We', 'outline', 'the', 'algorithm', 'below', 'before', 'actually', 'describing', 'how', 'to', 'update', 'C', ',', 'W', 'and', 'P', 'in', 'every', 'iteration', '.', 'Randomly', 'assign', 'the', 'data', 'points', 'to', 'K', 'clusters', '.', 'REPEAT', 'Update', 'C', ':', 'Compute', 'the', 'centroid', 'of', 'each', 'cluster', 'c', 'j', '.', 'Update', 'W', ':', 'Compute', 'the', 'w', 'jl', 'j', ',', 'l.', 'Update', 'P', ':', 'Reassign', 'the', 'data', 'points', 'to', 'the', 'clusters', '.', 'UNTIL', '-LRB-', 'termination', 'condition', 'is', 'reached', '-RRB-', '.', 'In', 'the', 'above', 'algorithm', ',', 'the', 'update', 'of', 'C', 'depends', 'on', 'the', 'definition', 'of', 'g', 'i', ',', 'and', 'the', 'update', 'of', 'W', 'on', 'the', 'regularization', 'terms', '.', 'The', 'update', 'of', 'P', 'is', 'done', 'by', 'reassigning', 'the', 'data', 'points', 'according', 'to', '-LRB-', '5', '-RRB-', '.', 'Before', 'explaining', 'the', 'computation', 'of', 'C', 'in', 'every', 'iteration', 'for', 'various', 'g', 'i', ',', 'we', 'first', 'derive', 'update', 'equations', 'for', 'W', 'for', 'various', 'regularization', 'measures', '.', '3.1', 'Update', 'of', 'Weights', 'While', 'updating', 'weights', ',', 'we', 'need', 'to', 'find', 'the', 'values', 'of', 'weights', 'that', 'minimize', 'the', 'objective', 'function', 'for', 'a', 
'given', 'C', 'and', 'P', '.', 'As', 'mentioned', 'above', ',', 'we', 'consider', 'the', 'two', 'regularization', 'measures', 'for', 'w', 'jl', 'and', 'derive', 'update', 'equations', '.', 'If', 'we', 'consider', 'the', 'entropy', 'regularization', 'with', 'r', '=', '1', ',', 'the', 'objective', 'function', 'becomes', ':', 'J', 'EN', 'T', '-LRB-', 'W', ',', 'C', '-RRB-', '=', 'K', 'j', '=', '1', 'x', 'i', 'R', 'j', 'M', 'l', '=', '1', 'w', 'jl', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '+', 'K', 'j', '=', '1', 'j', 'M', 'l', '=', '1', 'w', 'jl', 'log', '-LRB-', 'w', 'jl', '-RRB-', '+', 'K', 'j', '=', '1', 'j', 'M', 'l', '=', '1', 'w', 'jl', '-', '1', '.', '-LRB-', '8', '-RRB-', 'Note', 'that', 'j', 'are', 'the', 'Lagrange', 'multipliers', 'corresponding', 'to', 'the', 'normalization', 'constraints', 'in', '-LRB-', '7', '-RRB-', ',', 'and', 'j', 'represent', 'the', 'relative', 'importance', 'given', 'to', 'the', 'regularization', 'term', 'relative', 'to', 'the', 'within-cluster', 'dissimilarity', '.', 'Differentiating', 'J', 'EN', 'T', '-LRB-', 'W', ',', 'C', '-RRB-', 'with', 'respect', 'to', 'w', 'jl', 'and', 'equating', 'it', 'to', 'zero', ',', 'we', 'obtain', 'w', 'jl', '=', 'exp', '-', '-LRB-', 'j', '+', 'x', 'i', 'Rj', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '-RRB-', 'j', '-', '1', '.', 'Solving', 'for', 'j', 'by', 'substituting', 'the', 'above', 'value', 'of', 'w', 'jl', 'in', '-LRB-', '7', '-RRB-', 'and', 'substituting', 'the', 'value', 'of', 'j', 'back', 'in', 'the', 'above', 'equation', ',', 'we', 'obtain', 'w', 'jl', '=', 'exp', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '/', 'j', 'M', 'n', '=', '1', 'exp', 'x', 'i', 'R', 'j', 'g', 'n', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '/', 'j', '.', '-LRB-', '9', '-RRB-', 'If', 'we', 'consider', 'the', 'Gini', 'measure', 'for', 'regularization', 'with', 'r', '=', '2', ',', 'the', 'corresponding', 'w', 'jl', 'that', 'minimizes', 'the', 'objective', 
'function', 'can', 'be', 'shown', 'to', 'be', 'w', 'jl', '=', '1', '/', '-LRB-', 'j', '+', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '-RRB-', 'M', 'n', '=', '1', '-LRB-', '1', '/', '-LRB-', 'j', '+', 'x', 'i', 'R', 'j', 'g', 'n', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '-RRB-', '-RRB-', '.', '-LRB-', '10', '-RRB-', 'In', 'both', 'cases', ',', 'the', 'updated', 'value', 'of', 'w', 'jl', 'is', 'inversely', 'related', 'Algorithm', 'Update', 'Equations', 'Acronyms', 'P', 'C', 'W', 'EEnt', '-LRB-', '5', '-RRB-', '-LRB-', '11', '-RRB-', '-LRB-', '9', '-RRB-', 'EsGini', '-LRB-', '5', '-RRB-', '-LRB-', '11', '-RRB-', '-LRB-', '10', '-RRB-', 'CEnt', '-LRB-', '5', '-RRB-', '-LRB-', '12', '-RRB-', '-LRB-', '9', '-RRB-', 'CsGini', '-LRB-', '5', '-RRB-', '-LRB-', '12', '-RRB-', '-LRB-', '10', '-RRB-', 'Table', '1', ':', 'Summary', 'of', 'algorithms', '.', 'to', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '.', 'This', 'has', 'various', 'interpretations', 'based', 'on', 'the', 'nature', 'of', 'g', 'l', '.', 'For', 'example', ',', 'when', 'we', 'consider', 'the', 'ESVaD', 'measure', ',', 'w', 'jl', 'is', 'inversely', 'related', 'to', 'the', 'variance', 'of', 'l-th', 'element', 'of', 'the', 'data', 'vectors', 'in', 'the', 'j-th', 'cluster', '.', 'In', 'other', 'words', ',', 'when', 'the', 'variance', 'along', 'a', 'particular', 'dimension', 'is', 'high', 'in', 'a', 'cluster', ',', 'then', 'the', 'dimension', 'is', 'less', 'important', 'to', 'the', 'cluster', '.', 'This', 'popular', 'heuristic', 'has', 'been', 'used', 'in', 'various', 'contexts', '-LRB-', 'such', 'as', 'relevance', 'feedback', '-RRB-', 'in', 'the', 'literature', '-LSB-', '9', '-RSB-', '.', 'Similarly', ',', 'when', 'we', 'consider', 'the', 'CSVaD', 'measure', ',', 'w', 'jl', 'is', 'directly', 'proportional', 'to', 'the', 'correlation', 'of', 'the', 'j-th', 'dimension', 'in', 'the', 'l-th', 'cluster', '.', '3.2', 'Update', 'of', 'Centroids', 'Learning', 
'ESVaD', 'Measures', ':', 'Substituting', 'the', 'ESVaD', 'measure', 'in', 'the', 'objective', 'function', 'and', 'solving', 'the', 'first', 'order', 'necessary', 'conditions', ',', 'we', 'observe', 'that', 'c', 'jl', '=', '1', '|', 'R', 'j', '|', 'x', 'i', 'R', 'j', 'x', 'il', '-LRB-', '11', '-RRB-', 'minimizes', 'J', 'ESV', 'AD', '-LRB-', 'W', ',', 'C', '-RRB-', '.', 'Learning', 'CSVaD', 'Measures', ':', 'Let', 'x', 'il', '=', 'w', 'jl', 'x', 'il', ',', 'then', 'using', 'the', 'Cauchy-Swartz', 'inequality', ',', 'it', 'can', 'be', 'shown', 'that', 'c', 'jl', '=', '1', '|', 'R', 'j', '|', 'x', 'i', 'R', 'j', 'x', 'il', '-LRB-', '12', '-RRB-', 'maximizes', 'x', 'i', 'R', 'j', 'd', 'l', '=', '1', 'w', 'jl', 'x', 'il', 'c', 'jl', '.', 'Hence', ',', '-LRB-', '12', '-RRB-', 'also', 'minimizes', 'the', 'objective', 'function', 'when', 'CSVaD', 'is', 'used', 'as', 'the', 'dissimilarity', 'measure', '.', 'Table', '1', 'summarizes', 'the', 'update', 'equations', 'used', 'in', 'various', 'algorithms', '.', 'We', 'refer', 'to', 'this', 'set', 'of', 'algorithms', 'as', 'SVaD', 'learning', 'algorithms', '.', 'In', 'this', 'section', ',', 'we', 'present', 'an', 'experimental', 'study', 'of', 'the', 'algorithms', 'described', 'in', 'the', 'previous', 'sections', '.', 'We', 'applied', 'the', 'proposed', 'algorithms', 'on', 'various', 'text', 'data', 'sets', 'and', 'compared', 'the', 'performance', 'of', 'EEnt', 'and', 'EsGini', 'with', 'that', 'of', 'K-Means', ',', 'CSCAD', 'and', 'DGK', 'algorithms', '.', 'The', 'reason', 'for', 'choosing', 'the', 'K-Means', 'algorithm', '-LRB-', 'KMA', '-RRB-', 'apart', 'from', 'CSCAD', 'and', 'DGK', 'is', 'that', 'it', 'provides', 'a', 'baseline', 'for', 'assessing', 'the', 'advantages', 'of', 'feature', 'weighting', '.', 'KMA', 'is', 'also', 'a', 'popular', 'algorithm', 'for', 'text', 'clustering', '.', 'We', 'have', 'included', 'a', 'brief', 'description', 'of', 'CSCAD', 'and', 'DGK', 'algorithms', 'in', 'Appendix', 'A.', 'Text', 'data', 
'sets', 'are', 'sparse', 'and', 'high', 'dimensional', '.', 'We', 'consider', 'standard', 'labeled', 'document', 'collections', 'and', 'test', 'the', 'proposed', 'algorithms', 'for', 'their', 'ability', 'to', 'discover', 'dissimilarity', 'measures', 'that', 'distinguish', 'one', 'class', 'from', 'another', 'without', 'actually', 'considering', 'the', 'class', 'labels', 'of', 'the', 'documents', '.', 'We', 'measure', 'the', 'success', 'of', 'the', 'algorithms', 'by', 'the', 'purity', 'of', 'the', 'regions', 'that', 'they', 'discover', '.', '613', 'Research', 'Track', 'Poster', '4.1', 'Data', 'Sets', 'We', 'performed', 'our', 'experiments', 'on', 'three', 'standard', 'data', 'sets', ':', '20', 'News', 'Group', ',', 'Yahoo', 'K1', ',', 'and', 'Classic', '3', '.', 'These', 'data', 'sets', 'are', 'described', 'below', '.', '20', 'News', 'Group', '3', ':', 'We', 'considered', 'different', 'subsets', 'of', '20', 'News', 'Group', 'data', 'that', 'are', 'known', 'to', 'contain', 'clusters', 'of', 'varying', 'degrees', 'of', 'separation', '-LSB-', '10', '-RSB-', '.', 'As', 'in', '-LSB-', '10', '-RSB-', ',', 'we', 'considered', 'three', 'random', 'samples', 'of', 'three', 'subsets', 'of', 'the', '20', 'News', 'Group', 'data', '.', 'The', 'subsets', 'denoted', 'by', 'Binary', 'has', '250', 'documents', 'each', 'from', 'talk.politics.mideast', 'and', 'talk.politics.misc', '.', 'Multi5', 'has', '100', 'documents', 'each', 'from', 'comp.graphics', ',', 'rec.motorcycles', ',', 'rec.sport.baseball', ',', 'sci.space', ',', 'and', 'talk.politics.mideast', '.', 'Finally', ',', 'Multi10', 'has', '50', 'documents', 'each', 'from', 'alt.atheism', ',', 'comp', '.', 'sys.mac.hardware', ',', 'misc.forsale', ',', 'rec.autos', ',', 'rec.sport.hockey', ',', 'sci.crypt', ',', 'sci.electronics', ',', 'sci.med', ',', 'sci.space', ',', 'and', 'talk.politics', '.', 'gun', '.', 'It', 'may', 'be', 'noted', 'that', 'Binary', 'data', 'sets', 'have', 'two', 'highly', 'overlapping', 'classes', '.', 
'Each', 'of', 'Multi5', 'data', 'sets', 'has', 'samples', 'from', '5', 'distinct', 'classes', ',', 'whereas', 'Multi10', 'data', 'sets', 'have', 'only', 'a', 'few', 'samples', 'from', '10', 'different', 'classes', '.', 'The', 'size', 'of', 'the', 'vocabulary', 'used', 'to', 'represent', 'the', 'documents', 'in', 'Binary', 'data', 'set', 'is', 'about', '4000', ',', 'Multi5', 'about', '3200', 'and', 'Multi10', 'about', '2800', '.', 'We', 'observed', 'that', 'the', 'relative', 'performance', 'of', 'the', 'algorithms', 'on', 'various', 'samples', 'of', 'Binary', ',', 'Multi5', 'and', 'Multi10', 'data', 'sets', 'was', 'similar', '.', 'Hence', ',', 'we', 'report', 'results', 'on', 'only', 'one', 'of', 'them', '.', 'Yahoo', 'K1', '4', ':', 'This', 'data', 'set', 'contains', '2340', 'Reuters', 'news', 'articles', 'downloaded', 'from', 'Yahoo', 'in', '1997', '.', 'There', 'are', '494', 'from', 'Health', ',', '1389', 'from', 'Entertainment', ',', '141', 'from', 'Sports', ',', '114', 'from', 'Politics', ',', '60', 'from', 'Technology', 'and', '142', 'from', 'Business', '.', 'After', 'preprocessing', ',', 'the', 'documents', 'from', 'this', 'data', 'set', 'are', 'represented', 'using', '12015', 'words', '.', 'Note', 'that', 'this', 'data', 'set', 'has', 'samples', 'from', '6', 'different', 'classes', '.', 'Here', ',', 'the', 'distribution', 'of', 'data', 'points', 'across', 'the', 'class', 'is', 'uneven', ',', 'ranging', 'from', '60', 'to', '1389', '.', 'Classic', '3', '5', ':', 'Classic', '3', 'data', 'set', 'contains', '1400', 'aerospace', 'systems', 'abstracts', 'from', 'the', 'Cranfield', 'collection', ',', '1033', 'medical', 'abstracts', 'from', 'the', 'Medline', 'collection', 'and', '1460', 'information', 'retrieval', 'abstracts', 'from', 'the', 'Cisi', 'collection', ',', 'making', 'up', '3893', 'documents', 'in', 'all', '.', 'After', 'preprocessing', ',', 'this', 'data', 'set', 'has', '4301', 'words', '.', 'The', 'points', 'are', 'almost', 'equally', 'distributed', 
'among', 'the', 'three', 'distinct', 'classes', '.', 'The', 'data', 'sets', 'were', 'preprocessed', 'using', 'two', 'major', 'steps', '.', 'First', ',', 'a', 'set', 'of', 'words', '-LRB-', 'vocabulary', '-RRB-', 'is', 'extracted', 'and', 'then', 'each', 'document', 'is', 'represented', 'with', 'respect', 'to', 'this', 'vocabulary', '.', 'Finding', 'the', 'vocabulary', 'includes', ':', '-LRB-', '1', '-RRB-', 'elimination', 'of', 'the', 'standard', 'list', 'of', 'stop', 'words', 'from', 'the', 'documents', ',', '-LRB-', '2', '-RRB-', 'application', 'of', 'Porter', 'stemming', '6', 'for', 'term', 'normalization', ',', 'and', '-LRB-', '3', '-RRB-', 'keeping', 'only', 'the', 'words', 'which', 'appear', 'in', 'at', 'least', '3', 'documents', '.', 'We', 'represent', 'each', 'document', 'by', 'the', 'unitized', 'frequency', 'vector', '.', '4.2', 'Evaluation', 'of', 'Algorithms', 'We', 'use', 'the', 'accuracy', 'measure', 'to', 'compare', 'the', 'performance', 'of', 'various', 'algorithms', '.', 'Let', 'a', 'ij', 'represent', 'the', 'number', 'of', 'data', 'points', 'from', 'class', 'i', 'that', 'are', 'in', 'cluster', 'j', '.', 'Then', 'the', 'accuracy', 'of', 'the', 'partition', 'is', 'given', 'by', 'j', 'max', 'i', 'a', 'ij', '/', 'n', 'where', 'n', 'is', 'the', 'total', 'number', 'of', 'data', 'points', '.', 'It', 'is', 'to', 'be', 'noted', 'that', 'points', 'coming', 'from', 'a', 'single', 'class', 'need', 'not', 'form', 'a', 'single', 'cluster', '.', 'There', 'could', 'be', 'multiple', '3', 'http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20', '.', 'tar.gz', '4', 'ftp://ftp.cs.umn.edu/dept/users/boley/PDDPdata/doc-K', '5', 'ftp://ftp.cs.cornell.edu/pub/smart', '6', 'http://www.tartarus.org/~martin/PorterStemmer/', 'Iteration', '0', '1', '2', '3', '4', '5', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', '334.7', '329.5', '328.3', '328.1', '327.8', 'Accuracy', '73.8', '80.2', '81.4', '81.6', '82', '82', 'Table', '2', ':', 'Evolution', 'of', 'J', '-LRB-', 'W', 
',', 'C', '-RRB-', 'and', 'Accuracies', 'with', 'iterations', 'when', 'EEnt', 'applied', 'on', 'a', 'Multi5', 'data', '.', 'clusters', 'in', 'a', 'class', 'that', 'represent', 'sub-classes', '.', 'We', 'study', 'the', 'performance', 'of', 'SVaD', 'learning', 'algorithms', 'for', 'various', 'values', 'of', 'K', ',', 'i.e.', ',', 'the', 'number', 'of', 'clusters', '.', '4.3', 'Experimental', 'Setup', 'In', 'our', 'implementations', ',', 'we', 'have', 'observed', 'that', 'the', 'proposed', 'algorithms', ',', 'if', 'applied', 'on', 'randomly', 'initialized', 'centroids', ',', 'show', 'unstable', 'behavior', '.', 'One', 'reason', 'for', 'this', 'behavior', 'is', 'that', 'the', 'number', 'of', 'parameters', 'that', 'are', 'estimated', 'in', 'feature-weighting', 'clustering', 'algorithms', 'is', 'twice', 'as', 'large', 'as', 'that', 'estimated', 'by', 'the', 'traditional', 'KMA', '.', 'We', ',', 'therefore', ',', 'first', 'estimate', 'the', 'cluster', 'centers', 'giving', 'equal', 'weights', 'to', 'all', 'the', 'dimensions', 'using', 'KMA', 'and', 'then', 'fine-tune', 'the', 'cluster', 'centers', 'and', 'the', 'weights', 'using', 'the', 'feature-weighting', 'clustering', 'algorithms', '.', 'In', 'every', 'iteration', ',', 'the', 'new', 'sets', 'of', 'weights', 'are', 'updated', 'as', 'follows', '.', 'Let', 'w', 'n', '-LRB-', 't', '+1', '-RRB-', 'represent', 'the', 'weights', 'com-puted', 'using', 'one', 'of', '-LRB-', '9', '-RRB-', ',', '-LRB-', '10', '-RRB-', ',', '-LRB-', '14', '-RRB-', 'or', '-LRB-', '15', '-RRB-', 'in', 'iteration', '-LRB-', 't', '+', '1', '-RRB-', 'and', 'w', '-LRB-', 't', '-RRB-', 'the', 'weights', 'in', 'iteration', 't.', 'Then', ',', 'the', 'weights', 'in', 'iteration', '-LRB-', 't', '+', '1', '-RRB-', 'are', 'w', '-LRB-', 't', '+', '1', '-RRB-', '=', '-LRB-', '1', '-', '-LRB-', 't', '-RRB-', '-RRB-', 'w', '-LRB-', 't', '-RRB-', '+', '-LRB-', 't', '-RRB-', 'w', 'n', '-LRB-', 't', '+', '1', '-RRB-', ',', '-LRB-', '13', '-RRB-', 'where', '-LRB-', 
't', '-RRB-', '-LSB-', '0', ',', '1', '-RSB-', 'decreases', 'with', 't', '.', 'That', 'is', ',', '-LRB-', 't', '-RRB-', '=', '-LRB-', 't', '1', '-RRB-', ',', 'for', 'a', 'given', 'constant', '-LSB-', '0', ',', '1', '-RSB-', '.', 'In', 'our', 'experiments', ',', 'we', 'observed', 'that', 'the', 'variance', 'of', 'purity', 'values', 'for', 'different', 'initial', 'values', 'of', '-LRB-', '0', '-RRB-', 'and', 'above', '0.5', 'is', 'very', 'small', '.', 'Hence', ',', 'we', 'report', 'the', 'results', 'for', '-LRB-', '0', '-RRB-', '=', '0.5', 'and', '=', '0.5', '.', 'We', 'set', 'the', 'value', 'of', 'j', '=', '1', '.', 'It', 'may', 'be', 'noted', 'that', 'when', 'the', 'documents', 'are', 'represented', 'as', 'unit', 'vectors', ',', 'KMA', 'with', 'the', 'cosine', 'dissimilarity', 'measure', 'and', 'Euclidean', 'distance', 'measure', 'would', 'yield', 'the', 'same', 'clusters', '.', 'This', 'is', 'essentially', 'the', 'same', 'as', 'Spherical', 'K-Means', 'algorithms', 'described', 'in', '-LSB-', '3', '-RSB-', '.', 'Therefore', ',', 'we', 'consider', 'only', 'the', 'weighted', 'Euclidean', 'measure', 'and', 'restrict', 'our', 'comparisons', 'to', 'EEnt', 'and', 'EsGini', 'in', 'the', 'experiments', '.', 'Since', 'the', 'clusters', 'obtained', 'by', 'KMA', 'are', 'used', 'to', 'initialize', 'all', 'other', 'algorithms', 'considered', 'here', ',', 'and', 'since', 'the', 'results', 'of', 'KMA', 'are', 'sensitive', 'to', 'initialization', ',', 'the', 'accuracy', 'numbers', 'reported', 'in', 'this', 'section', 'are', 'averages', 'over', '10', 'random', 'initializations', 'of', 'KMA', '.', '4.4', 'Results', 'and', 'Observations', '4.4.1', 'Effect', 'of', 'SVaD', 'Measures', 'on', 'Accuracies', 'In', 'Table', '2', ',', 'we', 'show', 'a', 'sample', 'run', 'of', 'EEnt', 'algorithm', 'on', 'one', 'of', 'the', 'Multi5', 'data', 'sets', '.', 'This', 'table', 'shows', 'the', 'evolution', 'of', 'J', '-LRB-', 'W', ',', 'C', '-RRB-', 'and', 'the', 'corresponding', 'accuracies', 'of', 
'the', 'clusters', 'with', 'the', 'iterations', '.', 'The', 'accuracy', ',', 'shown', 'at', 'iteration', '0', ',', 'is', 'that', 'of', 'the', 'clusters', 'obtained', 'by', 'KMA', '.', 'The', 'purity', 'of', 'clusters', 'increases', 'with', 'decrease', 'in', 'the', 'value', 'of', 'the', 'objective', 'function', 'defined', 'using', 'SVaD', 'measures', '.', 'We', 'have', 'observed', 'a', 'similar', 'behavior', 'of', 'EEnt', 'and', 'EsGini', 'on', 'other', 'data', 'sets', 'also', '.', 'This', 'validates', 'our', 'hypothesis', 'that', 'SVaD', 'measures', 'capture', 'the', 'underlying', 'structure', 'in', 'the', 'data', 'sets', 'more', 'accurately', '.', '614', 'Research', 'Track', 'Poster', '4.4.2', 'Comparison', 'with', 'Other', 'Algorithms', 'Figure', '1', 'to', 'Figure', '5', 'show', 'average', 'accuracies', 'of', 'various', 'algorithms', 'on', 'the', '5', 'data', 'sets', 'for', 'various', 'number', 'of', 'clusters', '.', 'The', 'accuracies', 'of', 'KMA', 'and', 'DGK', 'are', 'very', 'close', 'to', 'each', 'other', 'and', 'hence', ',', 'in', 'the', 'figures', ',', 'the', 'lines', 'corresponding', 'to', 'these', 'algorithms', 'are', 'indistinguishable', '.', 'The', 'lines', 'corresponding', 'to', 'CSCAD', 'are', 'also', 'close', 'to', 'that', 'of', 'KMA', 'in', 'all', 'the', 'cases', 'except', 'Class', '3', '.', 'General', 'observations', ':', 'The', 'accuracies', 'of', 'SVaD', 'algorithms', 'follow', 'the', 'trend', 'of', 'the', 'accuracies', 'of', 'other', 'algorithms', '.', 'In', 'all', 'our', 'experiments', ',', 'both', 'SVaD', 'learning', 'algorithms', 'improve', 'the', 'accuracies', 'of', 'clusters', 'obtained', 'by', 'KMA', '.', 'It', 'is', 'observed', 'in', 'our', 'experiments', 'that', 'the', 'improvement', 'could', 'be', 'as', 'large', 'as', '8', '%', 'in', 'some', 'instances', '.', 'EEnt', 'and', 'EsGini', 'consis-tently', 'perform', 'better', 'than', 'DGK', 'on', 'all', 'data', 'sets', 'and', 'for', 'all', 'values', 'of', 'K.', 'EEnt', 'and', 'EsGini', 
'perform', 'better', 'than', 'CSCAD', 'on', 'all', 'data', 'sets', 'excepts', 'in', 'the', 'case', 'of', 'Classic', '3', 'and', 'for', 'a', 'few', 'values', 'of', 'K.', 'Note', 'that', 'the', 'weight', 'update', 'equation', 'of', 'CSCAD', '-LRB-', '15', '-RRB-', 'may', 'result', 'in', 'negative', 'values', 'of', 'w', 'jl', '.', 'Our', 'experience', 'with', 'CSCAD', 'shows', 'that', 'it', 'is', 'quite', 'sensitive', 'to', 'initialization', 'and', 'it', 'may', 'have', 'convergence', 'problems', '.', 'In', 'contrast', ',', 'it', 'may', 'be', 'observed', 'that', 'w', 'jl', 'in', '-LRB-', '9', '-RRB-', 'and', '-LRB-', '10', '-RRB-', 'are', 'always', 'positive', '.', 'Moreover', ',', 'in', 'our', 'experience', ',', 'these', 'two', 'versions', 'are', 'much', 'less', 'sensitive', 'to', 'the', 'choice', 'of', 'j', '.', 'Data', 'specific', 'observations', ':', 'When', 'K', '=', '2', ',', 'EEnt', 'and', 'EsGini', 'could', 'not', 'further', 'improve', 'the', 'results', 'of', 'KMA', 'on', 'the', 'Binary', 'data', 'set', '.', 'The', 'reason', 'is', 'that', 'the', 'data', 'set', 'contains', 'two', 'highly', 'overlapping', 'classes', '.', 'However', ',', 'for', 'other', 'values', 'of', 'K', ',', 'they', 'marginally', 'improve', 'the', 'accuracies', '.', 'In', 'the', 'case', 'of', 'Multi5', ',', 'the', 'accuracies', 'of', 'the', 'algorithms', 'are', 'non-monotonic', 'with', 'K', '.', 'The', 'improvement', 'of', 'accuracies', 'is', 'large', 'for', 'intermediate', 'values', 'of', 'K', 'and', 'small', 'for', 'extreme', 'values', 'of', 'K', '.', 'When', 'K', '=', '5', ',', 'KMA', 'finds', 'relatively', 'stable', 'clusters', '.', 'Hence', ',', 'SVaD', 'algorithms', 'are', 'unable', 'to', 'improve', 'the', 'accuracies', 'as', 'much', 'as', 'they', 'did', 'for', 'intermediate', 'values', 'of', 'K.', 'For', 'larger', 'values', 'of', 'K', ',', 'the', 'clusters', 'are', 'closely', 'spaced', 'and', 'hence', 'there', 'is', 'little', 'scope', 'for', 'improvement', 'by', 'the', 'SVaD', 
'algorithms', '.', 'Multi10', 'data', 'sets', 'are', 'the', 'toughest', 'to', 'cluster', 'because', 'of', 'the', 'large', 'number', 'of', 'classes', 'present', 'in', 'the', 'data', '.', 'In', 'this', 'case', ',', 'the', 'accuracies', 'of', 'the', 'algorithms', 'are', 'monotonically', 'increasing', 'with', 'the', 'number', 'of', 'clusters', '.', 'The', 'extent', 'of', 'improvement', 'of', 'accuracies', 'of', 'SVaD', 'algorithms', 'over', 'KMA', 'is', 'almost', 'constant', 'over', 'the', 'entire', 'range', 'of', 'K', '.', 'This', 'reflects', 'the', 'fact', 'that', 'the', 'documents', 'in', 'Multi10', 'data', 'set', 'are', 'uniformly', 'distributed', 'over', 'feature', 'space', '.', 'The', 'distribution', 'of', 'documents', 'in', 'Yahoo', 'K1', 'data', 'set', 'is', 'highly', 'skewed', '.', 'The', 'extent', 'of', 'improvements', 'that', 'the', 'SVaD', 'algorithms', 'could', 'achieve', 'decrease', 'with', 'K.', 'For', 'higher', 'values', 'of', 'K', ',', 'KMA', 'is', 'able', 'to', 'find', 'almost', 'pure', 'sub-clusters', ',', 'resulting', 'in', 'accuracies', 'of', 'about', '90', '%', '.', 'This', 'leaves', 'little', 'scope', 'for', 'improvement', '.', 'The', 'performance', 'of', 'CSCAD', 'differs', 'noticeably', 'in', 'the', 'case', 'of', 'Classic', '3', '.', 'It', 'performs', 'better', 'than', 'the', 'SVaD', 'algorithms', 'for', 'K', '=', '3', 'and', 'better', 'than', 'EEnt', 'for', 'K', '=', '9', '.', 'However', ',', 'for', 'larger', 'values', 'of', 'K', ',', 'the', 'SVaD', 'algorithms', 'perform', 'better', 'than', 'the', 'rest', '.', 'As', 'in', 'the', 'case', 'of', 'Multi5', ',', 'the', 'improvements', 'of', 'SVaD', 'algorithms', 'over', 'others', 'are', 'significant', 'and', 'consistent', '.', 'One', 'may', 'recall', 'that', 'Multi5', 'and', 'Classic', '3', 'consist', 'of', 'documents', 'from', 'distinct', 'classes', '.', 'Therefore', ',', 'this', 'observation', 'implies', 'that', 'when', 'there', 'are', 'distinct', 'clusters', 'in', 'the', 'data', 'set', ',', 
'KMA', 'yields', 'confusing', 'clusters', 'when', 'the', 'number', 'of', 'clusters', 'is', 'over-Figure', '1', ':', 'Accuracy', 'results', 'on', 'Binary', 'data', '.', 'Figure', '2', ':', 'Accuracy', 'results', 'on', 'Multi5', 'data', '.', 'specified', '.', 'In', 'this', 'scenario', ',', 'EEnt', 'and', 'EsGini', 'can', 'fine-tune', 'the', 'clusters', 'to', 'improve', 'their', 'purity', '.', 'We', 'have', 'defined', 'a', 'general', 'class', 'of', 'spatially', 'variant', 'dissimilarity', 'measures', 'and', 'proposed', 'algorithms', 'to', 'learn', 'the', 'measure', 'underlying', 'a', 'given', 'data', 'set', 'in', 'an', 'unsupervised', 'learning', 'framework', '.', 'Through', 'our', 'experiments', 'on', 'various', 'textual', 'data', 'sets', ',', 'we', 'have', 'shown', 'that', 'such', 'a', 'formulation', 'of', 'dissimilarity', 'measure', 'can', 'more', 'accurately', 'capture', 'the', 'hidden', 'structure', 'in', 'the', 'data', 'than', 'a', 'standard', 'Euclidean', 'measure', 'that', 'does', 'not', 'vary', 'over', 'feature', 'space', '.', 'We', 'have', 'also', 'shown', 'that', 'the', 'proposed', 'learning', 'algorithms', 'perform', 'better', 'than', 'other', 'similar', 'algorithms', 'in', 'the', 'literature', ',', 'and', 'have', 'better', 'stability', 'properties', '.', 'Even', 'though', 'we', 'have', 'applied', 'these', 'algorithms', 'only', 'to', 'text', 'data', 'sets', ',', 'the', 'algorithms', 'derived', 'here', 'do', 'not', 'assume', 'any', 'specific', 'characteristics', 'of', 'textual', 'data', 'sets', '.', 'Hence', ',', 'they', 'Figure', '3', ':', 'Accuracy', 'results', 'on', 'Multi10', 'data', '.', '615', 'Research', 'Track', 'Poster', 'Figure', '4', ':', 'Accuracy', 'results', 'on', 'Yahoo', 'K1', 'data', '.', 'Figure', '5', ':', 'Accuracy', 'results', 'on', 'Classic', '3', 'data', '.', 'are', 'applicable', 'to', 'general', 'data', 'sets', '.', 'Since', 'the', 'algorithms', 'perform', 'better', 'for', 'larger', 'K', ',', 'it', 'would', 'be', 'interesting', 'to', 
'investigate', 'whether', 'they', 'can', 'be', 'used', 'to', 'find', 'subtopics', 'of', 'a', 'topic', '.', 'Finally', ',', 'it', 'will', 'be', 'interesting', 'to', 'learn', 'SVaD', 'measures', 'for', 'labeled', 'data', 'sets', '.', '-LSB-', '1', '-RSB-', 'J.', 'C.', 'Bezdek', 'and', 'R.', 'J.', 'Hathaway', '.', 'Some', 'notes', 'on', 'alternating', 'optimization', '.', 'In', 'Proceedings', 'of', 'the', '2002', 'AFSS', 'International', 'Conference', 'on', 'Fuzzy', 'Systems', '.', 'Calcutta', ',', 'pages', '288', '300', '.', 'Springer-Verlag', ',', '2002', '.', '-LSB-', '2', '-RSB-', 'A.', 'P.', 'Dempster', ',', 'N.', 'M.', 'Laird', ',', 'and', 'Rubin', '.', 'Maximum', 'likelihood', 'from', 'incomplete', 'data', 'via', 'the', 'EM', 'algorithm', '.', 'Journal', 'Royal', 'Statistical', 'Society', 'B', ',', '39', '-LRB-', '2', '-RRB-', ':', '1', '38', ',', '1977', '.', '-LSB-', '3', '-RSB-', 'I.', 'S.', 'Dhillon', 'and', 'D.', 'S.', 'Modha', '.', 'Concept', 'decompositions', 'for', 'large', 'sparse', 'text', 'data', 'using', 'clustering', '.', 'Machine', 'Learning', ',', '42', '-LRB-', '1', '-RRB-', ':', '143', '175', ',', 'January', '2001', '.', '-LSB-', '4', '-RSB-', 'E.', 'Diday', 'and', 'J.', 'C.', 'Simon', '.', 'Cluster', 'analysis', '.', 'In', 'K.', 'S.', 'Fu', ',', 'editor', ',', 'Pattern', 'Recognition', ',', 'pages', '47', '94', '.', 'Springer-Verlag', ',', '1976', '.', '-LSB-', '5', '-RSB-', 'H.', 'Frigui', 'and', 'O.', 'Nasraoui', '.', 'Simultaneous', 'clustering', 'and', 'attribute', 'discrimination', '.', 'In', 'Proceedings', 'of', 'FUZZIEEE', ',', 'pages', '158', '163', ',', 'San', 'Antonio', ',', '2000', '.', '-LSB-', '6', '-RSB-', 'H.', 'Frigui', 'and', 'O.', 'Nasraoui', '.', 'Simultaneous', 'categorization', 'of', 'text', 'documents', 'and', 'identification', 'of', 'cluster-dependent', 'keywords', '.', 'In', 'Proceedings', 'of', 'FUZZIEEE', ',', 'pages', '158', '163', ',', 'Honolulu', ',', 'Hawaii', ',', '2001', '.', '-LSB-', '7', '-RSB-', 'D.', 'E.', 
'Gustafson', 'and', 'W.', 'C.', 'Kessel', '.', 'Fuzzy', 'clustering', 'with', 'the', 'fuzzy', 'covariance', 'matrix', '.', 'In', 'Proccedings', 'of', 'IEEE', 'CDC', ',', 'pages', '761', '766', ',', 'San', 'Diego', ',', 'California', ',', '1979', '.', '-LSB-', '8', '-RSB-', 'R.', 'Krishnapuram', 'and', 'J.', 'Kim', '.', 'A', 'note', 'on', 'fuzzy', 'clustering', 'algorithms', 'for', 'Gaussian', 'clusters', '.', 'IEEE', 'Transactions', 'on', 'Fuzzy', 'Systems', ',', '7', '-LRB-', '4', '-RRB-', ':', '453', '461', ',', 'Aug', '1999', '.', '-LSB-', '9', '-RSB-', 'Y.', 'Rui', ',', 'T.', 'S.', 'Huang', ',', 'and', 'S.', 'Mehrotra', '.', 'Relevance', 'feedback', 'techniques', 'in', 'interactive', 'content-based', 'image', 'retrieval', '.', 'In', 'Storage', 'and', 'Retrieval', 'for', 'Image', 'and', 'Video', 'Databases', '-LRB-', 'SPIE', '-RRB-', ',', 'pages', '25', '36', ',', '1998', '.', '-LSB-', '10', '-RSB-', 'N.', 'Slonim', 'and', 'N.', 'Tishby', '.', 'Document', 'clustering', 'using', 'word', 'clusters', 'via', 'the', 'information', 'bottleneck', 'method', '.', 'In', 'Proceedings', 'of', 'SIGIR', ',', 'pages', '208', '215', ',', '2000', '.', 'APPENDIX', 'A', '.', 'OTHER', 'FEATURE', 'WEIGHTING', 'CLUSTERING', 'TECHNIQUES', 'A.', '1', 'Diagonal', 'Gustafson-Kessel', '-LRB-', 'DGK', '-RRB-', 'Gustafson', 'and', 'Kessel', '-LSB-', '7', '-RSB-', 'associate', 'each', 'cluster', 'with', 'a', 'different', 'norm', 'matrix', '.', 'Let', 'A', '=', '-LRB-', 'A', '1', ',', '...', ',', 'A', 'k', '-RRB-', 'be', 'the', 'set', 'of', 'k', 'norm', 'matrices', 'associated', 'with', 'k', 'clusters', '.', 'Let', 'u', 'ji', 'is', 'the', 'fuzzy', 'membership', 'of', 'x', 'i', 'in', 'cluster', 'j', 'and', 'U', '=', '-LSB-', 'u', 'ji', '-RSB-', '.', 'By', 'restricting', 'A', 'j', 's', 'to', 'be', 'diagonal', 'and', 'u', 'ji', '-LCB-', '0', ',', '1', '-RCB-', ',', 'we', 'can', 'reformulate', 'the', 'original', 'optimization', 'problem', 'in', 'terms', 'of', 'SVaD', 'measures', 'as', 'follows', 
':', 'min', 'C', ',', 'W', 'J', 'DGK', '-LRB-', 'C', ',', 'W', '-RRB-', '=', 'k', 'j', '=', '1', 'x', 'i', 'R', 'j', 'M', 'l', '=', '1', 'w', 'jl', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', ',', 'subject', 'to', 'l', 'w', 'jl', '=', 'j', '.', 'Note', 'that', 'this', 'problem', 'can', 'be', 'solved', 'using', 'the', 'same', 'AO', 'algorithms', 'described', 'in', 'Section', '3', '.', 'Here', ',', 'the', 'update', 'for', 'C', 'and', 'P', 'would', 'remain', 'the', 'same', 'as', 'that', 'discussed', 'in', 'Section', '3', '.', 'It', 'can', 'be', 'easily', 'shown', 'that', 'when', 'j', '=', '1', ',', 'j', ',', 'w', 'jl', '=', 'M', 'm', '=', '1', 'x', 'i', 'R', 'j', 'g', 'm', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '1/M', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', '-LRB-', '14', '-RRB-', 'minimize', 'J', 'DGK', 'for', 'a', 'given', 'C.', 'A.', '2', 'Crisp', 'Simultaneous', 'Clustering', 'and', 'Attribute', 'Discrimination', '-LRB-', 'CSCAD', '-RRB-', 'Frigui', 'et', '.', 'al.', 'in', '-LSB-', '5', ',', '6', '-RSB-', ',', 'considered', 'a', 'fuzzy', 'version', 'of', 'the', 'feature-weighting', 'based', 'clustering', 'problem', '-LRB-', 'SCAD', '-RRB-', '.', 'To', 'make', 'a', 'fair', 'comparison', 'of', 'our', 'algorithms', 'with', 'SCAD', ',', 'we', 'derive', 'its', 'crisp', 'version', 'and', 'refer', 'to', 'it', 'as', 'Crisp', 'SCAD', '-LRB-', 'CSCAD', '-RRB-', '.', 'In', '-LSB-', '5', ',', '6', '-RSB-', ',', 'the', 'Gini', 'measure', 'is', 'used', 'for', 'regularization', '.', 'If', 'the', 'Gini', 'measure', 'is', 'considered', 'with', 'r', '=', '1', ',', 'the', 'weights', 'w', 'jl', 'that', 'minimize', 'the', 'corresponding', 'objective', 'function', 'for', 'a', 'given', 'C', 'and', 'P', ',', 'are', 'given', 'by', 'w', 'jl', '=', '1', 'M', '+', '1', '2', 'j', '1', 'M', 'M', 'n', '=', '1', 'x', 'i', 'R', 'j', 'g', 'n', '-LRB-', 'x', 'i', ',', 'c', 'j', '-RRB-', 'x', 'i', 'R', 'j', 'g', 'l', '-LRB-', 'x', 'i', ',', 'c', 'j', 
'-RRB-', '.', '-LRB-', '15', '-RRB-', 'Since', 'SCAD', 'uses', 'the', 'weighted', 'Euclidean', 'measure', ',', 'the', 'update', 'equations', 'of', 'centroids', 'in', 'CSCAD', 'remain', 'the', 'same', 'as', 'in', '-LRB-', '11', '-RRB-', '.', 'The', 'update', 'equation', 'for', 'w', 'jl', 'in', 'SCAD', 'is', 'quite', 'similar', 'to', '-LRB-', '15', '-RRB-', '.', 'One', 'may', 'note', 'that', ',', 'in', '-LRB-', '15', '-RRB-', ',', 'the', 'value', 'of', 'w', 'jl', 'can', 'become', 'negative', '.', 'In', '-LSB-', '5', '-RSB-', ',', 'a', 'heuristic', 'is', 'used', 'to', 'estimate', 'the', 'value', 'j', 'in', 'every', 'iteration', 'and', 'set', 'the', 'negative', 'values', 'of', 'w', 'jl', 'to', 'zero', 'before', 'normalizing', 'the', 'weights', '.', '616', 'Research', 'Track', 'Poster']
Document BIO Tags: ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'I', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']
Extractive/present Keyphrases: ['dissimilarity measure', 'clustering', 'feature weighting']
Abstractive/absent Keyphrases: ['spatially varying dissimilarity (svad)', 'learning dissimilarity measures']
-----------
```
### Keyphrase Extraction
```python
from datasets import load_dataset
# get the dataset only for keyphrase extraction
dataset = load_dataset("midas/nus", "extraction")
print("Samples for Keyphrase Extraction")
# sample from the test split
print("Sample from test data split")
test_sample = dataset["test"][0]
print("Fields in the sample: ", [key for key in test_sample.keys()])
print("Tokenized Document: ", test_sample["document"])
print("Document BIO Tags: ", test_sample["doc_bio_tags"])
print("\n-----------\n")
```
### Keyphrase Generation
```python
# get the dataset only for keyphrase generation
dataset = load_dataset("midas/nus", "generation")
print("Samples for Keyphrase Generation")
# sample from the test split
print("Sample from test data split")
test_sample = dataset["test"][0]
print("Fields in the sample: ", [key for key in test_sample.keys()])
print("Tokenized Document: ", test_sample["document"])
print("Extractive/present Keyphrases: ", test_sample["extractive_keyphrases"])
print("Abstractive/absent Keyphrases: ", test_sample["abstractive_keyphrases"])
print("\n-----------\n")
```
## Citation Information
```
@InProceedings{10.1007/978-3-540-77094-7_41,
author="Nguyen, Thuy Dung
and Kan, Min-Yen",
editor="Goh, Dion Hoe-Lian
and Cao, Tru Hoang
and Solvberg, Ingeborg Torvik
and Rasmussen, Edie",
title="Keyphrase Extraction in Scientific Publications",
booktitle="Asian Digital Libraries. Looking Back 10 Years and Forging New Frontiers",
year="2007",
publisher="Springer Berlin Heidelberg",
address="Berlin, Heidelberg",
pages="317--326",
isbn="978-3-540-77094-7"
}
```
## Contributions
Thanks to [@debanjanbhucs](https://github.com/debanjanbhucs), [@dibyaaaaax](https://github.com/dibyaaaaax) and [@ad6398](https://github.com/ad6398) for adding this dataset
| [
-0.6632483601570129,
-0.5185294151306152,
0.75310218334198,
0.4307438135147095,
0.04310658946633339,
0.09522149711847305,
-0.1632414311170578,
-0.20158767700195312,
0.6630810499191284,
-0.046103350818157196,
-0.4079219400882721,
-0.9770545363426208,
-0.43399864435195923,
0.2842360734939575... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
ncats/EpiSet4BinaryClassification | ncats | 2023-09-14T00:42:57Z | 16 | 1 | glue | [
"annotations_creators:unknown",
"language_creators:unknown",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:unknown",
"language:en",
"license:cc-by-4.0",
"region:us"
] | 2023-09-14T00:42:57Z | 2022-03-02T23:29:22.000Z | 2022-03-02T23:29:22 | ---
annotations_creators:
- unknown
language_creators:
- unknown
language:
- en
license:
- cc-by-4.0
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
source_datasets:
- unknown
paperswithcode_id: glue
pretty_name: GLUE (General Language Understanding Evaluation benchmark)
---
# DOCUMENTATION UPDATES IN PROGRESS! IGNORE BELOW
# Dataset Card for GLUE
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://nyu-mll.github.io/CoLA/](https://nyu-mll.github.io/CoLA/)
- **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Size of downloaded dataset files:** 955.33 MB
- **Size of the generated dataset:** 229.68 MB
- **Total amount of disk used:** 1185.01 MB
### Dataset Summary
GLUE, the General Language Understanding Evaluation benchmark (https://gluebenchmark.com/) is a collection of resources for training, evaluating, and analyzing natural language understanding systems.
### Supported Tasks and Leaderboards
The leaderboard for the GLUE benchmark can be found [at this address](https://gluebenchmark.com/). It comprises the following tasks:
#### cola
The Corpus of Linguistic Acceptability consists of English acceptability judgments drawn from books and journal articles on linguistic theory. Each example is a sequence of words annotated with whether it is a grammatical English sentence.
### Languages
The language data is in English
## Dataset Structure
### Data Instances
#### cola
- **Size of downloaded dataset files:** 0.36 MB
- **Size of the generated dataset:** 0.58 MB
- **Total amount of disk used:** 0.94 MB
An example of 'train' looks as follows.
```
{
"sentence": "Our friends won't buy this analysis, let alone the next one we propose.",
"label": 1,
"id": 0
}
```
### Data Fields
The data fields are the same among all splits.
#### cola
- `abstract`: a `string` feature.
- `label`: a classification label, with possible values including `unacceptable` (0), `acceptable` (1).
- `idx`: a `int32` feature.
### Data Splits
|train|validation|test|
|----:|---------:|---:|
| 8551| 1043|1063|
## Dataset Creation
### Curation Rationale
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the source language producers?
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Annotations
#### Annotation process
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
#### Who are the annotators?
Rare Disease Curators from the [National Institutes of Health (NIH) Genetic and Rare Diseases Information Center (GARD)](https://rarediseases.info.nih.gov/)
### Personal and Sensitive Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Discussion of Biases
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Other Known Limitations
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
## Additional Information
### Dataset Curators
Rare Disease Curators from the [National Institutes of Health (NIH) Genetic and Rare Diseases Information Center (GARD)](https://rarediseases.info.nih.gov/)
### Licensing Information
[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Citation Information
```
@inproceedings{john2021recurrent,
title={Recurrent Neural Networks to Automatically Identify Rare Disease Epidemiologic Studies from PubMed},
author={John, Jennifer N and Sid, Eric and Zhu, Qian},
booktitle={AMIA Annual Symposium Proceedings},
volume={2021},
pages={325},
year={2021},
organization={American Medical Informatics Association}
}
```
### Contributions
Thanks to [@patpizio](https://github.com/patpizio), [@jeswan](https://github.com/jeswan), [@thomwolf](https://github.com/thomwolf), [@patrickvonplaten](https://github.com/patrickvonplaten), [@mariamabarham](https://github.com/mariamabarham) for adding this dataset.
```
from datasets import load_dataset
epiclassify = load_dataset("ncats/EpiSet4BinaryClassification")
``` | [
-0.4500674605369568,
-0.5908492207527161,
0.09088066220283508,
0.25921985507011414,
-0.10407830774784088,
0.011747450567781925,
-0.2866387665271759,
-0.5031288862228394,
0.7332699298858643,
0.31952160596847534,
-0.6618156433105469,
-0.809195876121521,
-0.5626859068870544,
0.165419280529022... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
persiannlp/parsinlu_reading_comprehension | persiannlp | 2022-10-25T09:54:26Z | 16 | 0 | null | [
"task_categories:question-answering",
"task_ids:extractive-qa",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:extended|wikipedia|google",
"language:fa",
"license:cc-by-nc-sa-4.0",
"arxiv:20... | 2022-10-25T09:54:26Z | 2022-03-02T23:29:22.000Z | 2022-03-02T23:29:22 | ---
annotations_creators:
- expert-generated
language_creators:
- expert-generated
language:
- fa
license:
- cc-by-nc-sa-4.0
multilinguality:
- monolingual
size_categories:
- 1K<n<10K
source_datasets:
- extended|wikipedia|google
task_categories:
- question-answering
task_ids:
- extractive-qa
---
# Dataset Card for PersiNLU (Reading Comprehension)
## Table of Contents
- [Dataset Card for PersiNLU (Reading Comprehension)](#dataset-card-for-persi_nlu_reading_comprehension)
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
- [Who are the source language producers?](#who-are-the-source-language-producers)
- [Annotations](#annotations)
- [Annotation process](#annotation-process)
- [Who are the annotators?](#who-are-the-annotators)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [Github](https://github.com/persiannlp/parsinlu/)
- **Repository:** [Github](https://github.com/persiannlp/parsinlu/)
- **Paper:** [Arxiv](https://arxiv.org/abs/2012.06154)
- **Leaderboard:**
- **Point of Contact:** d.khashabi@gmail.com
### Dataset Summary
A Persian reading comprehension task (generating an answer, given a question and a context paragraph).
The questions are mined using Google auto-complete, their answers and the corresponding evidence documents are manually annotated by native speakers.
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
The text dataset is in Persian (`fa`).
## Dataset Structure
### Data Instances
Here is an example from the dataset:
```
{
'question': 'پیامبر در چه سالی به پیامبری رسید؟',
'url': 'https://fa.wikipedia.org/wiki/%D9%85%D8%AD%D9%85%D8%AF',
'passage': 'محمد که از روش زندگی مردم مکه ناخشنود بود، گهگاه در غار حرا در یکی از کوه\u200cهای اطراف آن دیار به تفکر و عبادت می\u200cپرداخت. به باور مسلمانان، محمد در همین مکان و در حدود ۴۰ سالگی از طرف خدا به پیامبری برگزیده، و وحی بر او فروفرستاده شد. در نظر آنان، دعوت محمد همانند دعوت دیگر پیامبرانِ کیش یکتاپرستی مبنی بر این بود که خداوند (الله) یکتاست و تسلیم شدن برابر خدا راه رسیدن به اوست.',
'answers': [
{'answer_start': 160, 'answer_text': 'حدود ۴۰ سالگی'}
]
}
```
### Data Fields
- `question`: the question, mined using Google auto-complete.
- `passage`: the passage that contains the answer.
- `url`: the url from which the passage was mined.
- `answers`: a list of answers, containing the string and the index of the answer.
### Data Splits
The train/test split contains 600/575 samples.
## Dataset Creation
### Curation Rationale
The question were collected via Google auto-complete.
The answers were annotated by native speakers.
For more details, check [the corresponding draft](https://arxiv.org/abs/2012.06154).
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
CC BY-NC-SA 4.0 License
### Citation Information
```bibtex
@article{huggingface:dataset,
title = {ParsiNLU: A Suite of Language Understanding Challenges for Persian},
authors = {Khashabi, Daniel and Cohan, Arman and Shakeri, Siamak and Hosseini, Pedram and Pezeshkpour, Pouya and Alikhani, Malihe and Aminnaseri, Moin and Bitaab, Marzieh and Brahman, Faeze and Ghazarian, Sarik and others},
year={2020}
journal = {arXiv e-prints},
eprint = {2012.06154},
}
```
### Contributions
Thanks to [@danyaljj](https://github.com/danyaljj) for adding this dataset.
| [
-0.5937898755073547,
-0.8344095945358276,
0.2557273805141449,
0.12493512034416199,
-0.23369412124156952,
-0.10449780523777008,
-0.4377623498439789,
-0.2470296174287796,
0.39612478017807007,
0.4209875464439392,
-0.6593711972236633,
-0.7548232674598694,
-0.529769241809845,
0.4940336346626282... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
philschmid/test_german_squad | philschmid | 2021-10-25T13:55:14Z | 16 | 2 | null | [
"region:us"
] | 2021-10-25T13:55:14Z | 2022-03-02T23:29:22.000Z | 2022-03-02T23:29:22 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
qanastek/ANTILLES | qanastek | 2022-10-24T17:13:19Z | 16 | 1 | null | [
"task_categories:token-classification",
"annotations_creators:machine-generated",
"annotations_creators:expert-generated",
"language_creators:found",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:fr",
"region:us"
] | 2022-10-24T17:13:19Z | 2022-03-02T23:29:22.000Z | 2022-03-02T23:29:22 | ---
annotations_creators:
- machine-generated
- expert-generated
language_creators:
- found
language:
- fr
language_bcp47:
- fr-FR
pretty_name: ANTILLES
size_categories:
- 100K<n<1M
source_datasets:
- original
task_categories:
- token-classification
task_ids:
- part-of-speech-tagging
---
# ANTILLES : An Open French Linguistically Enriched Part-of-Speech Corpus
## Table of Contents
- [Dataset Card for [Needs More Information]](#dataset-card-for-needs-more-information)
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [sent_id = fr-ud-dev_00005](#sent_id--fr-ud-dev_00005)
- [text = Travail de trés grande qualité exécuté par un imprimeur artisan passionné.](#text--travail-de-trs-grande-qualit-excut-par-un-imprimeur-artisan-passionn)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
- [Who are the source language producers?](#who-are-the-source-language-producers)
- [Annotations](#annotations)
- [Annotation process](#annotation-process)
- [Who are the annotators?](#who-are-the-annotators)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
## Dataset Description
- **Homepage:** https://qanastek.github.io/ANTILLES/
- **Repository:** https://github.com/qanastek/ANTILLES
- **Paper:** https://hal.archives-ouvertes.fr/hal-03696042/document
- **Leaderboard:** https://paperswithcode.com/dataset/antilles
- **Point of Contact:** [Yanis Labrak](mailto:yanis.labrak@univ-avignon.fr)
### Dataset Summary
`ANTILLES` is a part-of-speech tagging corpora based on [UD_French-GSD](https://universaldependencies.org/treebanks/fr_gsd/index.html) which was originally created in 2015 and is based on the [universal dependency treebank v2.0](https://github.com/ryanmcd/uni-dep-tb).
Originally, the corpora consists of 400,399 words (16,341 sentences) and had 17 different classes. Now, after applying our tags augmentation script `transform.py`, we obtain 60 different classes which add semantic information such as: the gender, number, mood, person, tense or verb form given in the different CoNLL-U fields from the original corpora.
We based our tags on the level of details given by the [LIA_TAGG](http://pageperso.lif.univ-mrs.fr/frederic.bechet/download.html) statistical POS tagger written by [Frédéric Béchet](http://pageperso.lif.univ-mrs.fr/frederic.bechet/index-english.html) in 2001.
<a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-sa/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/">Creative Commons Attribution-ShareAlike 4.0 International License</a>.
### Supported Tasks and Leaderboards
`part-of-speech-tagging`: The dataset can be used to train a model for part-of-speech-tagging. The performance is measured by how high its F1 score is. A Flair Sequence-To-Sequence model trained to tag tokens from Wikipedia passages achieves a F1 score (micro) of 0.952.
### Languages
The text in the dataset is in French, as spoken by [Wikipedia](https://en.wikipedia.org/wiki/Main_Page) users. The associated [BCP-47](https://tools.ietf.org/html/bcp47) code is `fr`.
## Load the dataset
### HuggingFace
```python
from datasets import load_dataset
dataset = load_dataset("qanastek/ANTILLES")
print(dataset)
```
### FlairNLP
```python
from flair.datasets import UniversalDependenciesCorpus
corpus: Corpus = UniversalDependenciesCorpus(
data_folder='ANTILLES',
train_file="train.conllu",
test_file="test.conllu",
dev_file="dev.conllu"
)
```
## Load the model
### Flair ([model](https://huggingface.co/qanastek/pos-french))
```python
from flair.models import SequenceTagger
tagger = SequenceTagger.load("qanastek/pos-french")
```
## HuggingFace Spaces
<table style="width: fit-content;">
<thead>
<tr>
<td>
<a href="https://huggingface.co/spaces/qanastek/French-Part-Of-Speech-Tagging">
<img src="https://huggingface.co/datasets/qanastek/ANTILLES/raw/main/imgs/en.png" width="160">
</a>
</td>
<td>
<a href="https://huggingface.co/spaces/qanastek/Etiqueteur-Morphosyntaxique-Etendu">
<img src="https://huggingface.co/datasets/qanastek/ANTILLES/raw/main/imgs/fr.png" width="160">
</a>
</td>
</tr>
</thead>
</table>
## Dataset Structure
### Data Instances
```plain
# sent_id = fr-ud-dev_00005
# text = Travail de trés grande qualité exécuté par un imprimeur artisan passionné.
1 Travail travail NMS _ Gender=Masc|Number=Sing 0 root _ wordform=travail
2 de de PREP _ _ 5 case _ _
3 trés trés ADV _ _ 4 advmod _ _
4 grande grand ADJFS _ Gender=Fem|Number=Sing 5 amod _ _
5 qualité qualité NFS _ Gender=Fem|Number=Sing 1 nmod _ _
6 exécuté exécuter VPPMS _ Gender=Masc|Number=Sing|Tense=Past|VerbForm=Part 1 acl _ _
7 par par PREP _ _ 9 case _ _
8 un un DINTMS _ Definite=Ind|Gender=Masc|Number=Sing|PronType=Art 9 det _ _
9 imprimeur imprimeur NMS _ Gender=Masc|Number=Sing 6 obl:agent _ _
10 artisan artisan NMS _ Gender=Masc|Number=Sing 9 nmod _ _
11 passionné passionné ADJMS _ Gender=Masc|Number=Sing 9 amod _ SpaceAfter=No
12 . . YPFOR _ _ 1 punct _ _
```
### Data Fields
| Abbreviation | Description | Examples | # tokens |
|:--------:|:--------:|:--------:|:--------:|
| PREP | Preposition | de | 63 738 |
| AUX | Auxiliary Verb | est | 12 886 |
| ADV | Adverb | toujours | 14 969 |
| COSUB | Subordinating conjunction | que | 3 007 |
| COCO | Coordinating Conjunction | et | 10 102 |
| PART | Demonstrative particle | -t | 93 |
| PRON | Pronoun | qui ce quoi | 667 |
| PDEMMS | Singular Masculine Demonstrative Pronoun | ce | 1 950 |
| PDEMMP | Plurial Masculine Demonstrative Pronoun | ceux | 108 |
| PDEMFS | Singular Feminine Demonstrative Pronoun | cette | 1 004 |
| PDEMFP | Plurial Feminine Demonstrative Pronoun | celles | 53 |
| PINDMS | Singular Masculine Indefinite Pronoun | tout | 961 |
| PINDMP | Plurial Masculine Indefinite Pronoun | autres | 89 |
| PINDFS | Singular Feminine Indefinite Pronoun | chacune | 136 |
| PINDFP | Plurial Feminine Indefinite Pronoun | certaines | 31 |
| PROPN | Proper noun | houston | 22 135 |
| XFAMIL | Last name | levy | 6 449 |
| NUM | Numerical Adjectives | trentaine vingtaine | 67 |
| DINTMS | Masculine Numerical Adjectives | un | 4 254 |
| DINTFS | Feminine Numerical Adjectives | une | 3 543 |
| PPOBJMS | Singular Masculine Pronoun complements of objects | le lui | 1 425 |
| PPOBJMP | Plurial Masculine Pronoun complements of objects | eux y | 212 |
| PPOBJFS | Singular Feminine Pronoun complements of objects | moi la | 358 |
| PPOBJFP | Plurial Feminine Pronoun complements of objects | en y | 70 |
| PPER1S | Personal Pronoun First Person Singular | je | 571 |
| PPER2S | Personal Pronoun Second Person Singular | tu | 19 |
| PPER3MS | Personal Pronoun Third Person Masculine Singular | il | 3 938 |
| PPER3MP | Personal Pronoun Third Person Masculine Plurial | ils | 513 |
| PPER3FS | Personal Pronoun Third Person Feminine Singular | elle | 992 |
| PPER3FP | Personal Pronoun Third Person Feminine Plurial | elles | 121 |
| PREFS | Reflexive Pronouns First Person of Singular | me m' | 120 |
| PREF | Reflexive Pronouns Third Person of Singular | se s' | 2 337 |
| PREFP | Reflexive Pronouns First / Second Person of Plurial | nous vous | 686 |
| VERB | Verb | obtient | 21 131 |
| VPPMS | Singular Masculine Participle Past Verb | formulé | 6 275 |
| VPPMP | Plurial Masculine Participle Past Verb | classés | 1 352 |
| VPPFS | Singular Feminine Participle Past Verb | appelée | 2 434 |
| VPPFP | Plurial Feminine Participle Past Verb | sanctionnées | 813 |
| VPPRE | Present participle | étant | 2 |
| DET | Determinant | les l' | 25 206 |
| DETMS | Singular Masculine Determinant | les | 15 444 |
| DETFS | Singular Feminine Determinant | la | 10 978 |
| ADJ | Adjective | capable sérieux | 1 075 |
| ADJMS | Singular Masculine Adjective | grand important | 8 338 |
| ADJMP | Plurial Masculine Adjective | grands petits | 3 274 |
| ADJFS | Singular Feminine Adjective | franéaise petite | 8 004 |
| ADJFP | Plurial Feminine Adjective | légéres petites | 3 041 |
| NOUN | Noun | temps | 1 389 |
| NMS | Singular Masculine Noun | drapeau | 29 698 |
| NMP | Plurial Masculine Noun | journalistes | 10 882 |
| NFS | Singular Feminine Noun | téte | 25 414 |
| NFP | Plurial Feminine Noun | ondes | 7 448 |
| PREL | Relative Pronoun | qui dont | 2 976 |
| PRELMS | Singular Masculine Relative Pronoun | lequel | 94 |
| PRELMP | Plurial Masculine Relative Pronoun | lesquels | 29 |
| PRELFS | Singular Feminine Relative Pronoun | laquelle | 70 |
| PRELFP | Plurial Feminine Relative Pronoun | lesquelles | 25 |
| PINTFS | Singular Feminine Interrogative Pronoun | laquelle | 3 |
| INTJ | Interjection | merci bref | 75 |
| CHIF | Numbers | 1979 10 | 10 417 |
| SYM | Symbol | é % | 705 |
| YPFOR | Endpoint | . | 15 088 |
| PUNCT | Ponctuation | : , | 28 918 |
| MOTINC | Unknown words | Technology Lady | 2 022 |
| X | Typos & others | sfeir 3D statu | 175 |
### Data Splits
| | Train | Dev | Test |
|:------------------:|:------:|:------:|:-----:|
| # Docs | 14 449 | 1 476 | 416 |
| Avg # Tokens / Doc | 24.54 | 24.19 | 24.08 |
## Dataset Creation
### Curation Rationale
[Needs More Information]
### Source Data
#### Initial Data Collection and Normalization
[Needs More Information]
#### Who are the source language producers?
[Needs More Information]
### Annotations
#### Annotation process
[Needs More Information]
#### Who are the annotators?
[Needs More Information]
### Personal and Sensitive Information
The corpora is free of personal or sensitive information since it has been based on `Wikipedia` articles content.
## Considerations for Using the Data
### Social Impact of Dataset
[Needs More Information]
### Discussion of Biases
The nature of the corpora introduce various biases such as the names of the streets which are temporaly based and can therefore introduce named entity like author or event names. For example, street names such as `Rue Victor-Hugo` or `Rue Pasteur` doesn't exist before the 20's century in France.
### Other Known Limitations
[Needs More Information]
## Additional Information
### Dataset Curators
__ANTILLES__: Labrak Yanis, Dufour Richard
__UD_FRENCH-GSD__: de Marneffe Marie-Catherine, Guillaume Bruno, McDonald Ryan, Suhr Alane, Nivre Joakim, Grioni Matias, Dickerson Carly, Perrier Guy
__Universal Dependency__: Ryan McDonald, Joakim Nivre, Yvonne Quirmbach-Brundage, Yoav Goldberg, Dipanjan Das, Kuzman Ganchev, Keith Hall, Slav Petrov, Hao Zhang, Oscar Tackstrom, Claudia Bedini, Nuria Bertomeu Castello and Jungmee Lee
### Licensing Information
```plain
For the following languages
German, Spanish, French, Indonesian, Italian, Japanese, Korean and Brazilian
Portuguese
we will distinguish between two portions of the data.
1. The underlying text for sentences that were annotated. This data Google
asserts no ownership over and no copyright over. Some or all of these
sentences may be copyrighted in some jurisdictions. Where copyrighted,
Google collected these sentences under exceptions to copyright or implied
license rights. GOOGLE MAKES THEM AVAILABLE TO YOU 'AS IS', WITHOUT ANY
WARRANTY OF ANY KIND, WHETHER EXPRESS OR IMPLIED.
2. The annotations -- part-of-speech tags and dependency annotations. These are
made available under a CC BY-SA 4.0. GOOGLE MAKES
THEM AVAILABLE TO YOU 'AS IS', WITHOUT ANY WARRANTY OF ANY KIND, WHETHER
EXPRESS OR IMPLIED. See attached LICENSE file for the text of CC BY-NC-SA.
Portions of the German data were sampled from the CoNLL 2006 Tiger Treebank
data. Hans Uszkoreit graciously gave permission to use the underlying
sentences in this data as part of this release.
Any use of the data should reference the above plus:
Universal Dependency Annotation for Multilingual Parsing
Ryan McDonald, Joakim Nivre, Yvonne Quirmbach-Brundage, Yoav Goldberg,
Dipanjan Das, Kuzman Ganchev, Keith Hall, Slav Petrov, Hao Zhang,
Oscar Tackstrom, Claudia Bedini, Nuria Bertomeu Castello and Jungmee Lee
Proceedings of ACL 2013
```
### Citation Information
Please cite the following paper when using this model.
ANTILLES extended corpus:
```latex
@inproceedings{labrak:hal-03696042,
TITLE = {{ANTILLES: An Open French Linguistically Enriched Part-of-Speech Corpus}},
AUTHOR = {Labrak, Yanis and Dufour, Richard},
URL = {https://hal.archives-ouvertes.fr/hal-03696042},
BOOKTITLE = {{25th International Conference on Text, Speech and Dialogue (TSD)}},
ADDRESS = {Brno, Czech Republic},
PUBLISHER = {{Springer}},
YEAR = {2022},
MONTH = Sep,
KEYWORDS = {Part-of-speech corpus ; POS tagging ; Open tools ; Word embeddings ; Bi-LSTM ; CRF ; Transformers},
PDF = {https://hal.archives-ouvertes.fr/hal-03696042/file/ANTILLES_A_freNch_linguisTIcaLLy_Enriched_part_of_Speech_corpus.pdf},
HAL_ID = {hal-03696042},
HAL_VERSION = {v1},
}
```
UD_French-GSD corpora:
```latex
@misc{
universaldependencies,
title={UniversalDependencies/UD_French-GSD},
url={https://github.com/UniversalDependencies/UD_French-GSD}, journal={GitHub},
author={UniversalDependencies}
}
```
{U}niversal {D}ependency Annotation for Multilingual Parsing:
```latex
@inproceedings{mcdonald-etal-2013-universal,
title = "{U}niversal {D}ependency Annotation for Multilingual Parsing",
author = {McDonald, Ryan and
Nivre, Joakim and
Quirmbach-Brundage, Yvonne and
Goldberg, Yoav and
Das, Dipanjan and
Ganchev, Kuzman and
Hall, Keith and
Petrov, Slav and
Zhang, Hao and
T{\"a}ckstr{\"o}m, Oscar and
Bedini, Claudia and
Bertomeu Castell{\'o}, N{\'u}ria and
Lee, Jungmee},
booktitle = "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = aug,
year = "2013",
address = "Sofia, Bulgaria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P13-2017",
pages = "92--97",
}
```
LIA TAGG:
```latex
@techreport{LIA_TAGG,
author = {Frédéric Béchet},
title = {LIA_TAGG: a statistical POS tagger + syntactic bracketer},
institution = {Aix-Marseille University & CNRS},
year = {2001}
}
```
| [
-0.5143780708312988,
-0.5619504451751709,
0.06665552407503128,
0.2585592567920685,
-0.15657345950603485,
0.1734684407711029,
-0.29194211959838867,
-0.3321177363395691,
0.7015437483787537,
0.39438000321388245,
-0.5369210243225098,
-0.9150920510292053,
-0.5751474499702454,
0.2936149835586548... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
qanastek/ELRC-Medical-V2 | qanastek | 2022-10-24T17:15:17Z | 16 | 7 | null | [
"task_categories:translation",
"annotations_creators:machine-generated",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:multilingual",
"size_categories:100K<n<1M",
"source_datasets:extended",
"language:en",
"language:bg",
"language:cs",
"language:da",
"lan... | 2022-10-24T17:15:17Z | 2022-03-02T23:29:22.000Z | 2022-03-02T23:29:22 | ---
annotations_creators:
- machine-generated
- expert-generated
language_creators:
- found
language:
- en
- bg
- cs
- da
- de
- el
- es
- et
- fi
- fr
- ga
- hr
- hu
- it
- lt
- lv
- mt
- nl
- pl
- pt
- ro
- sk
- sl
- sv
multilinguality:
- multilingual
pretty_name: ELRC-Medical-V2
size_categories:
- 100K<n<1M
source_datasets:
- extended
task_categories:
- translation
task_ids:
- translation
---
# ELRC-Medical-V2 : European parallel corpus for healthcare machine translation
## Table of Contents
- [Dataset Card for [Needs More Information]](#dataset-card-for-needs-more-information)
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
- [Who are the source language producers?](#who-are-the-source-language-producers)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
## Dataset Description
- **Homepage:** https://live.european-language-grid.eu/catalogue/project/2209
- **Repository:** https://github.com/qanastek/ELRC-Medical-V2/
- **Paper:** [Needs More Information]
- **Leaderboard:** [Needs More Information]
- **Point of Contact:** [Yanis Labrak](mailto:yanis.labrak@univ-avignon.fr)
### Dataset Summary
`ELRC-Medical-V2` is a parallel corpus for neural machine translation funded by the [European Commission](http://www.lr-coordination.eu/) and coordinated by the [German Research Center for Artificial Intelligence](https://www.dfki.de/web).
### Supported Tasks and Leaderboards
`translation`: The dataset can be used to train a model for translation.
### Languages
In our case, the corpora consists of a pair of source and target sentences for 23 differents languages from the European Union (EU) with as source language in each cases english (EN).
**List of languages :** `Bulgarian (bg)`,`Czech (cs)`,`Danish (da)`,`German (de)`,`Greek (el)`,`Spanish (es)`,`Estonian (et)`,`Finnish (fi)`,`French (fr)`,`Irish (ga)`,`Croatian (hr)`,`Hungarian (hu)`,`Italian (it)`,`Lithuanian (lt)`,`Latvian (lv)`,`Maltese (mt)`,`Dutch (nl)`,`Polish (pl)`,`Portuguese (pt)`,`Romanian (ro)`,`Slovak (sk)`,`Slovenian (sl)`,`Swedish (sv)`.
## Load the dataset with HuggingFace
```python
from datasets import load_dataset
NAME = "qanastek/ELRC-Medical-V2"
dataset = load_dataset(NAME, use_auth_token=True)
print(dataset)
dataset_train = load_dataset(NAME, "en-es", split='train[:90%]')
dataset_test = load_dataset(NAME, "en-es", split='train[10%:]')
print(dataset_train)
print(dataset_train[0])
print(dataset_test)
```
## Dataset Structure
### Data Instances
```plain
id,lang,source_text,target_text
1,en-bg,"TOC \o ""1-3"" \h \z \u Introduction 3","TOC \o ""1-3"" \h \z \u Въведение 3"
2,en-bg,The international humanitarian law and its principles are often not respected.,Международното хуманитарно право и неговите принципи често не се зачитат.
3,en-bg,"At policy level, progress was made on several important initiatives.",На равнище политики напредък е постигнат по няколко важни инициативи.
```
### Data Fields
**id** : The document identifier of type `Integer`.
**lang** : The pair of source and target language of type `String`.
**source_text** : The source text of type `String`.
**target_text** : The target text of type `String`.
### Data Splits
| Lang | # Docs | Avg. # Source Tokens | Avg. # Target Tokens |
|--------|-----------|------------------------|------------------------|
| bg | 13 149 | 23 | 24 |
| cs | 13 160 | 23 | 21 |
| da | 13 242 | 23 | 22 |
| de | 13 291 | 23 | 22 |
| el | 13 091 | 23 | 26 |
| es | 13 195 | 23 | 28 |
| et | 13 016 | 23 | 17 |
| fi | 12 942 | 23 | 16 |
| fr | 13 149 | 23 | 28 |
| ga | 412 | 12 | 12 |
| hr | 12 836 | 23 | 21 |
| hu | 13 025 | 23 | 21 |
| it | 13 059 | 23 | 25 |
| lt | 12 580 | 23 | 18 |
| lv | 13 044 | 23 | 19 |
| mt | 3 093 | 16 | 14 |
| nl | 13 191 | 23 | 25 |
| pl | 12 761 | 23 | 22 |
| pt | 13 148 | 23 | 26 |
| ro | 13 163 | 23 | 25 |
| sk | 12 926 | 23 | 20 |
| sl | 13 208 | 23 | 21 |
| sv | 13 099 | 23 | 21 |
|||||
| Total | 277 780 | 22.21 | 21.47 |
## Dataset Creation
### Curation Rationale
For details, check the corresponding [pages](https://elrc-share.eu/repository/search/?q=mfsp%3A87ef9e5e8ac411ea913100155d026706e19a1a9f908b463c944490c36ba2f454&page=3).
### Source Data
#### Initial Data Collection and Normalization
The acquisition of bilingual data (from multilingual websites), normalization, cleaning, deduplication and identification of parallel documents have been done by [ILSP-FC tool](http://nlp.ilsp.gr/redmine/projects/ilsp-fc/wiki/Introduction). [Maligna aligner](https://github.com/loomchild/maligna) was used for alignment of segments. Merging/filtering of segment pairs has also been applied.
#### Who are the source language producers?
Every data of this corpora as been uploaded by [Vassilis Papavassiliou](mailto:vpapa@ilsp.gr) on [ELRC-Share](https://elrc-share.eu/repository/browse/bilingual-corpus-from-the-publications-office-of-the-eu-on-the-medical-domain-v2-en-fr/6b31b32e8ac411ea913100155d0267061547d9b3ec284584af19a2953baa8937/).
### Personal and Sensitive Information
The corpora is free of personal or sensitive information.
## Considerations for Using the Data
### Other Known Limitations
The nature of the task introduce a variability in the quality of the target translations.
## Additional Information
### Dataset Curators
__ELRC-Medical-V2__: Labrak Yanis, Dufour Richard
__Bilingual corpus from the Publications Office of the EU on the medical domain v.2 (EN-XX) Corpus__: [Vassilis Papavassiliou](mailto:vpapa@ilsp.gr) and [others](https://live.european-language-grid.eu/catalogue/project/2209).
### Licensing Information
<a rel="license" href="https://elrc-share.eu/static/metashare/licences/CC-BY-4.0.pdf"><img alt="Attribution 4.0 International (CC BY 4.0) License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="https://elrc-share.eu/static/metashare/licences/CC-BY-4.0.pdf">Attribution 4.0 International (CC BY 4.0) License</a>.
### Citation Information
Please cite the following paper when using this model.
```latex
@inproceedings{losch-etal-2018-european,
title = European Language Resource Coordination: Collecting Language Resources for Public Sector Multilingual Information Management,
author = {
L'osch, Andrea and
Mapelli, Valérie and
Piperidis, Stelios and
Vasiljevs, Andrejs and
Smal, Lilli and
Declerck, Thierry and
Schnur, Eileen and
Choukri, Khalid and
van Genabith, Josef
},
booktitle = Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018),
month = may,
year = 2018,
address = Miyazaki, Japan,
publisher = European Language Resources Association (ELRA),
url = https://aclanthology.org/L18-1213,
}
```
| [
-0.4476202428340912,
-0.3352988660335541,
0.22863039374351501,
0.23354105651378632,
-0.2731429636478424,
0.005926682613790035,
-0.3218778073787689,
-0.34442615509033203,
0.3478426933288574,
0.41249075531959534,
-0.5120294690132141,
-0.9123725891113281,
-0.7297516465187073,
0.52490997314453... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
valurank/hate-multi | valurank | 2022-10-25T09:57:06Z | 16 | 0 | null | [
"task_categories:text-classification",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:derived",
"language:en",
"license:other",
"region:us"
] | 2022-10-25T09:57:06Z | 2022-03-02T23:29:22.000Z | 2022-03-02T23:29:22 | ---
language:
- en
license: other
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
source_datasets:
- derived
task_categories:
- text-classification
---
# Dataset Card for hate-multi
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Dataset Creation](#dataset-creation)
- [Source Data](#source-data)
## Dataset Description
### Dataset Summary
This dataset contains a collection of text labeled as hate speech (class 1) or not (class 0).
## Dataset Creation
The dataset was creating by aggregating multiple publicly available datasets.
### Source Data
The following datasets were used:
* https://huggingface.co/datasets/hate_speech18 - Filtered to remove examples labeled as 'idk/skip', 'relation'
* https://huggingface.co/datasets/hate_speech_offensive - Tweet text cleaned by lower casing, removing mentions and urls. Dropped instanced labeled as 'offensive language'
* https://huggingface.co/datasets/ucberkeley-dlab/measuring-hate-speech - Tweet text cleaned by lower casing, removing mentions and urls. Dropped instanced with hatespeech == 1
| [
-0.6390488147735596,
-0.5434549450874329,
-0.18180440366268158,
0.22487623989582062,
-0.3668885827064514,
0.2724262773990631,
-0.18529470264911652,
-0.33152833580970764,
0.5443993806838989,
0.2963600754737854,
-0.8955249786376953,
-0.8776156902313232,
-0.9663757085800171,
0.031388003379106... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
vershasaxena91/squad_multitask | vershasaxena91 | 2021-05-06T09:29:54Z | 16 | 0 | null | [
"region:us"
] | 2021-05-06T09:29:54Z | 2022-03-02T23:29:22.000Z | 2022-03-02T23:29:22 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
wanyu/IteraTeR_full_sent | wanyu | 2022-10-24T18:58:37Z | 16 | 0 | null | [
"task_categories:text2text-generation",
"annotations_creators:crowdsourced",
"language_creators:found",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:apache-2.0",
"conditional-text-generation",
"text-editing",
"arxiv:2203.03802",
"region:us"
] | 2022-10-24T18:58:37Z | 2022-03-13T19:29:50.000Z | 2022-03-13T19:29:50 | ---
annotations_creators:
- crowdsourced
language_creators:
- found
language:
- en
license:
- apache-2.0
multilinguality:
- monolingual
source_datasets:
- original
task_categories:
- text2text-generation
task_ids: []
pretty_name: IteraTeR_full_sent
language_bcp47:
- en-US
tags:
- conditional-text-generation
- text-editing
---
Paper: [Understanding Iterative Revision from Human-Written Text](https://arxiv.org/abs/2203.03802)
Authors: Wanyu Du, Vipul Raheja, Dhruv Kumar, Zae Myung Kim, Melissa Lopez, Dongyeop Kang
Github repo: https://github.com/vipulraheja/IteraTeR
| [
-0.07486411929130554,
-0.4984821677207947,
0.7287435531616211,
0.13309934735298157,
-0.333004891872406,
0.2274867594242096,
-0.261161744594574,
-0.25514981150627136,
0.011397454887628555,
0.7972139120101929,
-0.6495078206062317,
-0.40806397795677185,
-0.23328807950019836,
0.230274885892868... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
wanyu/IteraTeR_full_doc | wanyu | 2022-10-24T18:58:30Z | 16 | 1 | null | [
"task_categories:text2text-generation",
"annotations_creators:crowdsourced",
"language_creators:found",
"multilinguality:monolingual",
"source_datasets:original",
"language:en",
"license:apache-2.0",
"conditional-text-generation",
"text-editing",
"arxiv:2203.03802",
"region:us"
] | 2022-10-24T18:58:30Z | 2022-03-13T20:41:13.000Z | 2022-03-13T20:41:13 | ---
annotations_creators:
- crowdsourced
language_creators:
- found
language:
- en
license:
- apache-2.0
multilinguality:
- monolingual
source_datasets:
- original
task_categories:
- text2text-generation
task_ids: []
pretty_name: IteraTeR_full_doc
language_bcp47:
- en-US
tags:
- conditional-text-generation
- text-editing
---
Paper: [Understanding Iterative Revision from Human-Written Text](https://arxiv.org/abs/2203.03802)
Authors: Wanyu Du, Vipul Raheja, Dhruv Kumar, Zae Myung Kim, Melissa Lopez, Dongyeop Kang
Github repo: https://github.com/vipulraheja/IteraTeR
| [
-0.07486411184072495,
-0.4984819293022156,
0.7287437319755554,
0.13309958577156067,
-0.3330046236515045,
0.22748687863349915,
-0.2611616849899292,
-0.25515037775039673,
0.011397427879273891,
0.7972144484519958,
-0.6495076417922974,
-0.40806400775909424,
-0.23328770697116852,
0.230275005102... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
benjaminbeilharz/ed-for-lm | benjaminbeilharz | 2022-03-19T12:58:35Z | 16 | 0 | null | [
"region:us"
] | 2022-03-19T12:58:35Z | 2022-03-13T21:16:24.000Z | 2022-03-13T21:16:24 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
GEM-submissions/lewtun__this-is-a-test__1647256250 | GEM-submissions | 2022-03-14T11:10:55Z | 16 | 0 | null | [
"benchmark:gem",
"evaluation",
"benchmark",
"region:us"
] | 2022-03-14T11:10:55Z | 2022-03-14T11:10:54.000Z | 2022-03-14T11:10:54 | ---
benchmark: gem
type: prediction
submission_name: This is a test
tags:
- evaluation
- benchmark
---
# GEM Submission
Submission name: This is a test
| [
-0.01583682745695114,
-0.9654787182807922,
0.5841941833496094,
0.1292470544576645,
-0.28037282824516296,
0.4549468457698822,
0.18859517574310303,
0.3502408564090729,
0.47759607434272766,
0.4162292778491974,
-1.146683692932129,
-0.13004909455776215,
-0.4930274486541748,
0.040180496871471405... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
rubrix/research_papers_multi-label | rubrix | 2022-03-17T11:29:02Z | 16 | 2 | null | [
"region:us"
] | 2022-03-17T11:29:02Z | 2022-03-17T11:28:57.000Z | 2022-03-17T11:28:57 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
malteos/test-ds | malteos | 2022-10-25T10:03:23Z | 16 | 0 | null | [
"task_categories:text-retrieval",
"multilinguality:monolingual",
"size_categories:unknown",
"region:us"
] | 2022-10-25T10:03:23Z | 2022-03-18T10:02:26.000Z | 2022-03-18T10:02:26 | ---
annotations_creators: []
language_creators: []
language:
- en-US
license: []
multilinguality:
- monolingual
pretty_name: test ds
size_categories:
- unknown
source_datasets: []
task_categories:
- text-retrieval
task_ids: []
---
# Dataset Card for [Dataset Name]
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
[More Information Needed]
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
| [
-0.47841677069664,
-0.5084842443466187,
0.14602938294410706,
0.278889000415802,
-0.21702472865581512,
0.24832050502300262,
-0.3366999328136444,
-0.3758932054042816,
0.6720380783081055,
0.6457639932632446,
-0.9167346358299255,
-1.2200127840042114,
-0.7551794052124023,
0.07273735105991364,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
emrecan/nli_tr_for_simcse | emrecan | 2023-01-25T16:56:04Z | 16 | 0 | null | [
"task_categories:text-classification",
"task_ids:semantic-similarity-scoring",
"task_ids:text-scoring",
"size_categories:100K<n<1M",
"source_datasets:nli_tr",
"language:tr",
"region:us"
] | 2023-01-25T16:56:04Z | 2022-03-22T12:01:59.000Z | 2022-03-22T12:01:59 | ---
language:
- tr
size_categories:
- 100K<n<1M
source_datasets:
- nli_tr
task_categories:
- text-classification
task_ids:
- semantic-similarity-scoring
- text-scoring
---
# NLI-TR for Supervised SimCSE
This dataset is a modified version of [NLI-TR](https://huggingface.co/datasets/nli_tr) dataset. Its intended use is to train Supervised [SimCSE](https://github.com/princeton-nlp/SimCSE) models for sentence-embeddings. Steps followed to produce this dataset are listed below:
1. Merge train split of snli_tr and multinli_tr subsets.
2. Find every premise that has an entailment hypothesis **and** a contradiction hypothesis.
3. Write found triplets into sent0 (premise), sent1 (entailment hypothesis), hard_neg (contradiction hypothesis) format.
See this [Colab Notebook](https://colab.research.google.com/drive/1Ysq1SpFOa7n1X79x2HxyWjfKzuR_gDQV?usp=sharing) for training and evaluation on Turkish sentences. | [
-0.38619139790534973,
-0.6645225882530212,
0.49943646788597107,
0.43545809388160706,
-0.2426803559064865,
-0.24390383064746857,
0.05543427914381027,
-0.04776817560195923,
0.4625641405582428,
0.7222681045532227,
-0.8443042039871216,
-0.6143420338630676,
-0.2756311297416687,
0.26969772577285... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
huggan/edges2shoes | huggan | 2022-04-12T14:18:05Z | 16 | 0 | null | [
"region:us"
] | 2022-04-12T14:18:05Z | 2022-03-23T16:12:59.000Z | 2022-03-23T16:12:59 | # Citation
```
@article{pix2pix2017,
title={Image-to-Image Translation with Conditional Adversarial Networks},
author={Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A},
journal={CVPR},
year={2017}
}
``` | [
0.03880864009261131,
-0.28023239970207214,
0.36057421565055847,
0.03225378319621086,
-0.39171460270881653,
-0.6022403240203857,
-0.17084580659866333,
-0.5091608762741089,
-0.09093120694160461,
0.23600906133651733,
-0.13457730412483215,
-0.44220826029777527,
-0.988401472568512,
0.1408354341... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
IIC/spanish_biomedical_crawled_corpus_splitted | IIC | 2022-10-23T05:25:14Z | 16 | 0 | null | [
"task_ids:language-modeling",
"annotations_creators:no-annotation",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:1M<n<10M",
"source_datasets:IIC/spanish_biomedical_crawled_corpus",
"language:es",
"arxiv:2109.07765",
"region:us"
] | 2022-10-23T05:25:14Z | 2022-03-27T18:51:49.000Z | 2022-03-27T18:51:49 | ---
annotations_creators:
- no-annotation
language_creators:
- crowdsourced
language:
- es
multilinguality:
- monolingual
pretty_name: Spanish_Biomedical_Crawled_Corpus_Splitted
size_categories:
- 1M<n<10M
source_datasets:
- IIC/spanish_biomedical_crawled_corpus
task_categories:
- sequence-modeling
task_ids:
- language-modeling
---
# Spanish_Biomedical_Crawled_Corpus_Splitted
This is a dataset retrieved directly from [this link](https://zenodo.org/record/5510033#.Ykho3-hByUk), which was originally developed by [BSC](https://temu.bsc.es/). This is a direct copy-paste of the usage, limitations and license of the original dataset:
```
Description
The largest Spanish biomedical and heath corpus to date gathered from a massive Spanish health domain crawler over more than 3,000 URLs were downloaded and preprocessed. The collected data have been preprocessed to produce the CoWeSe (Corpus Web Salud Español) resource, a large-scale and high-quality corpus intended for biomedical and health NLP in Spanish.
Directory structure
CoWeSe.txt: the CoWeSe corpus; an empty line separates each document
License
The corpus is released under this licensing scheme:
- We do not own any of the text from which these data has been extracted and preprocessed to be ready for use for language modeling tasks.
- We license the actual packaging of these data under a CC0 1.0 Universal License
Notice and take down policy
Notice: Should you consider that our data contains material that is owned by you and should therefore not be reproduced here, please:
Clearly identify yourself, with detailed contact data such as an address, telephone number or email address at which you can be contacted.
Clearly identify the copyrighted work claimed to be infringed.
Clearly identify the material that is claimed to be infringing and information reasonably sufficient to allow us to locate
Copyright (c) 2021 Text Mining Unit at BSC
```
License, distribution and usage conditions of the original dataset apply.
### Contributions
Thanks to [@avacaondata](https://huggingface.co/avacaondata), [@alborotis](https://huggingface.co/alborotis), [@albarji](https://huggingface.co/albarji), [@Dabs](https://huggingface.co/Dabs), [@GuillemGSubies](https://huggingface.co/GuillemGSubies) for adding this dataset.
### Citation
```
@misc{carrino2021spanish,
title={Spanish Biomedical Crawled Corpus: A Large, Diverse Dataset for Spanish Biomedical Language Models},
author={Casimiro Pio Carrino and Jordi Armengol-Estapé and Ona de Gibert Bonet and Asier Gutiérrez-Fandiño and Aitor Gonzalez-Agirre and Martin Krallinger and Marta Villegas},
year={2021},
eprint={2109.07765},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
``` | [
-0.09767252206802368,
-0.4603375196456909,
0.23643462359905243,
0.5314623117446899,
-0.2728487253189087,
0.10832149535417557,
-0.009123585186898708,
-0.49649178981781006,
0.7602941989898682,
0.5973565578460693,
-0.47129353880882263,
-0.9994534254074097,
-0.6112870573997498,
0.3472873568534... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
laion/laion2B-multi-joined | laion | 2022-04-01T01:23:57Z | 16 | 2 | null | [
"license:cc-by-4.0",
"region:us"
] | 2022-04-01T01:23:57Z | 2022-03-29T22:03:23.000Z | 2022-03-29T22:03:23 | ---
license: cc-by-4.0
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
hackathon-pln-es/scientific_papers_en | hackathon-pln-es | 2022-04-03T23:54:33Z | 16 | 0 | null | [
"region:us"
] | 2022-04-03T23:54:33Z | 2022-04-03T23:44:19.000Z | 2022-04-03T23:44:19 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
miracFence/scientific_papers_en_es | miracFence | 2022-04-03T23:59:08Z | 16 | 0 | null | [
"region:us"
] | 2022-04-03T23:59:08Z | 2022-04-03T23:48:11.000Z | 2022-04-03T23:48:11 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
jet-universe/jetclass | jet-universe | 2022-05-27T19:00:45Z | 16 | 2 | null | [
"license:mit",
"arxiv:2202.03772",
"region:us"
] | 2022-05-27T19:00:45Z | 2022-04-05T07:32:22.000Z | 2022-04-05T07:32:22 | ---
license: mit
---
# Dataset Card for JetClass
## Table of Contents
- [Dataset Card for [Dataset Name]](#dataset-card-for-dataset-name)
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
- [Who are the source language producers?](#who-are-the-source-language-producers)
- [Annotations](#annotations)
- [Annotation process](#annotation-process)
- [Who are the annotators?](#who-are-the-annotators)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:**
- **Repository:** https://github.com/jet-universe/particle_transformer
- **Paper:** https://arxiv.org/abs/2202.03772
- **Leaderboard:**
- **Point of Contact:** [Huilin Qu](mailto:huilin.qu@cern.ch)
### Dataset Summary
JetClass is a large and comprehensive dataset to advance deep learning for jet tagging. The dataset consists of 100 million jets for training, with 10 different types of jets. The jets in this dataset generally fall into two categories:
* The background jets are initiated by light quarks or gluons (q/g) and are ubiquitously produced at the
LHC.
* The signal jets are those arising either from the top quarks (t), or from the W, Z or Higgs (H) bosons. For top quarks and Higgs bosons, we further consider their different decay modes as separate types, because the resulting jets have rather distinct characteristics and are often tagged individually.
Jets in this dataset are simulated with standard Monte Carlo event generators used by LHC experiments. The production and decay of the top quarks and the W, Z and Higgs bosons are generated with MADGRAPH5_aMC@NLO. We use PYTHIA to evolve the produced particles, i.e., performing parton showering and hadronization, and produce the final outgoing particles. To be close to realistic jets reconstructed at the ATLAS or CMS experiment, detector effects are simulated with DELPHES using the CMS detector configuration provided in DELPHES. In addition, the impact parameters of electrically charged particles are smeared to match the resolution of the CMS tracking detector . Jets are clustered from DELPHES E-Flow objects with the anti-kT algorithm using a distance
parameter R = 0.8. Only jets with transverse momentum in 500–1000 GeV and pseudorapidity |η| < 2 are considered. For signal jets, only the “high-quality” ones that fully contain the decay products of initial particles are included.
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
If you use the JetClass dataset, please cite:
```
@article{Qu:2022mxj,
author = "Qu, Huilin and Li, Congqiao and Qian, Sitian",
title = "{Particle Transformer for Jet Tagging}",
eprint = "2202.03772",
archivePrefix = "arXiv",
primaryClass = "hep-ph",
month = "2",
year = "2022"
}
```
### Contributions
Thanks to [@lewtun](https://github.com/lewtun) for adding this dataset.
| [
-0.7045315504074097,
-0.3924593925476074,
0.07515658438205719,
-0.318525105714798,
-0.18662431836128235,
0.347307950258255,
-0.23256759345531464,
-0.20280976593494415,
0.2735643684864044,
0.4330170154571533,
-0.6850733160972595,
-0.6160762906074524,
-0.6182646751403809,
-0.0904203653335571... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
met/Meti_try | met | 2022-04-05T12:42:25Z | 16 | 0 | null | [
"license:apache-2.0",
"region:us"
] | 2022-04-05T12:42:25Z | 2022-04-05T12:41:41.000Z | 2022-04-05T12:41:41 | ---
license: apache-2.0
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
billray110/corpus-of-diverse-styles | billray110 | 2022-10-22T00:52:53Z | 16 | 3 | null | [
"task_categories:text-classification",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:10M<n<100M",
"arxiv:2010.05700",
"region:us"
] | 2022-10-22T00:52:53Z | 2022-04-21T01:13:59.000Z | 2022-04-21T01:13:59 | ---
annotations_creators: []
language_creators:
- found
language: []
license: []
multilinguality:
- monolingual
pretty_name: Corpus of Diverse Styles
size_categories:
- 10M<n<100M
source_datasets: []
task_categories:
- text-classification
task_ids: []
---
# Dataset Card for Corpus of Diverse Styles
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
## Disclaimer
I am not the original author of the paper that presents the Corpus of Diverse Styles. I uploaded the dataset to HuggingFace as a convenience.
## Dataset Description
- **Homepage:** http://style.cs.umass.edu/
- **Repository:** https://github.com/martiansideofthemoon/style-transfer-paraphrase
- **Paper:** https://arxiv.org/abs/2010.05700
### Dataset Summary
A new benchmark dataset that contains 15M
sentences from 11 diverse styles.
To create CDS, we obtain data from existing academic
research datasets and public APIs or online collections
like Project Gutenberg. We choose
styles that are easy for human readers to identify at
a sentence level (e.g., Tweets or Biblical text). While
prior benchmarks involve a transfer between two
styles, CDS has 110 potential transfer directions.
### Citation Information
```
@inproceedings{style20,
author={Kalpesh Krishna and John Wieting and Mohit Iyyer},
Booktitle = {Empirical Methods in Natural Language Processing},
Year = "2020",
Title={Reformulating Unsupervised Style Transfer as Paraphrase Generation},
}
``` | [
-0.4549407958984375,
-0.5575083494186401,
0.1831454187631607,
0.6544502377510071,
-0.5050097703933716,
0.1778680384159088,
-0.5462867617607117,
-0.2829624116420746,
0.3474734127521515,
0.6402711868286133,
-0.48465055227279663,
-0.9587672352790833,
-0.5815582871437073,
0.43837442994117737,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
mwong/climatetext-climate_evidence-claim-related-evaluation | mwong | 2022-10-25T10:08:48Z | 16 | 1 | null | [
"task_categories:text-classification",
"task_ids:fact-checking",
"annotations_creators:crowdsourced",
"language_creators:crowdsourced",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:extended|climate_text",
"language:en",
"license:cc-by-sa-3.0",
"license:gpl-3.0",
"... | 2022-10-25T10:08:48Z | 2022-04-21T09:55:30.000Z | 2022-04-21T09:55:30 | ---
annotations_creators:
- crowdsourced
language_creators:
- crowdsourced
language:
- en
license:
- cc-by-sa-3.0
- gpl-3.0
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
source_datasets:
- extended|climate_text
task_categories:
- text-classification
task_ids:
- fact-checking
---
### Dataset Summary
This dataset is extracted from Climate Text dataset (https://www.sustainablefinance.uzh.ch/en/research/climate-fever/climatext.html), pre-processed and, ready to evaluate.
The evaluation objective is a text classification task - given a claim and climate related evidence, predict if claim is related to evidence. | [
-0.15002110600471497,
-0.4870069622993469,
0.33572444319725037,
0.12615741789340973,
-0.2509671151638031,
-0.11754948645830154,
-0.18590298295021057,
-0.33893048763275146,
0.037458647042512894,
0.8886210918426514,
-0.5313149690628052,
-0.5900936722755432,
-0.7372861504554749,
0.13033074140... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
strombergnlp/dkstance | strombergnlp | 2022-10-25T21:45:42Z | 16 | 1 | dast | [
"task_categories:text-classification",
"task_ids:fact-checking",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:da",
"license:cc-by-4.0",
"stance-detection",
"region:us"
] | 2022-10-25T21:45:42Z | 2022-04-28T10:07:39.000Z | 2022-04-28T10:07:39 | ---
annotations_creators:
- expert-generated
language_creators:
- found
language:
- da
license:
- cc-by-4.0
multilinguality:
- monolingual
size_categories:
- 1K<n<10K
source_datasets:
- original
task_categories:
- text-classification
task_ids:
- fact-checking
paperswithcode_id: dast
pretty_name: DAST
extra_gated_prompt: 'Warning: the data in this repository contains harmful content
(misinformative claims).'
tags:
- stance-detection
---
# Dataset Card for "dkstance / DAST"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://stromberg.ai/publication/jointrumourstanceandveracity/](https://stromberg.ai/publication/jointrumourstanceandveracity/)
- **Repository:** [https://figshare.com/articles/dataset/Danish_stance-annotated_Reddit_dataset/8217137](https://figshare.com/articles/dataset/Danish_stance-annotated_Reddit_dataset/8217137)
- **Paper:** [https://aclanthology.org/W19-6122/](https://aclanthology.org/W19-6122/)
- **Point of Contact:** [Leon Derczynski](https://github.com/leondz)
- **Size of downloaded dataset files:**
- **Size of the generated dataset:**
- **Total amount of disk used:**
### Dataset Summary
This is an SDQC stance-annotated Reddit dataset for the Danish language generated within a thesis project. The dataset consists of over 5000 comments structured as comment trees and linked to 33 source posts.
The dataset is applicable for supervised stance classification and rumour veracity prediction.
### Supported Tasks and Leaderboards
* Stance prediction
### Languages
## Dataset Structure
### Data Instances
#### DAST / dkstance
- **Size of downloaded dataset files:** 4.72 MiB
- **Size of the generated dataset:** 3.69 MiB
- **Total amount of disk used:** 8.41 MiB
An example of 'train' looks as follows.
```
{
'id': '1',
'native_id': 'ebwjq5z',
'text': 'Med de udfordringer som daginstitutionerne har med normeringer, og økonomi i det hele taget, synes jeg det er en vanvittig beslutning at prioritere skattebetalt vegansk kost i daginstitutionerne. Brug dog pengene på noget mere personale, og lad folk selv betale for deres individuelle kostønsker.',
'parent_id': 'a6o3us',
'parent_text': 'Mai Mercado om mad i daginstitutioner: Sund kost rimer ikke på veganer-mad',
'parent_stance': 0,
'source_id': 'a6o3us',
'source_text': 'Mai Mercado om mad i daginstitutioner: Sund kost rimer ikke på veganer-mad',
'source_stance': 0
}
```
### Data Fields
- `id`: a `string` feature.
- `native_id`: a `string` feature representing the native ID of the entry.
- `text`: a `string` of the comment text in which stance is annotated.
- `parent_id`: the `native_id` of this comment's parent.
- `parent_text`: a `string` of the parent comment's text.
- `parent_stance`: the label of the stance in the comment towards its parent comment.
```
0: "Supporting",
1: "Denying",
2: "Querying",
3: "Commenting",
```
- `source_id`: the `native_id` of this comment's source / post.
- `source_text`: a `string` of the source / post text.
- `source_stance`: the label of the stance in the comment towards the original source post.
```
0: "Supporting",
1: "Denying",
2: "Querying",
3: "Commenting",
```
### Data Splits
| name |size|
|---------|----:|
|train|3122|
|validation|1066|
|test|1060|
These splits are specified after the original reserach was reported. The splits add an extra level of rigour, in that no source posts' comment tree is spread over more than one partition.
## Dataset Creation
### Curation Rationale
Comments around rumourous claims to enable rumour and stance analysis in Danish
### Source Data
#### Initial Data Collection and Normalization
The data is from Reddit posts that relate to one of a specific set of news stories; these stories are enumerated in the paper.
#### Who are the source language producers?
Danish-speaking Twitter users.
### Annotations
#### Annotation process
There was multi-user annotation process mediated through a purpose-built interface for annotating stance in Reddit threads.
#### Who are the annotators?
* Age: 20-30.
* Gender: male.
* Race/ethnicity: white northern European.
* Native language: Danish.
* Socioeconomic status: higher education student.
### Personal and Sensitive Information
The data was public at the time of collection. User names are not preserved.
## Considerations for Using the Data
### Social Impact of Dataset
There's a risk of user-deleted content being in this data. The data has NOT been vetted for any content, so there's a risk of harmful text.
### Discussion of Biases
The source of the text has a strong demographic bias, being mostly young white men who are vocal their opinions. This constrains both the styles of language and discussion contained in the data, as well as the topics discussed and viewpoints held.
### Other Known Limitations
The above limitations apply.
## Additional Information
### Dataset Curators
The dataset is curated by the paper's authors.
### Licensing Information
The authors distribute this data under Creative Commons attribution license, CC-BY 4.0.
An NLP data statement is included in the paper describing the work, [https://aclanthology.org/W19-6122.pdf](https://aclanthology.org/W19-6122.pdf)
### Citation Information
```
@inproceedings{lillie-etal-2019-joint,
title = "Joint Rumour Stance and Veracity Prediction",
author = "Lillie, Anders Edelbo and
Middelboe, Emil Refsgaard and
Derczynski, Leon",
booktitle = "Proceedings of the 22nd Nordic Conference on Computational Linguistics",
month = sep # "{--}" # oct,
year = "2019",
address = "Turku, Finland",
publisher = {Link{\"o}ping University Electronic Press},
url = "https://aclanthology.org/W19-6122",
pages = "208--221",
}
```
### Contributions
Author-added dataset [@leondz](https://github.com/leondz)
| [
-0.5692445039749146,
-0.5533444881439209,
0.38400208950042725,
0.14466089010238647,
-0.4558156132698059,
0.012491978704929352,
-0.32317137718200684,
-0.40078073740005493,
0.5977234244346619,
0.25925812125205994,
-0.7510152459144592,
-0.9701517820358276,
-0.6946932077407837,
0.2367185950279... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
strombergnlp/polstance | strombergnlp | 2022-10-25T21:42:18Z | 16 | 1 | polstance | [
"task_categories:text-classification",
"task_ids:sentiment-analysis",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:n<1K",
"source_datasets:original",
"language:da",
"license:cc-by-4.0",
"stance-detection",
"region:us"
] | 2022-10-25T21:42:18Z | 2022-04-28T10:08:13.000Z | 2022-04-28T10:08:13 | ---
annotations_creators:
- expert-generated
language_creators:
- found
language:
- da
license:
- cc-by-4.0
multilinguality:
- monolingual
size_categories:
- n<1K
source_datasets:
- original
task_categories:
- text-classification
task_ids:
- sentiment-analysis
paperswithcode_id: polstance
pretty_name: Political Stance for Danish
tags:
- stance-detection
---
# Dataset Card for "polstance"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://stromberg.ai/publication/politicalstanceindanish/](https://stromberg.ai/publication/politicalstanceindanish/)
- **Repository:** [https://github.com/StrombergNLP/Political-Stance-in-Danish/](https://github.com/StrombergNLP/Political-Stance-in-Danish/)
- **Paper:** [https://aclanthology.org/W19-6121/](https://aclanthology.org/W19-6121/)
- **Point of Contact:** [Leon Derczynski](https://github.com/leondz)
- **Size of downloaded dataset files:** 548 KB
- **Size of the generated dataset:** 222 KB
- **Total amount of disk used:** 770 KB
### Dataset Summary
Political stance in Danish. Examples represent statements by
politicians and are annotated for, against, or neutral to a given topic/article.
### Supported Tasks and Leaderboards
*
### Languages
Danish, bcp47: `da-DK`
## Dataset Structure
### Data Instances
#### polstance
An example of 'train' looks as follows.
```
{
'id': '0',
'topic': 'integration',
'quote': 'Der kunne jeg godt tænke mig, at der stod mere eksplicit, at de (landene, red.) skal bekæmpe menneskesmuglere og tage imod deres egne borgere',
'label': 2,
'quoteID': '516',
'party': 'Det Konservative Folkeparti',
'politician': 'Naser Khader',
}
```
### Data Fields
- `id`: a `string` feature.
- `topic`: a `string` expressing a topic.
- `quote`: a `string` to be classified for its stance to the topic.
- `label`: a class label representing the stance the text expresses towards the target. Full tagset with indices:
```
0: "against",
1: "neutral",
2: "for",
```
- `quoteID`: a `string` of the internal quote ID.
- `party`: a `string` describing the party affiliation of the quote utterer at the time of utterance.
- `politician`: a `string` naming the politician who uttered the quote.
### Data Splits
| name |train|
|---------|----:|
|polstance|900 sentences|
## Dataset Creation
### Curation Rationale
Collection of quotes from politicians to allow detecting how political quotes orient to issues.
### Source Data
#### Initial Data Collection and Normalization
The data is taken from proceedings of the Danish parliament, the Folketing - [ft.dk](https://ft.dk).
#### Who are the source language producers?
Danish polticians
### Annotations
#### Annotation process
Annotators labelled comments for being against, neutral, or for a specified topic
#### Who are the annotators?
Danish native speakers, 20s, male, studying Software Design.
### Personal and Sensitive Information
The data was public at the time of collection and will remain open public record by law in Denmark.
## Considerations for Using the Data
### Social Impact of Dataset
### Discussion of Biases
### Other Known Limitations
The above limitations apply.
## Additional Information
### Dataset Curators
The dataset is curated by the paper's authors.
### Licensing Information
The authors distribute this data under Creative Commons attribution license, CC-BY 4.0.
### Citation Information
```
@inproceedings{lehmann2019political,
title={Political Stance in Danish},
author={Lehmann, Rasmus and Derczynski, Leon},
booktitle={Proceedings of the 22nd Nordic Conference on Computational Linguistics},
pages={197--207},
year={2019}
}
```
### Contributions
Author-added dataset [@leondz](https://github.com/leondz)
| [
-0.6612501740455627,
-0.5602015852928162,
0.2825671136379242,
0.11475233733654022,
-0.5535857677459717,
0.1116689145565033,
-0.6110903024673462,
0.022224562242627144,
0.5834716558456421,
0.48347100615501404,
-0.44649773836135864,
-1.0923343896865845,
-0.7756621837615967,
0.0923460498452186... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
JEFFREY-VERDIERE/Creditcard | JEFFREY-VERDIERE | 2022-04-30T13:41:41Z | 16 | 0 | null | [
"region:us"
] | 2022-04-30T13:41:41Z | 2022-04-30T13:40:21.000Z | 2022-04-30T13:40:21 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
dlwh/wikitext_2_detokenized | dlwh | 2022-05-05T20:16:18Z | 16 | 0 | null | [
"region:us"
] | 2022-05-05T20:16:18Z | 2022-05-05T20:16:17.000Z | 2022-05-05T20:16:17 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
IljaSamoilov/ERR-transcription-to-subtitles | IljaSamoilov | 2022-05-09T18:29:16Z | 16 | 0 | null | [
"license:afl-3.0",
"region:us"
] | 2022-05-09T18:29:16Z | 2022-05-09T15:30:37.000Z | 2022-05-09T15:30:37 | ---
license: afl-3.0
---
This dataset is created by Ilja Samoilov. In dataset is tv show subtitles from ERR and transcriptions of those shows created with TalTech ASR.
```
from datasets import load_dataset, load_metric
dataset = load_dataset('csv', data_files={'train': "train.tsv", \
"validation":"val.tsv", \
"test": "test.tsv"}, delimiter='\t')
``` | [
0.06609731167554855,
-0.2410234957933426,
-0.182118222117424,
-0.1270209699869156,
-0.17879438400268555,
0.3417450785636902,
-0.14471444487571716,
0.3265436291694641,
0.3215351104736328,
0.671272873878479,
-0.8115436434745789,
-0.444291889667511,
-0.5839765667915344,
0.3341309428215027,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
rajeshvarma/QA_on_SLA | rajeshvarma | 2022-10-25T05:31:01Z | 16 | 0 | null | [
"annotations_creators:no-annotations",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:original",
"language:en",
"license:apache-2.0",
"region:us"
] | 2022-10-25T05:31:01Z | 2022-05-18T14:14:49.000Z | 2022-05-18T14:14:49 | ---
annotations_creators:
- no-annotations
language_creators:
- found
language:
- en
license:
- apache-2.0
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
source_datasets:
- original
task_categories:
- conditional-text-generation
task_ids:
- summarization
---
| [
-0.12853363156318665,
-0.18616768717765808,
0.6529127955436707,
0.49436265230178833,
-0.19319328665733337,
0.23607468605041504,
0.3607196807861328,
0.05056334659457207,
0.5793654918670654,
0.7400138974189758,
-0.6508101224899292,
-0.2378396987915039,
-0.710224986076355,
-0.0478258803486824... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
tomekkorbak/pile-chunk-toxicity-scored-3 | tomekkorbak | 2022-05-20T18:40:31Z | 16 | 0 | null | [
"region:us"
] | 2022-05-20T18:40:31Z | 2022-05-20T12:48:15.000Z | 2022-05-20T12:48:15 | A chunk 3 of the Pile (2.2m documents) scored using the Perspective API (on May 18-20 2022) | [
-0.41522178053855896,
-0.44953009486198425,
0.8264727592468262,
0.7073258757591248,
-0.5049349665641785,
-0.3810937702655792,
0.6523556113243103,
-0.9716464877128601,
0.5502991080284119,
0.9932330846786499,
-0.48929333686828613,
-0.36013275384902954,
-0.642824649810791,
-0.2339043915271759... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
myvision/filtered_synthetic | myvision | 2022-05-26T03:11:09Z | 16 | 0 | null | [
"region:us"
] | 2022-05-26T03:11:09Z | 2022-05-25T16:13:46.000Z | 2022-05-25T16:13:46 | Entry not found | [
-0.32276493310928345,
-0.22568407654762268,
0.8622258901596069,
0.43461474776268005,
-0.5282987952232361,
0.7012965083122253,
0.7915716171264648,
0.07618629932403564,
0.7746024131774902,
0.25632184743881226,
-0.7852815985679626,
-0.22573812305927277,
-0.9104483723640442,
0.5715669393539429... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
spoiled/ecqa_classify_5 | spoiled | 2022-05-26T03:53:16Z | 16 | 0 | null | [
"region:us"
] | 2022-05-26T03:53:16Z | 2022-05-26T03:52:20.000Z | 2022-05-26T03:52:20 | Entry not found | [
-0.32276493310928345,
-0.22568407654762268,
0.8622258901596069,
0.43461474776268005,
-0.5282987952232361,
0.7012965083122253,
0.7915716171264648,
0.07618629932403564,
0.7746024131774902,
0.25632184743881226,
-0.7852815985679626,
-0.22573812305927277,
-0.9104483723640442,
0.5715669393539429... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
ErenHali/disaster_edited | ErenHali | 2022-05-26T13:41:41Z | 16 | 0 | null | [
"license:afl-3.0",
"region:us"
] | 2022-05-26T13:41:41Z | 2022-05-26T13:28:41.000Z | 2022-05-26T13:28:41 | ---
license: afl-3.0
---
annotations_creators:
- expert-generated
language_creators:
- found
languages:
- en
licenses:
- mit
multilinguality:
- monolingual
paperswithcode_id: acronym-identification
pretty_name: disaster
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- token-classification
task_ids: [] | [
-0.5376501679420471,
-0.3342498540878296,
0.3281243145465851,
0.691193699836731,
-0.1562620848417282,
0.29281923174858093,
-0.04061892256140709,
-0.34679949283599854,
0.44519418478012085,
0.7188196778297424,
-0.4085044860839844,
-0.5087035894393921,
-0.9008381366729736,
0.42749232053756714... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
silver/personal_dialog | silver | 2022-07-10T13:05:21Z | 16 | 15 | personaldialog | [
"task_categories:conversational",
"task_ids:dialogue-generation",
"annotations_creators:no-annotation",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:10M<n<100M",
"source_datasets:original",
"language:zh",
"license:other",
"arxiv:1901.09672",
"region:us"
] | 2022-07-10T13:05:21Z | 2022-05-29T14:23:58.000Z | 2022-05-29T14:23:58 | ---
annotations_creators:
- no-annotation
language_creators:
- found
language:
- zh
license:
- other
multilinguality:
- monolingual
paperswithcode_id: personaldialog
pretty_name: "PersonalDialog"
size_categories:
- 10M<n<100M
source_datasets:
- original
task_categories:
- conversational
task_ids:
- dialogue-generation
---
# Dataset Card for PersonalDialog
## Table of Contents
- [Dataset Card for PersonalDialog](#dataset-card-for-personaldialog)
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
- [Who are the source language producers?](#who-are-the-source-language-producers)
- [Annotations](#annotations)
- [Annotation process](#annotation-process)
- [Who are the annotators?](#who-are-the-annotators)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://www.zhengyinhe.com/datasets/
- **Repository:** https://github.com/silverriver/PersonalDilaog
- **Paper:** https://arxiv.org/abs/1901.09672
### Dataset Summary
The PersonalDialog dataset is a large-scale multi-turn Chinese dialogue dataset containing various traits from a large number of speakers.
We are releasing about 5M sessions of carefully filtered dialogues.
Each utterance in PersonalDialog is associated with a speaker marked with traits like Gender, Location, Interest Tags.
### Supported Tasks and Leaderboards
- dialogue-generation: The dataset can be used to train a model for generating dialogue responses.
- response-retrieval: The dataset can be used to train a reranker model that can be used to implement a retrieval-based dialogue model.
### Languages
PersonalDialog is in Chinese
PersonalDialog中的对话是中文的
## Dataset Structure
### Data Instances
`train` split:
```json
{
"dialog": ["那么 晚", "加班 了 刚 到 家 呀 !", "吃饭 了 么", "吃 过 了 !"],
"profile": [
{
"tag": ["间歇性神经病", "爱笑的疯子", "他们说我犀利", "爱做梦", "自由", "旅游", "学生", "双子座", "好性格"],
"loc": "福建 厦门", "gender": "male"
}, {
"tag": ["设计师", "健康养生", "热爱生活", "善良", "宅", "音樂", "时尚"],
"loc": "山东 济南", "gender": "male"
}
],
"uid": [0, 1, 0, 1],
}
```
`dev` and `test` split:
```json
{
"dialog": ["没 人性 啊 !", "可以 来 组织 啊", "来 上海 陪姐 打 ?"],
"profile": [
{"tag": [""], "loc": "上海 浦东新区", "gender": "female"},
{"tag": ["嘉庚", "keele", "leicester", "UK", "泉州五中"], "loc": "福建 泉州", "gender": "male"},
],
"uid": [0, 1, 0],
"responder_profile": {"tag": ["嘉庚", "keele", "leicester", "UK", "泉州五中"], "loc": "福建 泉州", "gender": "male"},
"golden_response": "吴经理 派车来 小 泉州 接 么 ?",
"is_biased": true,
}
```
### Data Fields
- `dialog` (list of strings): List of utterances consisting of a dialogue.
- `profile` (list of dicts): List of profiles associated with each speaker.
- `tag` (list of strings): List of tags associated with each speaker.
- `loc` (string): Location of each speaker.
- `gender` (string): Gender of each speaker.
- `uid` (list of int): Speaker id for each utterance in the dialogue.
- `responder_profile` (dict): Profile of the responder. (Only available in `dev` and `test` split)
- `golden_response` (str): Response of the responder. (Only available in `dev` and `test` split)
- `id_biased` (bool): Whether the dialogue is guranteed to be persona related or not. (Only available in `dev` and `test` split)
### Data Splits
|train|valid|test|
|---:|---:|---:|
|5,438,165 | 10,521 | 10,523 |
## Dataset Creation
### Curation Rationale
[Needs More Information]
### Source Data
#### Initial Data Collection and Normalization
[Needs More Information]
#### Who are the source language producers?
[Needs More Information]
### Annotations
#### Annotation process
[Needs More Information]
#### Who are the annotators?
[Needs More Information]
### Personal and Sensitive Information
[Needs More Information]
## Considerations for Using the Data
### Social Impact of Dataset
[Needs More Information]
### Discussion of Biases
[Needs More Information]
### Other Known Limitations
[Needs More Information]
## Additional Information
### Dataset Curators
[Needs More Information]
### Licensing Information
other-weibo
This dataset is collected from Weibo.
You can refer to the [detailed policy](https://weibo.com/signup/v5/privacy) required to use this dataset.
Please restrict the usage of this dataset to non-commerical purposes.
### Citation Information
```bibtex
@article{zheng2019personalized,
title = {Personalized dialogue generation with diversified traits},
author = {Zheng, Yinhe and Chen, Guanyi and Huang, Minlie and Liu, Song and Zhu, Xuan},
journal = {arXiv preprint arXiv:1901.09672},
year = {2019}
}
@inproceedings{zheng2020pre,
title = {A pre-training based personalized dialogue generation model with persona-sparse data},
author = {Zheng, Yinhe and Zhang, Rongsheng and Huang, Minlie and Mao, Xiaoxi},
booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence},
volume = {34},
number = {05},
pages = {9693--9700},
year = {2020}
}
```
### Contributions
Thanks to [Yinhe Zheng](https://github.com/silverriver) for adding this dataset.
| [
-0.5004129409790039,
-0.8652406930923462,
0.1935138702392578,
0.10928408801555634,
-0.0014674485428258777,
0.0808592140674591,
-0.3351922035217285,
-0.19338522851467133,
0.2841356694698334,
0.5960014462471008,
-1.02409827709198,
-0.7269601225852966,
-0.24724629521369934,
0.2036321461200714... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
danielhou13/cogs402dataset | danielhou13 | 2022-08-19T01:51:13Z | 16 | 0 | null | [
"region:us"
] | 2022-08-19T01:51:13Z | 2022-05-29T19:34:14.000Z | 2022-05-29T19:34:14 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
koudeheld/beatles_lyrics | koudeheld | 2022-06-02T13:12:13Z | 16 | 0 | null | [
"region:us"
] | 2022-06-02T13:12:13Z | 2022-06-02T12:58:24.000Z | 2022-06-02T12:58:24 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Taeham/wav2vec2-ksponspeech-test | Taeham | 2022-06-12T09:30:23Z | 16 | 0 | null | [
"region:us"
] | 2022-06-12T09:30:23Z | 2022-06-12T09:29:34.000Z | 2022-06-12T09:29:34 | Entry not found | [
-0.3227649927139282,
-0.225684255361557,
0.862226128578186,
0.43461498618125916,
-0.5282987952232361,
0.7012963891029358,
0.7915717363357544,
0.07618629932403564,
0.7746025919914246,
0.2563219666481018,
-0.7852816581726074,
-0.2257382869720459,
-0.9104480743408203,
0.5715669393539429,
-0... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
ScandEval/absabank-imm-mini | ScandEval | 2023-07-05T09:50:27Z | 16 | 0 | null | [
"task_categories:text-classification",
"size_categories:1K<n<10K",
"language:sv",
"license:cc-by-4.0",
"region:us"
] | 2023-07-05T09:50:27Z | 2022-06-14T18:30:48.000Z | 2022-06-14T18:30:48 | ---
license: cc-by-4.0
task_categories:
- text-classification
language:
- sv
size_categories:
- 1K<n<10K
--- | [
-0.12853367626667023,
-0.18616794049739838,
0.6529126763343811,
0.4943627417087555,
-0.19319313764572144,
0.23607443273067474,
0.36071979999542236,
0.05056338757276535,
0.5793654322624207,
0.7400138974189758,
-0.6508103013038635,
-0.23783987760543823,
-0.710224986076355,
-0.047825977206230... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
angie-chen55/javascript-github-code | angie-chen55 | 2022-06-15T08:08:09Z | 16 | 0 | null | [
"region:us"
] | 2022-06-15T08:08:09Z | 2022-06-15T07:36:45.000Z | 2022-06-15T07:36:45 | Entry not found | [
-0.3227649927139282,
-0.225684255361557,
0.862226128578186,
0.43461498618125916,
-0.5282987952232361,
0.7012963891029358,
0.7915717363357544,
0.07618629932403564,
0.7746025919914246,
0.2563219666481018,
-0.7852816581726074,
-0.2257382869720459,
-0.9104480743408203,
0.5715669393539429,
-0... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Rizqi/emotion-raw | Rizqi | 2022-06-17T07:44:57Z | 16 | 2 | null | [
"license:afl-3.0",
"region:us"
] | 2022-06-17T07:44:57Z | 2022-06-17T07:43:53.000Z | 2022-06-17T07:43:53 | ---
license: afl-3.0
---
| [
-0.12853367626667023,
-0.18616794049739838,
0.6529126763343811,
0.4943627417087555,
-0.19319313764572144,
0.23607443273067474,
0.36071979999542236,
0.05056338757276535,
0.5793654322624207,
0.7400138974189758,
-0.6508103013038635,
-0.23783987760543823,
-0.710224986076355,
-0.047825977206230... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
c17hawke/test-xml-data | c17hawke | 2022-06-18T05:42:06Z | 16 | 0 | null | [
"region:us"
] | 2022-06-18T05:42:06Z | 2022-06-18T04:13:25.000Z | 2022-06-18T04:13:25 | # Dataset Card for [Dataset Name]
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
- **license:** gpl-3.0
### Dataset Summary
[More Information Needed]
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset. | [
-0.4542044699192047,
-0.4492812752723694,
0.10422442853450775,
0.33749160170555115,
-0.3467874526977539,
0.21058735251426697,
-0.3966537117958069,
-0.31098121404647827,
0.5062375664710999,
0.6518647074699402,
-0.850616991519928,
-1.2370346784591675,
-0.7639541625976562,
0.04857935756444931... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
EddieChen372/javascript-medium | EddieChen372 | 2022-06-18T09:25:04Z | 16 | 0 | null | [
"region:us"
] | 2022-06-18T09:25:04Z | 2022-06-18T09:07:52.000Z | 2022-06-18T09:07:52 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
mounikaiiith/Telugu_Emotion | mounikaiiith | 2022-07-04T15:04:59Z | 16 | 1 | null | [
"license:cc-by-4.0",
"region:us"
] | 2022-07-04T15:04:59Z | 2022-06-19T12:09:17.000Z | 2022-06-19T12:09:17 | ---
license: cc-by-4.0
---
Do cite the below reference for using the dataset:
@article{marreddy2022resource, title={Am I a Resource-Poor Language? Data Sets, Embeddings, Models and Analysis for four different NLP tasks in Telugu Language},
author={Marreddy, Mounika and Oota, Subba Reddy and Vakada, Lakshmi Sireesha and Chinni, Venkata Charan and Mamidi, Radhika},
journal={Transactions on Asian and Low-Resource Language Information Processing}, publisher={ACM New York, NY} }
If you want to use the four classes (angry, happy, sad and fear) from the dataset, do cite the below reference:
@article{marreddy2022multi,
title={Multi-Task Text Classification using Graph Convolutional Networks for Large-Scale Low Resource Language},
author={Marreddy, Mounika and Oota, Subba Reddy and Vakada, Lakshmi Sireesha and Chinni, Venkata Charan and Mamidi, Radhika},
journal={arXiv preprint arXiv:2205.01204},
year={2022}
}
| [
-0.13960868120193481,
-0.32709741592407227,
-0.03976903110742569,
0.367902934551239,
-0.17717702686786652,
-0.09779290109872818,
-0.3719780743122101,
-0.17955884337425232,
0.09550157189369202,
0.3932946026325226,
-0.07973968982696533,
-0.22396862506866455,
-0.4952733814716339,
0.5358126759... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
mounikaiiith/Telugu-Sarcasm | mounikaiiith | 2022-07-04T15:06:49Z | 16 | 0 | null | [
"license:cc-by-4.0",
"region:us"
] | 2022-07-04T15:06:49Z | 2022-06-19T12:15:20.000Z | 2022-06-19T12:15:20 | ---
license: cc-by-4.0
---
Do cite the below references for using the dataset:
@article{marreddy2022resource, title={Am I a Resource-Poor Language? Data Sets, Embeddings, Models and Analysis for four different NLP tasks in Telugu Language},
author={Marreddy, Mounika and Oota, Subba Reddy and Vakada, Lakshmi Sireesha and Chinni, Venkata Charan and Mamidi, Radhika},
journal={Transactions on Asian and Low-Resource Language Information Processing}, publisher={ACM New York, NY} }
@article{marreddy2022multi,
title={Multi-Task Text Classification using Graph Convolutional Networks for Large-Scale Low Resource Language},
author={Marreddy, Mounika and Oota, Subba Reddy and Vakada, Lakshmi Sireesha and Chinni, Venkata Charan and Mamidi, Radhika},
journal={arXiv preprint arXiv:2205.01204},
year={2022}
}
| [
-0.08982960134744644,
-0.36450788378715515,
0.1729285567998886,
0.31009534001350403,
-0.09565744549036026,
-0.2962803542613983,
-0.2550356686115265,
-0.12069175392389297,
0.0697687640786171,
0.5304949879646301,
-0.1776314377784729,
-0.15556158125400543,
-0.4126558303833008,
0.5733964443206... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
sudo-s/herbier_mesuem1 | sudo-s | 2022-06-19T14:17:17Z | 16 | 0 | null | [
"region:us"
] | 2022-06-19T14:17:17Z | 2022-06-19T14:16:54.000Z | 2022-06-19T14:16:54 | Entry not found | [
-0.32276487350463867,
-0.22568444907665253,
0.8622263073921204,
0.43461570143699646,
-0.5282988548278809,
0.7012969255447388,
0.7915717363357544,
0.07618642598390579,
0.7746027112007141,
0.25632190704345703,
-0.7852815389633179,
-0.22573848068714142,
-0.910447895526886,
0.5715675354003906,... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
mahfooz/lm_for_stt_mix_cv | mahfooz | 2022-06-19T19:36:37Z | 16 | 0 | null | [
"region:us"
] | 2022-06-19T19:36:37Z | 2022-06-19T14:28:31.000Z | 2022-06-19T14:28:31 | Entry not found | [
-0.32276487350463867,
-0.22568444907665253,
0.8622263073921204,
0.43461570143699646,
-0.5282988548278809,
0.7012969255447388,
0.7915717363357544,
0.07618642598390579,
0.7746027112007141,
0.25632190704345703,
-0.7852815389633179,
-0.22573848068714142,
-0.910447895526886,
0.5715675354003906,... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Nexdata/American_Children_Speech_Data_By_Mobile_Phone | Nexdata | 2023-11-10T07:29:35Z | 16 | 0 | null | [
"task_categories:automatic-speech-recognition",
"language:en",
"region:us"
] | 2023-11-10T07:29:35Z | 2022-06-22T06:28:10.000Z | 2022-06-22T06:28:10 | ---
task_categories:
- automatic-speech-recognition
language:
- en
---
# Dataset Card for Nexdata/American_Children_Speech_Data_By_Mobile_Phone
## Description
The data is recorded by 290 children from the U.S.A, with a balanced male-female ratio. The recorded content of the data mainly comes from children's books and textbooks, which are in line with children's language usage habits. The recording environment is relatively quiet indoors, the text is manually transferred with high accuracy.
For more details, please refer to the link: https://www.nexdata.ai/datasets/1197?source=Huggingface
# Specifications
## Format
16kHz, 16bit, uncompressed wav, mono channel
## Recording environment
quiet indoor environment, without echo
## Recording content (read speech)
children's books and textbooks
## Demographics
286 American children, 53% of which are female, all children are 5-12 years old
## Device
Android mobile phone, iPhone
## Language
American English
## Application scenarios
speech recognition; voiceprint recognition.
## Accuracy rate
95% of sentence accuracy
# Licensing Information
Commercial License | [
-0.3652772307395935,
-0.4452008605003357,
-0.193954199552536,
0.2320145219564438,
-0.19469958543777466,
0.0193620752543211,
0.22851194441318512,
-0.5185620784759521,
0.17364394664764404,
0.26264262199401855,
-0.8013948798179626,
-0.6648069620132446,
-0.4370735287666321,
-0.1008774712681770... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
malteos/wechsel_de | malteos | 2022-07-30T18:57:02Z | 16 | 0 | null | [
"task_categories:text-generation",
"task_ids:language-modeling",
"task_ids:masked-language-modeling",
"size_categories:100k<n<1M",
"language:de",
"region:us"
] | 2022-07-30T18:57:02Z | 2022-06-24T08:13:38.000Z | 2022-06-24T08:13:38 | ---
language:
- de
task_categories:
- text-generation
size_categories:
- 100k<n<1M
task_ids:
- language-modeling
- masked-language-modeling
---
German validation dataset from WECHSEL () to evaluate LLM perplexity.
JSON-line files (on JSON object per line):
- `valid.json.gz`: Gzipped validation set as generated by the paper (163,698 docs)
- `valid.random_1636.json.gz`: Random 1% (1636 docs) of the validation set
| [
-0.41939690709114075,
-0.5772333741188049,
0.6858906149864197,
0.07420902699232101,
-0.44109493494033813,
-0.1735374927520752,
0.04674983024597168,
0.061518363654613495,
-0.14280082285404205,
0.6825868487358093,
-1.0438127517700195,
-0.962846577167511,
-0.7423292994499207,
0.41615134477615... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Nexdata/Living_Face_Anti-Spoofing_Data | Nexdata | 2023-08-31T02:18:53Z | 16 | 1 | null | [
"region:us"
] | 2023-08-31T02:18:53Z | 2022-06-27T08:38:04.000Z | 2022-06-27T08:38:04 | ---
YAML tags:
- copy-paste the tags obtained with the tagging app: https://github.com/huggingface/datasets-tagging
---
# Dataset Card for Nexdata/Living_Face_Anti-Spoofing_Data
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://www.nexdata.ai/datasets/971?source=Huggingface
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
1,056 People Living_face & Anti-Spoofing Data. The collection scenes include indoor and outdoor scenes. The data includes male and female. The age distribution ranges from juvenile to the elderly, the young people and the middle aged are the majorities. The data includes multiple postures, multiple expressions, and multiple anti-spoofing samples. The data can be used for tasks such as face payment, remote ID authentication, and face unlocking of mobile phone.
For more details, please refer to the link: https://www.nexdata.ai/datasets/971?source=Huggingface
### Supported Tasks and Leaderboards
face-detection, computer-vision: The dataset can be used to train a model for face detection.
### Languages
English
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
Commerical License: https://drive.google.com/file/d/1saDCPm74D4UWfBL17VbkTsZLGfpOQj1J/view?usp=sharing
### Citation Information
[More Information Needed]
### Contributions
| [
-0.5814986228942871,
-0.8833613991737366,
0.0423126257956028,
0.46614065766334534,
-0.11632674187421799,
0.057673774659633636,
0.07742884010076523,
-0.603861927986145,
0.8676086068153381,
0.6672228574752808,
-0.8621706962585449,
-0.8019587993621826,
-0.6532359719276428,
0.02524197287857532... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Nexdata/3D_Living_Face_Anti_Spoofing_Data | Nexdata | 2023-08-31T02:45:50Z | 16 | 0 | null | [
"region:us"
] | 2023-08-31T02:45:50Z | 2022-06-27T08:42:33.000Z | 2022-06-27T08:42:33 | ---
YAML tags:
- copy-paste the tags obtained with the tagging app: https://github.com/huggingface/datasets-tagging
---
# Dataset Card for Nexdata/3D_Living_Face_Anti_Spoofing_Data
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://www.nexdata.ai/datasets/1089?source=Huggingface
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
1,417 People – 3D Living_Face & Anti_Spoofing Data. The collection scenes include indoor and outdoor scenes. The dataset includes males and females. The age distribution ranges from juvenile to the elderly, the young people and the middle aged are the majorities. The device includes iPhone X, iPhone XR. The data diversity includes various expressions, facial postures, anti-spoofing samples, multiple light conditions, multiple scenes. This data can be used for tasks such as 3D face recognition, 3D Living_Face & Anti_Spoofing.
For more details, please refer to the link: https://www.nexdata.ai/datasets/1089?source=Huggingface
### Supported Tasks and Leaderboards
face-detection, computer-vision: The dataset can be used to train a model for face detection.
### Languages
English
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
Commerical License: https://drive.google.com/file/d/1saDCPm74D4UWfBL17VbkTsZLGfpOQj1J/view?usp=sharing
### Citation Information
[More Information Needed]
### Contributions | [
-0.6469585299491882,
-0.8638604283332825,
0.04325218126177788,
0.5021752119064331,
-0.09587346762418747,
0.07697594910860062,
0.16564790904521942,
-0.6565883755683899,
0.885969340801239,
0.6339783072471619,
-0.8053327798843384,
-0.8086786866188049,
-0.6285244822502136,
0.06420635432004929,... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Nexdata/Multi-race_7_Expressions_Recognition_Data | Nexdata | 2023-08-31T02:40:49Z | 16 | 0 | null | [
"region:us"
] | 2023-08-31T02:40:49Z | 2022-06-27T08:43:50.000Z | 2022-06-27T08:43:50 | ---
YAML tags:
- copy-paste the tags obtained with the tagging app: https://github.com/huggingface/datasets-tagging
---
# Dataset Card for Nexdata/Multi-race_7_Expressions_Recognition_Data
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://www.nexdata.ai/datasets/973?source=Huggingface
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
25,998 People Multi-race 7 Expressions Recognition Data. The data includes male and female. The age distribution ranges from child to the elderly, the young people and the middle aged are the majorities. For each person, 7 images were collected. The data diversity includes different facial postures, different expressions, different light conditions and different scenes. The data can be used for tasks such as face expression recognition.
For more details, please refer to the link: https://www.nexdata.ai/datasets/973?source=Huggingface
### Supported Tasks and Leaderboards
face-detection, computer-vision: The dataset can be used to train a model for face detection.
### Languages
English
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
Commerical License: https://drive.google.com/file/d/1saDCPm74D4UWfBL17VbkTsZLGfpOQj1J/view?usp=sharing
### Citation Information
[More Information Needed]
### Contributions
| [
-0.709244430065155,
-0.4940350651741028,
0.09766159951686859,
0.4047636091709137,
-0.18208087980747223,
0.19622033834457397,
-0.13965269923210144,
-0.6786660552024841,
0.7830847501754761,
0.38788068294525146,
-0.8121716976165771,
-0.8830440044403076,
-0.7090279459953308,
0.2942509949207306... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Nexdata/Multi-race_and_Multi-pose_Face_Images_Data | Nexdata | 2023-08-31T02:41:43Z | 16 | 1 | null | [
"region:us"
] | 2023-08-31T02:41:43Z | 2022-06-27T08:49:18.000Z | 2022-06-27T08:49:18 | ---
YAML tags:
- copy-paste the tags obtained with the tagging app: https://github.com/huggingface/datasets-tagging
---
# Dataset Card for Nexdata/Multi-race_and_Multi-pose_Face_Images_Data
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://www.nexdata.ai/datasets/1016?source=Huggingface
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
23,110 People Multi-race and Multi-pose Face Images Data. This data includes Asian race, Caucasian race, black race, brown race and Indians. Each subject were collected 29 images under different scenes and light conditions. The 29 images include 28 photos (multi light conditions, multiple poses and multiple scenes) + 1 ID photo. This data can be used for face recognition related tasks.
For more details, please refer to the link: https://www.nexdata.ai/datasets/1016?source=Huggingface
### Supported Tasks and Leaderboards
face-detection, computer-vision: The dataset can be used to train a model for face detection.
### Languages
English
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
Commerical License: https://drive.google.com/file/d/1saDCPm74D4UWfBL17VbkTsZLGfpOQj1J/view?usp=sharing
### Citation Information
[More Information Needed]
### Contributions | [
-0.7704541087150574,
-0.509636640548706,
0.0975731834769249,
0.4320056438446045,
-0.15401752293109894,
0.10533060133457184,
-0.09695156663656235,
-0.6087031364440918,
0.778935968875885,
0.5321471095085144,
-0.8115492463111877,
-0.8942006826400757,
-0.6741018295288086,
0.20535987615585327,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Nexdata/3D_Face_Recognition_Images_Data | Nexdata | 2023-08-31T02:45:01Z | 16 | 1 | null | [
"region:us"
] | 2023-08-31T02:45:01Z | 2022-06-27T08:55:51.000Z | 2022-06-27T08:55:51 | ---
YAML tags:
- copy-paste the tags obtained with the tagging app: https://github.com/huggingface/datasets-tagging
---
# Dataset Card for Nexdata/3D_Face_Recognition_Images_Data
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://www.nexdata.ai/datasets/1093?source=Huggingface
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
5,199 People – 3D Face Recognition Images Data. The collection scene is indoor scene. The dataset includes males and females. The age distribution ranges from juvenile to the elderly, the young people and the middle aged are the majorities. The device includes iPhone X, iPhone XR. The data diversity includes multiple facial postures, multiple light conditions, multiple indoor scenes. This data can be used for tasks such as 3D face recognition.
For more details, please refer to the link: https://www.nexdata.ai/datasets/1093?source=Huggingface
### Supported Tasks and Leaderboards
face-detection, computer-vision: The dataset can be used to train a model for face detection.
### Languages
English
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
Commerical License: https://drive.google.com/file/d/1saDCPm74D4UWfBL17VbkTsZLGfpOQj1J/view?usp=sharing
### Citation Information
[More Information Needed]
### Contributions
| [
-0.768442690372467,
-0.5377151966094971,
0.13061246275901794,
0.3537544310092926,
-0.06813475489616394,
-0.04536295309662819,
0.20517244935035706,
-0.661036491394043,
0.5373091101646423,
0.715377688407898,
-0.7461321353912354,
-0.8342283964157104,
-0.597037672996521,
0.25236544013023376,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Nexdata/3D_Facial_Expressions_Recognition_Data | Nexdata | 2023-08-31T02:44:33Z | 16 | 0 | null | [
"region:us"
] | 2023-08-31T02:44:33Z | 2022-06-27T08:57:14.000Z | 2022-06-27T08:57:14 | ---
YAML tags:
- copy-paste the tags obtained with the tagging app: https://github.com/huggingface/datasets-tagging
---
# Dataset Card for Nexdata/3D_Facial_Expressions_Recognition_Data
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://www.nexdata.ai/datasets/1097?source=Huggingface
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
4,458 People - 3D Facial Expressions Recognition Data. The collection scenes include indoor scenes and outdoor scenes. The dataset includes males and females. The age distribution ranges from juvenile to the elderly, the young people and the middle aged are the majorities. The device includes iPhone X, iPhone XR. The data diversity includes different expressions, different ages, different races, different collecting scenes. This data can be used for tasks such as 3D facial expression recognition.
For more details, please refer to the link: https://www.nexdata.ai/datasets/1097?source=Huggingface
### Supported Tasks and Leaderboards
face-detection, computer-vision: The dataset can be used to train a model for face detection.
### Languages
English
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
Commerical License: https://drive.google.com/file/d/1saDCPm74D4UWfBL17VbkTsZLGfpOQj1J/view?usp=sharing
### Citation Information
[More Information Needed]
### Contributions | [
-0.6262585520744324,
-0.6223310828208923,
0.10103385150432587,
0.39636877179145813,
-0.09361716359853745,
-0.045570649206638336,
0.14204035699367523,
-0.6823050379753113,
0.677699863910675,
0.5913069248199463,
-0.8064372539520264,
-0.8738443851470947,
-0.6373000741004944,
0.249900281429290... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Nexdata/Re-ID_Data_in_Real_Surveillance_Scenes | Nexdata | 2023-08-31T02:19:21Z | 16 | 0 | null | [
"region:us"
] | 2023-08-31T02:19:21Z | 2022-06-27T09:02:41.000Z | 2022-06-27T09:02:41 | ---
YAML tags:
- copy-paste the tags obtained with the tagging app: https://github.com/huggingface/datasets-tagging
---
# Dataset Card for Nexdata/Re-ID_Data_in_Real_Surveillance_Scenes
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://www.nexdata.ai/datasets/1160?source=Huggingface
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
10,000 People - Re-ID Data in Real Surveillance Scenes. The data includes indoor scenes and outdoor scenes. The data includes males and females, and the age distribution is from children to the elderly. The data diversity includes different age groups, different time periods, different shooting angles, different human body orientations and postures, clothing for different seasons. For annotation, the rectangular bounding boxes and 15 attributes of human body were annotated. This data can be used for re-id and other tasks.
For more details, please refer to the link: https://www.nexdata.ai/datasets/1160?source=Huggingface
### Supported Tasks and Leaderboards
face-detection, computer-vision: The dataset can be used to train a model for face detection.
### Languages
English
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
Commerical License: https://drive.google.com/file/d/1saDCPm74D4UWfBL17VbkTsZLGfpOQj1J/view?usp=sharing
### Citation Information
[More Information Needed]
### Contributions | [
-0.672085165977478,
-0.6240255832672119,
0.16777148842811584,
0.161707803606987,
-0.20412427186965942,
0.06971226632595062,
0.12703724205493927,
-0.5736097097396851,
0.7872243523597717,
0.6308761239051819,
-0.9785727858543396,
-0.9986441731452942,
-0.6076583862304688,
0.1868993639945984,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-6489fc46-7764981 | autoevaluate | 2022-06-27T09:23:45Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-06-27T09:23:45Z | 2022-06-27T09:23:19.000Z | 2022-06-27T09:23:19 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- glue
eval_info:
task: binary_classification
model: winegarj/distilbert-base-uncased-finetuned-sst2
metrics: []
dataset_name: glue
dataset_config: sst2
dataset_split: validation
col_mapping:
text: sentence
target: label
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Binary Text Classification
* Model: winegarj/distilbert-base-uncased-finetuned-sst2
* Dataset: glue
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@lewtun](https://huggingface.co/lewtun) for evaluating this model. | [
-0.1647535264492035,
-0.37943756580352783,
0.29816436767578125,
0.16994628310203552,
-0.09134899079799652,
-0.022365249693393707,
-0.046292100101709366,
-0.40361398458480835,
0.025912730023264885,
0.38039305806159973,
-0.8755883574485779,
-0.26699647307395935,
-0.770889401435852,
-0.082617... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
victor/real-or-fake-fake-jobposting-prediction | victor | 2022-06-28T16:05:26Z | 16 | 1 | null | [
"license:cc0-1.0",
"region:us"
] | 2022-06-28T16:05:26Z | 2022-06-28T16:03:38.000Z | 2022-06-28T16:03:38 | ---
license: cc0-1.0
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
SerdarHelli/SegmentationOfTeethPanoramicXRayImages | SerdarHelli | 2022-10-29T20:05:26Z | 16 | 7 | null | [
"task_categories:image-segmentation",
"task_ids:semantic-segmentation",
"size_categories:n<1K",
"teeth-segmentation",
"dental-imaging",
"medical-imaging",
"region:us"
] | 2022-10-29T20:05:26Z | 2022-06-29T21:07:00.000Z | 2022-06-29T21:07:00 | ---
size_categories:
- n<1K
task_categories:
- image-segmentation
task_ids:
- semantic-segmentation
tags:
- teeth-segmentation
- dental-imaging
- medical-imaging
train-eval-index:
- config: plain_text
task: semantic_segmentation
task_id: semantic_segmentation
splits:
train_split: train
eval_split: test
col_mapping:
image: image
label: image
---
# Dataset Card for [Dataset Name]
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [https://github.com/SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net](https://github.com/SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net)
- **Repository:** [https://github.com/SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net](https://github.com/SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net)
- **Paper:** [Tooth Instance Segmentation on Panoramic Dental Radiographs Using U-Nets and Morphological Processing](https://dergipark.org.tr/tr/pub/dubited/issue/68307/950568)
- **Leaderboard:**
- **Point of Contact:** S.Serdar Helli
### Dataset Summary
# Semantic-Segmentation-of-Teeth-in-Panoramic-X-ray-Image
The aim of this study is automatic semantic segmentation and measurement total length of teeth in one-shot panoramic x-ray image by using deep learning method with U-Net Model and binary image analysis in order to provide diagnostic information for the management of dental disorders, diseases, and conditions.
[***Github Link***](https://github.com/SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net)
***Original Dataset For Only Images***
DATASET ref - H. Abdi, S. Kasaei, and M. Mehdizadeh, “Automatic segmentation of mandible in panoramic x-ray,” J. Med. Imaging, vol. 2, no. 4, p. 44003, 2015
[Link DATASET for only original images.](https://data.mendeley.com/datasets/hxt48yk462/1)
## Dataset Structure
### Data Instances
An example of 'train' looks as follows.
```
{
"image": X-ray Image (Image),
"label": Binary Image Segmentation Map (Image)
}
```
## Dataset Creation
### Source Data
***Original Dataset For Only Images***
DATASET ref - H. Abdi, S. Kasaei, and M. Mehdizadeh, “Automatic segmentation of mandible in panoramic x-ray,” J. Med. Imaging, vol. 2, no. 4, p. 44003, 2015
[Link DATASET for only original images.](https://data.mendeley.com/datasets/hxt48yk462/1)
### Annotations
#### Annotation process
The annotation was made manually.
#### Who are the annotators?
S.Serdar Helli
### Other Known Limitations
The X-Ray Images files associated with this dataset are licensed under a Creative Commons Attribution 4.0 International license.
To Check Out For More Information:
***Original Dataset For Only Images***
DATASET ref - H. Abdi, S. Kasaei, and M. Mehdizadeh, “Automatic segmentation of mandible in panoramic x-ray,” J. Med. Imaging, vol. 2, no. 4, p. 44003, 2015
[Link DATASET for only original images.](https://data.mendeley.com/datasets/hxt48yk462/1)
## Additional Information
### Citation Information
For Labelling
```
@article{helli10tooth,
title={Tooth Instance Segmentation on Panoramic Dental Radiographs Using U-Nets and Morphological Processing},
author={HELL{\.I}, Serdar and HAMAMCI, Anda{\c{c}}},
journal={D{\"u}zce {\"U}niversitesi Bilim ve Teknoloji Dergisi},
volume={10},
number={1},
pages={39--50}
}
```
For Original Images
```
@article{abdi2015automatic,
title={Automatic segmentation of mandible in panoramic x-ray},
author={Abdi, Amir Hossein and Kasaei, Shohreh and Mehdizadeh, Mojdeh},
journal={Journal of Medical Imaging},
volume={2},
number={4},
pages={044003},
year={2015},
publisher={SPIE}
}
```
### Contributions
Thanks to [@SerdarHelli](https://github.com/SerdarHelli) for adding this dataset. | [
-0.5689207911491394,
-0.4827898442745209,
0.2919432520866394,
-0.1734239012002945,
-0.5692848563194275,
-0.03313911333680153,
0.04116559773683548,
-0.4647170603275299,
0.5175223350524902,
0.28760769963264465,
-0.6757852435112,
-0.8800833821296692,
-0.16705617308616638,
0.10083451867103577,... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-00ac2adb-9115199 | autoevaluate | 2022-06-29T22:42:09Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-06-29T22:42:09Z | 2022-06-29T22:41:07.000Z | 2022-06-29T22:41:07 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- cifar10
eval_info:
task: image_multi_class_classification
model: karthiksv/vit-base-patch16-224-cifar10
metrics: []
dataset_name: cifar10
dataset_config: plain_text
dataset_split: test
col_mapping:
image: img
target: label
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Multi-class Image Classification
* Model: karthiksv/vit-base-patch16-224-cifar10
* Dataset: cifar10
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@davidberg](https://huggingface.co/davidberg) for evaluating this model. | [
-0.5412911176681519,
-0.1521088033914566,
0.1340634822845459,
0.14384689927101135,
-0.0381147563457489,
-0.11981987208127975,
0.015149549581110477,
-0.5760711431503296,
0.03011125512421131,
0.2635370194911957,
-0.858056902885437,
-0.12065070122480392,
-0.7289437055587769,
-0.15112787485122... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-00ac2adb-9115200 | autoevaluate | 2022-06-29T22:42:47Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-06-29T22:42:47Z | 2022-06-29T22:41:49.000Z | 2022-06-29T22:41:49 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- cifar10
eval_info:
task: image_multi_class_classification
model: jimypbr/cifar10_outputs
metrics: []
dataset_name: cifar10
dataset_config: plain_text
dataset_split: test
col_mapping:
image: img
target: label
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Multi-class Image Classification
* Model: jimypbr/cifar10_outputs
* Dataset: cifar10
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@davidberg](https://huggingface.co/davidberg) for evaluating this model. | [
-0.6053362488746643,
-0.10098104178905487,
0.1835835576057434,
0.10206736624240875,
-0.029737243428826332,
-0.13253988325595856,
0.04965093731880188,
-0.5750940442085266,
0.06845784932374954,
0.2698274850845337,
-0.8213531374931335,
-0.17761896550655365,
-0.7370723485946655,
-0.14000287652... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-xsum-9cdb3b8b-10115340 | autoevaluate | 2022-07-07T14:53:50Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-07-07T14:53:50Z | 2022-07-07T14:47:02.000Z | 2022-07-07T14:47:02 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- xsum
eval_info:
task: summarization
model: eslamxm/mbert2mbert-finetune-fa
metrics: []
dataset_name: xsum
dataset_config: default
dataset_split: test
col_mapping:
text: document
target: summary
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Summarization
* Model: eslamxm/mbert2mbert-finetune-fa
* Dataset: xsum
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@iserralv](https://huggingface.co/iserralv) for evaluating this model. | [
-0.4978177845478058,
-0.13269880414009094,
0.22498813271522522,
0.12447486817836761,
-0.08454583585262299,
-0.12114467471837997,
0.05805014818906784,
-0.3476406931877136,
0.4041639268398285,
0.5171364545822144,
-1.143188714981079,
-0.1863207221031189,
-0.6424577236175537,
-0.11330989748239... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
imadd/water_dataset | imadd | 2022-07-07T17:38:50Z | 16 | 0 | null | [
"region:us"
] | 2022-07-07T17:38:50Z | 2022-07-07T17:35:33.000Z | 2022-07-07T17:35:33 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-xsum-ad8ac8a3-10195347 | autoevaluate | 2022-07-07T18:51:02Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-07-07T18:51:02Z | 2022-07-07T18:19:07.000Z | 2022-07-07T18:19:07 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- xsum
eval_info:
task: summarization
model: t5-large
metrics: []
dataset_name: xsum
dataset_config: default
dataset_split: test
col_mapping:
text: document
target: summary
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Summarization
* Model: t5-large
* Dataset: xsum
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@abhijeet](https://huggingface.co/abhijeet) for evaluating this model. | [
-0.38367006182670593,
0.030422795563936234,
0.27861887216567993,
0.09926918148994446,
-0.14069092273712158,
-0.014720925129950047,
0.04310706630349159,
-0.4303313195705414,
0.3198910355567932,
0.44697338342666626,
-1.1247828006744385,
-0.2676348090171814,
-0.663223385810852,
-0.07189479470... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-xsum-ad8ac8a3-10195349 | autoevaluate | 2022-07-07T18:27:18Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-07-07T18:27:18Z | 2022-07-07T18:19:17.000Z | 2022-07-07T18:19:17 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- xsum
eval_info:
task: summarization
model: t5-base
metrics: []
dataset_name: xsum
dataset_config: default
dataset_split: test
col_mapping:
text: document
target: summary
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Summarization
* Model: t5-base
* Dataset: xsum
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@abhijeet](https://huggingface.co/abhijeet) for evaluating this model. | [
-0.36169224977493286,
0.048805996775627136,
0.2307690978050232,
0.0676681399345398,
-0.14350591599941254,
-0.00015765297575853765,
0.10491441190242767,
-0.3886655867099762,
0.30382880568504333,
0.44388529658317566,
-1.142102599143982,
-0.2839253842830658,
-0.6489373445510864,
-0.0834617316... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
mbyanfei/amazon-shoe-reviews | mbyanfei | 2022-07-07T19:29:11Z | 16 | 0 | null | [
"region:us"
] | 2022-07-07T19:29:11Z | 2022-07-07T19:28:59.000Z | 2022-07-07T19:28:59 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-samsum-0c672345-10275363 | autoevaluate | 2022-07-08T05:51:52Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-07-08T05:51:52Z | 2022-07-08T04:34:01.000Z | 2022-07-08T04:34:01 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- samsum
eval_info:
task: summarization
model: google/pegasus-cnn_dailymail
metrics: []
dataset_name: samsum
dataset_config: samsum
dataset_split: train
col_mapping:
text: dialogue
target: summary
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Summarization
* Model: google/pegasus-cnn_dailymail
* Dataset: samsum
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@ikadebi](https://huggingface.co/ikadebi) for evaluating this model. | [
-0.4776797890663147,
-0.1956530213356018,
0.16862353682518005,
0.1061306893825531,
-0.23780669271945953,
-0.20013362169265747,
0.059273939579725266,
-0.3606891632080078,
0.3371943235397339,
0.43012499809265137,
-1.0702871084213257,
-0.2614041268825531,
-0.7465654611587524,
-0.1249647065997... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
MicPie/unpredictable_cluster27 | MicPie | 2022-08-04T20:01:16Z | 16 | 0 | null | [
"task_categories:multiple-choice",
"task_categories:question-answering",
"task_categories:zero-shot-classification",
"task_categories:text2text-generation",
"task_categories:table-question-answering",
"task_categories:text-generation",
"task_categories:text-classification",
"task_categories:tabular-cl... | 2022-08-04T20:01:16Z | 2022-07-08T18:40:03.000Z | 2022-07-08T18:40:03 | ---
annotations_creators:
- no-annotation
language_creators:
- found
language:
- en
license:
- apache-2.0
multilinguality:
- monolingual
pretty_name: UnpredicTable-cluster27
size_categories:
- 100K<n<1M
source_datasets: []
task_categories:
- multiple-choice
- question-answering
- zero-shot-classification
- text2text-generation
- table-question-answering
- text-generation
- text-classification
- tabular-classification
task_ids:
- multiple-choice-qa
- extractive-qa
- open-domain-qa
- closed-domain-qa
- closed-book-qa
- open-book-qa
- language-modeling
- multi-class-classification
- natural-language-inference
- topic-classification
- multi-label-classification
- tabular-multi-class-classification
- tabular-multi-label-classification
---
# Dataset Card for "UnpredicTable-cluster27" - Dataset of Few-shot Tasks from Tables
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-instances)
- [Data Splits](#data-instances)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
## Dataset Description
- **Homepage:** https://ethanperez.net/unpredictable
- **Repository:** https://github.com/JunShern/few-shot-adaptation
- **Paper:** Few-shot Adaptation Works with UnpredicTable Data
- **Point of Contact:** junshern@nyu.edu, perez@nyu.edu
### Dataset Summary
The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance.
There are several dataset versions available:
* [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites.
* [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/MicPie/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/MicPie/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites.
* [UnpredicTable-5k](https://huggingface.co/datasets/MicPie/unpredictable_5k): This dataset contains 5k random tables from the full dataset.
* UnpredicTable data subsets based on a manual human quality rating (please see our publication for details of the ratings):
* [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low)
* [UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium)
* [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high)
* UnpredicTable data subsets based on the website of origin:
* [UnpredicTable-baseball-fantasysports-yahoo-com](https://huggingface.co/datasets/MicPie/unpredictable_baseball-fantasysports-yahoo-com)
* [UnpredicTable-bulbapedia-bulbagarden-net](https://huggingface.co/datasets/MicPie/unpredictable_bulbapedia-bulbagarden-net)
* [UnpredicTable-cappex-com](https://huggingface.co/datasets/MicPie/unpredictable_cappex-com)
* [UnpredicTable-cram-com](https://huggingface.co/datasets/MicPie/unpredictable_cram-com)
* [UnpredicTable-dividend-com](https://huggingface.co/datasets/MicPie/unpredictable_dividend-com)
* [UnpredicTable-dummies-com](https://huggingface.co/datasets/MicPie/unpredictable_dummies-com)
* [UnpredicTable-en-wikipedia-org](https://huggingface.co/datasets/MicPie/unpredictable_en-wikipedia-org)
* [UnpredicTable-ensembl-org](https://huggingface.co/datasets/MicPie/unpredictable_ensembl-org)
* [UnpredicTable-gamefaqs-com](https://huggingface.co/datasets/MicPie/unpredictable_gamefaqs-com)
* [UnpredicTable-mgoblog-com](https://huggingface.co/datasets/MicPie/unpredictable_mgoblog-com)
* [UnpredicTable-mmo-champion-com](https://huggingface.co/datasets/MicPie/unpredictable_mmo-champion-com)
* [UnpredicTable-msdn-microsoft-com](https://huggingface.co/datasets/MicPie/unpredictable_msdn-microsoft-com)
* [UnpredicTable-phonearena-com](https://huggingface.co/datasets/MicPie/unpredictable_phonearena-com)
* [UnpredicTable-sittercity-com](https://huggingface.co/datasets/MicPie/unpredictable_sittercity-com)
* [UnpredicTable-sporcle-com](https://huggingface.co/datasets/MicPie/unpredictable_sporcle-com)
* [UnpredicTable-studystack-com](https://huggingface.co/datasets/MicPie/unpredictable_studystack-com)
* [UnpredicTable-support-google-com](https://huggingface.co/datasets/MicPie/unpredictable_support-google-com)
* [UnpredicTable-w3-org](https://huggingface.co/datasets/MicPie/unpredictable_w3-org)
* [UnpredicTable-wiki-openmoko-org](https://huggingface.co/datasets/MicPie/unpredictable_wiki-openmoko-org)
* [UnpredicTable-wkdu-org](https://huggingface.co/datasets/MicPie/unpredictable_wkdu-org)
* UnpredicTable data subsets based on clustering (for the clustering details please see our publication):
* [UnpredicTable-cluster00](https://huggingface.co/datasets/MicPie/unpredictable_cluster00)
* [UnpredicTable-cluster01](https://huggingface.co/datasets/MicPie/unpredictable_cluster01)
* [UnpredicTable-cluster02](https://huggingface.co/datasets/MicPie/unpredictable_cluster02)
* [UnpredicTable-cluster03](https://huggingface.co/datasets/MicPie/unpredictable_cluster03)
* [UnpredicTable-cluster04](https://huggingface.co/datasets/MicPie/unpredictable_cluster04)
* [UnpredicTable-cluster05](https://huggingface.co/datasets/MicPie/unpredictable_cluster05)
* [UnpredicTable-cluster06](https://huggingface.co/datasets/MicPie/unpredictable_cluster06)
* [UnpredicTable-cluster07](https://huggingface.co/datasets/MicPie/unpredictable_cluster07)
* [UnpredicTable-cluster08](https://huggingface.co/datasets/MicPie/unpredictable_cluster08)
* [UnpredicTable-cluster09](https://huggingface.co/datasets/MicPie/unpredictable_cluster09)
* [UnpredicTable-cluster10](https://huggingface.co/datasets/MicPie/unpredictable_cluster10)
* [UnpredicTable-cluster11](https://huggingface.co/datasets/MicPie/unpredictable_cluster11)
* [UnpredicTable-cluster12](https://huggingface.co/datasets/MicPie/unpredictable_cluster12)
* [UnpredicTable-cluster13](https://huggingface.co/datasets/MicPie/unpredictable_cluster13)
* [UnpredicTable-cluster14](https://huggingface.co/datasets/MicPie/unpredictable_cluster14)
* [UnpredicTable-cluster15](https://huggingface.co/datasets/MicPie/unpredictable_cluster15)
* [UnpredicTable-cluster16](https://huggingface.co/datasets/MicPie/unpredictable_cluster16)
* [UnpredicTable-cluster17](https://huggingface.co/datasets/MicPie/unpredictable_cluster17)
* [UnpredicTable-cluster18](https://huggingface.co/datasets/MicPie/unpredictable_cluster18)
* [UnpredicTable-cluster19](https://huggingface.co/datasets/MicPie/unpredictable_cluster19)
* [UnpredicTable-cluster20](https://huggingface.co/datasets/MicPie/unpredictable_cluster20)
* [UnpredicTable-cluster21](https://huggingface.co/datasets/MicPie/unpredictable_cluster21)
* [UnpredicTable-cluster22](https://huggingface.co/datasets/MicPie/unpredictable_cluster22)
* [UnpredicTable-cluster23](https://huggingface.co/datasets/MicPie/unpredictable_cluster23)
* [UnpredicTable-cluster24](https://huggingface.co/datasets/MicPie/unpredictable_cluster24)
* [UnpredicTable-cluster25](https://huggingface.co/datasets/MicPie/unpredictable_cluster25)
* [UnpredicTable-cluster26](https://huggingface.co/datasets/MicPie/unpredictable_cluster26)
* [UnpredicTable-cluster27](https://huggingface.co/datasets/MicPie/unpredictable_cluster27)
* [UnpredicTable-cluster28](https://huggingface.co/datasets/MicPie/unpredictable_cluster28)
* [UnpredicTable-cluster29](https://huggingface.co/datasets/MicPie/unpredictable_cluster29)
* [UnpredicTable-cluster-noise](https://huggingface.co/datasets/MicPie/unpredictable_cluster-noise)
### Supported Tasks and Leaderboards
Since the tables come from the web, the distribution of tasks and topics is very broad. The shape of our dataset is very wide, i.e., we have 1000's of tasks, while each task has only a few examples, compared to most current NLP datasets which are very deep, i.e., 10s of tasks with many examples. This implies that our dataset covers a broad range of potential tasks, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc.
The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset.
### Languages
English
## Dataset Structure
### Data Instances
Each task is represented as a jsonline file and consists of several few-shot examples. Each example is a dictionary containing a field 'task', which identifies the task, followed by an 'input', 'options', and 'output' field. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target which represents an individual column of the same row. Each task contains several such examples which can be concatenated as a few-shot task. In the case of multiple choice classification, the 'options' field contains the possible classes that a model needs to choose from.
There are also additional meta-data fields such as 'pageTitle', 'title', 'outputColName', 'url', 'wdcFile'.
### Data Fields
'task': task identifier
'input': column elements of a specific row in the table.
'options': for multiple choice classification, it provides the options to choose from.
'output': target column element of the same row as input.
'pageTitle': the title of the page containing the table.
'outputColName': output column name
'url': url to the website containing the table
'wdcFile': WDC Web Table Corpus file
### Data Splits
The UnpredicTable datasets do not come with additional data splits.
## Dataset Creation
### Curation Rationale
Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning.
### Source Data
#### Initial Data Collection and Normalization
We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC dataset tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The dataset contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline.
#### Who are the source language producers?
The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/).
### Annotations
#### Annotation process
Manual annotation was only carried out for the [UnpredicTable-rated-low](https://huggingface.co/datasets/MicPie/unpredictable_rated-low),
[UnpredicTable-rated-medium](https://huggingface.co/datasets/MicPie/unpredictable_rated-medium), and [UnpredicTable-rated-high](https://huggingface.co/datasets/MicPie/unpredictable_rated-high) data subsets to rate task quality. Detailed instructions of the annotation instructions can be found in our publication.
#### Who are the annotators?
Annotations were carried out by a lab assistant.
### Personal and Sensitive Information
The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history, etc.) might be contained in our dataset.
## Considerations for Using the Data
### Social Impact of Dataset
This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed for use in decision-critical or user-facing situations.
### Discussion of Biases
Since our dataset contains tables that are scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our datasets. Neither have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset.
### Other Known Limitations
No additional known limitations.
## Additional Information
### Dataset Curators
Jun Shern Chan, Michael Pieler, Jonathan Jao, Jérémy Scheurer, Ethan Perez
### Licensing Information
Apache 2.0
### Citation Information
```
@misc{chan2022few,
author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan},
title = {Few-shot Adaptation Works with UnpredicTable Data},
publisher={arXiv},
year = {2022},
url = {https://arxiv.org/abs/2208.01009}
}
```
| [
-0.5831499099731445,
-0.5522048473358154,
0.46540993452072144,
0.3368372619152069,
0.09072668105363846,
0.1603643149137497,
-0.15700924396514893,
-0.5947463512420654,
0.531753659248352,
0.2816917598247528,
-1.0269756317138672,
-0.691243052482605,
-0.6508496999740601,
0.18802659213542938,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
biglam/spanish_golden_age_sonnets | biglam | 2022-08-17T14:59:49Z | 16 | 3 | null | [
"multilinguality:monolingual",
"language:es",
"license:cc-by-nc-4.0",
"region:us"
] | 2022-08-17T14:59:49Z | 2022-07-11T21:19:39.000Z | 2022-07-11T21:19:39 | ---
annotations_creators: []
language:
- es
language_creators: []
license:
- cc-by-nc-4.0
multilinguality:
- monolingual
pretty_name: Spanish Golden-Age Sonnets
size_categories: []
source_datasets: []
tags: []
task_categories: []
task_ids: []
---
[](https://zenodo.org/badge/latestdoi/46981468)
# Corpus of Spanish Golden-Age Sonnets
## Introduction
This corpus comprises sonnets written in Spanish between the 16th and 17th centuries.
This corpus is a dataset saved in .csv, from a previous one in .xml.
All the information of the original dataset can be consulted in [its original repository](https://github.com/bncolorado/CorpusSonetosSigloDeOro).
Each sonnet has been annotated in accordance with the TEI standard. Besides the header and structural information, each sonnet includes the formal representation of each verse’s particular **metrical pattern**.
The pattern consists of a sequence of unstressed syllables (represented by the "-" sign) and stressed syllables ("+" sign). Thus, each verse’s metrical pattern is represented as follows:
"---+---+-+-"
Each line in the metric_pattern codifies a line in the sonnet_text column.
## Column description
- 'author' (string): Author of the sonnet described
- 'sonnet_title' (string): Sonnet title
- 'sonnet_text' (string): Full text of the specific sonnet, divided by lines ('\n')
- 'metric_pattern' (string): Full metric pattern of the sonnet, in text, with TEI standard, divided by lines ('\n')
- 'reference_id' (int): Id of the original XML file where the sonnet is extracted
- 'publisher' (string): Name of the publisher
- 'editor' (string): Name of the editor
- 'research_author' (string): Name of the principal research author
- 'metrical_patterns_annotator' (string): Name of the annotation's checker
- 'research_group' (string): Name of the research group that processed the sonnet
## Poets
With the purpose of having a corpus as representative as possible, every author from the 16th and 17th centuries with more than 10 digitalized and available sonnets has been included.
All texts have been taken from the [Biblioteca Virtual Miguel de Cervantes](http://www.cervantesvirtual.com/).
Currently, the corpus comprises more than 5,000 sonnets (more than 71,000 verses).
## Annotation
The metrical pattern annotation has been carried out in a semi-automatic way. Firstly, all sonnets have been processed by an automatic metrical scansion system which assigns a distinct metrical pattern to each verse. Secondly, a part of the corpus has been manually checked and errors have been corrected.
Currently the corpus is going through the manual validation phase, and each sonnet includes information about whether it has already been manually checked or not.
## How to cite this corpus
If you would like to cite this corpus for academic research purposes, please use this reference:
Navarro-Colorado, Borja; Ribes Lafoz, María, and Sánchez, Noelia (2015) "Metrical annotation of a large corpus of Spanish sonnets: representation, scansion and evaluation" 10th edition of the Language Resources and Evaluation Conference 2016 Portorož, Slovenia. ([PDF](http://www.dlsi.ua.es/~borja/navarro2016_MetricalPatternsBank.pdf))
## Further Information
This corpus is part of the [ADSO project](https://adsoen.wordpress.com/), developed at the [University of Alicante](http://www.ua.es) and funded by [Fundación BBVA](http://www.fbbva.es/TLFU/tlfu/ing/home/index.jsp).
If you require further information about the metrical annotation, please consult the [Annotation Guide](https://github.com/bncolorado/CorpusSonetosSigloDeOro/blob/master/GuiaAnotacionMetrica.pdf) (in Spanish) or the following papers:
- Navarro-Colorado, Borja; Ribes-Lafoz, María and Sánchez, Noelia (2016) "Metrical Annotation of a Large Corpus of Spanish Sonnets: Representation, Scansion and Evaluation" [Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)](http://www.lrec-conf.org/proceedings/lrec2016/pdf/453_Paper.pdf) Portorož, Slovenia.
- Navarro-Colorado, Borja (2015) "A computational linguistic approach to Spanish Golden Age Sonnets: metrical and semantic aspects" [Computational Linguistics for Literature NAACL 2015](https://sites.google.com/site/clfl2015/), Denver (Co), USA ([PDF](https://aclweb.org/anthology/W/W15/W15-0712.pdf)).
## License
The metrical annotation of this corpus is licensed under a Creative Commons Attribution-Non Commercial 4.0 International License.
About the texts, "this digital object is protected by copyright and/or related rights. This digital object is accessible without charge, but its use is subject to the licensing conditions set by the organization giving access to it. Further information available at http://www.cervantesvirtual.com/marco-legal/ ". | [
-0.45457273721694946,
-0.21796442568302155,
0.11609405279159546,
0.39819425344467163,
-0.3155817985534668,
-0.07851926237344742,
-0.24657835066318512,
-0.8381349444389343,
0.5127813220024109,
0.9578643441200256,
-0.35243839025497437,
-0.727912962436676,
-0.5603719353675842,
0.3233371078968... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
erikacardenas300/MNLI-Processed | erikacardenas300 | 2022-07-21T20:25:07Z | 16 | 2 | null | [
"region:us"
] | 2022-07-21T20:25:07Z | 2022-07-12T00:24:12.000Z | 2022-07-12T00:24:12 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Zaib/java-vulnerability | Zaib | 2022-07-14T11:09:57Z | 16 | 3 | null | [
"license:afl-3.0",
"region:us"
] | 2022-07-14T11:09:57Z | 2022-07-14T10:16:40.000Z | 2022-07-14T10:16:40 | ---
license: afl-3.0
---
| [
-0.12853392958641052,
-0.18616779148578644,
0.6529127955436707,
0.49436280131340027,
-0.19319361448287964,
0.23607419431209564,
0.36072003841400146,
0.050563063472509384,
0.579365611076355,
0.7400140762329102,
-0.6508104205131531,
-0.23783954977989197,
-0.7102249264717102,
-0.0478260256350... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-samsum-f90fd7b5-10915466 | autoevaluate | 2022-07-15T09:35:16Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-07-15T09:35:16Z | 2022-07-15T08:27:21.000Z | 2022-07-15T08:27:21 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- samsum
eval_info:
task: summarization
model: pszemraj/led-large-book-summary
metrics: ['bleu']
dataset_name: samsum
dataset_config: samsum
dataset_split: test
col_mapping:
text: dialogue
target: summary
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Summarization
* Model: pszemraj/led-large-book-summary
* Dataset: samsum
* Config: samsum
* Split: test
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model. | [
-0.4113379120826721,
-0.12251129001379013,
0.19838391244411469,
0.08160281181335449,
-0.2113797664642334,
-0.133088618516922,
0.003951639868319035,
-0.2302839308977127,
0.22420641779899597,
0.48225295543670654,
-1.060201644897461,
-0.23687733709812164,
-0.605709969997406,
-0.04528912901878... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
jonaskoenig/future-time-references-static-filter-D1 | jonaskoenig | 2022-07-15T11:45:57Z | 16 | 0 | null | [
"region:us"
] | 2022-07-15T11:45:57Z | 2022-07-15T08:59:48.000Z | 2022-07-15T08:59:48 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
carlosejimenez/flickr30k_CLIP_ViT-B-32_subset_pairs_SimCSE_similarity | carlosejimenez | 2023-03-04T00:54:54Z | 16 | 0 | null | [
"region:us"
] | 2023-03-04T00:54:54Z | 2022-07-15T09:07:59.000Z | 2022-07-15T09:07:59 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-samsum-f4288f9c-10925467 | autoevaluate | 2022-07-15T09:38:05Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-07-15T09:38:05Z | 2022-07-15T09:11:11.000Z | 2022-07-15T09:11:11 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- samsum
eval_info:
task: summarization
model: pszemraj/led-base-book-summary
metrics: ['bleu']
dataset_name: samsum
dataset_config: samsum
dataset_split: test
col_mapping:
text: dialogue
target: summary
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Summarization
* Model: pszemraj/led-base-book-summary
* Dataset: samsum
* Config: samsum
* Split: test
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model. | [
-0.4060242176055908,
-0.14256450533866882,
0.16735480725765228,
0.06305873394012451,
-0.21273276209831238,
-0.09804560244083405,
0.0614989772439003,
-0.2215733379125595,
0.2267635017633438,
0.459373414516449,
-1.1147103309631348,
-0.2360718548297882,
-0.5999846458435059,
-0.076642625033855... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-cnn_dailymail-899c0b5b-10935468 | autoevaluate | 2022-07-16T13:52:25Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-07-16T13:52:25Z | 2022-07-15T09:35:37.000Z | 2022-07-15T09:35:37 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- cnn_dailymail
eval_info:
task: summarization
model: pszemraj/led-base-book-summary
metrics: ['bleu']
dataset_name: cnn_dailymail
dataset_config: 3.0.0
dataset_split: test
col_mapping:
text: article
target: highlights
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Summarization
* Model: pszemraj/led-base-book-summary
* Dataset: cnn_dailymail
* Config: 3.0.0
* Split: test
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@pszemraj](https://huggingface.co/pszemraj) for evaluating this model. | [
-0.4334414601325989,
-0.2503490149974823,
0.08571699261665344,
0.10293702036142349,
-0.23911578953266144,
-0.10129611194133759,
-0.02440548501908779,
-0.2778643071651459,
0.1513054370880127,
0.4050568640232086,
-1.031599760055542,
-0.290280818939209,
-0.6850579977035522,
-0.075987547636032... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
tokeron/Piyyut | tokeron | 2023-04-08T10:36:57Z | 16 | 0 | null | [
"task_categories:text-classification",
"multilinguality:monolingual",
"size_categories:10K<n<100K",
"source_datasets:original",
"language:heb",
"license:afl-3.0",
"metaphor-detection",
"region:us"
] | 2023-04-08T10:36:57Z | 2022-07-20T09:01:23.000Z | 2022-07-20T09:01:23 | ---
license: afl-3.0
language:
- heb
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- text-classification
tags:
- metaphor-detection
viewer: true
---
| [
-0.12853392958641052,
-0.18616779148578644,
0.6529127955436707,
0.49436280131340027,
-0.19319361448287964,
0.23607419431209564,
0.36072003841400146,
0.050563063472509384,
0.579365611076355,
0.7400140762329102,
-0.6508104205131531,
-0.23783954977989197,
-0.7102249264717102,
-0.0478260256350... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
rony/climate-change-MRC | rony | 2022-07-25T06:14:09Z | 16 | 0 | null | [
"license:mit",
"region:us"
] | 2022-07-25T06:14:09Z | 2022-07-24T11:22:03.000Z | 2022-07-24T11:22:03 | ---
license: mit
---
The Climate Change MRC dataset, also known as CCMRC, is a part of the work "Climate Bot: A Machine Reading Comprehension System for Climate Change Question Answering", accepted at IJCAI-ECAI 2022. The paper was accepted in the special system demo track "AI for Good".
If you use the dataset, cite the following paper:
```
@inproceedings{rony2022climatemrc,
title={Climate Bot: A Machine Reading Comprehension System for Climate Change Question Answering.},
author={Rony, Md Rashad Al Hasan and Zuo, Ying and Kovriguina, Liubov and Teucher, Roman and Lehmann, Jens},
booktitle={IJCAI},
year={2022}
}
```
| [
-0.676809549331665,
-0.27109983563423157,
0.35286572575569153,
-0.26032888889312744,
-0.3226540684700012,
0.1750563532114029,
-0.2147076278924942,
-0.08548002690076828,
-0.02465875633060932,
0.7539915442466736,
-0.9149571061134338,
-0.44655370712280273,
-0.5417517423629761,
0.1333817690610... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-squad_v2-e85023ec-11745564 | autoevaluate | 2022-07-25T11:40:25Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-07-25T11:40:25Z | 2022-07-25T11:37:45.000Z | 2022-07-25T11:37:45 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- squad_v2
eval_info:
task: extractive_question_answering
model: deepset/tinyroberta-squad2
metrics: []
dataset_name: squad_v2
dataset_config: squad_v2
dataset_split: validation
col_mapping:
context: context
question: question
answers-text: answers.text
answers-answer_start: answers.answer_start
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Question Answering
* Model: deepset/tinyroberta-squad2
* Dataset: squad_v2
* Config: squad_v2
* Split: validation
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@sjrlee](https://huggingface.co/sjrlee) for evaluating this model. | [
-0.4396345913410187,
-0.40067777037620544,
0.37262389063835144,
0.09650465101003647,
0.03568068519234657,
0.10272999107837677,
0.08112159371376038,
-0.37085649371147156,
0.058863550424575806,
0.3874131143093109,
-1.335182785987854,
-0.05430855229496956,
-0.47518011927604675,
-0.02284384705... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
jordane95/msmarco-passage-corpus-with-query | jordane95 | 2022-07-27T02:02:45Z | 16 | 0 | null | [
"license:afl-3.0",
"region:us"
] | 2022-07-27T02:02:45Z | 2022-07-26T08:30:52.000Z | 2022-07-26T08:30:52 | ---
license: afl-3.0
---
| [
-0.12853392958641052,
-0.18616779148578644,
0.6529127955436707,
0.49436280131340027,
-0.19319361448287964,
0.23607419431209564,
0.36072003841400146,
0.050563063472509384,
0.579365611076355,
0.7400140762329102,
-0.6508104205131531,
-0.23783954977989197,
-0.7102249264717102,
-0.0478260256350... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
asparius/demirtas-movie | asparius | 2022-07-26T11:56:21Z | 16 | 0 | null | [
"license:mit",
"region:us"
] | 2022-07-26T11:56:21Z | 2022-07-26T11:39:08.000Z | 2022-07-26T11:39:08 | ---
license: mit
---
| [
-0.12853392958641052,
-0.18616779148578644,
0.6529127955436707,
0.49436280131340027,
-0.19319361448287964,
0.23607419431209564,
0.36072003841400146,
0.050563063472509384,
0.579365611076355,
0.7400140762329102,
-0.6508104205131531,
-0.23783954977989197,
-0.7102249264717102,
-0.0478260256350... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
chinglohsiu/github-issues | chinglohsiu | 2022-07-26T11:46:53Z | 16 | 0 | null | [
"region:us"
] | 2022-07-26T11:46:53Z | 2022-07-26T11:46:08.000Z | 2022-07-26T11:46:08 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
autoevaluate/autoeval-staging-eval-project-deepset__germanquad-7176bd7d-11875590 | autoevaluate | 2022-07-26T14:40:57Z | 16 | 0 | null | [
"autotrain",
"evaluation",
"region:us"
] | 2022-07-26T14:40:57Z | 2022-07-26T14:38:52.000Z | 2022-07-26T14:38:52 | ---
type: predictions
tags:
- autotrain
- evaluation
datasets:
- deepset/germanquad
eval_info:
task: extractive_question_answering
model: deepset/gelectra-large-germanquad
metrics: []
dataset_name: deepset/germanquad
dataset_config: plain_text
dataset_split: test
col_mapping:
context: context
question: question
answers-text: answers.text
answers-answer_start: answers.answer_start
---
# Dataset Card for AutoTrain Evaluator
This repository contains model predictions generated by [AutoTrain](https://huggingface.co/autotrain) for the following task and dataset:
* Task: Question Answering
* Model: deepset/gelectra-large-germanquad
* Dataset: deepset/germanquad
* Config: plain_text
* Split: test
To run new evaluation jobs, visit Hugging Face's [automatic model evaluator](https://huggingface.co/spaces/autoevaluate/model-evaluator).
## Contributions
Thanks to [@sjlree](https://huggingface.co/sjlree) for evaluating this model. | [
-0.5632662773132324,
-0.6143108606338501,
0.36824849247932434,
0.01202950906008482,
-0.001944436109624803,
0.047359056770801544,
-0.027298301458358765,
-0.3622008264064789,
0.06432921439409256,
0.45034417510032654,
-1.0108330249786377,
-0.2293517142534256,
-0.5346943736076355,
-0.081340029... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
owaiskha9654/PubMed_MultiLabel_Text_Classification_Dataset_MeSH | owaiskha9654 | 2023-01-30T09:50:44Z | 16 | 6 | null | [
"task_categories:text-classification",
"task_ids:multi-label-classification",
"size_categories:10K<n<100K",
"source_datasets:BioASQ Task A",
"language:en",
"license:afl-3.0",
"region:us"
] | 2023-01-30T09:50:44Z | 2022-08-02T20:13:50.000Z | 2022-08-02T20:13:50 | ---
language:
- en
license: afl-3.0
source_datasets:
- BioASQ Task A
task_categories:
- text-classification
task_ids:
- multi-label-classification
pretty_name: BioASQ, PUBMED
size_categories:
- 10K<n<100K
---
This dataset consists of a approx 50k collection of research articles from **PubMed** repository. Originally these documents are manually annotated by Biomedical Experts with their MeSH labels and each articles are described in terms of 10-15 MeSH labels. In this Dataset we have huge numbers of labels present as a MeSH major which is raising the issue of extremely large output space and severe label sparsity issues. To solve this Issue Dataset has been Processed and mapped to its root as Described in the Below Figure.

 | [
-0.6306214332580566,
-0.5796144008636475,
0.25540822744369507,
0.02677999623119831,
-0.24939455091953278,
-0.04810921102762222,
0.0024983915500342846,
-0.2523285448551178,
0.13507091999053955,
0.5546870231628418,
-0.4120590090751648,
-0.6205779910087585,
-0.9368510246276855,
0.169717893004... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
RUCAIBox/Open-Dialogue | RUCAIBox | 2023-03-03T14:43:02Z | 16 | 10 | null | [
"task_categories:conversational",
"task_ids:dialogue-generation",
"multilinguality:monolingual",
"language:en",
"dialogue-response-generation",
"open-dialogue",
"dialog-response-generation",
"region:us"
] | 2023-03-03T14:43:02Z | 2022-08-13T02:08:40.000Z | 2022-08-13T02:08:40 | ---
language:
- en
multilinguality:
- monolingual
task_categories:
- conversational
task_ids:
- dialogue-generation
tags:
- dialogue-response-generation
- open-dialogue
- dialog-response-generation
---
This is the open dialogue datasets collected by TextBox, including:
- PersonaChat (pc)
- DailyDialog (dd)
- DSTC7-AVSD (da)
- SGD (sgd)
- Topical-Chat (tc)
- Wizard of Wikipedia (wow)
- Movie Dialog (md)
- Cleaned OpenSubtitles Dialogs (cos)
- Empathetic Dialogues (ed)
- Curiosity (curio)
- CMU Document Grounded Conversations (cmudog)
- MuTual (mutual)
- OpenDialKG (odkg)
- DREAM (dream).
The detail and leaderboard of each dataset can be found in [TextBox page](https://github.com/RUCAIBox/TextBox#dataset). | [
-0.48906153440475464,
-0.932923436164856,
0.22163838148117065,
-0.22799818217754364,
0.08489122241735458,
0.25225889682769775,
-0.012796656228601933,
0.11556044220924377,
0.22959496080875397,
1.0301990509033203,
-1.1667571067810059,
-0.9629495739936829,
-0.10581611096858978,
0.103249974548... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
hugginglearners/data-science-job-salaries | hugginglearners | 2022-08-17T18:42:40Z | 16 | 2 | null | [
"license:cc0-1.0",
"region:us"
] | 2022-08-17T18:42:40Z | 2022-08-15T00:00:27.000Z | 2022-08-15T00:00:27 | ---
license:
- cc0-1.0
kaggle_id: ruchi798/data-science-job-salaries
---
# Dataset Card for Data Science Job Salaries
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://kaggle.com/datasets/ruchi798/data-science-job-salaries
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
### Content
| Column | Description |
|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| work_year | The year the salary was paid. |
| experience_level | The experience level in the job during the year with the following possible values: EN Entry-level / Junior MI Mid-level / Intermediate SE Senior-level / Expert EX Executive-level / Director |
| employment_type | The type of employement for the role: PT Part-time FT Full-time CT Contract FL Freelance |
| job_title | The role worked in during the year. |
| salary | The total gross salary amount paid. |
| salary_currency | The currency of the salary paid as an ISO 4217 currency code. |
| salary_in_usd | The salary in USD (FX rate divided by avg. USD rate for the respective year via fxdata.foorilla.com). |
| employee_residence | Employee's primary country of residence in during the work year as an ISO 3166 country code. |
| remote_ratio | The overall amount of work done remotely, possible values are as follows: 0 No remote work (less than 20%) 50 Partially remote 100 Fully remote (more than 80%) |
| company_location | The country of the employer's main office or contracting branch as an ISO 3166 country code. |
| company_size | The average number of people that worked for the company during the year: S less than 50 employees (small) M 50 to 250 employees (medium) L more than 250 employees (large) |
### Acknowledgements
I'd like to thank ai-jobs.net Salaries for aggregating this data!
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
[More Information Needed]
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
[More Information Needed]
### Annotations
#### Annotation process
[More Information Needed]
#### Who are the annotators?
[More Information Needed]
### Personal and Sensitive Information
[More Information Needed]
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
This dataset was shared by [@ruchi798](https://kaggle.com/ruchi798)
### Licensing Information
The license for this dataset is cc0-1.0
### Citation Information
```bibtex
[More Information Needed]
```
### Contributions
[More Information Needed] | [
-0.1348394900560379,
-0.16900020837783813,
0.12200026214122772,
0.15777365863323212,
-0.30948489904403687,
-0.07190665602684021,
-0.03968922048807144,
-0.5801985263824463,
0.6515167355537415,
0.46284598112106323,
-0.8398929834365845,
-0.7630396485328674,
-0.6719675064086914,
0.339686989784... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
espejelomar/data-science-job-salaries | espejelomar | 2022-08-15T00:15:46Z | 16 | 0 | null | [
"region:us"
] | 2022-08-15T00:15:46Z | 2022-08-15T00:15:44.000Z | 2022-08-15T00:15:44 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
kasumi222/busygt | kasumi222 | 2022-08-15T02:03:49Z | 16 | 0 | null | [
"region:us"
] | 2022-08-15T02:03:49Z | 2022-08-15T02:03:42.000Z | 2022-08-15T02:03:42 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
kasumi222/busigt | kasumi222 | 2022-09-09T03:23:23Z | 16 | 0 | null | [
"region:us"
] | 2022-09-09T03:23:23Z | 2022-08-15T02:05:10.000Z | 2022-08-15T02:05:10 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.