| id (stringlengths 2–115) | author (stringlengths 2–42, nullable) | last_modified (timestamp[us, tz=UTC]) | downloads (int64, 0–8.87M) | likes (int64, 0–3.84k) | paperswithcode_id (stringlengths 2–45, nullable) | tags (list) | lastModified (timestamp[us, tz=UTC]) | createdAt (stringlengths 24) | key (stringclasses, 1 value) | created (timestamp[us]) | card (stringlengths 1–1.01M) | embedding (list) | library_name (stringclasses, 21 values) | pipeline_tag (stringclasses, 27 values) | mask_token (null) | card_data (null) | widget_data (null) | model_index (null) | config (null) | transformers_info (null) | spaces (null) | safetensors (null) | transformersInfo (null) | modelId (stringlengths 5–111, nullable) | embeddings (list) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Muktau/k_infinity_dataset | Muktau | 2023-01-19T22:55:33Z | 12 | 0 | null | [
"region:us"
] | 2023-01-19T22:55:33Z | 2023-01-18T14:20:47.000Z | 2023-01-18T14:20:47 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Ssunbell/boostcamp-docvqa-v2 | Ssunbell | 2023-01-18T14:37:24Z | 12 | 0 | null | [
"region:us"
] | 2023-01-18T14:37:24Z | 2023-01-18T14:27:39.000Z | 2023-01-18T14:27:39 | ---
dataset_info:
features:
- name: questionId
dtype: int64
- name: question
dtype: string
- name: image
sequence:
sequence:
sequence: uint8
- name: docId
dtype: int64
- name: ucsf_document_id
dtype: string
- name: ucsf_document_page_no
dtype: string
- name: answers
sequence: string
- name: data_split
dtype: string
- name: words
sequence: string
- name: boxes
sequence:
sequence: int64
splits:
- name: train
num_bytes: 6381793673
num_examples: 39454
- name: val
num_bytes: 869361798
num_examples: 5349
download_size: 2578867675
dataset_size: 7251155471
---
# Dataset Card for "boostcamp-docvqa-v2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.40653830766677856,
-0.19754809141159058,
0.2007853090763092,
0.3231397271156311,
-0.015144752338528633,
0.15039098262786865,
0.38265442848205566,
-0.17139087617397308,
0.6403316259384155,
0.06280138343572617,
-0.9847449064254761,
-0.5449039340019226,
-0.5346453785896301,
-0.540294170379... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
clip-benchmark/wds_vtab-kitti_closest_vehicle_distance | clip-benchmark | 2023-01-20T07:16:46Z | 12 | 0 | null | [
"region:us"
] | 2023-01-20T07:16:46Z | 2023-01-20T07:15:24.000Z | 2023-01-20T07:15:24 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
clip-benchmark/wds_vtab-pcam | clip-benchmark | 2023-01-20T07:20:04Z | 12 | 1 | null | [
"region:us"
] | 2023-01-20T07:20:04Z | 2023-01-20T07:17:51.000Z | 2023-01-20T07:17:51 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
basilis/wvDataset2 | basilis | 2023-01-23T16:58:36Z | 12 | 0 | null | [
"region:us"
] | 2023-01-23T16:58:36Z | 2023-01-23T16:54:08.000Z | 2023-01-23T16:54:08 | ---
dataset_info:
features:
- name: tokenized_text
sequence: string
splits:
- name: train
num_bytes: 6675666248
num_examples: 97928
download_size: 1690147799
dataset_size: 6675666248
---
# Dataset Card for "wvDataset2"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.5511952042579651,
-0.10576289892196655,
0.1064915806055069,
0.1501156985759735,
-0.30490562319755554,
0.04327114298939705,
0.42549121379852295,
-0.2608688175678253,
0.6816001534461975,
0.4394592344760895,
-0.9481205940246582,
-0.5769873857498169,
-0.5135354399681091,
-0.4276778995990753... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
jairodm/spanish-ber-bk | jairodm | 2023-01-23T18:02:41Z | 12 | 0 | null | [
"region:us"
] | 2023-01-23T18:02:41Z | 2023-01-23T18:02:22.000Z | 2023-01-23T18:02:22 | Entry not found | [
-0.3227647542953491,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965083122253,
0.7915717959403992,
0.07618629932403564,
0.7746022343635559,
0.2563222348690033,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
juancopi81/academia | juancopi81 | 2023-01-24T18:28:53Z | 12 | 0 | null | [
"task_categories:automatic-speech-recognition",
"whisper",
"whispering",
"medium",
"region:us"
] | 2023-01-24T18:28:53Z | 2023-01-23T18:19:25.000Z | 2023-01-23T18:19:25 | ---
task_categories:
- automatic-speech-recognition
dataset_info:
features:
- name: CHANNEL_NAME
dtype: string
- name: URL
dtype: string
- name: TITLE
dtype: string
- name: DESCRIPTION
dtype: string
- name: TRANSCRIPTION
dtype: string
- name: SEGMENTS
dtype: string
splits:
- name: train
num_bytes: 4010418
num_examples: 52
download_size: 273124
dataset_size: 4010418
tags:
- whisper
- whispering
- medium
---
# Dataset Card for "academia"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.6131056547164917,
-0.2926342785358429,
0.3894093334674835,
0.1199471727013588,
-0.007536381483078003,
0.2792535424232483,
0.2672562003135681,
-0.2842943072319031,
0.859066367149353,
0.3863335847854614,
-0.8268465995788574,
-0.9016115665435791,
-0.6009789705276489,
-0.1912166029214859,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
plncmm/wl-medication | plncmm | 2023-01-23T18:32:07Z | 12 | 0 | null | [
"license:cc-by-nc-4.0",
"region:us"
] | 2023-01-23T18:32:07Z | 2023-01-23T18:29:51.000Z | 2023-01-23T18:29:51 | ---
license: cc-by-nc-4.0
---
| [
-0.12853392958641052,
-0.18616779148578644,
0.6529127955436707,
0.49436280131340027,
-0.19319361448287964,
0.23607419431209564,
0.36072003841400146,
0.050563063472509384,
0.579365611076355,
0.7400140762329102,
-0.6508104205131531,
-0.23783954977989197,
-0.7102249264717102,
-0.0478260256350... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
plncmm/wl-body-part | plncmm | 2023-01-23T18:36:59Z | 12 | 0 | null | [
"license:cc-by-nc-4.0",
"region:us"
] | 2023-01-23T18:36:59Z | 2023-01-23T18:35:00.000Z | 2023-01-23T18:35:00 | ---
license: cc-by-nc-4.0
---
| [
-0.12853392958641052,
-0.18616779148578644,
0.6529127955436707,
0.49436280131340027,
-0.19319361448287964,
0.23607419431209564,
0.36072003841400146,
0.050563063472509384,
0.579365611076355,
0.7400140762329102,
-0.6508104205131531,
-0.23783954977989197,
-0.7102249264717102,
-0.0478260256350... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
plncmm/wl-finding | plncmm | 2023-01-23T18:40:24Z | 12 | 0 | null | [
"license:cc-by-nc-4.0",
"region:us"
] | 2023-01-23T18:40:24Z | 2023-01-23T18:37:31.000Z | 2023-01-23T18:37:31 | ---
license: cc-by-nc-4.0
---
| [
-0.12853392958641052,
-0.18616779148578644,
0.6529127955436707,
0.49436280131340027,
-0.19319361448287964,
0.23607419431209564,
0.36072003841400146,
0.050563063472509384,
0.579365611076355,
0.7400140762329102,
-0.6508104205131531,
-0.23783954977989197,
-0.7102249264717102,
-0.0478260256350... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
plncmm/wl-procedure | plncmm | 2023-01-23T18:42:45Z | 12 | 0 | null | [
"license:cc-by-nc-4.0",
"region:us"
] | 2023-01-23T18:42:45Z | 2023-01-23T18:40:54.000Z | 2023-01-23T18:40:54 | ---
license: cc-by-nc-4.0
---
| [
-0.12853392958641052,
-0.18616779148578644,
0.6529127955436707,
0.49436280131340027,
-0.19319361448287964,
0.23607419431209564,
0.36072003841400146,
0.050563063472509384,
0.579365611076355,
0.7400140762329102,
-0.6508104205131531,
-0.23783954977989197,
-0.7102249264717102,
-0.0478260256350... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
purplebear/dreambooth-hackathon-images | purplebear | 2023-01-23T20:07:29Z | 12 | 0 | null | [
"region:us"
] | 2023-01-23T20:07:29Z | 2023-01-23T19:54:39.000Z | 2023-01-23T19:54:39 | ---
dataset_info:
features:
- name: image
dtype: image
splits:
- name: train
num_bytes: 54613224.0
num_examples: 20
download_size: 54616715
dataset_size: 54613224.0
---
# Dataset Card for "dreambooth-hackathon-images"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.42646852135658264,
-0.13476064801216125,
0.23689782619476318,
0.23072221875190735,
-0.40631160140037537,
0.3039889335632324,
0.32873836159706116,
-0.32934942841529846,
1.1760584115982056,
0.36833885312080383,
-0.8234314322471619,
-0.6366724967956543,
-0.3446301519870758,
-0.267739206552... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Rami/utd_reddit.json | Rami | 2023-01-24T16:30:59Z | 12 | 0 | null | [
"region:us"
] | 2023-01-24T16:30:59Z | 2023-01-23T20:05:54.000Z | 2023-01-23T20:05:54 | ---
dataset_info:
features:
- name: j52edo
struct:
- name: title
dtype: string
- name: selftext
dtype: string
- name: author
dtype: string
- name: num_comments
dtype: int64
- name: permalink
dtype: string
- name: url
dtype: string
- name: comments
struct:
- name: g7p723l
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7pmgai
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7q0gtr
struct:
- name: body
dtype: string
- name: g7p6z8q
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7q37rw
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7qjj6o
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7p4ynr
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7paxsm
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7p543c
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7pvhwr
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7qgcr3
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7p8y1o
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7pajp9
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7pn8t5
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7psgy5
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7s767n
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7qrjeu
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7r3brk
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7q48td
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7q3j2n
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7ujauu
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7pt766
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7pyov9
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7q1j3w
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7qvvrm
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7t8u30
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7sqe5g
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: gn3icng
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: gn3id7g
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7qjzq9
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: grxwrut
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: is1ekdj
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7q0gtr
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7qn1hx
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7qjj6o
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7tdb88
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7pvhwr
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7psgy5
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7psssg
struct:
- name: body
dtype: string
- name: g7r3brk
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7ujauu
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7ujcwo
struct:
- name: body
dtype: string
- name: g7q1j3w
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7q1ukv
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7t8u30
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: gn3id7g
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7qn1hx
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7tdb88
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7psssg
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: replies
struct:
- name: g7qvgs1
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7ujcwo
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7q1ukv
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
- name: g7qvgs1
struct:
- name: body
dtype: string
- name: author
dtype: string
- name: score
dtype: int64
splits:
- name: train
num_bytes: 5510
num_examples: 1
download_size: 94050
dataset_size: 5510
---
# Dataset Card for "utd_reddit.json"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.6092205047607422,
-0.30199864506721497,
0.2679706811904907,
0.166580930352211,
-0.208896666765213,
0.25947511196136475,
0.08683335036039352,
-0.0030029662884771824,
0.825337827205658,
0.5658121705055237,
-0.8040182590484619,
-0.8505425453186035,
-0.4693138599395752,
0.12262178212404251,... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
SDbiaseval/dataset-identities-v-1.4-colorfulness | SDbiaseval | 2023-01-23T20:12:00Z | 12 | 0 | null | [
"region:us"
] | 2023-01-23T20:12:00Z | 2023-01-23T20:11:47.000Z | 2023-01-23T20:11:47 | ---
dataset_info:
features:
- name: ethnicity
dtype: string
- name: gender
dtype: string
- name: 'no'
dtype: int32
- name: image_path
dtype: string
- name: colorfulness
dtype: float64
splits:
- name: train
num_bytes: 65148
num_examples: 480
download_size: 12121
dataset_size: 65148
---
# Dataset Card for "dataset-identities-v-1.4-colorfulness"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.5377838015556335,
-0.02863915264606476,
0.00010801723692566156,
0.6163544654846191,
-0.3993779718875885,
0.13659508526325226,
0.21054883301258087,
-0.3632616400718689,
1.0471649169921875,
0.24605391919612885,
-0.9623758792877197,
-0.7356608510017395,
-0.430507093667984,
-0.0454396717250... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Dahoas/augmented_synthetic_prompt_responses | Dahoas | 2023-02-24T04:18:52Z | 12 | 2 | null | [
"region:us"
] | 2023-02-24T04:18:52Z | 2023-01-24T02:30:28.000Z | 2023-01-24T02:30:28 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
dragonboas/autotrain-data-bp-data | dragonboas | 2023-01-24T06:46:10Z | 12 | 0 | null | [
"region:us"
] | 2023-01-24T06:46:10Z | 2023-01-24T06:45:47.000Z | 2023-01-24T06:45:47 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Joe02/mizushima_oonari | Joe02 | 2023-01-24T08:37:41Z | 12 | 0 | null | [
"license:other",
"region:us"
] | 2023-01-24T08:37:41Z | 2023-01-24T08:19:11.000Z | 2023-01-24T08:19:11 | ---
license: other
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
DarthReca/california_burned_areas | DarthReca | 2023-09-27T08:52:20Z | 12 | 3 | null | [
"task_categories:image-segmentation",
"size_categories:n<1K",
"license:openrail",
"climate",
"doi:10.57967/hf/0389",
"region:us"
] | 2023-09-27T08:52:20Z | 2023-01-24T10:31:47.000Z | 2023-01-24T10:31:47 | ---
license: openrail
task_categories:
- image-segmentation
pretty_name: California Burned Areas
size_categories:
- n<1K
tags:
- climate
---
# California Burned Areas Dataset
**Working on adding more data**
## Dataset Description
- **Paper:**
### Dataset Summary
This dataset contains Sentinel-2 satellite images taken before and after wildfires.
The ground-truth masks are provided by the California Department of Forestry and Fire Protection and are mapped onto the images.
### Supported Tasks
The dataset is designed to do binary semantic segmentation of burned vs unburned areas.
## Dataset Structure
We opted to use HDF5 to grant better portability and lower file size than GeoTIFF.
### Dataset opening
Using the `datasets` library, you can download only the pre-patched raw version, for simplicity.
```python
from datasets import load_dataset

# There are two available configurations, "post-fire" and "pre-post-fire".
dataset = load_dataset("DarthReca/california_burned_areas", name="post-fire")
```
The dataset was compressed using `h5py` and BZip2 from `hdf5plugin`. **WARNING: `hdf5plugin` is necessary to extract data**.
### Data Instances
Each matrix has a shape of 5490x5490xC, where C is 12 for pre-fire and post-fire images, while binary masks have no channel dimension (C = 0).
The pre-patched version provides matrices of size 512x512xC instead; in this case, only patches whose masks contain at least one positive pixel are included.
The dataset comes in two versions: _raw_ (without any transformation) and _normalized_ (with data normalized to the range 0-255).
We suggest using the _raw_ version, so you keep the possibility of applying any pre-processing step you want.
### Data Fields
In each standard HDF5 file, you can find post-fire, pre-fire images, and binary masks. The file is structured in this way:
```bash
├── foldn
│ ├── uid0
│ │ ├── pre_fire
│ │ ├── post_fire
│ │ ├── mask
│ ├── uid1
│ ├── post_fire
│ ├── mask
│
├── foldm
├── uid2
│ ├── post_fire
│ ├── mask
├── uid3
├── pre_fire
├── post_fire
├── mask
...
```
where `foldn` and `foldm` are fold names and `uidn` is a unique identifier for the wildfire.
For the pre-patched version, the structure is:
```bash
root
|
|-- uid0_x: {post_fire, pre_fire, mask}
|
|-- uid0_y: {post_fire, pre_fire, mask}
|
|-- uid1_x: {post_fire, mask}
|
...
```
The fold name is stored as an attribute.
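A minimal sketch of reading the pre-patched file with `h5py` (the file name `patches.hdf5` and the `fold` attribute key are assumptions; check the repository files for the actual names):
```python
import h5py
import hdf5plugin  # importing this registers the BZip2 filter needed to decompress the data

# "patches.hdf5" is a placeholder for a downloaded pre-patched file.
with h5py.File("patches.hdf5", "r") as f:
    for uid, group in f.items():
        fold = group.attrs.get("fold")       # fold name stored as an attribute (key assumed)
        post_fire = group["post_fire"][...]  # 512x512x12 post-fire patch
        mask = group["mask"][...]            # binary burned-area mask
        if "pre_fire" in group:              # pre-fire images are not always present
            pre_fire = group["pre_fire"][...]
```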
### Data Splits
There are 5 random splits whose names are: 0, 1, 2, 3, and 4.
### Source Data
Data are collected directly from the Copernicus Open Access Hub through its API. The band files are aggregated into a single matrix.
## Additional Information
### Licensing Information
This work is under OpenRAIL license.
### Citation Information
If you plan to use this dataset in your work, please credit the Sentinel-2 mission and the California Department of Forestry and Fire Protection, and cite it using this BibTeX:
```
@ARTICLE{cabuar,
author={Cambrin, Daniele Rege and Colomba, Luca and Garza, Paolo},
journal={IEEE Geoscience and Remote Sensing Magazine},
title={CaBuAr: California burned areas dataset for delineation [Software and Data Sets]},
year={2023},
volume={11},
number={3},
pages={106-113},
doi={10.1109/MGRS.2023.3292467}
}
``` | [
-0.24133533239364624,
-0.36820608377456665,
0.4501030445098877,
0.3775380849838257,
-0.2369680404663086,
-0.1470831036567688,
0.19856855273246765,
-0.011774864047765732,
0.25517433881759644,
0.8736158609390259,
-0.6879025101661682,
-1.1124765872955322,
-0.43478161096572876,
-0.274667143821... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
metaeval/acceptability-prediction | metaeval | 2023-03-24T13:42:37Z | 12 | 0 | null | [
"task_categories:text-classification",
"task_ids:acceptability-classification",
"language:en",
"license:apache-2.0",
"region:us"
] | 2023-03-24T13:42:37Z | 2023-01-27T13:31:01.000Z | 2023-01-27T13:31:01 | ---
license: apache-2.0
task_categories:
- text-classification
task_ids:
- acceptability-classification
language:
- en
---
```bib
@inproceedings{lau-etal-2015-unsupervised,
title = "Unsupervised Prediction of Acceptability Judgements",
author = "Lau, Jey Han and
Clark, Alexander and
Lappin, Shalom",
booktitle = "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
month = jul,
year = "2015",
address = "Beijing, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/P15-1156",
doi = "10.3115/v1/P15-1156",
pages = "1618--1628",
}
``` | [
-0.34426450729370117,
-0.5001387000083923,
0.4295985698699951,
0.2693016827106476,
-0.274080753326416,
-0.1347285360097885,
-0.4377697706222534,
-0.8038084506988525,
0.3598126769065857,
0.30949148535728455,
-0.21010246872901917,
-0.47394418716430664,
-0.713109016418457,
0.24781297147274017... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
metaeval/num-glue | metaeval | 2023-01-27T20:11:40Z | 12 | 0 | null | [
"license:apache-2.0",
"region:us"
] | 2023-01-27T20:11:40Z | 2023-01-27T20:09:59.000Z | 2023-01-27T20:09:59 | ---
license: apache-2.0
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
lshowway/wikipedia.VOS | lshowway | 2023-01-30T22:20:02Z | 12 | 0 | null | [
"region:us"
] | 2023-01-30T22:20:02Z | 2023-01-30T18:45:44.000Z | 2023-01-30T18:45:44 | ---
dataset_info:
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 6595233500
num_examples: 4035672
download_size: 4574322349
dataset_size: 6595233500
---
# Dataset Card for "wikipedia.VOS"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.8322833776473999,
-0.18846504390239716,
0.05234459787607193,
0.14379392564296722,
-0.3300120234489441,
-0.2890530526638031,
0.14362911880016327,
-0.07815133780241013,
0.8799453973770142,
0.4696093201637268,
-0.7402353882789612,
-0.7596029043197632,
-0.4637942612171173,
-0.09503765404224... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Tinsae/Ethiopian-foods | Tinsae | 2023-01-31T15:25:36Z | 12 | 2 | null | [
"region:us"
] | 2023-01-31T15:25:36Z | 2023-01-30T19:52:39.000Z | 2023-01-30T19:52:39 | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 278017824.86
num_examples: 1097
download_size: 271567376
dataset_size: 278017824.86
---
# Dataset Card for "Ethiopian-foods"
### The dataset contains images of the following Ethiopian foods, collected from social media
* Beyaynetu (በያይነቱ)
* Chechebsa (ጨጨብሳ)
* Doro Wat (ዶሮ ወጥ)
* Fir-fir (ፍርፍር)
* Genfo (ገንፎ)
* Kikil (ቅቅል)
* Kitfo (ክትፎ)
* Shekla Tibs (ሸክላ ጥብስ)
* Shiro Wat (ሽሮ ወጥ)
* Tihlo (ጥህሎ)
* Tire Siga (ጥሬ ስጋ)
| [
-0.45809704065322876,
-0.5845950841903687,
0.045196108520030975,
0.30698367953300476,
-0.4787009060382843,
0.10328180342912674,
0.1185629740357399,
-0.44002678990364075,
0.9299143552780151,
0.6143020391464233,
-0.7474028468132019,
-0.8975253105163574,
-0.8842273354530334,
0.110478639602661... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
williamberman/images | williamberman | 2023-01-30T20:13:00Z | 12 | 0 | null | [
"region:us"
] | 2023-01-30T20:13:00Z | 2023-01-30T20:03:08.000Z | 2023-01-30T20:03:08 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
reallyvenom/myagedataset | reallyvenom | 2023-01-30T20:15:12Z | 12 | 0 | null | [
"region:us"
] | 2023-01-30T20:15:12Z | 2023-01-30T20:13:49.000Z | 2023-01-30T20:13:49 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
vanderbilt-dsi/narrative-arc | vanderbilt-dsi | 2023-02-27T18:28:44Z | 12 | 1 | null | [
"license:mit",
"region:us"
] | 2023-02-27T18:28:44Z | 2023-01-30T20:33:01.000Z | 2023-01-30T20:33:01 | ---
license: mit
language_creators:
- other
multilinguality:
- monolingual
pretty_name: narrative-arc
size_categories: []
source_datasets: []
tags: []
task_categories:
- text-classification
task_ids: []
---
# Dataset Card for narrative-arc
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
Dataset of stories used for Narrative Arc post-processing. An instance of a story in this dataset will include the original text and its metadata, the transformer model used to make the embeddings, the model's checkpoint, the window indices of the stored embeddings, and the embeddings.
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
[More Information Needed]
## Dataset Structure
### Data Instances
[More Information Needed]
### Data Fields
An example story will look like the following:
    {
        "book name": "",
        "book meta data": "",
        "full text": "",
        "model": {
            "distilbert-base-cased": {
                "window indices": (first_index, last_index),
                "embeddings": [[]]
            },
            "distilbert-base-uncased": {
                "window indices": (first_index, last_index),
                "embeddings": [[]]
            }
        }
    }
### Data Splits
[More Information Needed]
## Dataset Creation
### Curation Rationale
The processed text needs to be stored somewhere that is both accessible and can accommodate the large amount of data generated.
### Source Data
#### Initial Data Collection and Normalization
The data were sourced from the [Project Gutenberg](https://www.gutenberg.org/) library.
#### Who are the source language producers?
Each instance in the dataset represents a text written by a human author. At present, data selected for processing are English-language short stories.
### Personal and Sensitive Information
Not applicable.
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
[More Information Needed]
### Licensing Information
[More Information Needed]
### Citation Information
[More Information Needed]
### Contributions
Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset. | [
-0.30776247382164,
-0.5347082614898682,
0.4266834855079651,
0.09661553800106049,
-0.10753767192363739,
0.21821223199367523,
-0.299287885427475,
-0.2493160218000412,
0.30015653371810913,
0.77809077501297,
-0.8372317552566528,
-1.1174039840698242,
-0.6285873651504517,
0.21388988196849823,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
oz117/lala | oz117 | 2023-01-30T21:19:38Z | 12 | 0 | null | [
"region:us"
] | 2023-01-30T21:19:38Z | 2023-01-30T21:19:12.000Z | 2023-01-30T21:19:12 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622264862060547,
0.43461528420448303,
-0.52829909324646,
0.7012971639633179,
0.7915720343589783,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104477167129517,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
osanseviero/langchain_hub_test | osanseviero | 2023-01-30T22:27:09Z | 12 | 0 | null | [
"region:us"
] | 2023-01-30T22:27:09Z | 2023-01-30T21:58:28.000Z | 2023-01-30T21:58:28 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622264862060547,
0.43461528420448303,
-0.52829909324646,
0.7012971639633179,
0.7915720343589783,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104477167129517,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
keelezibel/jjlin | keelezibel | 2023-01-31T01:54:03Z | 12 | 0 | null | [
"license:cc",
"region:us"
] | 2023-01-31T01:54:03Z | 2023-01-31T01:53:17.000Z | 2023-01-31T01:53:17 | ---
license: cc
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
calvegh/diffusion_db_10k_processed | calvegh | 2023-02-01T04:38:38Z | 12 | 0 | null | [
"region:us"
] | 2023-02-01T04:38:38Z | 2023-01-31T03:51:48.000Z | 2023-01-31T03:51:48 | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype: int64
- name: label_txt
dtype: string
- name: topic_keywords
dtype: string
splits:
- name: train
num_bytes: 2762536
num_examples: 8571
download_size: 647051
dataset_size: 2762536
---
# Dataset Card for "diffusion_db_10k_processed"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.5967780947685242,
-0.6566003561019897,
0.44536876678466797,
0.49907466769218445,
-0.08896233141422272,
-0.15193726122379303,
0.27692073583602905,
0.008366966620087624,
0.9092540144920349,
0.5607549548149109,
-0.7704311013221741,
-0.755474865436554,
-0.664713442325592,
-0.283165782690048... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
abhishek/test | abhishek | 2023-01-31T06:52:27Z | 12 | 0 | null | [
"region:us"
] | 2023-01-31T06:52:27Z | 2023-01-31T06:52:25.000Z | 2023-01-31T06:52:25 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
abhishek/test4 | abhishek | 2023-01-31T07:01:39Z | 12 | 0 | null | [
"region:us"
] | 2023-01-31T07:01:39Z | 2023-01-31T07:01:37.000Z | 2023-01-31T07:01:37 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
seungwon12/layoutlmv2_train_data | seungwon12 | 2023-02-06T05:11:21Z | 12 | 0 | null | [
"region:us"
] | 2023-02-06T05:11:21Z | 2023-01-31T07:25:21.000Z | 2023-01-31T07:25:21 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
orhunc/Bias-Evaluation-Turkish | orhunc | 2023-03-10T12:54:35Z | 12 | 0 | null | [
"language:tr",
"arxiv:1903.10561",
"region:us"
] | 2023-03-10T12:54:35Z | 2023-01-31T07:46:27.000Z | 2023-01-31T07:46:27 | ---
language:
- tr
---
Translation of the bias evaluation framework of May et al. (2019) from [this repository](https://github.com/W4ngatang/sent-bias) and [this paper](https://arxiv.org/abs/1903.10561) into Turkish. There are 37 tests in total, including tests addressing gender bias as well as tests designed to evaluate ethnic bias toward Kurdish people in the context of Türkiye.
Abstract of the paper:
While the growing size of pre-trained language models has led to large improvements in a variety of natural language processing tasks, the success of these models comes with a price: They are trained on drastic amounts of mostly Web-based data, which often contains social stereotypes and biases that the models might pick up. This can have negative consequences, as models can abuse these biases in downstream tasks or applications. An application exemplifying the embedded cultural stereotypes is statistical machine translation, a common natural language processing task. Translations to English from a gender-neutral language such as Turkish, which does not have any grammatical gender like the gendered pronouns 'he' or 'she' in English, lead to gender-stereotyped sentences. For instance, Google Translate converts these Turkish sentences with gender-neutral pronouns: 'O bir doktor. O bir hemşire.' to these English sentences: 'He is a doctor. She is a nurse.' The same behavior can be observed when translating these Turkish sentences into other languages with grammatical gender like Spanish, Russian, and German. The gender-neutral Turkish pronoun 'o' is converted into gender-stereotyped pronouns in the respective language. Mitigating different types of bias in LMs would have diverse implications: On the one hand, it would allow us to avoid amplifying these biases. On the other hand, by avoiding algorithms enforcing social biases against minorities one could shift the social balance in the long term.
Previous research has primarily focused on the English language, especially in the realm of gender bias in language models. However, the investigation of more languages with different linguistic elements than English, especially the ones like Turkish that are grammatically gender-neutral, can deepen our insights into the role of gender bias in LMs. The goal of this thesis was to address this research gap and to investigate the significance of gender-bias in Turkish language models. We used existing bias evaluation frameworks on Turkish models by both translating existing English datasets and creating new ones designed to measure gender-bias in the context of Türkiye. We also extended the testing framework to evaluate Turkish models for their embedded ethnic bias toward Kurdish people. Based on the test outcomes, we suggested possible relations of the picked up biases to different model characteristics such as the model size, their multilingualism, and the training corpora. | [
-0.44598257541656494,
-0.6757511496543884,
0.30671295523643494,
0.29784339666366577,
-0.0985206589102745,
-0.3249693810939789,
-0.24314288794994354,
-0.3808000087738037,
-0.19509869813919067,
0.546864926815033,
-0.6692910194396973,
-0.3084029257297516,
-0.7702895402908325,
0.39807236194610... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
oskarspakers/songs | oskarspakers | 2023-04-28T20:43:51Z | 12 | 1 | null | [
"language:lv",
"license:openrail",
"region:us"
] | 2023-04-28T20:43:51Z | 2023-01-31T09:01:17.000Z | 2023-01-31T09:01:17 | ---
license: openrail
language:
- lv
pretty_name: Songs in Latvian
---
Nothing here | [
-0.24843600392341614,
-0.5702426433563232,
0.888563871383667,
0.4881754517555237,
0.013685587793588638,
0.4540126323699951,
0.8597304821014404,
-0.3024650812149048,
1.0563910007476807,
0.4827995300292969,
-0.6187446713447571,
-0.5242654085159302,
-0.6521553993225098,
-0.26189833879470825,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
suanlixianren/dxl1.0 | suanlixianren | 2023-01-31T11:34:44Z | 12 | 0 | null | [
"region:us"
] | 2023-01-31T11:34:44Z | 2023-01-31T11:20:29.000Z | 2023-01-31T11:20:29 | Entry not found | [
-0.32276487350463867,
-0.22568444907665253,
0.8622263073921204,
0.43461570143699646,
-0.5282988548278809,
0.7012969255447388,
0.7915717363357544,
0.07618642598390579,
0.7746027112007141,
0.25632190704345703,
-0.7852815389633179,
-0.22573848068714142,
-0.910447895526886,
0.5715675354003906,... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
matchbench/geo-heter | matchbench | 2023-01-31T13:05:39Z | 12 | 0 | null | [
"region:us"
] | 2023-01-31T13:05:39Z | 2023-01-31T12:46:14.000Z | 2023-01-31T12:46:14 | Entry not found | [
-0.32276487350463867,
-0.22568444907665253,
0.8622263073921204,
0.43461570143699646,
-0.5282988548278809,
0.7012969255447388,
0.7915717363357544,
0.07618642598390579,
0.7746027112007141,
0.25632190704345703,
-0.7852815389633179,
-0.22573848068714142,
-0.910447895526886,
0.5715675354003906,... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
EddieChen372/devign_with_norm_vul_lines | EddieChen372 | 2023-02-04T16:35:18Z | 12 | 2 | null | [
"region:us"
] | 2023-02-04T16:35:18Z | 2023-02-04T16:28:55.000Z | 2023-02-04T16:28:55 | ---
dataset_info:
features:
- name: id
dtype: int32
- name: func
dtype: string
- name: target
dtype: bool
- name: project
dtype: string
- name: commit_id
dtype: string
- name: func_clean
dtype: string
- name: vul_lines
struct:
- name: code
sequence: string
- name: line_no
sequence: int64
- name: normalized_func
dtype: string
- name: lines
sequence: string
- name: label
sequence: int64
- name: line_no
sequence:
sequence: int64
splits:
- name: test
num_bytes: 22801956
num_examples: 2732
- name: train
num_bytes: 183794878
num_examples: 21854
- name: validation
num_bytes: 22451009
num_examples: 2732
download_size: 72100845
dataset_size: 229047843
---
# Dataset Card for "devign_with_norm_vul_lines"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.5054252743721008,
-0.4771508574485779,
0.08974042534828186,
0.28082168102264404,
-0.2628273069858551,
-0.35647645592689514,
0.425508588552475,
-0.07022856920957565,
0.6198431849479675,
0.6663687229156494,
-0.8092087507247925,
-0.8259689807891846,
-0.37058302760124207,
-0.373138546943664... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
shields/catalan_commonvoice_first15hr_processed | shields | 2023-02-06T12:45:19Z | 12 | 0 | null | [
"region:us"
] | 2023-02-06T12:45:19Z | 2023-02-06T12:39:56.000Z | 2023-02-06T12:39:56 | ---
dataset_info:
features:
- name: input_features
sequence:
sequence: float32
- name: labels
sequence: int64
splits:
- name: train
num_bytes: 6723710888
num_examples: 7000
- name: val
num_bytes: 2881592776
num_examples: 3000
download_size: 1776942256
dataset_size: 9605303664
---
# Dataset Card for "catalan_commonvoice_first15hr_processed"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.38871094584465027,
-0.2554953098297119,
0.011533951386809349,
0.7344471216201782,
-0.5636150240898132,
-0.13661031424999237,
0.1862289309501648,
-0.3081310987472534,
0.8974944949150085,
0.623652994632721,
-1.0322744846343994,
-0.9787695407867432,
-0.5066954493522644,
-0.2100398987531662... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
leobertolazzi/ita2medieval | leobertolazzi | 2023-02-06T13:00:18Z | 12 | 2 | null | [
"task_categories:text2text-generation",
"size_categories:1K<n<10K",
"language:it",
"region:us"
] | 2023-02-06T13:00:18Z | 2023-02-06T12:40:21.000Z | 2023-02-06T12:40:21 | ---
task_categories:
- text2text-generation
language:
- it
size_categories:
- 1K<n<10K
---
## ita2medieval
The **ita2medieval** dataset contains sentences in medieval Italian along with paraphrases in contemporary Italian (approximately 6.5k pairs in total). The medieval Italian sentences are extracted from texts by Dante, Petrarca, Guinizelli, and Cavalcanti.
It is intended for text style transfer from contemporary to medieval Italian and vice versa.
## Loading the dataset
```
from datasets import load_dataset
dataset = load_dataset("leobertolazzi/ita2medieval")
```
Note: due to the small size of the dataset there are no predefined train and test splits.
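Since no splits are predefined, you can carve out a held-out set yourself; a minimal sketch, assuming the data is exposed under a single `train` split:
```python
from datasets import load_dataset

dataset = load_dataset("leobertolazzi/ita2medieval")
# Carve a 10% test set out of the single split (split name assumed to be "train").
splits = dataset["train"].train_test_split(test_size=0.1, seed=42)
train_ds, test_ds = splits["train"], splits["test"]
```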
## Dataset creation
**ita2medieval** was created by scraping [letteritaliana.weebly.com](https://letteritaliana.weebly.com/). | [
-0.07152966409921646,
-0.5628997087478638,
0.3639902174472809,
0.3791748881340027,
-0.39956653118133545,
-0.36367571353912354,
-0.0014163026353344321,
-0.5490666627883911,
0.32844123244285583,
0.5735666751861572,
-0.6083526611328125,
-0.33660125732421875,
-0.46764564514160156,
0.3794886469... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
achang/plot_qa | achang | 2023-02-12T01:20:56Z | 12 | 3 | null | [
"task_categories:visual-question-answering",
"language:en",
"license:cc",
"plotQA",
"region:us"
] | 2023-02-12T01:20:56Z | 2023-02-06T18:51:17.000Z | 2023-02-06T18:51:17 | ---
license: cc
task_categories:
- visual-question-answering
language:
- en
tags:
- plotQA
pretty_name: PlotQA
---
# Dataset Card for PlotQA
## Dataset Description
- **Repository:** [PlotQA](https://github.com/NiteshMethani/PlotQA)
### Dataset Summary
PlotQA is a VQA dataset with 28.9 million question-answer pairs grounded over 224,377 plots on data from real-world sources and questions based on crowd-sourced question templates.
## Dataset Structure
### Data Fields
- `image`: PIL image of a plot
- `text`: string of the JSON `models` data. See the notes below.
From [here](https://github.com/NiteshMethani/PlotQA/blob/master/PlotQA_Dataset.md):
`models`: a list of dictionaries. Depending on the type of the plot (single or 2-, 3-, 4-multi), the length of the list can vary from 1 to 4. Each dictionary contains the following keys:
- `name`: label corresponding to the datapoint.
- `color`: color corresponding to the `name` datapoint.
- `bboxes`: bounding boxes corresponding to the `name` datapoints in the plot.
- `label`: label corresponding to the datapoint, which will appear as the legend (same as the `name` field).
- `x`: x-values of the datapoints.
- `y`: y-values of the datapoints.
[json2token](https://github.com/clovaai/donut/blob/b317b4bbf1eecec7c62e7666f2097e1e90a6b441/donut/model.py#L495) function was used to convert json to string.
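As a rough illustration (a sketch in the spirit of the linked function, not the exact Donut implementation), the flattening wraps each key in `<s_key>`/`</s_key>` tokens and joins list items with `<sep/>`:
```python
# A sketch only; see the linked json2token source for the exact behavior.
def json2token_like(obj):
    if isinstance(obj, dict):
        return "".join(f"<s_{k}>{json2token_like(v)}</s_{k}>" for k, v in obj.items())
    if isinstance(obj, list):
        return "<sep/>".join(json2token_like(v) for v in obj)
    return str(obj)

print(json2token_like({"name": "India", "x": [2004, 2005], "y": [1.2, 1.5]}))
# <s_name>India</s_name><s_x>2004<sep/>2005</s_x><s_y>1.2<sep/>1.5</s_y>
```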
The new tokens are already loaded in plotQA processor:
```
from transformers import DonutProcessor
processor = DonutProcessor.from_pretrained("achang/donut-plotqa-trained")
```
### Data Splits
```
validation: Dataset({
features: ['image', 'text'],
num_rows: 33650
})
train: Dataset({
features: ['image', 'text'],
num_rows: 157070
})
test: Dataset({
features: ['image', 'text'],
num_rows: 33657
})
```
## Misc
For Dataset Creation, Annotations, Considerations for Using the Data, Social Impact of Dataset, Additional Information, and Licensing Information, see [plotQA](https://github.com/NiteshMethani/PlotQA).
### Citation Information
Please cite the following if you use the PlotQA dataset in your work:
```
@InProceedings{Methani_2020_WACV,
author = {Methani, Nitesh and Ganguly, Pritha and Khapra, Mitesh M. and Kumar, Pratyush},
title = {PlotQA: Reasoning over Scientific Plots},
booktitle = {The IEEE Winter Conference on Applications of Computer Vision (WACV)},
month = {March},
year = {2020}
}
```
| [
-0.24267858266830444,
-0.33761337399482727,
0.36296945810317993,
0.04440384730696678,
-0.16636709868907928,
0.13829560577869415,
0.2706359028816223,
-0.2137877494096756,
0.31839168071746826,
0.6272556781768799,
-0.4535009264945984,
-0.5662981867790222,
-0.6151019334793091,
-0.1120721474289... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Loie/VGGSound | Loie | 2023-03-26T13:25:40Z | 12 | 6 | null | [
"task_categories:audio-classification",
"size_categories:100B<n<1T",
"arxiv:2004.14368",
"region:us"
] | 2023-03-26T13:25:40Z | 2023-02-17T10:27:55.000Z | 2023-02-17T10:27:55 | ---
task_categories:
- audio-classification
size_categories:
- 100B<n<1T
---
# VGGSound
VGG-Sound is an audio-visual correspondence dataset consisting of short clips of audio sounds, extracted from videos uploaded to YouTube.
- **Homepage:** https://www.robots.ox.ac.uk/~vgg/data/vggsound/
- **Paper:** https://arxiv.org/abs/2004.14368
- **Github:** https://github.com/hche11/VGGSound
## Analysis
- **310+ classes:** VGG-Sound contains audio spanning a large number of challenging acoustic environments and noise characteristics of real applications.
- **200,000+ videos:** All videos are captured "in the wild" with audio-visual correspondence in the sense that the sound source is visually evident.
- **550+ hours:** VGG-Sound consists of both audio and video. Each segment is 10 seconds long.

## Download
We provide a CSV file. For each YouTube video, it lists the YouTube ID, start time, audio label, and train/test split. Each line in the CSV file has the following columns:
```
# YouTube ID, start seconds, label, train/test split.
```
And you can download VGGSound directly from this [repository](https://huggingface.co/datasets/Loie/VGGSound/tree/main).
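For example, a minimal sketch of filtering the metadata with pandas (the file name `vggsound.csv` is an assumption; use the actual metadata file in the repository):
```python
import pandas as pd

# Column order follows the comment above; the CSV has no header row.
cols = ["youtube_id", "start_seconds", "label", "split"]
df = pd.read_csv("vggsound.csv", names=cols, header=None)
train = df[df["split"] == "train"]
print(len(train), "training clips,", df["label"].nunique(), "classes")
```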
## License
The VGG-Sound dataset is available to download for commercial/research purposes under a Creative Commons Attribution 4.0 International License. The copyright remains with the original owners of the video. A complete version of the license can be found [here](https://thor.robots.ox.ac.uk/datasets/vggsound/license_vggsound.txt).
## Citation
Please cite the following if you make use of the dataset.
```
@InProceedings{Chen20,
author = "Honglie Chen and Weidi Xie and Andrea Vedaldi and Andrew Zisserman",
title = "VGGSound: A Large-scale Audio-Visual Dataset",
booktitle = "International Conference on Acoustics, Speech, and Signal Processing (ICASSP)",
year = "2020",
}
``` | [
-0.3844376802444458,
-0.5389659404754639,
0.28820160031318665,
-0.09818695485591888,
-0.2738102376461029,
0.07269243150949478,
-0.32999974489212036,
-0.2379930466413498,
0.3478035032749176,
0.07417326420545578,
-0.8993510007858276,
-0.6047762632369995,
-0.34992069005966187,
-0.274147123098... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Jacobvs/PoliticalTweets | Jacobvs | 2023-02-22T19:19:34Z | 12 | 0 | null | [
"license:mit",
"region:us"
] | 2023-02-22T19:19:34Z | 2023-02-22T19:18:37.000Z | 2023-02-22T19:18:37 | ---
license: mit
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
larrylawl/multilexnorm | larrylawl | 2023-05-05T08:17:00Z | 12 | 1 | null | [
"task_categories:text-generation",
"size_categories:100K<n<1M",
"language:en",
"language:da",
"language:de",
"language:es",
"language:hr",
"language:it",
"language:nl",
"language:sl",
"language:sr",
"language:tr",
"language:id",
"license:cc-by-4.0",
"region:us"
] | 2023-05-05T08:17:00Z | 2023-03-07T09:51:47.000Z | 2023-03-07T09:51:47 | ---
license: cc-by-4.0
task_categories:
- text-generation
language:
- en
- da
- de
- es
- hr
- it
- nl
- sl
- sr
- tr
- id
size_categories:
- 100K<n<1M
---
# Dataset Card for MultiLexNorm
## Table of Contents
- [Dataset Card for MultiLexNorm](#dataset-card-for-multilexnorm)
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
- [Who are the source language producers?](#who-are-the-source-language-producers)
- [Annotations](#annotations)
- [Annotation process](#annotation-process)
- [Who are the annotators?](#who-are-the-annotators)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** http://noisy-text.github.io/2021/multi-lexnorm.html
- **Paper:** https://aclanthology.org/2021.wnut-1.55/
### Dataset Summary
This is the Hugging Face version of the MultiLexNorm dataset.
I'm not affiliated with the creators; I'm just releasing the files in an easier-to-access format after processing.
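A minimal loading sketch (whether languages are exposed as separate configurations is an assumption; check the repository files for the actual layout):
```python
from datasets import load_dataset

# Assumes one configuration per language code (e.g. "en"); adjust to the
# actual layout of the repository.
ds = load_dataset("larrylawl/multilexnorm", "en")
```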
### Citation Information
```
@inproceedings{van-der-goot-etal-2021-multilexnorm,
title = "{M}ulti{L}ex{N}orm: A Shared Task on Multilingual Lexical Normalization",
author = {van der Goot, Rob and
Ramponi, Alan and
Zubiaga, Arkaitz and
Plank, Barbara and
Muller, Benjamin and
San Vicente Roncal, I{\~n}aki and
Ljube{\v{s}}i{\'c}, Nikola and
{\c{C}}etino{\u{g}}lu, {\"O}zlem and
Mahendra, Rahmad and
{\c{C}}olako{\u{g}}lu, Talha and
Baldwin, Timothy and
Caselli, Tommaso and
Sidorenko, Wladimir},
booktitle = "Proceedings of the Seventh Workshop on Noisy User-generated Text (W-NUT 2021)",
month = nov,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.wnut-1.55",
doi = "10.18653/v1/2021.wnut-1.55",
pages = "493--509",
abstract = "Lexical normalization is the task of transforming an utterance into its standardized form. This task is beneficial for downstream analysis, as it provides a way to harmonize (often spontaneous) linguistic variation. Such variation is typical for social media on which information is shared in a multitude of ways, including diverse languages and code-switching. Since the seminal work of Han and Baldwin (2011) a decade ago, lexical normalization has attracted attention in English and multiple other languages. However, there exists a lack of a common benchmark for comparison of systems across languages with a homogeneous data and evaluation setup. The MultiLexNorm shared task sets out to fill this gap. We provide the largest publicly available multilingual lexical normalization benchmark including 13 language variants. We propose a homogenized evaluation setup with both intrinsic and extrinsic evaluation. As extrinsic evaluation, we use dependency parsing and part-of-speech tagging with adapted evaluation metrics (a-LAS, a-UAS, and a-POS) to account for alignment discrepancies. The shared task hosted at W-NUT 2021 attracted 9 participants and 18 submissions. The results show that neural normalization systems outperform the previous state-of-the-art system by a large margin. Downstream parsing and part-of-speech tagging performance is positively affected but to varying degrees, with improvements of up to 1.72 a-LAS, 0.85 a-UAS, and 1.54 a-POS for the winning system.",
}
```
### Contributions
Thanks to [@larrylawl](https://github.com/larrylawl) for adding this dataset.
| [
-0.5571335554122925,
-0.4499603509902954,
0.003278242889791727,
0.3662315607070923,
-0.027041170746088028,
-0.030675243586301804,
-0.792866051197052,
-0.5561534762382507,
0.4890666902065277,
0.18468469381332397,
-0.5997536182403564,
-0.8955276608467102,
-0.6513707041740417,
0.2478730380535... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
chcaa/DANSK | chcaa | 2023-07-13T18:59:14Z | 12 | 4 | null | [
"language:da",
"region:us"
] | 2023-07-13T18:59:14Z | 2023-03-08T11:24:22.000Z | 2023-03-08T11:24:22 | ---
language: da
dataset_info:
features:
- name: text
dtype: string
- name: ents
list:
- name: start
dtype: int64
- name: end
dtype: int64
- name: label
dtype: string
- name: sents
list:
- name: start
dtype: int64
- name: end
dtype: int64
- name: tokens
list:
- name: id
dtype: int64
- name: start
dtype: int64
- name: end
dtype: int64
- name: spans
struct:
- name: incorrect_spans
sequence: 'null'
- name: dagw_source
dtype: string
- name: dagw_domain
dtype: string
- name: dagw_source_full
dtype: string
splits:
- name: dev
num_bytes: 600679
num_examples: 1500
- name: test
num_bytes: 605135
num_examples: 1500
- name: train
num_bytes: 4819833
num_examples: 12062
download_size: 1439625
dataset_size: 6025647
---
## Dataset Description
### Dataset Summary
DANSK: Danish Annotations for NLP Specific TasKs is a dataset consisting of texts from multiple domains, sampled from the Danish GigaWord Corpus (DAGW).
The dataset was created to fill in the gap of Danish NLP datasets from different domains, that are required for training models that generalize across domains. The Named-Entity annotations are moreover fine-grained and have a similar form to that of OntoNotes v5, which significantly broadens the use cases of the dataset.
The domains include Web, News, Wiki & Books, Legal, Dannet, Conversation and Social Media. For a more in-depth understanding of the domains, please refer to [DAGW](https://huggingface.co/datasets/DDSC/partial-danish-gigaword-no-twitter).
The distribution of texts and named entities within each domain can be seen in the tables in the Descriptive Statistics section below.
### Update log
- 2023-05-26: Added individual annotations for each annotator to allow for analysis of inter-annotator agreement
### Supported Tasks
The DANSK dataset currently only supports Named-Entity Recognition, but additional version releases will contain data for more tasks.
### Languages
All texts in the dataset are in Danish.
Slang from various platforms or dialects may appear, consistent with the domains from which the texts originally have been sampled - e.g. Social Media.
## Dataset Structure
### Data Instances
The JSON-formatted data is in the form seen below:
```
{
"text": "Aborrer over 2 kg er en uhyre sj\u00e6lden fangst.",
"ents": [{"start": 13, "end": 17, "label": "QUANTITY"}],
"sents": [{"start": 0, "end": 45}],
"tokens": [
{"id": 0, "start": 0, "end": 7},
{"id": 1, "start": 8, "end": 12},
{"id": 2, "start": 13, "end": 14},
{"id": 3, "start": 15, "end": 17},
{"id": 4, "start": 18, "end": 20},
{"id": 5, "start": 21, "end": 23},
{"id": 6, "start": 24, "end": 29},
{"id": 7, "start": 30, "end": 37},
{"id": 8, "start": 38, "end": 44},
{"id": 9, "start": 44, "end": 45},
],
"spans": {"incorrect_spans": []},
"dagw_source": "wiki",
"dagw_domain": "Wiki & Books",
"dagw_source_full": "Wikipedia",
}
```
### Data Fields
- `text`: The text
- `ents`: The annotated entities
- `sents`: The sentences of the text
- `dagw_source`: Shorthand name of the source from which the text has been sampled in the Danish Gigaword Corpus
- `dagw_source_full`: Full name of the source from which the text has been sampled in the Danish Gigaword Corpus
- `dagw_domain`: Name of the domain to which the source adheres to
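
For quick inspection, the character offsets in `ents` can be resolved directly against `text`. Below is a minimal sketch using the Hugging Face `datasets` library; it is an illustration, not part of the official release:

```python
from datasets import load_dataset

# Load the training split of DANSK
dansk = load_dataset("chcaa/DANSK", split="train")

example = dansk[0]
for ent in example["ents"]:
    # `start` and `end` are character offsets into `text`
    print(example["text"][ent["start"]:ent["end"]], "->", ent["label"])
```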
### Data Splits
The data was randomly split into three distinct partitions: train, dev, and test.
The splits come from the same pool, and there are thus no underlying differences between the sets.
To see the distribution of named entities and domains across the partitions,
please refer to the paper, or to the summary statistics provided in the Dataset Composition section of this dataset card.
## Descriptive Statistics
### Dataset Composition
Named entity annotation composition across partitions can be seen in the table below:
| | Full | Train | Validation | Test |
| :------------: | :---: | :------------: | :----------: | :-----------: |
| Texts | 15062 | 12062 (80%) | 1500 (10%) | 1500 (10%) |
| Named entities | 14462 | 11638 (80.47%) | 1327 (9.18%) | 1497 (10.25%) |
| CARDINAL | 2069 | 1702 (82.26%) | 168 (8.12%) | 226 (10.92%) |
| DATE | 1756 | 1411 (80.35%) | 182 (10.36%) | 163 (9.28%) |
| EVENT | 211 | 175 (82.94%) | 19 (9.00%) | 17 (8.06%) |
| FACILITY | 246 | 200 (81.30%) | 25 (10.16%) | 21 (8.54%) |
| GPE | 1604 | 1276 (79.55%) | 135 (8.42%) | 193 (12.03%) |
| LANGUAGE | 126 | 53 (42.06%) | 17 (13.49%) | 56 (44.44%) |
| LAW | 183 | 148 (80.87%) | 17 (9.29%) | 18 (9.84%) |
| LOCATION | 424 | 351 (82.78%) | 46 (10.85%) | 27 (6.37%) |
| MONEY | 714 | 566 (79.27%) | 72 (10.08%) | 76 (10.64%) |
| NORP | 495 | 405 (81.82%) | 41 (8.28%) | 49 (9.90%) |
| ORDINAL | 127 | 105 (82.68%) | 11 (8.66%) | 11 (8.66%) |
| ORGANIZATION | 2507 | 1960 (78.18%) | 249 (9.93%) | 298 (11.87%) |
| PERCENT | 148 | 123 (83.11%) | 13 (8.78%) | 12 (8.11%) |
| PERSON | 2133 | 1767 (82.84%) | 191 (8.95%) | 175 (8.20%) |
| PRODUCT | 763 | 634 (83.09%) | 57 (7.47%) | 72 (9.44%) |
| QUANTITY | 292 | 242 (82.88%) | 28 (9.59%) | 22 (7.53%) |
| TIME | 218 | 185 (84.86%) | 18 (8.26%) | 15 (6.88%) |
| WORK OF ART | 419 | 335 (79.95%) | 38 (9.07%) | 46 (10.98%) |
### Domain distribution
Domain and source distribution across partitions can be seen in the table below:
| Domain | Source | Full | Train | Dev | Test |
| :----------: | :----------------: | :---: | :---: | :---: | :---: |
| Conversation | Europa Parlamentet | 206 | 173 | 17 | 16 |
| Conversation | Folketinget | 23 | 21 | 1 | 1 |
| Conversation | NAAT | 554 | 431 | 50 | 73 |
| Conversation | OpenSubtitles | 377 | 300 | 39 | 38 |
| Conversation | Spontaneous speech | 489 | 395 | 54 | 40 |
| Dannet | Dannet | 25 | 18 | 4 | 3 |
| Legal | Retsinformation.dk | 965 | 747 | 105 | 113 |
| Legal | Skat.dk | 471 | 364 | 53 | 54 |
| Legal | Retspraktis | 727 | 579 | 76 | 72 |
| News | DanAvis | 283 | 236 | 20 | 27 |
| News | TV2R | 138 | 110 | 16 | 12 |
| Social Media | hestenettet.dk | 554 | 439 | 51 | 64 |
| Web | Common Crawl | 8270 | 6661 | 826 | 783 |
| Wiki & Books | adl | 640 | 517 | 57 | 66 |
| Wiki & Books | Wikipedia | 279 | 208 | 30 | 41 |
| Wiki & Books | WikiBooks | 335 | 265 | 36 | 34 |
| Wiki & Books | WikiSource | 455 | 371 | 43 | 41 |
### Entity Distribution across Domains
Domain and named entity distributions for the training set can be seen below:
| | All domains combined | Conversation | Dannet | Legal | News | Social Media | Web | Wiki and Books |
| :----------: | :------------------: | :----------: | :----: | :---: | :---: | :----------: | :---: | :------------: |
| DOCS | 12062 | 1320 | 18 | 1690 | 346 | 439 | 6661 | 1361 |
| ENTS | 11638 | 1060 | 15 | 1292 | 419 | 270 | 7502 | 883 |
| CARDINAL | 1702 | 346 | 6 | 95 | 35 | 17 | 1144 | 59 |
| DATE | 1411 | 113 | 5 | 257 | 40 | 29 | 831 | 126 |
| EVENT | 175 | 43 | 0 | 1 | 9 | 3 | 106 | 8 |
| FACILITY | 200 | 2 | 0 | 4 | 18 | 3 | 159 | 10 |
| GPE | 1276 | 130 | 2 | 60 | 68 | 31 | 846 | 128 |
| LANGUAGE | 53 | 3 | 0 | 0 | 0 | 0 | 34 | 16 |
| LAW | 148 | 10 | 0 | 100 | 1 | 0 | 22 | 13 |
| LOCATION | 351 | 18 | 0 | 1 | 7 | 7 | 288 | 29 |
| MONEY | 566 | 1 | 0 | 62 | 13 | 18 | 472 | 0 |
| NORP | 405 | 70 | 0 | 61 | 22 | 1 | 188 | 42 |
| ORDINAL | 105 | 11 | 0 | 17 | 9 | 2 | 43 | 22 |
| ORGANIZATION | 1960 | 87 | 0 | 400 | 61 | 39 | 1303 | 58 |
| PERCENT | 123 | 5 | 0 | 10 | 11 | 0 | 91 | 4 |
| PERSON | 1767 | 189 | 2 | 194 | 101 | 69 | 970 | 121 |
| PRODUCT | 634 | 3 | 0 | 10 | 2 | 33 | 581 | 3 |
| QUANTITY | 242 | 1 | 0 | 9 | 6 | 17 | 188 | 20 |
| TIME | 185 | 16 | 0 | 5 | 13 | 1 | 144 | 6 |
| WORK OF ART | 335 | 12 | 0 | 6 | 3 | 0 | 92 | 218 |
Domain and named entity distributions for the validation set can be seen below:
| | Sum | Conversation | Dannet | Legal | News | Social Media | Web | Wiki |
| :----------: | :---: | :----------: | :----: | :---: | :---: | :----------: | :---: | :---: |
| DOCS | 1500 | 161 | 4 | 234 | 36 | 51 | 826 | 166 |
| ENTS | 1497 | 110 | 4 | 171 | 43 | 30 | 983 | 143 |
| CARDINAL | 226 | 41 | 2 | 19 | 7 | 5 | 139 | 13 |
| DATE | 163 | 11 | 0 | 27 | 6 | 4 | 89 | 26 |
| EVENT | 17 | 2 | 0 | 0 | 1 | 0 | 13 | 1 |
| FACILITY | 21 | 1 | 0 | 0 | 0 | 0 | 16 | 4 |
| GPE | 193 | 17 | 1 | 8 | 7 | 2 | 131 | 25 |
| LANGUAGE | 56 | 0 | 0 | 0 | 0 | 0 | 50 | 6 |
| LAW | 18 | 2 | 0 | 8 | 0 | 0 | 8 | 0 |
| LOCATION | 27 | 2 | 0 | 1 | 0 | 0 | 21 | 3 |
| MONEY | 76 | 2 | 0 | 9 | 1 | 6 | 58 | 0 |
| NORP | 49 | 8 | 0 | 8 | 1 | 2 | 21 | 9 |
| ORDINAL | 11 | 2 | 0 | 2 | 0 | 1 | 3 | 3 |
| ORGANIZATION | 298 | 6 | 0 | 68 | 5 | 3 | 212 | 4 |
| PERCENT | 12 | 0 | 0 | 2 | 0 | 0 | 10 | 0 |
| PERSON | 175 | 16 | 1 | 16 | 11 | 4 | 96 | 20 |
| PRODUCT | 72 | 0 | 0 | 0 | 0 | 2 | 69 | 1 |
| QUANTITY | 22 | 0 | 0 | 1 | 2 | 1 | 17 | 1 |
| TIME | 15 | 0 | 0 | 0 | 2 | 0 | 13 | 0 |
| WORK OF ART | 46 | 0 | 0 | 2 | 0 | 0 | 17 | 27 |
Domain and named entity distributions for the testing set can be seen below:
| | Sum | Conversation | Dannet | Legal | News | Social Media | Web | Wiki |
| :----------: | :---: | :----------: | :----: | :---: | :---: | :----------: | :---: | :---: |
| DOCS | 1500 | 161 | 4 | 234 | 36 | 51 | 826 | 166 |
| ENTS | 1497 | 110 | 4 | 171 | 43 | 30 | 983 | 143 |
| CARDINAL | 226 | 41 | 2 | 19 | 7 | 5 | 139 | 13 |
| DATE | 163 | 11 | 0 | 27 | 6 | 4 | 89 | 26 |
| EVENT | 17 | 2 | 0 | 0 | 1 | 0 | 13 | 1 |
| FACILITY | 21 | 1 | 0 | 0 | 0 | 0 | 16 | 4 |
| GPE | 193 | 17 | 1 | 8 | 7 | 2 | 131 | 25 |
| LANGUAGE | 56 | 0 | 0 | 0 | 0 | 0 | 50 | 6 |
| LAW | 18 | 2 | 0 | 8 | 0 | 0 | 8 | 0 |
| LOCATION | 27 | 2 | 0 | 1 | 0 | 0 | 21 | 3 |
| MONEY | 76 | 2 | 0 | 9 | 1 | 6 | 58 | 0 |
| NORP | 49 | 8 | 0 | 8 | 1 | 2 | 21 | 9 |
| ORDINAL | 11 | 2 | 0 | 2 | 0 | 1 | 3 | 3 |
| ORGANIZATION | 298 | 6 | 0 | 68 | 5 | 3 | 212 | 4 |
| PERCENT | 12 | 0 | 0 | 2 | 0 | 0 | 10 | 0 |
| PERSON | 175 | 16 | 1 | 16 | 11 | 4 | 96 | 20 |
| PRODUCT | 72 | 0 | 0 | 0 | 0 | 2 | 69 | 1 |
| QUANTITY | 22 | 0 | 0 | 1 | 2 | 1 | 17 | 1 |
| TIME | 15 | 0 | 0 | 0 | 2 | 0 | 13 | 0 |
| WORK OF ART | 46 | 0 | 0 | 2 | 0 | 0 | 17 | 27 |
## Dataset Creation
### Curation Rationale
The dataset is meant to fill in the gap of Danish NLP that up until now
has been missing a dataset with 1) fine-grained named entity recognition
labels, and 2) high variance in domain origin of texts. As such, it is the
intention that DANSK should be employed in training by anyone who wishes
to create models for NER that are both generalizable across domains and
fine-grained in their predictions. It may also be utilized for cross-domain evaluations, in order to uncover any potential domain biases. While
the dataset currently only entails annotations for named entities, it is the
intention that future versions of the dataset will feature dependency parsing,
part-of-speech tagging, and possibly revised NER annotations.
### Source Data
The data collection, annotation, and normalization steps of the data were extensive.
As the description is too long for this readme, please refer to the associated paper upon its publication for a full description.
#### Initial Data Collection and Normalization
### Annotations
#### Annotation process
To afford high granularity, the DANSK dataset utilized the annotation standard of OntoNotes 5.0.
The standard features 18 different named entity types. The full description can be seen in the associated paper.
#### Who are the annotators?
10 English Linguistics Master’s program students from Aarhus University were employed.
They worked 10 hours/week for six weeks from October 11, 2021, to November 22, 2021.
Their annotation tasks included part-of-speech tagging, dependency parsing, and NER annotation.
Named entity annotations and dependency parsing were done from scratch, while the POS tagging consisted of corrections of silver-standard predictions by an NLP model.
### Annotator Compensation
**Annotators were compensated at the standard rate for students, as determined by the collective agreement of the Danish Ministry of Finance and the Central Organization of Teachers and the CO10 Central Organization of 2010 (the CO10 joint agreement), which is 140 DKK/hour.**
### Automatic correction
During the manual correction of the annotations, a series of consistent errors was found. These were corrected using the following regex patterns (see also the Danish Addendum to the OntoNotes annotation guidelines):
<details><summary>Regex Patterns</summary>
<p>
For matching with TIME spans, e.g. [16:30 - 17:30] (TIME):
```
\d{1,2}:\d\d ?[-|\||\/] ?\d
dag: \d{1,2}
```
For matching with DATE spans, e.g. [1938 - 1992] (DATE):
```
\d{2,4} ?[-|–] ?\d{2,4}
```
For matching companies with A/S and ApS, e.g. [Hansens Skomager A/S] (ORGANIZATION):
```
ApS
A\/S
```
For matching written numerals from "to" (two) to "ti" (ten), e.g. "to":
```
to | to$|^to| To | To$|^To| TO | TO$|^TO|
tre | tre$|^tre| Tre | Tre$|^Tre| TRE | TRE$|^TRE|
fire | fire$|^fire| Fire | Fire$|^Fire| FIRE | FIRE$|^FIRE|
fem | fem$|^fem| Fem | Fem$|^Fem| FEM | FEM$|^FEM|
 seks | seks$|^seks| Seks | Seks$|^Seks| SEKS | SEKS$|^SEKS|
 syv | syv$|^syv| Syv | Syv$|^Syv| SYV | SYV$|^SYV|
otte | otte$|^otte| Otte | Otte$|^Otte| OTTE | OTTE$|^OTTE|
ni | ni$|^ni| Ni | Ni$|^Ni| NI | NI$|^NI|
ti | ti$|^ti| Ti | Ti$|^Ti| TI | TI$|^TI
```
For matching "Himlen" or "Himmelen" already annotated
as LOCATION, e.g. "HIMLEN":
```
[Hh][iI][mM][lL][Ee][Nn]|[Hh][iI][mM][mM][Ee][lL][Ee][Nn]
```
For matching "Gud" already tagged as PERSON, e.g. "GUD":
```
[Gg][Uu][Dd]
```
For matching telephone numbers wrongly already
tagged as CARDINAL, e.g. "20 40 44 30":
```
\d{2} \d{2} \d{2} \d{2}
\+\d{2} \d{2} ?\d{2} ?\d{2} ?\d{2}$
\d{4} ?\d{4}$
^\d{4} ?\d{4}$
```
For matching websites already
wrongly tagged as ORGANIZATION:
```
.dk$|.com$
```
For matching Hotels and Resorts
already wrongly tagged as ORGANIZATION:
```
.*[h|H]otel.*|.*[R|r]esort.*
```
For matching numbers including /
or :, already wrongly tagged as CARDINAL:
```
\/
:
-
```
For matching rights already
wrongly tagged as LAW:
```
[C|c]opyright
[®|©]
[f|F]ortrydelsesret
[o|O]phavsret$
enneskeret
```
</p>
</details>
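
As an illustration only (this is not the project's actual correction script), such patterns can be applied to flag suspicious spans before manual review. The sketch below uses the telephone-number case from the list above:

```python
import re

# Telephone-number-like spans that should not be tagged as CARDINAL
phone_like = re.compile(r"\d{2} \d{2} \d{2} \d{2}")

def flag_wrong_cardinals(text, ents):
    """Return the CARDINAL entities whose surface form matches the pattern."""
    return [
        ent
        for ent in ents
        if ent["label"] == "CARDINAL"
        and phone_like.search(text[ent["start"]:ent["end"]])
    ]
```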
### Licensing Information
Creative Commons Attribution-Share Alike 4.0 International license
### Citation Information
The paper is in progress.
| [
-0.7340766787528992,
-0.45806455612182617,
0.2856961488723755,
0.3492349684238434,
-0.3288515508174896,
-0.15625321865081787,
-0.24500125646591187,
-0.44412386417388916,
0.5939871668815613,
0.4695344865322113,
-0.6436465382575989,
-1.031288504600525,
-0.6129211187362671,
0.3097481727600097... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
cartesinus/iva_mt_wslot | cartesinus | 2023-07-21T15:40:44Z | 12 | 0 | null | [
"task_categories:translation",
"size_categories:10K<n<100K",
"language:en",
"language:pl",
"language:de",
"language:es",
"language:sv",
"language:fr",
"language:pt",
"license:cc-by-4.0",
"machine translation",
"nlu",
"natural-language-understanding",
"virtual assistant",
"region:us"
] | 2023-07-21T15:40:44Z | 2023-03-09T14:02:00.000Z | 2023-03-09T14:02:00 | ---
dataset_info:
features:
- name: id
dtype: string
- name: locale
dtype: string
- name: origin
dtype: string
- name: partition
dtype: string
- name: translation_utt
dtype:
translation:
languages:
- en
- pl
- name: translation_xml
dtype:
translation:
languages:
- en
- pl
- name: src_bio
dtype: string
- name: tgt_bio
dtype: string
splits:
- name: train
num_bytes: 6187206
num_examples: 20362
- name: validation
num_bytes: 1115480
num_examples: 3681
- name: test
num_bytes: 1587613
num_examples: 5394
download_size: 3851892
dataset_size: 8890299
task_categories:
- translation
language:
- en
- pl
- de
- es
- sv
- fr
- pt
tags:
- machine translation
- nlu
- natural-language-understanding
- virtual assistant
pretty_name: Machine translation for NLU with slot transfer
size_categories:
- 10K<n<100K
license: cc-by-4.0
---
# Machine translation dataset for NLU (Virtual Assistant) with slot transfer between languages
## Dataset Summary
Disclaimer: This is for research purposes only. Please have a look at the license section below. Some of the datasets used to construct IVA_MT have an unknown license.
IVA_MT is a machine translation dataset that can be used to train, adapt, and evaluate MT models used in a virtual assistant NLU context (e.g., to translate the training corpus of an NLU system).
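
A minimal loading sketch with the Hugging Face `datasets` library; the config name `en-pl` is an assumption based on the language pair in the dataset features, so adjust it to the configs actually exposed by the repository:

```python
from datasets import load_dataset

# "en-pl" is an assumed config name; check the repository for the exact list
iva_mt = load_dataset("cartesinus/iva_mt_wslot", "en-pl", split="train")

print(iva_mt[0]["translation_utt"])  # {'en': '...', 'pl': '...'}
print(iva_mt[0]["translation_xml"])  # slot-annotated variant
```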
## Dataset Composition
### en-pl
| Corpus | Train | Dev | Test |
|----------------------------------------------------------------------|--------|-------|-------|
| [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 11514 | 2033 | 2974 |
| [Leyzer 0.2.0](https://github.com/cartesinus/leyzer/tree/0.2.0) | 3974 | 701 | 1380 |
| [OpenSubtitles from OPUS](https://opus.nlpl.eu/OpenSubtitles-v1.php) | 2329 | 411 | 500 |
| [KDE from OPUS](https://opus.nlpl.eu/KDE4.php) | 1154 | 241 | 241 |
| [CCMatrix from Opus](https://opus.nlpl.eu/CCMatrix.php) | 1096 | 232 | 237 |
| [Ubuntu from OPUS](https://opus.nlpl.eu/Ubuntu.php) | 281 | 60 | 59 |
| [Gnome from OPUS](https://opus.nlpl.eu/GNOME.php) | 14 | 3 | 3 |
| *total* | 20362 | 3681 | 5394 |
### en-de
| Corpus | Train | Dev | Test |
|----------------------------------------------------------------------|--------|-------|-------|
| [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 7536 | 1346 | 1955 |
### en-es
| Corpus | Train | Dev | Test |
|----------------------------------------------------------------------|--------|-------|-------|
| [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 8415 | 1526 | 2202 |
### en-sv
| Corpus | Train | Dev | Test |
|----------------------------------------------------------------------|--------|-------|-------|
| [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 7540 | 1360 | 1921 |
### en-fr
| Corpus | Train | Dev | Test |
|----------------------------------------------------------------------|--------|-------|-------|
| [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 6800 | 1203 | 1757 |
### en-pt
| Corpus | Train | Dev | Test |
|----------------------------------------------------------------------|--------|-------|-------|
| [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 7368 | 1296 | 1885 |
### en-hi
| Corpus | Train | Dev | Test |
|----------------------------------------------------------------------|--------|-------|-------|
| [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 6702 | 1175 | 1747 |
### en-tr
| Corpus | Train | Dev | Test |
|----------------------------------------------------------------------|--------|-------|-------|
| [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 8269 | 1474 | 2170 |
### en-ja
| Corpus | Train | Dev | Test |
|----------------------------------------------------------------------|--------|-------|-------|
| [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 8066 | 1434 | 2085 |
### en-zh
| Corpus | Train | Dev | Test |
|----------------------------------------------------------------------|--------|-------|-------|
| [Massive 1.1](https://huggingface.co/datasets/AmazonScience/massive) | 8433 | 1513 | 2179 |
## Tools
Scripts used to generate this dataset can be found on [github](https://github.com/cartesinus/iva_mt).
## Citation
If you use this dataset, please cite:
```
@article{Sowanski2023SlotLI,
title={Slot Lost in Translation? Not Anymore: A Machine Translation Model for Virtual Assistants with Type-Independent Slot Transfer},
author={Marcin Sowanski and Artur Janicki},
journal={2023 30th International Conference on Systems, Signals and Image Processing (IWSSIP)},
year={2023},
pages={1-5}
}
```
## License
This is a composition of 7 datasets; each license is as defined in its original release:
- MASSIVE: [CC-BY 4.0](https://huggingface.co/datasets/AmazonScience/massive/blob/main/LICENSE)
- Leyzer: [CC BY-NC 4.0](https://github.com/cartesinus/leyzer/blob/master/LICENSE)
- OpenSubtitles: unknown
- KDE: [GNU Public License](https://l10n.kde.org/about.php)
- CCMatrix: no license given, therefore assuming it is LASER project license [BSD](https://github.com/facebookresearch/LASER/blob/main/LICENSE)
- Ubuntu: [GNU Public License](https://help.launchpad.net/Legal)
- Gnome: unknown
| [
-0.7105191349983215,
-0.5085111260414124,
0.2916882634162903,
0.1784636229276657,
-0.2607070207595825,
-0.15867988765239716,
-0.190155491232872,
-0.5041347742080688,
0.4206772446632385,
0.6106223464012146,
-0.6984780430793762,
-0.6248029470443726,
-0.7204629778862,
0.14400002360343933,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
celikmus/symptom_text_to_disease_01 | celikmus | 2023-03-10T10:09:08Z | 12 | 1 | null | [
"license:apache-2.0",
"region:us"
] | 2023-03-10T10:09:08Z | 2023-03-10T10:08:35.000Z | 2023-03-10T10:08:35 | ---
license: apache-2.0
dataset_info:
features:
- name: text
dtype: string
- name: labels
dtype:
class_label:
names:
'0': emotional pain
'1': hair falling out
'2': heart hurts
'3': infected wound
'4': foot ache
'5': shoulder pain
'6': injury from sports
'7': skin issue
'8': stomach ache
'9': knee pain
'10': joint pain
'11': hard to breath
'12': head ache
'13': body feels weak
'14': feeling dizzy
'15': back pain
'16': open wound
'17': internal pain
'18': blurry vision
'19': acne
'20': muscle pain
'21': neck pain
'22': cough
'23': ear ache
'24': feeling cold
splits:
- name: train
num_bytes: 330494.3762197868
num_examples: 5328
- name: test
num_bytes: 41373.82675273983
num_examples: 667
- name: valid
num_bytes: 41311.79702747335
num_examples: 666
download_size: 145457
dataset_size: 413180.0
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
LangChainDatasets/question-answering-paul-graham | LangChainDatasets | 2023-03-12T01:02:15Z | 12 | 3 | null | [
"license:mit",
"region:us"
] | 2023-03-12T01:02:15Z | 2023-03-12T01:01:16.000Z | 2023-03-12T01:01:16 | ---
license: mit
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
khalidalt/model-written-evals | khalidalt | 2023-07-02T20:24:29Z | 12 | 0 | null | [
"task_categories:multiple-choice",
"task_categories:zero-shot-classification",
"task_categories:question-answering",
"task_ids:multiple-choice-qa",
"task_ids:multiple-choice-coreference-resolution",
"annotations_creators:machine-generated",
"language_creators:machine-generated",
"multilinguality:monol... | 2023-07-02T20:24:29Z | 2023-03-17T18:42:09.000Z | 2023-03-17T18:42:09 | ---
annotations_creators:
- machine-generated
language:
- en
language_creators:
- machine-generated
license:
- cc-by-4.0
multilinguality:
- monolingual
pretty_name: Evaluations from "Discovering Language Model Behaviors with Model-Written
Evaluations"
size_categories:
- 100K<n<1M
source_datasets:
- original
tags:
- gender bias
- social bias
- AI safety
- personality
- politics
task_categories:
- multiple-choice
- zero-shot-classification
- question-answering
task_ids:
- multiple-choice-qa
- multiple-choice-coreference-resolution
---
# Model-Written Evaluation Datasets
This repository includes datasets written by language models, used in the paper "Discovering Language Model Behaviors with Model-Written Evaluations."
The evaluations in this dataset were designed for dialogue agents, such as models fine-tuned to respond to user utterances or pretrained language models prompted to simulate a dialogue agent's behavior. However, the data can be adapted to test various other types of models as well.
The dataset consists of the following four parts:
1. persona: Datasets designed to evaluate models on different aspects of their behavior, such as their political and religious views, personality traits, moral beliefs, and willingness to pursue potentially risky objectives (e.g., self-preservation or power-seeking).
2. sycophancy: Datasets created to assess models based on their tendency to echo a user's perspective when presented with various questions in fields like philosophy, NLP research, and politics.
3. winogenerated: An extended version of the Winogender Dataset (Rudinger et al., 2018) generated by models. The dataset includes occupation titles generated specifically for this dataset, alongside occupation gender statistics from the Bureau of Labor Statistics.
4. advanced-ai-risk: Datasets evaluating models on behaviors associated with potential catastrophic risks posed by advanced AI systems. These datasets were generated in a few-shot manner.
Please see the cited paper for additional details on the datasets.
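
A minimal loading sketch with the Hugging Face `datasets` library; the directory layout below (one folder of JSONL files per category) is an assumption, so adjust the `data_files` glob to the actual repository structure:

```python
from datasets import load_dataset

# Hypothetical path: assumes the persona evaluations live under persona/*.jsonl
persona = load_dataset(
    "khalidalt/model-written-evals",
    data_files="persona/*.jsonl",
    split="train",
)
```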
**Disclaimer**: As discussed in the paper, some data contains content that includes social biases and stereotypes. The data may also contain other forms of harmful or offensive content. The views expressed in the data do not reflect the views of Anthropic or any of its employees.
## Bibtex Citation
If you would like to cite this work or data, you may use the following bibtex citation:
```
@misc{perez2022discovering,
doi = {10.48550/ARXIV.2212.09251},
url = {https://arxiv.org/abs/2212.09251},
author = {Perez, Ethan and Ringer, Sam and Lukošiūtė, Kamilė and Nguyen, Karina and Chen, Edwin and Heiner, Scott and Pettit, Craig and Olsson, Catherine and Kundu, Sandipan and Kadavath, Saurav and Jones, Andy and Chen, Anna and Mann, Ben and Israel, Brian and Seethor, Bryan and McKinnon, Cameron and Olah, Christopher and Yan, Da and Amodei, Daniela and Amodei, Dario and Drain, Dawn and Li, Dustin and Tran-Johnson, Eli and Khundadze, Guro and Kernion, Jackson and Landis, James and Kerr, Jamie and Mueller, Jared and Hyun, Jeeyoon and Landau, Joshua and Ndousse, Kamal and Goldberg, Landon and Lovitt, Liane and Lucas, Martin and Sellitto, Michael and Zhang, Miranda and Kingsland, Neerav and Elhage, Nelson and Joseph, Nicholas and Mercado, Noemí and DasSarma, Nova and Rausch, Oliver and Larson, Robin and McCandlish, Sam and Johnston, Scott and Kravec, Shauna and {El Showk}, Sheer and Lanham, Tamera and Telleen-Lawton, Timothy and Brown, Tom and Henighan, Tom and Hume, Tristan and Bai, Yuntao and Hatfield-Dodds, Zac and Clark, Jack and Bowman, Samuel R. and Askell, Amanda and Grosse, Roger and Hernandez, Danny and Ganguli, Deep and Hubinger, Evan and Schiefer, Nicholas and Kaplan, Jared},
keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences},
title = {Discovering Language Model Behaviors with Model-Written Evaluations},
publisher = {arXiv},
year = {2022},
copyright = {arXiv.org perpetual, non-exclusive license}
}
```
| [
-0.2505446970462799,
-0.45275363326072693,
0.4704314172267914,
-0.004761562217026949,
0.31532052159309387,
0.03315454721450806,
0.02208508923649788,
-0.34955859184265137,
0.14659248292446136,
0.45728379487991333,
-0.568981945514679,
-0.636583149433136,
-0.4684576094150543,
-0.0072462144307... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
MortenTabaka/LandCover-Aerial-Imagery-for-semantic-segmentation | MortenTabaka | 2023-03-26T17:28:43Z | 12 | 4 | null | [
"task_categories:image-segmentation",
"license:cc-by-nc-sa-4.0",
"arxiv:2005.02264",
"region:us"
] | 2023-03-26T17:28:43Z | 2023-03-26T14:36:08.000Z | 2023-03-26T14:36:08 | ---
license: cc-by-nc-sa-4.0
task_categories:
- image-segmentation
---
# LandCover.ai: Dataset for Automatic Mapping of Buildings, Woodlands, Water and Roads from Aerial Imagery
My project based on this dataset can be found on GitHub: https://github.com/MortenTabaka/Semantic-segmentation-of-LandCover.ai-dataset
The dataset used in this project is the [Landcover.ai Dataset](https://landcover.ai.linuxpolska.com/),
which was originally published with [LandCover.ai: Dataset for Automatic Mapping of Buildings, Woodlands, Water and Roads from Aerial Imagery paper](https://arxiv.org/abs/2005.02264)
also accessible on [PapersWithCode](https://paperswithcode.com/paper/landcover-ai-dataset-for-automatic-mapping-of).
**Please note that I am not the author or owner of this dataset, and I am using it under the terms of the license specified by the original author.
All credits for the dataset go to the original author and contributors.**
| [
-0.4555191099643707,
-0.5314900875091553,
0.5032774209976196,
0.028495673090219498,
-0.397135853767395,
-0.02951318584382534,
-0.03285452350974083,
-0.40044617652893066,
0.10716436058282852,
0.666098415851593,
-0.22807693481445312,
-1.0876505374908447,
-0.5658879280090332,
-0.3041937053203... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
bbaaaa/iwslt14-de-en-preprocess | bbaaaa | 2023-03-28T16:19:35Z | 12 | 0 | iwslt-2014 with fairseq preprocess | [
"task_categories:translation",
"annotations_creators:crowdsourced",
"language_creators:expert-generated",
"multilinguality:translation",
"source_datasets:original",
"language:de",
"language:en",
"license:cc-by-nc-nd-4.0",
"region:us"
] | 2023-03-28T16:19:35Z | 2023-03-27T03:34:37.000Z | 2023-03-27T03:34:37 | ---
annotations_creators:
- crowdsourced
language:
- de
- en
language_creators:
- expert-generated
license:
- cc-by-nc-nd-4.0
multilinguality:
- translation
pretty_name: IWSLT 2014 with fairseq preprocess
source_datasets:
- original
task_categories:
- translation
task_ids: []
paperswithcode_id: iwslt-2014 with fairseq preprocess
---
# Dataset Card for IWSLT 2014 with fairseq preprocess
## Dataset Description
- **Homepage:** [https://sites.google.com/site/iwsltevaluation2014](https://sites.google.com/site/iwsltevaluation2014)
```yaml
dataset_info:
- config_name: de-en
  features:
  - name: translation
    languages:
    - de
    - en
  splits:
  - name: train
    num_examples: 160239
  - name: test
    num_examples: 6750
  - name: validation
    num_examples: 7283
```
| [
-0.7305787205696106,
-0.08152550458908081,
0.22900433838367462,
0.6475548148155212,
-0.42243069410324097,
0.023278625681996346,
0.09978769719600677,
-0.24347054958343506,
-0.10474872589111328,
0.38870683312416077,
-1.0370017290115356,
-0.6536375284194946,
-0.6829137206077576,
0.17609427869... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
patrickramos/conceptual_captions | patrickramos | 2023-03-28T07:44:47Z | 12 | 0 | null | [
"region:us"
] | 2023-03-28T07:44:47Z | 2023-03-28T02:41:41.000Z | 2023-03-28T02:41:41 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622264862060547,
0.43461528420448303,
-0.52829909324646,
0.7012971639633179,
0.7915720343589783,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104477167129517,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Yulong-W/squadori | Yulong-W | 2023-04-01T10:26:03Z | 12 | 0 | null | [
"region:us"
] | 2023-04-01T10:26:03Z | 2023-04-01T10:25:12.000Z | 2023-04-01T10:25:12 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622264862060547,
0.43461528420448303,
-0.52829909324646,
0.7012971639633179,
0.7915720343589783,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104477167129517,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
liuyanchen1015/MULTI_VALUE_sst2_em_obj_pronoun | liuyanchen1015 | 2023-04-03T19:48:57Z | 12 | 0 | null | [
"region:us"
] | 2023-04-03T19:48:57Z | 2023-04-03T19:48:52.000Z | 2023-04-03T19:48:52 | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: label
dtype: int64
- name: idx
dtype: int64
- name: score
dtype: int64
splits:
- name: dev
num_bytes: 4356
num_examples: 31
- name: test
num_bytes: 11772
num_examples: 83
- name: train
num_bytes: 204729
num_examples: 1852
download_size: 106321
dataset_size: 220857
---
# Dataset Card for "MULTI_VALUE_sst2_em_obj_pronoun"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.24922800064086914,
-0.5132604241371155,
0.2734740674495697,
0.14270925521850586,
-0.41839343309402466,
0.10195766389369965,
-0.11429500579833984,
-0.07415179908275604,
0.626937985420227,
0.42383861541748047,
-0.651368260383606,
-0.6243965029716492,
-0.7068344950675964,
-0.24730031192302... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Babypotatotang/logo-captioning-BLIP-BrandInfoWBP | Babypotatotang | 2023-04-04T06:23:31Z | 12 | 1 | null | [
"region:us"
] | 2023-04-04T06:23:31Z | 2023-04-04T05:03:29.000Z | 2023-04-04T05:03:29 | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 321581037.08
num_examples: 24080
- name: test
num_bytes: 82453208.54
num_examples: 6021
download_size: 265975818
dataset_size: 404034245.62
---
# Dataset Card for "logo-captioning-BLIP-BrandInfoWBP"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.45783036947250366,
0.0744803249835968,
-0.18588511645793915,
0.45755836367607117,
-0.3309533894062042,
0.5095604062080383,
0.11797916889190674,
-0.42226091027259827,
0.8734334111213684,
0.44316837191581726,
-0.7755739688873291,
-0.6479806900024414,
-0.6988204121589661,
-0.08687211573123... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
slone/bak_rus_3M2023_scored | slone | 2023-04-09T18:12:00Z | 12 | 0 | null | [
"region:us"
] | 2023-04-09T18:12:00Z | 2023-04-09T18:08:40.000Z | 2023-04-09T18:08:40 | ---
dataset_info:
features:
- name: ba
dtype: string
- name: ru
dtype: string
- name: source
dtype: string
- name: cosine_sim
dtype: float64
- name: cross_encoder_sim
dtype: float64
- name: joint_sim
dtype: float64
- name: idx
dtype: int64
splits:
- name: train
num_bytes: 1228138533
num_examples: 3686157
- name: validation
num_bytes: 1161040
num_examples: 3000
download_size: 706620038
dataset_size: 1229299573
---
# Dataset Card for "bak_rus_3M2023_scored"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.4494973421096802,
-0.2325601577758789,
0.19939512014389038,
0.5301246643066406,
-0.24460811913013458,
-0.03432845324277878,
0.29470765590667725,
-0.10058315843343735,
0.6684966087341309,
0.33238911628723145,
-0.6309244632720947,
-0.976559042930603,
-0.4129684567451477,
-0.29682573676109... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
climatebert/climate_commitments_actions | climatebert | 2023-04-18T16:12:44Z | 12 | 1 | null | [
"task_categories:text-classification",
"annotations_creators:expert-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:1K<n<10K",
"source_datasets:original",
"language:en",
"license:cc-by-nc-sa-4.0",
"region:us"
] | 2023-04-18T16:12:44Z | 2023-04-11T13:11:49.000Z | 2023-04-11T13:11:49 | ---
annotations_creators:
- expert-generated
language_creators:
- found
language:
- en
license: cc-by-nc-sa-4.0
multilinguality:
- monolingual
size_categories:
- 1K<n<10K
source_datasets:
- original
task_categories:
- text-classification
task_ids: []
pretty_name: ClimateCommitmentsActions
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': 'no'
'1': 'yes'
splits:
- name: train
num_bytes: 492077
num_examples: 1000
- name: test
num_bytes: 174265
num_examples: 320
download_size: 373387
dataset_size: 666342
---
# Dataset Card for climate_commitments_actions
## Dataset Description
- **Homepage:** [climatebert.ai](https://climatebert.ai)
- **Repository:**
- **Paper:** [papers.ssrn.com/sol3/papers.cfm?abstract_id=3998435](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3998435)
- **Leaderboard:**
- **Point of Contact:** [Nicolas Webersinke](mailto:nicolas.webersinke@fau.de)
### Dataset Summary
We introduce an expert-annotated dataset for identifying climate-related paragraphs about climate commitments and actions in corporate disclosures.
### Supported Tasks and Leaderboards
The dataset supports a binary classification task of whether a given climate-related paragraph is about climate commitments and actions or not.
### Languages
The text in the dataset is in English.
## Dataset Structure
### Data Instances
```
{
'text': '− Scope 3: Optional scope that includes indirect emissions associated with the goods and services supply chain produced outside the organization. Included are emissions from the transport of products from our logistics centres to stores (downstream) performed by external logistics operators (air, land and sea transport) as well as the emissions associated with electricity consumption in franchise stores.',
'label': 0
}
```
### Data Fields
- text: a climate-related paragraph extracted from corporate annual reports and sustainability reports
- label: the label (0 -> not talking about climate commitments and actions, 1 -> talking about climate commitments and actions)
### Data Splits
The dataset is split into:
- train: 1,000
- test: 320
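
A minimal loading sketch with the Hugging Face `datasets` library:

```python
from datasets import load_dataset

ds = load_dataset("climatebert/climate_commitments_actions", split="train")

label_names = ds.features["label"].names  # ['no', 'yes']
first = ds[0]
print(label_names[first["label"]], "->", first["text"][:80])
```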
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
#### Initial Data Collection and Normalization
Our dataset contains climate-related paragraphs extracted from financial disclosures by firms. We collect text from corporate annual reports and sustainability reports.
For more information regarding our sample selection, please refer to the Appendix of our paper (see [citation](#citation-information)).
#### Who are the source language producers?
Mainly large listed companies.
### Annotations
#### Annotation process
For more information on our annotation process and annotation guidelines, please refer to the Appendix of our paper (see [citation](#citation-information)).
#### Who are the annotators?
The authors and students at Universität Zürich and Friedrich-Alexander-Universität Erlangen-Nürnberg with majors in finance and sustainable finance.
### Personal and Sensitive Information
Since our text sources contain public information, no personal and sensitive information should be included.
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
- Julia Anna Bingler
- Mathias Kraus
- Markus Leippold
- Nicolas Webersinke
### Licensing Information
This dataset is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International license (cc-by-nc-sa-4.0). To view a copy of this license, visit [creativecommons.org/licenses/by-nc-sa/4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/).
If you are interested in commercial use of the dataset, please contact [markus.leippold@bf.uzh.ch](mailto:markus.leippold@bf.uzh.ch).
### Citation Information
```bibtex
@techreport{bingler2023cheaptalk,
title={How Cheap Talk in Climate Disclosures Relates to Climate Initiatives, Corporate Emissions, and Reputation Risk},
author={Bingler, Julia and Kraus, Mathias and Leippold, Markus and Webersinke, Nicolas},
type={Working paper},
institution={Available at SSRN 3998435},
year={2023}
}
```
### Contributions
Thanks to [@webersni](https://github.com/webersni) for adding this dataset. | [
-0.20788419246673584,
-0.30353736877441406,
0.26258420944213867,
0.1285632699728012,
-0.3098987638950348,
-0.08869852870702744,
-0.20141391456127167,
-0.5310208201408386,
0.2945931851863861,
0.4434567391872406,
-0.6263930797576904,
-0.7328088283538818,
-0.5848607420921326,
-0.0124162919819... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
cvssp/WavCaps | cvssp | 2023-07-06T13:28:10Z | 12 | 19 | null | [
"size_categories:100B<n<1T",
"language:en",
"license:cc-by-4.0",
"arxiv:2303.17395",
"region:us"
] | 2023-07-06T13:28:10Z | 2023-04-12T08:09:04.000Z | 2023-04-12T08:09:04 | ---
license: cc-by-4.0
language:
- en
size_categories:
- 100B<n<1T
---
# WavCaps
WavCaps is a ChatGPT-assisted weakly-labelled audio captioning dataset for audio-language multimodal research, where the audio clips are sourced from three websites ([FreeSound](https://freesound.org/), [BBC Sound Effects](https://sound-effects.bbcrewind.co.uk/), and [SoundBible](https://soundbible.com/)) and a sound event detection dataset ([AudioSet Strongly-labelled Subset](https://research.google.com/audioset/download_strong.html)).
- **Paper:** https://arxiv.org/abs/2303.17395
- **Github:** https://github.com/XinhaoMei/WavCaps
## Statistics
| Data Source | # audio | avg. audio duration (s) | avg. text length |
|--------------------|----------|-------------------------|------------------|
| FreeSound | 262300 | 85.98 | 6.77 |
| BBC Sound Effects | 31201 | 115.04 | 9.67 |
| SoundBible | 1232 | 13.12 | 5.87 |
| AudioSet SL subset | 108317 | 10.00 | 9.79 |
| WavCaps | 403050 | 67.59 | 7.80 |
## Download
We provide a JSON file for each data source. For audio clips sourced from websites, we provide the processed caption, the raw description, and other metadata. For audio clips from AudioSet, we use the version from PANNs, where each file name is prefixed with a 'Y'. For the start time, please refer to the original metadata of the AudioSet SL subset.
Waveforms in FLAC format can be downloaded from the [Zip_files](https://huggingface.co/datasets/cvssp/WavCaps/tree/main/Zip_files) directory.
Pretrained models can be downloaded [here](https://drive.google.com/drive/folders/1pFr8IRY3E1FAtc2zjYmeuSVY3M5a-Kdj?usp=share_link).
<font color='red'>If you get "error: invalid zip file with overlapped components (possible zip bomb)" when unzipping,
please try the following commands: </font>
`zip -F AudioSet_SL.zip --out AS.zip`
`unzip AS.zip`
## License
Only academic uses are allowed for the WavCaps dataset. By downloading audio clips through the links provided in the JSON files, you agree that you will use the audio clips for research purposes only.
For credits for audio clips from FreeSound, please refer to its own page.
For detailed license information, please refer to:
[FreeSound](https://freesound.org/help/faq/#licenses), [BBC Sound Effects](https://sound-effects.bbcrewind.co.uk/licensing), [SoundBible](https://soundbible.com/about.php)
The models we provided are created under a UK data copyright exemption for non-commercial research.
## Code for related tasks
We provide codes and pre-trained models for audio-language retrieval, automated audio captioning, and zero-shot audio classification.
* [Retrieval](https://github.com/XinhaoMei/WavCaps/tree/master/retrieval)
* [Captioning](https://github.com/XinhaoMei/WavCaps/tree/master/captioning)
* [Zero-shot Audio Classification](https://github.com/XinhaoMei/WavCaps/blob/master/retrieval/zero_shot_classification.py)
* [Text-to-Sound Generation](https://github.com/haoheliu/AudioLDM)
## Citation
Please cite the following if you make use of the dataset.
```bibtex
@article{mei2023wavcaps,
title={WavCaps: A ChatGPT-Assisted Weakly-Labelled Audio Captioning Dataset for Audio-Language Multimodal Research},
author={Mei, Xinhao and Meng, Chutong and Liu, Haohe and Kong, Qiuqiang and Ko, Tom and Zhao, Chengqi and Plumbley, Mark D and Zou, Yuexian and Wang, Wenwu},
journal={arXiv preprint arXiv:2303.17395},
year={2023}
}
``` | [
-0.5513380169868469,
-0.5216943621635437,
0.14349521696567535,
0.28715386986732483,
-0.4402688145637512,
-0.18192318081855774,
-0.36634963750839233,
-0.4983690679073334,
0.34513282775878906,
0.38835805654525757,
-0.7146225571632385,
-0.664892315864563,
-0.5209457874298096,
-0.0063782664947... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
one-sec-cv12/chunk_273 | one-sec-cv12 | 2023-04-17T21:14:32Z | 12 | 0 | null | [
"region:us"
] | 2023-04-17T21:14:32Z | 2023-04-17T21:09:47.000Z | 2023-04-17T21:09:47 | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
splits:
- name: train
num_bytes: 5506047648.25
num_examples: 57326
download_size: 4808654570
dataset_size: 5506047648.25
---
# Dataset Card for "chunk_273"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.5725325345993042,
-0.3963035047054291,
0.35144469141960144,
0.37500858306884766,
-0.4046497941017151,
0.033607251942157745,
0.2014206498861313,
-0.18705114722251892,
1.0424970388412476,
0.4813351333141327,
-0.8203518986701965,
-0.5380598902702332,
-0.672843873500824,
-0.2732576429843902... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
iulusoy/test-data | iulusoy | 2023-04-24T10:54:25Z | 12 | 0 | null | [
"task_categories:text-classification",
"size_categories:n<1K",
"language:en",
"license:mit",
"region:us"
] | 2023-04-24T10:54:25Z | 2023-04-24T08:55:30.000Z | 2023-04-24T08:55:30 | ---
license: mit
task_categories:
- text-classification
language:
- en
pretty_name: mytest
size_categories:
- n<1K
--- | [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
gelabgaboo/Anticancer-peptide | gelabgaboo | 2023-05-01T07:46:39Z | 12 | 0 | null | [
"region:us"
] | 2023-05-01T07:46:39Z | 2023-05-01T07:46:00.000Z | 2023-05-01T07:46:00 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
maxardito/beatbox | maxardito | 2023-05-08T02:40:48Z | 12 | 0 | null | [
"license:mit",
"Audio",
"Voice",
"Percussion",
"region:us"
] | 2023-05-08T02:40:48Z | 2023-05-02T16:32:38.000Z | 2023-05-02T16:32:38 | ---
pretty_name: "Beatbox Dataset"
tags:
- Audio
- Voice
- Percussion
license: "mit"
arxiv: https://doi.org/10.1007/978-3-031-05981-0_14
---
# Beatbox Dataset
Dataset consisting of isolated beatbox samples. Reimplementation of a dataset from the paper **[BaDumTss: Multi-task Learning for Beatbox Transcription](https://link.springer.com/chapter/10.1007/978-3-031-05981-0_14)**.
## Citations
Mehta, P., Maheshwari, M., Joshi, B., Chakraborty, T. (2022). BaDumTss: Multi-task
Learning for Beatbox Transcription. In: Gama, J., Li, T., Yu, Y., Chen, E., Zheng, Y.,
Teng, F. (eds) Advances in Knowledge Discovery and Data Mining. PAKDD 2022. Lecture Notes
in Computer Science(), vol 13282. Springer, Cham.
https://doi.org/10.1007/978-3-031-05981-0_14
| [
-0.2737094759941101,
-0.7504050135612488,
0.3617090880870819,
-0.005942963063716888,
-0.1706894487142563,
-0.03511801362037659,
-0.28388407826423645,
-0.15115611255168915,
0.16125424206256866,
0.5515392422676086,
-1.0394810438156128,
-0.6708143353462219,
-0.49674439430236816,
0.05982092395... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
LennardZuendorf/Dynamically-Generated-Hate-Speech-Dataset | LennardZuendorf | 2023-05-16T16:01:46Z | 12 | 1 | null | [
"task_categories:text-classification",
"task_categories:text-generation",
"language:en",
"not-for-all-audiences",
"legal",
"arxiv:2012.15761",
"region:us"
] | 2023-05-16T16:01:46Z | 2023-05-09T14:04:29.000Z | 2023-05-09T14:04:29 | ---
task_categories:
- text-classification
- text-generation
language:
- en
tags:
- not-for-all-audiences
- legal
pretty_name: dynamically generated hate speech dataset
---
# Dataset Card for dynamically generated hate speech dataset
## Dataset Description
- **Homepage:** [GitHub](https://github.com/bvidgen/Dynamically-Generated-Hate-Speech-Dataset)
- **Point of Contact:** [bertievidgen@gmail.com](mailto:bertievidgen@gmail.com)
### Dataset Summary
This is a copy of the Dynamically-Generated-Hate-Speech-Dataset, presented in [this paper](https://arxiv.org/abs/2012.15761) by
- **Bertie Vidgen**, **Tristan Thrush**, **Zeerak Waseem** and **Douwe Kiela**
## Original README from [GitHub](https://github.com/bvidgen/Dynamically-Generated-Hate-Speech-Dataset/blob/main/README.md)
## Dynamically-Generated-Hate-Speech-Dataset
ReadMe for v0.2 of the Dynamically Generated Hate Speech Dataset from Vidgen et al. (2021). If you use the dataset, please cite our paper in the Proceedings of ACL 2021, and available on [Arxiv](https://arxiv.org/abs/2012.15761).
Contact Dr. Bertie Vidgen if you have feedback or queries: bertievidgen@gmail.com.
The full author list is: Bertie Vidgen (The Alan Turing Institute), Tristan Thrush (Facebook AI Research), Zeerak Waseem (University of Sheffield) and Douwe Kiela (Facebook AI Research). This paper is an output of the Dynabench project: https://dynabench.org/tasks/5#overall
### Dataset descriptions
v0.2.2.csv is the full dataset used in our ACL paper.
v0.2.3.csv removes duplicate entries, all of which occurred in round 1. Duplicates come from two sources: (1) annotators entering the same content multiple times and (2) different annotators entering the same content. The duplicates are interesting for understanding the annotation process, and the challenges of dynamically generating datasets. However, they are likely to be less useful for training classifiers and so are removed in v0.2.3. We did not lower case the text before removing duplicates as capitalisations contain potentially useful signals.
### Overview
The Dynamically Generated Hate Speech Dataset is provided in one table.
'acl.id' is the unique ID of the entry.
'Text' is the content which has been entered. All content is synthetic.
'Label' is a binary variable, indicating whether or not the content has been identified as hateful. It takes two values: hate, nothate.
'Type' is a categorical variable, providing a secondary label for hateful content. For hate it can take five values: Animosity, Derogation, Dehumanization, Threatening and Support for Hateful Entities. Please see the paper for more detail. For nothate the 'type' is 'none'. In round 1 the 'type' was not given and is marked as 'notgiven'.
'Target' is a categorical variable, providing the group that is attacked by the hate. It can include intersectional characteristics and multiple groups can be identified. For nothate the type is 'none'. Note that in round 1 the 'target' was not given and is marked as 'notgiven'.
'Level' reports whether the entry is original content or a perturbation.
'Round' is a categorical variable. It gives the round of data entry (1, 2, 3 or 4) with a letter for whether the entry is original content ('a') or a perturbation ('b'). Perturbations were not made for round 1.
'Round.base' is a categorical variable. It gives the round of data entry, indicated with just a number (1, 2, 3 or 4).
'Split' is a categorical variable. It gives the data split that the entry has been assigned to. This can take the values 'train', 'dev' and 'test'. The choice of splits is explained in the paper.
'Annotator' is a categorical variable. It gives the annotator who entered the content. Annotator IDs are random alphanumeric strings. There are 20 annotators in the dataset.
'acl.id.matched' is the ID of the matched entry, connecting the original (given in 'acl.id') and the perturbed version.
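
As an illustration only (not part of the original release), the CSV can be filtered on these columns with pandas; the lowercase column names are an assumption, so check the CSV header for the exact casing:

```python
import pandas as pd

df = pd.read_csv("v0.2.3.csv")  # deduplicated version described above

# Hateful training entries targeting women ('wom'); column casing assumed
mask = (df["label"] == "hate") & (df["split"] == "train") & (df["target"] == "wom")
print(df.loc[mask, "text"].head())
```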
For identities (recorded under 'Target') we use shorthand labels to constructed the dataset, which can be converted (and grouped) as follows:
none -> for non hateful entries
NoTargetRecorded -> for hateful entries with no target recorded
mixed -> Mixed race background
ethnic minority -> Ethnic Minorities
indig -> Indigenous people
indigwom -> Indigenous Women
non-white -> Non-whites (attacked as 'non-whites', rather than specific non-white groups which are generally addressed separately)
trav -> Travellers (including Roma, gypsies)
bla -> Black people
blawom -> Black women
blaman -> Black men
african -> African (all 'African' attacks will also be an attack against Black people)
jew -> Jewish people
mus -> Muslims
muswom -> Muslim women
wom -> Women
trans -> Trans people
gendermin -> Gender minorities,
bis -> Bisexual
gay -> Gay people (both men and women)
gayman -> Gay men
gaywom -> Lesbians
dis -> People with disabilities
working -> Working class people
old -> Elderly people
asi -> Asians
asiwom -> Asian women
east -> East Asians
south -> South Asians (e.g. Indians)
chinese -> Chinese people
pak -> Pakistanis
arab -> Arabs, including people from the Middle East
immig -> Immigrants
asylum -> Asylum seekers
ref -> Refugees
for -> Foreigners
eastern european -> Eastern Europeans
russian -> Russian people
pol -> Polish people
hispanic -> Hispanic people, including latinx and Mexicans
nazi -> Nazis ('Support' type of hate)
hitler -> Hitler ('Support' type of hate)
### Code
Code was implemented using the Hugging Face Transformers library.
## Additional Information
### Licensing Information
The original repository does not provide any license, but is free for use with proper citation of the original paper in the Proceedings of ACL 2021, available on [Arxiv](https://arxiv.org/abs/2012.15761)
### Citation Information
cite as [arXiv:2012.15761](https://arxiv.org/abs/2012.15761)
or [https://doi.org/10.48550/arXiv.2012.15761](https://doi.org/10.48550/arXiv.2012.15761) | [
-0.5577258467674255,
-0.7572422623634338,
-0.0021019713021814823,
0.12527112662792206,
-0.048289332538843155,
0.198177769780159,
-0.12333957850933075,
-0.6286841630935669,
0.3914850652217865,
0.1948782354593277,
-0.7361450791358948,
-0.6674079895019531,
-0.7255886793136597,
0.0776653438806... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
mehnaazasad/arxiv_astro_co_ga | mehnaazasad | 2023-05-10T02:47:29Z | 12 | 0 | null | [
"task_categories:summarization",
"size_categories:10K<n<100K",
"language:en",
"license:mit",
"arxiv:1905.00075",
"region:us"
] | 2023-05-10T02:47:29Z | 2023-05-10T01:54:30.000Z | 2023-05-10T01:54:30 | ---
license: mit
task_categories:
- summarization
language:
- en
size_categories:
- 10K<n<100K
---
# Dataset Card for `arxiv_astro_co_ga`
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
This is a dataset consisting of titles and abstracts for all Cosmology and Galaxy Astrophysics arXiv articles to date (99,659 papers).
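A minimal loading sketch with the 🤗 `datasets` library (split names follow the [Data Splits](#data-splits) section below):
```python
from datasets import load_dataset

# "validation" and "test" splits are also available.
ds = load_dataset("mehnaazasad/arxiv_astro_co_ga", split="train")
print(ds[0]["title"])
```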
### Supported Tasks and Leaderboards
[More Information Needed]
### Languages
English
## Dataset Structure
### Data Instances
```
{'title': 'Probing cluster formation under extreme conditions: massive star clusters in blue compact galaxies',
'abstract': ' The numerous and massive young star clusters in blue compact galaxies (BCGs) are used to investigate the properties of their hosts. We test whether BCGs follow claimed relations between cluster populations and their hosts, such as the the fraction of the total luminosity contributed by the clusters as function of the mean star formation rate density; the $V$ band luminosity of the brightest youngest cluster as related to the mean host star formation rate; and the cluster formation efficiency (i.e., the fraction of star formation happening in star clusters) versus the density of the SFR. We find that BCGs follow the trends, supporting a scenario where cluster formation and environmental properties of the host are correlated. They occupy, in all the diagrams, the regions of higher SFRs, as expected by the extreme nature of the starbursts operating in these systems. We find that the star clusters contribute almost to the 20 % of the UV luminosity of the hosts. We suggest that the BCG starburst environment has most likely favoured the compression and collapse of the giant molecular clouds, enhancing the local star formation efficiency, so that massive clusters have been formed. The estimated cluster formation efficiency supports this scenario. BCGs have a cluster formation efficiency comparable to luminous IR galaxies and spiral starburst nuclei (the averaged value is about 35 %) which is much higher than the 8 - 10 % reported for quiescent spirals and dwarf star-forming galaxies. '
}
```
### Data Fields
- `title`: Title of the paper
- `abstract`: The abstract of the paper
### Data Splits
This dataset has 3 splits: _train_, _validation_, and _test_. Below are the statistics for these splits.
| Dataset Split | Number of Instances in Split |
| ------------- | ------------------------------------------- |
| Train | 79,727 |
| Validation    | 9,966                                        |
| Test          | 9,966                                        |
## Dataset Creation
### Curation Rationale
[More Information Needed]
### Source Data
The original dataset from which this subset was constructed can be found here: [Kaggle arXiv Dataset Homepage](https://www.kaggle.com/Cornell-University/arxiv).
#### Initial Data Collection and Normalization
[More Information Needed]
#### Who are the source language producers?
Various authors.
### Annotations
This dataset contains no annotations.
#### Annotation process
[N/A]
#### Who are the annotators?
[N/A]
### Personal and Sensitive Information
No author information included in this dataset.
## Considerations for Using the Data
### Social Impact of Dataset
[More Information Needed]
### Discussion of Biases
[More Information Needed]
### Other Known Limitations
[More Information Needed]
## Additional Information
### Dataset Curators
The original data is maintained by ArXiv, huge thanks to the team for building and maintaining that dataset.
### Licensing Information
The arxiv_astro_co_ga dataset version 1.0.0 is released under the [MIT License](https://opensource.org/licenses/MIT).
### Citation Information
```
@misc{clement2019arxiv,
title={On the Use of ArXiv as a Dataset},
author={Colin B. Clement and Matthew Bierbaum and Kevin P. O'Keeffe and Alexander A. Alemi},
year={2019},
eprint={1905.00075},
archivePrefix={arXiv},
primaryClass={cs.IR}
}
```
### Contributions
[More Information Needed] | [
-0.6325991749763489,
-0.8447300791740417,
0.169639453291893,
-0.013192567974328995,
-0.11674279719591141,
0.021010661497712135,
-0.35315534472465515,
-0.397440105676651,
0.557470977306366,
0.26059690117836,
-0.5327425599098206,
-0.6568332314491272,
-0.3526149392127991,
0.13467472791671753,... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
lighteval/LegalSupport | lighteval | 2023-05-10T09:20:03Z | 12 | 1 | null | [
"region:us"
] | 2023-05-10T09:20:03Z | 2023-05-10T09:19:30.000Z | 2023-05-10T09:19:30 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
lighteval/synthetic_reasoning_natural | lighteval | 2023-05-12T09:30:32Z | 12 | 3 | null | [
"region:us"
] | 2023-05-12T09:30:32Z | 2023-05-12T08:59:11.000Z | 2023-05-12T08:59:11 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
edarchimbaud/earnings-estimate-stocks | edarchimbaud | 2023-11-11T23:12:52Z | 12 | 1 | null | [
"region:us"
] | 2023-11-11T23:12:52Z | 2023-05-19T12:04:48.000Z | 2023-05-19T12:04:48 | ---
dataset_info:
features:
- name: symbol
dtype: string
- name: date
dtype: string
- name: current_qtr
dtype: string
- name: no_of_analysts_current_qtr
dtype: int64
- name: next_qtr
dtype: string
- name: no_of_analysts_next_qtr
dtype: int64
- name: current_year
dtype: int64
- name: no_of_analysts_current_year
dtype: int64
- name: next_year
dtype: int64
- name: no_of_analysts_next_year
dtype: int64
- name: avg_estimate_current_qtr
dtype: float64
- name: avg_estimate_next_qtr
dtype: float64
- name: avg_estimate_current_year
dtype: float64
- name: avg_estimate_next_year
dtype: float64
- name: low_estimate_current_qtr
dtype: float64
- name: low_estimate_next_qtr
dtype: float64
- name: low_estimate_current_year
dtype: float64
- name: low_estimate_next_year
dtype: float64
- name: high_estimate_current_qtr
dtype: float64
- name: high_estimate_next_qtr
dtype: float64
- name: high_estimate_current_year
dtype: float64
- name: high_estimate_next_year
dtype: float64
- name: year_ago_eps_current_qtr
dtype: float64
- name: year_ago_eps_next_qtr
dtype: float64
- name: year_ago_eps_current_year
dtype: float64
- name: year_ago_eps_next_year
dtype: float64
splits:
- name: train
num_bytes: 4919659
num_examples: 22192
download_size: 630013
dataset_size: 4919659
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "earnings-estimate-sp500"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Repository:** [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Point of Contact:** [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Dataset Summary
The earnings-estimate-sp500 dataset provides earnings estimate data for companies in the S&P 500 index.
### Supported Tasks and Leaderboards
The dataset can be used to analyze earnings estimates for systematic trading or financial analysis tasks. The dataset does not specify any associated leaderboards.
### Languages
[N/A]
## Dataset Structure
### Data Instances
[N/A]
### Data Fields
The dataset contains the following fields:
- symbol (string): A string representing the ticker symbol or abbreviation used to identify the company.
- date (string): The date associated with the earnings estimate data.
- current_qtr (string): The current quarter.
- no_of_analysts_current_qtr (int64): The number of analysts providing estimates for the current quarter.
- next_qtr (string): The next quarter.
- no_of_analysts_next_qtr (int64): The number of analysts providing estimates for the next quarter.
- current_year (int64): The current year.
- no_of_analysts_current_year (int64): The number of analysts providing estimates for the current year.
- next_year (int64): The next year.
- no_of_analysts_next_year (int64): The number of analysts providing estimates for the next year.
- avg_estimate_current_qtr (float64): The average estimate for the current quarter.
- avg_estimate_next_qtr (float64): The average estimate for the next quarter.
- avg_estimate_current_year (float64): The average estimate for the current year.
- avg_estimate_next_year (float64): The average estimate for the next year.
- low_estimate_current_qtr (float64): The low estimate for the current quarter.
- low_estimate_next_qtr (float64): The low estimate for the next quarter.
- low_estimate_current_year (float64): The low estimate for the current year.
- low_estimate_next_year (float64): The low estimate for the next year.
- high_estimate_current_qtr (float64): The high estimate for the current quarter.
- high_estimate_next_qtr (float64): The high estimate for the next quarter.
- high_estimate_current_year (float64): The high estimate for the current year.
- high_estimate_next_year (float64): The high estimate for the next year.
- year_ago_eps_current_qtr (float64): The earnings per share (EPS) for the current quarter a year ago.
- year_ago_eps_next_qtr (float64): The earnings per share (EPS) for the next quarter a year ago.
- year_ago_eps_current_year (float64): The earnings per share (EPS) for the current year a year ago.
- year_ago_eps_next_year (float64): The earnings per share (EPS) for the next year a year ago.
### Data Splits
The dataset consists of a single split, called "train."
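A minimal usage sketch (illustrative only; `"AAPL"` is an assumed ticker, not guaranteed to be present in the data):
```python
from datasets import load_dataset

ds = load_dataset("edarchimbaud/earnings-estimate-stocks", split="train")

# Filter the rows for one ticker and inspect the consensus range
# for the current quarter.
aapl = ds.filter(lambda row: row["symbol"] == "AAPL")
for row in aapl.select(range(min(3, len(aapl)))):
    print(row["date"], row["low_estimate_current_qtr"],
          row["avg_estimate_current_qtr"], row["high_estimate_current_qtr"])
```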
## Additional Information
### Dataset Curators
This dataset does not specify any specific curators.
### Licensing Information
The earnings-estimate-sp500 dataset is licensed under the MIT License.
### Citation Information
> https://edarchimbaud.substack.com, earnings-estimate-sp500 dataset, GitHub repository, https://github.com/edarchimbaud
### Contributions
Thanks to [@edarchimbaud](https://github.com/edarchimbaud) for adding this dataset. | [
-0.22878368198871613,
-0.42982128262519836,
0.09807661175727844,
0.3883153200149536,
-0.200959250330925,
0.042940784245729446,
0.11114196479320526,
-0.5976083874702454,
0.8486056327819824,
0.1579354703426361,
-0.8314402103424072,
-0.5093668103218079,
-0.5792982578277588,
-0.117127716541290... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
edarchimbaud/eps-revisions-stocks | edarchimbaud | 2023-11-11T23:13:30Z | 12 | 0 | null | [
"region:us"
] | 2023-11-11T23:13:30Z | 2023-05-19T14:23:43.000Z | 2023-05-19T14:23:43 | ---
dataset_info:
features:
- name: symbol
dtype: string
- name: date
dtype: string
- name: current_qtr
dtype: string
- name: up_last_7_days_current_qtr
dtype: float64
- name: next_qtr
dtype: string
- name: up_last_7_days_next_qtr
dtype: float64
- name: current_year
dtype: int64
- name: up_last_7_days_current_year
dtype: float64
- name: next_year
dtype: int64
- name: up_last_7_days_next_year
dtype: float64
- name: up_last_30_days_current_qtr
dtype: float64
- name: up_last_30_days_next_qtr
dtype: float64
- name: up_last_30_days_current_year
dtype: float64
- name: up_last_30_days_next_year
dtype: float64
- name: down_last_7_days_current_qtr
dtype: 'null'
- name: down_last_7_days_next_qtr
dtype: 'null'
- name: down_last_7_days_current_year
dtype: 'null'
- name: down_last_7_days_next_year
dtype: 'null'
- name: down_last_30_days_current_qtr
dtype: float64
- name: down_last_30_days_next_qtr
dtype: float64
- name: down_last_30_days_current_year
dtype: float64
- name: down_last_30_days_next_year
dtype: float64
splits:
- name: train
num_bytes: 3206767
num_examples: 20208
download_size: 263860
dataset_size: 3206767
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "eps-revisions-sp500"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://edarchimbaud.substack.com
- **Repository:** https://github.com/edarchimbaud
- **Point of Contact:** contact@edarchimbaud.com
### Dataset Summary
The eps-revisions-sp500 dataset provides information on earnings-per-share (EPS) revisions for companies in the S&P 500 index.
### Supported Tasks and Leaderboards
The dataset can be used to analyze EPS revisions and their impact on the performance of companies in the S&P 500 index. It does not specify any particular leaderboard or evaluation metric.
### Languages
[N/A]
## Dataset Structure
### Data Instances
[N/A]
### Data Fields
- symbol (string): A string representing the ticker symbol or abbreviation used to identify the company.
- date (string): A string indicating the date of the recorded data.
- current_qtr (string): A string representing the current quarter.
- up_last_7_days_current_qtr (float64): The number of upward EPS estimate revisions for the current quarter over the last 7 days.
- next_qtr (string): A string representing the next quarter.
- up_last_7_days_next_qtr (float64): The number of upward EPS estimate revisions for the next quarter over the last 7 days.
- current_year (int64): An integer representing the current year.
- up_last_7_days_current_year (float64): The number of upward EPS estimate revisions for the current year over the last 7 days.
- next_year (int64): An integer representing the next year.
- up_last_7_days_next_year (float64): The number of upward EPS estimate revisions for the next year over the last 7 days.
- up_last_30_days_current_qtr (float64): The number of upward EPS estimate revisions for the current quarter over the last 30 days.
- up_last_30_days_next_qtr (float64): The number of upward EPS estimate revisions for the next quarter over the last 30 days.
- up_last_30_days_current_year (float64): The number of upward EPS estimate revisions for the current year over the last 30 days.
- up_last_30_days_next_year (float64): The number of upward EPS estimate revisions for the next year over the last 30 days.
- down_last_7_days_current_qtr (null): A null column; downward revision counts for the current quarter over the last 7 days are not populated.
- down_last_7_days_next_qtr (null): A null column; downward revision counts for the next quarter over the last 7 days are not populated.
- down_last_7_days_current_year (null): A null column; downward revision counts for the current year over the last 7 days are not populated.
- down_last_7_days_next_year (null): A null column; downward revision counts for the next year over the last 7 days are not populated.
- down_last_30_days_current_qtr (float64): The number of downward EPS estimate revisions for the current quarter over the last 30 days.
- down_last_30_days_next_qtr (float64): The number of downward EPS estimate revisions for the next quarter over the last 30 days.
- down_last_30_days_current_year (float64): The number of downward EPS estimate revisions for the current year over the last 30 days.
- down_last_30_days_next_year (float64): The number of downward EPS estimate revisions for the next year over the last 30 days.
### Data Splits
A single split, called train.
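A minimal usage sketch (illustrative; the derived column name is our own):
```python
from datasets import load_dataset

ds = load_dataset("edarchimbaud/eps-revisions-stocks", split="train")

# Net 30-day revision count for the current quarter: upward minus
# downward revisions (both columns are nullable floats).
def net_30d_revisions(row):
    up = row["up_last_30_days_current_qtr"] or 0.0
    down = row["down_last_30_days_current_qtr"] or 0.0
    return {"net_30d_current_qtr": up - down}

ds = ds.map(net_30d_revisions)
print(ds[0]["symbol"], ds[0]["net_30d_current_qtr"])
```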
## Dataset Creation
### Curation Rationale
The eps-revisions-sp500 dataset was created to provide information on EPS revisions for companies in the S&P 500 index.
### Source Data
#### Initial Data Collection and Normalization
The data was collected from reliable sources and normalized for consistency.
### Annotations
#### Annotation Process
[N/A]
#### Annotators
[N/A]
### Personal and Sensitive Information
[N/A]
## Considerations for Using the Data
### Social Impact of Dataset
[N/A]
### Discussion of Biases
[N/A]
### Other Known Limitations
[N/A]
## Additional Information
### Dataset Curators
The eps-revisions-sp500 dataset was collected by https://edarchimbaud.substack.com.
### Licensing Information
The eps-revisions-sp500 dataset is licensed under the MIT License.
### Citation Information
> https://edarchimbaud.substack.com, eps-revisions-sp500 dataset, GitHub repository, https://github.com/edarchimbaud
### Contributions
Thanks to [@edarchimbaud](https://github.com/edarchimbaud) for adding this dataset. | [
-0.39205077290534973,
-0.1847178339958191,
0.3109132647514343,
0.4360720217227936,
-0.214922234416008,
-0.02796551026403904,
0.2352174073457718,
-0.30489030480384827,
0.6362712383270264,
0.3680174648761749,
-1.0496671199798584,
-0.6090461611747742,
-0.521430253982544,
0.27956071496009827,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
edarchimbaud/eps-trend-stocks | edarchimbaud | 2023-11-11T23:13:43Z | 12 | 2 | null | [
"region:us"
] | 2023-11-11T23:13:43Z | 2023-05-19T15:17:04.000Z | 2023-05-19T15:17:04 | ---
dataset_info:
features:
- name: symbol
dtype: string
- name: date
dtype: string
- name: current_qtr
dtype: string
- name: current_estimate_current_qtr
dtype: float64
- name: next_qtr
dtype: string
- name: current_estimate_next_qtr
dtype: float64
- name: current_year
dtype: int64
- name: current_estimate_current_year
dtype: float64
- name: next_year
dtype: int64
- name: current_estimate_next_year
dtype: float64
- name: 7_days_ago_current_qtr
dtype: float64
- name: 7_days_ago_next_qtr
dtype: float64
- name: 7_days_ago_current_year
dtype: float64
- name: 7_days_ago_next_year
dtype: float64
- name: 30_days_ago_current_qtr
dtype: float64
- name: 30_days_ago_next_qtr
dtype: float64
- name: 30_days_ago_current_year
dtype: float64
- name: 30_days_ago_next_year
dtype: float64
- name: 60_days_ago_current_qtr
dtype: float64
- name: 60_days_ago_next_qtr
dtype: float64
- name: 60_days_ago_current_year
dtype: float64
- name: 60_days_ago_next_year
dtype: float64
- name: 90_days_ago_current_qtr
dtype: float64
- name: 90_days_ago_next_qtr
dtype: float64
- name: 90_days_ago_current_year
dtype: float64
- name: 90_days_ago_next_year
dtype: float64
splits:
- name: train
num_bytes: 4466882
num_examples: 20195
download_size: 790088
dataset_size: 4466882
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "eps-trend-sp500"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Repository:** [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Point of Contact:** [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
### Dataset Summary
The "eps-trend-sp500" dataset contains earnings per share (EPS) trend data for companies in the S&P 500 index. It includes information about the EPS estimates for the current quarter, next quarter, current year, and next year, as well as estimates from 7 days ago, 30 days ago, 60 days ago, and 90 days ago.
### Supported Tasks and Leaderboards
The dataset can be used to analyze EPS trends and perform financial analysis tasks. It does not specify any associated leaderboards.
### Languages
The dataset does not specify a language.
## Dataset Structure
### Data Instances
The dataset consists of multiple data instances, where each instance represents the EPS trend data for a specific company and date.
### Data Fields
The dataset contains the following fields:
- symbol (string): A string representing the ticker symbol or abbreviation used to identify the company.
- date (string): The date associated with the EPS trend data.
- current_qtr (string): The current quarter.
- current_estimate_current_qtr (float64): The current estimate for the EPS in the current quarter.
- next_qtr (string): The next quarter.
- current_estimate_next_qtr (float64): The current estimate for the EPS in the next quarter.
- current_year (int64): The current year.
- current_estimate_current_year (float64): The current estimate for the EPS in the current year.
- next_year (int64): The next year.
- current_estimate_next_year (float64): The current estimate for the EPS in the next year.
- 7_days_ago_current_qtr (float64): The EPS estimate for the current quarter from 7 days ago.
- 7_days_ago_next_qtr (float64): The EPS estimate for the next quarter from 7 days ago.
- 7_days_ago_current_year (float64): The EPS estimate for the current year from 7 days ago.
- 7_days_ago_next_year (float64): The EPS estimate for the next year from 7 days ago.
- 30_days_ago_current_qtr (float64): The EPS estimate for the current quarter from 30 days ago.
- 30_days_ago_next_qtr (float64): The EPS estimate for the next quarter from 30 days ago.
- 30_days_ago_current_year (float64): The EPS estimate for the current year from 30 days ago.
- 30_days_ago_next_year (float64): The EPS estimate for the next year from 30 days ago.
- 60_days_ago_current_qtr (float64): The EPS estimate for the current quarter from 60 days ago.
- 60_days_ago_next_qtr (float64): The EPS estimate for the next quarter from 60 days ago.
- 60_days_ago_current_year (float64): The EPS estimate for the current year from 60 days ago.
- 60_days_ago_next_year (float64): The EPS estimate for the next year from 60 days ago.
- 90_days_ago_current_qtr (float64): The EPS estimate for the current quarter from 90 days ago.
- 90_days_ago_next_qtr (float64): The EPS estimate for the next quarter from 90 days ago.
- 90_days_ago_current_year (float64): The EPS estimate for the current year from 90 days ago.
- 90_days_ago_next_year (float64): The EPS estimate for the next year from 90 days ago.
### Data Splits
The dataset consists of a single split, called "train."
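A minimal usage sketch (illustrative; the derived column name is our own):
```python
from datasets import load_dataset

ds = load_dataset("edarchimbaud/eps-trend-stocks", split="train")

# Change in the current-quarter EPS estimate over the last 90 days;
# a positive value means the consensus has been revised upward.
def estimate_drift(row):
    cur = row["current_estimate_current_qtr"]
    old = row["90_days_ago_current_qtr"]
    return {"drift_90d_current_qtr": None if cur is None or old is None else cur - old}

ds = ds.map(estimate_drift)
print(ds[0]["symbol"], ds[0]["drift_90d_current_qtr"])
```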
## Additional Information
### Dataset Curators
The eps-trend-sp500 dataset was collected by https://edarchimbaud.substack.com.
### Licensing Information
The eps-trend-sp500 dataset is licensed under the MIT License.
### Citation Information
> https://edarchimbaud.substack.com, eps-trend-sp500 dataset, GitHub repository, https://github.com/edarchimbaud
### Contributions
Thanks to [@edarchimbaud](https://github.com/edarchimbaud) for adding this dataset. | [
-0.4052031636238098,
-0.25995925068855286,
0.2753746509552002,
0.41319987177848816,
-0.3836759626865387,
-0.1451946347951889,
0.16050317883491516,
-0.4031265676021576,
0.7505630254745483,
0.18870016932487488,
-0.9192031621932983,
-0.6446003317832947,
-0.6097463369369507,
0.0849860385060310... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
edarchimbaud/revenue-estimate-stocks | edarchimbaud | 2023-11-11T23:15:05Z | 12 | 2 | null | [
"region:us"
] | 2023-11-11T23:15:05Z | 2023-05-19T15:34:56.000Z | 2023-05-19T15:34:56 | ---
dataset_info:
features:
- name: symbol
dtype: string
- name: date
dtype: string
- name: current_qtr
dtype: string
- name: no_of_analysts_current_qtr
dtype: int64
- name: next_qtr
dtype: string
- name: no_of_analysts_next_qtr
dtype: int64
- name: current_year
dtype: int64
- name: no_of_analysts_current_year
dtype: int64
- name: next_year
dtype: int64
- name: no_of_analysts_next_year
dtype: int64
- name: avg_estimate_current_qtr
dtype: string
- name: avg_estimate_next_qtr
dtype: string
- name: avg_estimate_current_year
dtype: string
- name: avg_estimate_next_year
dtype: string
- name: low_estimate_current_qtr
dtype: string
- name: low_estimate_next_qtr
dtype: string
- name: low_estimate_current_year
dtype: string
- name: low_estimate_next_year
dtype: string
- name: high_estimate_current_qtr
dtype: string
- name: high_estimate_next_qtr
dtype: string
- name: high_estimate_current_year
dtype: string
- name: high_estimate_next_year
dtype: string
- name: year_ago_sales_current_qtr
dtype: string
- name: year_ago_sales_next_qtr
dtype: string
- name: year_ago_sales_current_year
dtype: string
- name: year_ago_sales_next_year
dtype: string
- name: sales_growth_yearest_current_qtr
dtype: string
- name: sales_growth_yearest_next_qtr
dtype: string
- name: sales_growth_yearest_current_year
dtype: string
- name: sales_growth_yearest_next_year
dtype: string
splits:
- name: train
num_bytes: 5577663
num_examples: 19712
download_size: 737316
dataset_size: 5577663
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "revenue-estimate-sp500"
## Table of Contents
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://edarchimbaud.substack.com
- **Repository:** https://github.com/edarchimbaud
- **Point of Contact:** contact@edarchimbaud.com
### Dataset Summary
The revenue-estimate-sp500 dataset provides revenue estimate data for companies in the S&P 500 index.
### Supported Tasks and Leaderboards
The dataset can be used to analyze and predict revenue estimates for companies in the S&P 500 index.
## Dataset Structure
### Data Instances
[N/A]
### Data Fields
- symbol (string): A string representing the ticker symbol or abbreviation used to identify the company.
- date (string): A string indicating the date of the recorded data.
- current_qtr (string): A string representing the current quarter.
- no_of_analysts_current_qtr (int64): An integer indicating the number of analysts providing estimates for the current quarter.
- next_qtr (string): A string representing the next quarter.
- no_of_analysts_next_qtr (int64): An integer indicating the number of analysts providing estimates for the next quarter.
- current_year (int64): An integer indicating the current year.
- no_of_analysts_current_year (int64): An integer indicating the number of analysts providing estimates for the current year.
- next_year (int64): An integer indicating the next year.
- no_of_analysts_next_year (int64): An integer indicating the number of analysts providing estimates for the next year.
- avg_estimate_current_qtr (string): A string representing the average estimate for the current quarter.
- avg_estimate_next_qtr (string): A string representing the average estimate for the next quarter.
- avg_estimate_current_year (string): A string representing the average estimate for the current year.
- avg_estimate_next_year (string): A string representing the average estimate for the next year.
- low_estimate_current_qtr (string): A string representing the low estimate for the current quarter.
- low_estimate_next_qtr (string): A string representing the low estimate for the next quarter.
- low_estimate_current_year (string): A string representing the low estimate for the current year.
- low_estimate_next_year (string): A string representing the low estimate for the next year.
- high_estimate_current_qtr (string): A string representing the high estimate for the current quarter.
- high_estimate_next_qtr (string): A string representing the high estimate for the next quarter.
- high_estimate_current_year (string): A string representing the high estimate for the current year.
- high_estimate_next_year (string): A string representing the high estimate for the next year.
- year_ago_sales_current_qtr (string): A string representing the year-ago sales for the current quarter.
- year_ago_sales_next_qtr (string): A string representing the year-ago sales for the next quarter.
- year_ago_sales_current_year (string): A string representing the year-ago sales for the current year.
- year_ago_sales_next_year (string): A string representing the year-ago sales for the next year.
- sales_growth_yearest_current_qtr (string): A string representing the sales growth estimate for the current quarter.
- sales_growth_yearest_next_qtr (string): A string representing the sales growth estimate for the next quarter.
- sales_growth_yearest_current_year (string): A string representing the sales growth estimate for the current year.
- sales_growth_yearest_next_year (string): A string representing the sales growth estimate for the next year.
### Data Splits
A single split, called train.
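Since the estimate columns are stored as strings, a small parser is handy. The sketch below assumes suffixed figures such as `"850M"` or `"28.5B"`; the actual formatting in the data may differ:
```python
from datasets import load_dataset

ds = load_dataset("edarchimbaud/revenue-estimate-stocks", split="train")

# Assumed format: a number with an optional K/M/B/T suffix.
SUFFIXES = {"K": 1e3, "M": 1e6, "B": 1e9, "T": 1e12}

def parse_amount(text):
    if not text:
        return None
    text = text.strip()
    scale = SUFFIXES.get(text[-1].upper())
    try:
        return float(text[:-1]) * scale if scale else float(text)
    except ValueError:
        return None

print(parse_amount(ds[0]["avg_estimate_current_qtr"]))
```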
## Dataset Creation
### Curation Rationale
The revenue-estimate-sp500 dataset was created to provide revenue estimate data for companies in the S&P 500 index.
### Source Data
The data was collected and normalized from reliable sources.
## Additional Information
### Dataset Curators
The revenue-estimate-sp500 dataset was collected by https://edarchimbaud.substack.com.
### Licensing Information
The revenue-estimate-sp500 dataset is licensed under the MIT License.
### Citation Information
> https://edarchimbaud.substack.com, revenue-estimate-sp500 dataset, GitHub repository, https://github.com/edarchimbaud
### Contributions
Thanks to [@edarchimbaud](https://github.com/edarchimbaud) for adding this dataset. | [
-0.28999003767967224,
-0.4560992419719696,
-0.06027951464056969,
0.395132839679718,
-0.17961406707763672,
0.19683776795864105,
0.14180536568164825,
-0.34889310598373413,
0.5777934789657593,
0.3513208031654358,
-0.9699135422706604,
-0.5460978150367737,
-0.21233029663562775,
0.04012131318449... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
VirtualRoyalty/toxic_comments | VirtualRoyalty | 2023-05-26T20:41:18Z | 12 | 0 | null | [
"task_categories:text-classification",
"size_categories:1K<n<10K",
"language:en",
"region:us"
] | 2023-05-26T20:41:18Z | 2023-05-26T20:23:54.000Z | 2023-05-26T20:23:54 | ---
task_categories:
- text-classification
language:
- en
pretty_name: toxic_comments
size_categories:
- 1K<n<10K
--- | [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
edarchimbaud/earnings-surprise-stocks | edarchimbaud | 2023-11-11T23:13:18Z | 12 | 1 | null | [
"region:us"
] | 2023-11-11T23:13:18Z | 2023-05-28T22:48:31.000Z | 2023-05-28T22:48:31 | ---
dataset_info:
features:
- name: symbol
dtype: string
- name: date
dtype: string
- name: id
dtype: int64
- name: fiscal_qtr_end
dtype: string
- name: date_reported
dtype: timestamp[ns]
- name: eps
dtype: float64
- name: consensus_forecast
dtype: string
- name: percentage_surprise
dtype: string
splits:
- name: train
num_bytes: 5573453
num_examples: 76001
download_size: 409406
dataset_size: 5573453
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
--- | [
-0.7695470452308655,
-0.7302907705307007,
0.14974653720855713,
0.17431773245334625,
-0.14121831953525543,
0.3798690736293793,
-0.17824602127075195,
-0.8036946654319763,
0.9813262224197388,
0.12371528893709183,
-0.9756351709365845,
-0.6976928114891052,
-0.4834411144256592,
0.123603008687496... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
edarchimbaud/short-interest-stocks | edarchimbaud | 2023-11-11T23:15:21Z | 12 | 1 | null | [
"task_categories:tabular-regression",
"language:en",
"license:mit",
"region:us"
] | 2023-11-11T23:15:21Z | 2023-05-28T22:48:52.000Z | 2023-05-28T22:48:52 | ---
language:
- en
license: mit
task_categories:
- tabular-regression
dataset_info:
features:
- name: symbol
dtype: string
- name: date
dtype: string
- name: id
dtype: int64
- name: settlement_date
dtype: timestamp[ns]
- name: interest
dtype: float64
- name: avg_daily_share_volume
dtype: float64
- name: days_to_cover
dtype: float64
splits:
- name: train
num_bytes: 8920052
num_examples: 143902
download_size: 1015695
dataset_size: 8920052
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# Dataset Card for "short-interest-sp500"
## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Annotations](#annotations)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Homepage:** https://edarchimbaud.substack.com
- **Repository:** https://github.com/edarchimbaud
- **Point of Contact:** contact@edarchimbaud.com
### Dataset Summary
The short-interest-sp500 dataset provides short interest data for companies listed on the S&P 500 index. This includes the number of shares that have been sold short but have not yet been covered or closed out.
### Supported Tasks and Leaderboards
[N/A]
### Languages
[N/A]
## Dataset Structure
### Data Instances
[N/A]
### Data Fields
- symbol (string): A string representing the ticker symbol or abbreviation used to identify the company.
- date (string): A string representing the date when the data was collected.
- id (int64): A unique integer identifier for each data instance.
- settlement_date (timestamp[ns]): The date by which a buyer must pay for the securities delivered by the seller.
- interest (float64): A floating point number representing the short interest of the company on the specified date.
- avg_daily_share_volume (float64): A floating point number representing the average daily trading volume of the company.
- days_to_cover (float64): A floating point number representing the days-to-cover metric: the short interest divided by the average daily share volume, i.e. how many days of typical trading volume it would take to cover the short interest (see the sketch below).
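A minimal sanity-check sketch of that relationship (illustrative only):
```python
from datasets import load_dataset

ds = load_dataset("edarchimbaud/short-interest-stocks", split="train")

# Recompute days-to-cover from the other two columns and compare.
row = ds[0]
if row["avg_daily_share_volume"]:
    recomputed = row["interest"] / row["avg_daily_share_volume"]
    print(row["symbol"], row["days_to_cover"], round(recomputed, 2))
```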
### Data Splits
[N/A]
## Dataset Creation
### Curation Rationale
The short-interest-sp500 dataset was created to facilitate the study of market dynamics, particularly the role of short selling.
### Source Data
#### Initial Data Collection and Normalization
The dataset was compiled from publicly available sources.
### Annotations
#### Annotation process
[N/A]
#### Who are the annotators?
[N/A]
### Personal and Sensitive Information
[N/A]
## Considerations for Using the Data
### Social Impact of Dataset
[N/A]
### Discussion of Biases
[N/A]
### Other Known Limitations
[N/A]
## Additional Information
### Dataset Curators
The short-interest-sp500 dataset was collected by https://edarchimbaud.substack.com.
### Licensing Information
The short-interest-sp500 dataset is licensed under the MIT License.
### Citation Information
> https://edarchimbaud.substack.com, short-interest-sp500 dataset, GitHub repository, https://github.com/edarchimbaud
### Contributions
Thanks to [@edarchimbaud](https://github.com/edarchimbaud) for adding this dataset. | [
-0.46300551295280457,
-0.4588553011417389,
0.3976162374019623,
0.1844320446252823,
-0.4106064438819885,
0.10731552541255951,
-0.1188640147447586,
-0.116264209151268,
0.951164186000824,
0.33880046010017395,
-0.9699670076370239,
-0.8679402470588684,
-0.5162820219993591,
-0.016916995868086815... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
TigerResearch/tigerbot-kaggle-leetcodesolutions-en-2k | TigerResearch | 2023-05-31T02:01:37Z | 12 | 9 | null | [
"language:en",
"license:apache-2.0",
"region:us"
] | 2023-05-31T02:01:37Z | 2023-05-30T15:10:06.000Z | 2023-05-30T15:10:06 | ---
license: apache-2.0
language:
- en
---
[Tigerbot](https://github.com/TigerResearch/TigerBot) code SFT dataset, generated by processing the leetcode-solutions dataset.
Original source: [https://www.kaggle.com/datasets/erichartford/leetcode-solutions](https://www.kaggle.com/datasets/erichartford/leetcode-solutions)
## Usage
```python
import datasets
ds_sft = datasets.load_dataset('TigerResearch/tigerbot-kaggle-leetcodesolutions-en-2k')
```
| [
-0.3038867115974426,
-0.5514221787452698,
0.07999593019485474,
0.14224359393119812,
-0.4172312915325165,
0.10359490662813187,
-0.0805525928735733,
0.42975276708602905,
0.5810624957084656,
0.3991675078868866,
-0.6941500306129456,
-0.49567222595214844,
-0.05779416859149933,
0.170260429382324... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Yulong-W/squadorirobustness | Yulong-W | 2023-06-11T03:59:10Z | 12 | 0 | null | [
"region:us"
] | 2023-06-11T03:59:10Z | 2023-06-11T03:51:46.000Z | 2023-06-11T03:51:46 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Yulong-W/squadpararobustness | Yulong-W | 2023-06-11T04:03:20Z | 12 | 0 | null | [
"region:us"
] | 2023-06-11T04:03:20Z | 2023-06-11T04:01:01.000Z | 2023-06-11T04:01:01 | Entry not found | [
-0.32276472449302673,
-0.22568407654762268,
0.8622258901596069,
0.4346148371696472,
-0.5282984972000122,
0.7012965679168701,
0.7915717363357544,
0.07618629932403564,
0.7746022939682007,
0.2563222646713257,
-0.785281777381897,
-0.22573848068714142,
-0.9104482531547546,
0.5715669393539429,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Patt/RTE_TH | Patt | 2023-06-14T16:51:34Z | 12 | 0 | null | [
"task_categories:text-classification",
"language:en",
"language:th",
"arxiv:1907.04307",
"region:us"
] | 2023-06-14T16:51:34Z | 2023-06-12T11:40:00.000Z | 2023-06-12T11:40:00 | ---
task_categories:
- text-classification
language:
- en
- th
---
# Dataset Card for RTE_TH
### Dataset Description
This dataset is a Thai-translated version of [RTE](https://huggingface.co/datasets/super_glue/viewer/rte), produced with Google Translate; the [Multilingual Universal Sentence Encoder](https://arxiv.org/abs/1907.04307) was used to score the Thai translations. | [
-0.11631447076797485,
-0.7136080265045166,
-0.08688075095415115,
0.49498364329338074,
-0.63807612657547,
-0.0762559249997139,
-0.2234584093093872,
-0.2603799104690552,
0.6615583300590515,
0.6077417135238647,
-0.5014963746070862,
-0.831489622592926,
-0.5957807302474976,
0.3055718243122101,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
tianyang/repobench-p | tianyang | 2023-07-19T06:13:35Z | 12 | 2 | null | [
"task_categories:text-retrieval",
"task_categories:text-generation",
"task_ids:document-retrieval",
"language_creators:found",
"multilinguality:multilingual",
"source_datasets:original",
"language:code",
"license:cc-by-nc-nd-4.0",
"code",
"arxiv:2306.03091",
"region:us"
] | 2023-07-19T06:13:35Z | 2023-06-16T09:35:10.000Z | 2023-06-16T09:35:10 | ---
language_creators:
- found
language:
- code
license:
- cc-by-nc-nd-4.0
multilinguality:
- multilingual
pretty_name: RepoBench-Pipeline
source_datasets:
- original
task_categories:
- text-retrieval
- text-generation
task_ids:
- document-retrieval
tags:
- code
---
# Dataset Card for RepoBench-P
## Dataset Description
- **Homepage:** https://github.com/Leolty/repobench
- **Paper:** https://arxiv.org/abs/2306.03091
## Dataset Summary
**RepoBench-P (Pipeline)** is a subtask of **RepoBench** ([GitHub](https://github.com/Leolty/repobench), [arXiv](https://arxiv.org/abs/2306.03091)), combining the retrieval and code completion tasks. Specifically, the retrieval task is first used to retrieve the most relevant code snippet, and the code completion task is then performed with the retrieved snippet as cross-file context for next-line prediction, mirroring the complex real-world scenarios a practical auto-completion system faces.
## Settings
- `cff`: short for cross_file_first, indicating the cross-file module in next line is first used in the current file.
- `cfr`: short for cross_file_random, indicating the cross-file module in next line is not first used in the current file.
- `if`: short for in_file, indicating the next line does not contain any cross-file module.
## Supported Languages
- `python` and `java`
## Loading Data
For example, to load the `python` dataset (the `split` argument selects a specific setting):
```python
from datasets import load_dataset
dataset = load_dataset("tianyang/repobench-p", "python", split="cff")
```
> Note: The `split` argument is optional. If not provided, the entire dataset will be loaded.
## Dataset Structure
```json
{
"repo_name": "repository name of the data point",
"file_path": "path/to/current_file",
"context": [
{
"path": "path/to/cross_file_1",
"identifier": "identifier of the cross-file module",
"snippet": "the code snippet of the cross-file module",
"tokenized_snippet": "tokenized code snippet of the cross-file module"
},
// ...
{
"path": "path/to/cross_file_k",
"identifier": "identifier of the cross-file module",
"snippet": "the code snippet of the cross-file module",
"tokenized_snippet": "tokenized code snippet of the cross-file module"
},
],
"import_statement": "all import statements in current file",
"code": "the code for next-line prediction",
"next_line": "the next line of the code",
"gold_snippet_index": 2 // NOTE: Only for "cross_file_first" and "cross_file_random" settings, for "in_file" setting, we set it to -1.
}
```
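As an illustrative (unofficial) sketch of the pipeline setting, one can take the gold cross-file snippet and build a completion prompt from it; a real system would retrieve the snippet instead of reading `gold_snippet_index`:
```python
from datasets import load_dataset

ds = load_dataset("tianyang/repobench-p", "python", split="cff")

sample = ds[0]
gold = sample["gold_snippet_index"]  # index into sample["context"]

# Concatenate the gold snippet, the import statements, and the in-file
# code, then compare the model's continuation against "next_line".
retrieved = sample["context"][gold]["snippet"]
prompt = retrieved + "\n" + sample["import_statement"] + "\n" + sample["code"]
print(prompt[-200:], "->", sample["next_line"])
```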
## Licensing Information
CC BY-NC-ND 4.0
## Citation Information
```bibtex
@misc{liu2023repobench,
title={RepoBench: Benchmarking Repository-Level Code Auto-Completion Systems},
author={Tianyang Liu and Canwen Xu and Julian McAuley},
year={2023},
eprint={2306.03091},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
## Contributions
Thanks to [@Leolty](https://github.com/Leolty) for adding this dataset. | [
-0.3532106280326843,
-0.16137239336967468,
0.0698954239487648,
0.12838201224803925,
-0.2142050415277481,
0.24951927363872528,
-0.11945156008005142,
-0.2881927192211151,
0.3725072741508484,
0.4160609841346741,
-0.6910846829414368,
-0.6835987567901611,
-0.421222060918808,
0.2465852051973343,... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
cenkersisman/viki_soru_cevap | cenkersisman | 2023-07-04T16:37:08Z | 12 | 0 | null | [
"region:us"
] | 2023-07-04T16:37:08Z | 2023-06-16T12:33:31.000Z | 2023-06-16T12:33:31 | ---
dataset_info:
features:
- name: answer
dtype: string
- name: question
dtype: string
- name: title
dtype: string
splits:
- name: train
num_bytes: 5319410
num_examples: 34983
download_size: 2529944
dataset_size: 5319410
---
# Dataset Card for "viki_soru_cevap"
## About
This dataset is a question-and-answer dataset built from content on the Turkish Wikipedia. It was generated synthetically. Although the answers are taken from the context text, their accuracy is not guaranteed. The questions were also generated synthetically.
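A minimal loading sketch with the 🤗 `datasets` library (fields follow the YAML header above):
```python
from datasets import load_dataset

ds = load_dataset("cenkersisman/viki_soru_cevap", split="train")
print(ds[0]["title"], "->", ds[0]["question"])
```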
## The topics containing the most question-answer pairs, by title, are listed below:
* Futbol rekabetleri listesi: 313 entries
* Cengiz Han: 310 entries
* Triple H: 196 entries
* Lüleburgaz Muharebesi: 158 entries
* Zümrüdüanka Yoldaşlığı: 155 entries
* Shakespeare eserleri çevirileri listesi: 145 entries
* Kırkpınar Yağlı Güreşleri: 142 entries
* Sovyetler Birliği'nin askerî tarihi: 136 entries
* I. Baybars: 135 entries
* Dumbledore'un Ordusu: 126 entries
* Nicolaus Copernicus: 119 entries
* Ermenistan Sovyet Sosyalist Cumhuriyeti: 111 entries
* Boshin Savaşı: 99 entries
* Suvorov Harekâtı: 98 entries
* Gökhan Türkmen: 96 entries
* Wolfgang Amadeus Mozart: 95 entries
* Joachim von Ribbentrop: 95 entries
* Rumyantsev Harekâtı: 94 entries
* Hermann Göring: 93 entries
* Nâzım Hikmet: 90 entries
* Said Nursî: 90 entries
* Emîn: 88 entries
* Antonio Gramsci: 87 entries
* Gilles Deleuze: 86 entries
* Madagaskar: 86 entries
* Faşizm: 85 entries
* Mac OS X Snow Leopard: 85 entries
* Korsun-Şevçenkovski Taarruzu: 84 entries
* Soğuk Savaş: 84 entries
* Adolf Eichmann: 83 entries
* Niccolò Paganini: 83 entries
* II. Dünya Savaşı tankları: 81 entries
* Pergamon: 81 entries
* IV. Mihail: 80 entries
* Bolşeviklere karşı sol ayaklanmalar: 77 entries
* Osman Gazi: 77 entries
* V. Leon: 76 entries
* Ajda Pekkan: 75 entries
* Mehdi Savaşı: 75 entries
* Tsushima Muharebesi: 73 entries
* Mehdî (Abbâsî halifesi): 72 entries
* Franck Ribéry: 72 entries
* I. Basileios: 69 entries
* Antimon: 68 entries
* Kolomb öncesi Amerika: 68 entries
* Otto Skorzeny: 68 entries
* Kâzım Koyuncu: 68 entries
* İmamiye (Şiilik öğretisi): 66 entries
* Oscar Niemeyer: 66 entries
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.7496187090873718,
-0.18986594676971436,
0.10849155485630035,
0.1674014776945114,
-0.3952459692955017,
-0.2451997697353363,
-0.17423921823501587,
-0.16711533069610596,
0.4997805655002594,
0.5069241523742676,
-0.7204461693763733,
-0.8459650278091431,
-0.6705209612846375,
0.100813277065753... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
KaiLv/UDR_DBPedia | KaiLv | 2023-06-21T12:36:18Z | 12 | 0 | null | [
"region:us"
] | 2023-06-21T12:36:18Z | 2023-06-21T12:36:09.000Z | 2023-06-21T12:36:09 | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: label
dtype: int64
- name: headline
dtype: string
- name: sentence
dtype: string
splits:
- name: train
num_bytes: 3276812
num_examples: 10000
- name: test
num_bytes: 981362
num_examples: 3000
- name: debug
num_bytes: 1641080
num_examples: 5000
download_size: 3950542
dataset_size: 5899254
---
# Dataset Card for "DBPedia"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.7255640625953674,
-0.31716740131378174,
0.21683213114738464,
0.19700336456298828,
-0.15026982128620148,
-0.09515123814344406,
0.14091208577156067,
-0.23096847534179688,
0.9280035495758057,
0.42092365026474,
-0.9911530613899231,
-0.7509352564811707,
-0.23996996879577637,
-0.1890749037265... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
KaiLv/UDR_WikiAuto | KaiLv | 2023-06-21T12:52:19Z | 12 | 0 | null | [
"region:us"
] | 2023-06-21T12:52:19Z | 2023-06-21T12:51:10.000Z | 2023-06-21T12:51:10 | ---
dataset_info:
features:
- name: idx
dtype: int64
- name: gem_id
dtype: string
- name: gem_parent_id
dtype: string
- name: source
dtype: string
- name: target
dtype: string
- name: references
list: string
- name: len_source
dtype: int64
- name: len_target
dtype: int64
splits:
- name: train
num_bytes: 171935945
num_examples: 481018
- name: validation
num_bytes: 857630
num_examples: 1999
- name: test_asset
num_bytes: 483952
num_examples: 359
- name: test_turk
num_bytes: 415458
num_examples: 359
- name: test_wiki
num_bytes: 248732
num_examples: 403
- name: debug
num_bytes: 35726046
num_examples: 100000
download_size: 115397698
dataset_size: 209667763
---
# Dataset Card for "UDR_WikiAuto"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.7628813982009888,
-0.08264224231243134,
0.11864981800317764,
0.13975238800048828,
-0.27062270045280457,
-0.11614210903644562,
0.12094434350728989,
-0.264443039894104,
0.7418108582496643,
0.42531755566596985,
-0.8141146898269653,
-0.670433521270752,
-0.52785724401474,
-0.0504380352795124... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
ltkw98/mapping | ltkw98 | 2023-06-22T13:01:48Z | 12 | 0 | null | [
"region:us"
] | 2023-06-22T13:01:48Z | 2023-06-22T13:01:46.000Z | 2023-06-22T13:01:46 | ---
dataset_info:
features:
- name: sentence
dtype: string
- name: tec_name
dtype: string
- name: label
dtype: int64
splits:
- name: train
num_bytes: 369062
num_examples: 2358
download_size: 165236
dataset_size: 369062
---
# Dataset Card for "mapping"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.7185741662979126,
-0.23984882235527039,
0.3420439064502716,
0.28708070516586304,
-0.10375087708234787,
-0.06708861142396927,
0.13813166320323944,
-0.2218891680240631,
0.7393994927406311,
0.5372524857521057,
-0.7562122344970703,
-0.9678282737731934,
-0.7521095871925354,
-0.43539801239967... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
musabg/wizard_vicuna_70k_unfiltered_de | musabg | 2023-06-25T07:09:36Z | 12 | 2 | null | [
"region:us"
] | 2023-06-25T07:09:36Z | 2023-06-25T07:09:12.000Z | 2023-06-25T07:09:12 | ---
dataset_info:
features:
- name: id
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
splits:
- name: train
num_bytes: 159146233
num_examples: 34598
download_size: 79402352
dataset_size: 159146233
---
# Dataset Card for "wizard_vicuna_70k_unfiltered_de"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.5811784863471985,
-0.2547730505466461,
0.06068316474556923,
0.08100758492946625,
-0.49526122212409973,
-0.16139107942581177,
0.19820554554462433,
0.042044591158628464,
0.7063058018684387,
1.074668049812317,
-0.7489149570465088,
-0.8798185586929321,
-0.5852712988853455,
-0.02380375377833... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
BAAI/COIG-PC-Lite | BAAI | 2023-09-26T08:51:45Z | 12 | 21 | null | [
"language:zh",
"license:unknown",
"region:us"
] | 2023-09-26T08:51:45Z | 2023-06-28T02:56:01.000Z | 2023-06-28T02:56:01 | ---
extra_gated_heading: "Acknowledge license to accept the repository"
extra_gated_prompt: |
  The Beijing Academy of Artificial Intelligence (hereinafter "we" or "the Academy") provides open-source datasets (hereinafter also "the datasets") through BAAI DataHub (data.baai.ac.cn) and the COIG-PC HuggingFace repository (https://huggingface.co/datasets/BAAI/COIG-PC). You may obtain the open-source datasets you need by downloading them and, provided you comply with the usage rules of each original dataset, use them for purposes such as learning, research, and commerce.
  Before you obtain the open-source datasets (where "obtain" includes but is not limited to accessing, downloading, copying, distributing, using, or otherwise processing them), you should carefully read and understand this "COIG-PC Open-Source Dataset Usage Notice and Disclaimer" (hereinafter "this Statement"). Once you obtain the open-source datasets, however you obtain them, your act of obtaining them will be deemed acceptance of the entire content of this Statement.
  1. Ownership and Operation of the Platform
  You should fully understand and acknowledge that ownership and operation rights of BAAI DataHub and the COIG-PC HuggingFace repository (including the current version and all historical versions) belong to the Beijing Academy of Artificial Intelligence, which holds the final right of interpretation and decision over this platform/tool and the open-source dataset release program.
  You acknowledge and understand that, in light of updates to relevant laws and regulations and objective changes in our legal compliance obligations, we reserve the right to update and maintain this platform/tool from time to time, or to suspend or even permanently terminate its services. We will notify you of any such situation within a reasonable time by announcement, email, or other reasonable means, and you should make corresponding adjustments and arrangements promptly; however, we assume no liability for any loss caused to you by any of the foregoing.
  2. Rights Claims over the Open-Source Datasets
  To facilitate your acquisition and use of datasets for learning, research, and commercial purposes, we have performed necessary processing on third-party original datasets, such as format consolidation, data cleaning, labeling, classification, and annotation, to form open-source datasets usable by users of this platform/tool.
  You acknowledge and understand that we do not claim the property rights within intellectual property over the open-source datasets, and therefore have no corresponding obligation to proactively identify and protect any intellectual property that may exist in them; however, this does not mean we waive moral rights such as the rights of attribution, publication, modification, and integrity of the work (if any). Any intellectual property and corresponding legitimate rights in the original datasets remain with the original rights holders.
  Furthermore, making available to you open-source datasets that have been reasonably arranged, processed, and handled does not imply our endorsement of the authenticity, accuracy, or uncontested status of the intellectual property or informational content of the original datasets; you should screen and carefully verify the open-source datasets you choose to use. You acknowledge and agree that the Academy bears no commitment or warranty that the original datasets you choose to use are free of defects or flaws.
  3. Restrictions on Use of the Open-Source Datasets
  Your use of the datasets must not infringe the legitimate rights and interests of us or any third party (including but not limited to copyrights, patents, trademarks, and other intellectual property and other rights).
  After obtaining the open-source datasets, you should ensure that your use does not exceed the usage rules explicitly specified by the rights holders of the original datasets through public notices, agreements, or other forms, including the scope, purpose, and lawful uses of the original data. We kindly remind you that if your use of the open-source datasets exceeds the original datasets' intended scope and purpose, you may risk infringing the legitimate rights and interests of the original rights holders, such as intellectual property, and may bear corresponding legal liability.
  4. Personal Information Protection
  Due to objective reasons such as technical limitations and the public-interest nature of the open-source datasets, we cannot guarantee that they contain no personal information, and we assume no legal liability for any personal information they may involve.
  If the open-source datasets involve personal information, we assume no legal liability for any personal-information processing you may carry out when using them. We kindly remind you to process personal information in accordance with the Personal Information Protection Law and other relevant laws and regulations.
  To safeguard the legitimate rights and interests of data subjects and comply with potentially applicable laws and administrative regulations, if you discover content that involves or may involve personal information while using the open-source datasets, you should immediately stop using the parts of the datasets involving personal information and contact us promptly through the channel stated in "6. Complaints and Notices".
  5. Information Content Management
  We assume no legal liability for any illegal or harmful information the open-source datasets may involve.
  If you discover that the open-source datasets involve or may involve any illegal or harmful information while using them, you should immediately stop using the affected parts and contact us promptly through the channel stated in "6. Complaints and Notices".
  6. Complaints and Notices
  If you believe the open-source datasets infringe your legitimate rights and interests, you may contact us at 010-50955974, and we will handle your claims and complaints promptly in accordance with the law.
  To handle your claims and complaints, we may need you to provide contact information, evidence of infringement, proof of identity, and other materials. Please note that if you file complaints in bad faith or make false statements, you will bear all resulting legal liability (including but not limited to compensation for reasonable costs).
  7. Disclaimer of Liability
  You understand and agree that, given the nature of the open-source datasets, they may contain data from different sources and contributors whose authenticity, accuracy, and objectivity may vary, and we cannot make any commitments regarding the availability or reliability of any dataset.
  Under no circumstances do we assume any legal liability for risks the open-source datasets may pose, such as personal-information infringement, dissemination of illegal or harmful information, or intellectual-property infringement.
  Under no circumstances do we assume any legal liability for any loss you suffer from or in connection with the open-source datasets (including but not limited to direct loss, indirect loss, and loss of anticipated profits).
  8. Miscellaneous
  The open-source datasets are continually evolving; we may, for reasons such as business development, third-party cooperation, or changes in laws and regulations, update or adjust the scope of the open-source datasets provided, or suspend, pause, or terminate the dataset provision service.
extra_gated_fields:
Name: text
Affiliation: text
Country: text
I agree to use this model for non-commercial use ONLY: checkbox
extra_gated_button_content: "Acknowledge license"
license: unknown
language:
- zh
configs:
- config_name: default
data_files:
- split: full
path: data/full-*
- split: train
path: data/train-*
- split: valid
path: data/valid-*
- split: test
path: data/test-*
- split: Top50PerTask
path: data/Top50PerTask-*
- split: Top100PerTask
path: data/Top100PerTask-*
- split: Top200PerTask
path: data/Top200PerTask-*
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: output
dtype: string
- name: split
dtype: string
- name: task_name_in_eng
dtype: string
- name: task_type
struct:
- name: major
sequence: string
- name: minor
sequence: string
- name: domain
sequence: string
- name: other
dtype: string
- name: filename
dtype: string
splits:
- name: full
num_bytes: 1099400407
num_examples: 650147
- name: train
num_bytes: 410204689
num_examples: 216691
- name: valid
num_bytes: 12413560
num_examples: 16148
- name: test
num_bytes: 51472090
num_examples: 69301
- name: Top50PerTask
num_bytes: 14763925
num_examples: 19274
- name: Top100PerTask
num_bytes: 28489139
num_examples: 37701
- name: Top200PerTask
num_bytes: 51472090
num_examples: 69301
download_size: 53939740
dataset_size: 1668215900
---
# COIG Prompt Collection
## License
**Default Licensing for Sub-Datasets Without Specific License Declaration**: In instances where sub-datasets within the COIG-PC Dataset do not have a specific license declaration, the Apache License 2.0 (Apache-2.0) will be the applicable licensing terms by default.
**Precedence of Declared Licensing for Sub-Datasets**: For any sub-dataset within the COIG-PC Dataset that has an explicitly declared license, the terms and conditions of the declared license shall take precedence and govern the usage of that particular sub-dataset.
Users and developers utilizing the COIG-PC Dataset must ensure compliance with the licensing terms as outlined above. It is imperative to review and adhere to the specified licensing conditions of each sub-dataset, as they may vary.
## What is COIG-PC?
The COIG-PC Dataset is a meticulously curated and comprehensive collection of Chinese tasks and data, designed to facilitate the fine-tuning and optimization of language models for Chinese natural language processing (NLP). The dataset aims to provide researchers and developers with a rich set of resources to improve the capabilities of language models in handling Chinese text, which can be utilized in various fields such as text generation, information extraction, sentiment analysis, machine translation, among others.
COIG-PC-Lite is a subset of COIG-PC with only 200 samples from each task file. If you are looking for COIG-PC, please refer to https://huggingface.co/datasets/BAAI/COIG-PC.
## Why COIG-PC?
The COIG-PC Dataset is an invaluable resource for the domain of natural language processing (NLP) for various compelling reasons:
**Addressing Language Complexity**: Chinese is known for its intricacy, with a vast array of characters and diverse grammatical structures. A specialized dataset like COIG-PC, which is tailored for the Chinese language, is essential to adequately address these complexities during model training.
**Comprehensive Data Aggregation**: The COIG-PC Dataset is a result of an extensive effort in integrating almost all available Chinese datasets in the market. This comprehensive aggregation makes it one of the most exhaustive collections for Chinese NLP.
**Data Deduplication and Normalization**: The COIG-PC Dataset underwent rigorous manual processing to eliminate duplicate data and perform normalization. This ensures that the dataset is free from redundancy, and the data is consistent and well-structured, making it more user-friendly and efficient for model training.
**Fine-tuning and Optimization**: The dataset’s instruction-based phrasing facilitates better fine-tuning and optimization of language models. This structure allows models to better understand and execute tasks, which is particularly beneficial in improving performance on unseen or novel tasks.
The COIG-PC Dataset, with its comprehensive aggregation, meticulous selection, deduplication, and normalization of data, stands as an unmatched resource for training and optimizing language models tailored for the Chinese language and culture. It addresses the unique challenges of Chinese language processing and serves as a catalyst for advancements in Chinese NLP.
## Who builds COIG-PC?
COIG-PC is built on the dataset furnished by stardust.ai, which aggregates data collected from the Internet.
COIG-PC itself is the result of a collaborative effort involving engineers and experts from over twenty distinguished universities in China and abroad. Due to space constraints, it is not feasible to list all of them; however, the following are a few notable institutions among the collaborators:
- Beijing Academy of Artificial Intelligence, China
<img src="https://huggingface.co/datasets/BAAI/COIG-PC-Lite/resolve/main/assets/baai.png" alt= “BAAI” height="100" width="150">
- Peking University, China
<img src="https://huggingface.co/datasets/BAAI/COIG-PC-Lite/resolve/main/assets/pku.png" alt= “PKU” height="100" width="200">
- The Hong Kong University of Science and Technology (HKUST), China
<img src="https://huggingface.co/datasets/BAAI/COIG-PC-Lite/resolve/main/assets/hkust.png" alt= “HKUST” height="100" width="200">
- The University of Waterloo, Canada
<img src="https://huggingface.co/datasets/BAAI/COIG-PC-Lite/resolve/main/assets/waterloo.png" alt= “Waterloo” height="100" width="150">
- The University of Sheffield, United Kingdom
<img src="https://huggingface.co/datasets/BAAI/COIG-PC-Lite/resolve/main/assets/sheffield.png" alt= “Sheffield” height="100" width="200">
- Beijing University of Posts and Telecommunications, China
<img src="https://huggingface.co/datasets/BAAI/COIG-PC-Lite/resolve/main/assets/bupt.png" alt= “BUPT” height="100" width="200">
- [Multimodal Art Projection](https://huggingface.co/m-a-p)
<img src="https://huggingface.co/datasets/BAAI/COIG-PC-Lite/resolve/main/assets/map.png" alt= “M.A.P” height="100" width="200">
- stardust.ai, China
<img src="https://huggingface.co/datasets/BAAI/COIG-PC-Lite/resolve/main/assets/stardust.png" alt= “stardust.ai” height="100" width="200">
- LinkSoul.AI, China
<img src="https://huggingface.co/datasets/BAAI/COIG-PC-Lite/resolve/main/assets/linksoul.png" alt= “linksoul.ai” height="100" width="200">
For the detailed list of engineers involved in the creation and refinement of COIG-PC, please refer to the paper that will be published subsequently. This paper will provide in-depth information regarding the contributions and the specifics of the dataset’s development process.
## How to use COIG-PC?
COIG-PC is structured in a **.jsonl** file format. Each line in the file represents a single data record and is structured in JSON (JavaScript Object Notation) format. Below is a breakdown of the elements within each line:
**instruction**: This is a text string that provides the instruction for the task. For example, it might tell the model what to do with the input data.
**input**: This is the input data that the model needs to process. In the context of translation, it would be the text that needs to be translated.
**output**: This contains the expected output data after processing the input. In the context of translation, it would be the translated text.
**split**: Indicates the official split of the original dataset, which is used to categorize data for different phases of model training and evaluation. It can be 'train', 'test', 'valid', etc.
**task_type**: Contains major and minor categories for the dataset. Major categories are broader, while minor categories can be more specific subcategories.
**domain**: Indicates the domain or field to which the data belongs.
**other**: This field can contain additional information or metadata regarding the data record. If there is no additional information, it may be set to null.
### Example
Here is an example of how a line in the COIG-PC dataset might be structured:
```
{
"instruction": "请把下面的中文句子翻译成英文",
"input": "我爱你。",
"output": "I love you.",
"split": "train",
"task_type": {
"major": ["翻译"],
"minor": ["翻译", "中译英"]
},
"domain": ["通用"],
"other": null
}
```
In this example:
**instruction** tells the model to translate the following Chinese sentence into English.
**input** contains the Chinese text "我爱你" which means "I love you".
**output** contains the expected translation in English: "I love you".
**split** indicates that this data record is part of the training set.
**task_type** specifies that the major category is "Translation" and the minor categories are "Translation" and "Chinese to English".
**domain** specifies that this data record belongs to the general domain.
**other** is set to null as there is no additional information for this data record.
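If you prefer not to parse the **.jsonl** files yourself, the dataset can also be loaded from the Hub with the `datasets` library. A minimal sketch, assuming you have already accepted the gating license and using the `train` split declared in the config above:
```python
from datasets import load_dataset

# requires accepting the gating license on the Hugging Face Hub first
ds = load_dataset("BAAI/COIG-PC-Lite", split="train")

record = ds[0]
print(record["instruction"])  # what the model should do
print(record["input"])        # the text to process
print(record["output"])       # the expected result
print(record["task_type"]["major"], record["task_type"]["minor"])
```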
## Update: Aug. 30, 2023
- v1.2: Delete 31 bad task files. Update 99 task files. Rename 2 task files. Add 3 new task files. COIG-PC now has 3339 tasks in total.
- v1.1: Fix 00040-001-000 and 00050-003-000, ignore 00930 and 01373.
- v1.0: First version for arXiv paper.
- v0.6: Upload 28 new tasks. COIG-PC now has 3367 tasks in total.
- v0.5: Upload 202 new tasks. COIG-PC now has 3339 tasks in total.
- v0.4: Upload 1049 new tasks. COIG-PC now has 3137 tasks in total.
- v0.3: Upload 1139 new tasks. COIG-PC now has 2088 tasks in total.
- v0.2: Upload 422 new tasks. COIG-PC now has 949 tasks in total. Add "TopSamplenumPerTask" split where only "Samplenum" samples are used from each task.
- v0.1: Upload 527 tasks.
## COIG-PC Citation
If you want to cite the COIG-PC dataset, you can use this:
```
```
## Contact Us
To contact us feel free to create an Issue in this repository.
| [
-0.46910330653190613,
-0.6885352730751038,
-0.07270832359790802,
0.32628458738327026,
-0.2499954104423523,
-0.14340496063232422,
-0.27672630548477173,
-0.5663347840309143,
0.2040579915046692,
0.2398177832365036,
-0.8083123564720154,
-0.5314859747886658,
-0.3061283230781555,
0.0675348937511... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
llm-lens/lens_sample_test | llm-lens | 2023-09-18T01:27:52Z | 12 | 0 | null | [
"region:us"
] | 2023-09-18T01:27:52Z | 2023-06-29T03:45:56.000Z | 2023-06-29T03:45:56 | ---
dataset_info:
features:
- name: image
dtype: image
- name: label
dtype:
class_label:
names:
'0': abyssinian
'1': american bulldog
'2': american pit bull terrier
'3': basset hound
'4': beagle
'5': bengal
'6': birman
'7': bombay
'8': boxer
'9': british shorthair
'10': chihuahua
'11': egyptian mau
'12': english cocker spaniel
'13': english setter
'14': german shorthaired
'15': great pyrenees
'16': havanese
'17': japanese chin
'18': keeshond
'19': leonberger
'20': maine coon
'21': miniature pinscher
'22': newfoundland
'23': persian
'24': pomeranian
'25': pug
'26': ragdoll
'27': russian blue
'28': saint bernard
'29': samoyed
'30': scottish terrier
'31': shiba inu
'32': siamese
'33': sphynx
'34': staffordshire bull terrier
'35': wheaten terrier
'36': yorkshire terrier
- name: id
dtype: int64
- name: tags_laion-ViT-H-14-2B
sequence: string
- name: attributes_laion-ViT-H-14-2B
sequence: string
- name: caption_Salesforce-blip-image-captioning-large
dtype: string
- name: intensive_captions_Salesforce-blip-image-captioning-large
sequence: string
splits:
- name: test
num_bytes: 183543.0
num_examples: 10
download_size: 162581
dataset_size: 183543.0
---
# Dataset Card for "lens_sample_test"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.6850638389587402,
-0.3349848687648773,
0.21557217836380005,
0.11079677194356918,
-0.0854586586356163,
-0.22168032824993134,
0.20493869483470917,
-0.15433785319328308,
0.6129616498947144,
0.32188090682029724,
-1.108965516090393,
-0.7712062001228333,
-0.39217740297317505,
-0.2253899574279... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
TrainingDataPro/monitors-replay-attacks-dataset | TrainingDataPro | 2023-09-14T16:54:44Z | 12 | 2 | null | [
"task_categories:video-classification",
"language:en",
"license:cc-by-nc-nd-4.0",
"legal",
"region:us"
] | 2023-09-14T16:54:44Z | 2023-06-29T14:18:47.000Z | 2023-06-29T14:18:47 | ---
license: cc-by-nc-nd-4.0
task_categories:
- video-classification
language:
- en
tags:
- legal
dataset_info:
features:
- name: file
dtype: string
- name: phone
dtype: string
- name: computer
dtype: string
- name: gender
dtype: string
- name: age
dtype: int16
- name: country
dtype: string
splits:
- name: train
num_bytes: 588
num_examples: 10
download_size: 342902185
dataset_size: 588
---
# Monitors Replay Attacks Dataset
The dataset consists of videos of replay attacks played on different models of computer monitors. It supports anti-spoofing tasks and is useful for business and safety systems.
The dataset includes **replay attacks**: videos of real people played back on a computer screen and filmed with a phone.

# Get the dataset
### This is just an example of the data
Leave a request on [**https://trainingdata.pro/data-market**](https://trainingdata.pro/data-market?utm_source=huggingface&utm_medium=cpc&utm_campaign=monitors-replay-attacks-dataset) to discuss your requirements, learn about the price and buy the dataset.
# Content
The folder "attacks" includes videos of replay attacks
### Computer companies in the dataset:
- Dell
- LG
- ASUS
- HP
- Redmi
- AOC
- Samsung
### File with the extension .csv
includes the following information for each media file:
- **file**: link to access the replay video,
- **phone**: the device used to capture the replay video,
- **computer**: the device used to play the video,
- **gender**: gender of a person in the video,
- **age**: age of the person in the video,
- **country**: country of the person
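A minimal sketch of filtering the annotation fields above with pandas; the .csv filename here is hypothetical, since the actual name shipped with the dataset may differ:
```python
import pandas as pd

# hypothetical filename for the annotation file described above
df = pd.read_csv("monitors_replay_attacks.csv")

# e.g. keep only replay attacks played on Samsung monitors
samsung = df[df["computer"].str.contains("Samsung", case=False)]
print(samsung[["file", "phone", "gender", "age", "country"]].head())
```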
## [**TrainingData**](https://trainingdata.pro/data-market?utm_source=huggingface&utm_medium=cpc&utm_campaign=monitors-replay-attacks-dataset) provides high-quality data annotation tailored to your needs
More datasets in TrainingData's Kaggle account: **https://www.kaggle.com/trainingdatapro/datasets**
TrainingData's GitHub: **https://github.com/Trainingdata-datamarket/TrainingData_All_datasets** | [
-0.41592928767204285,
-0.7653676867485046,
-0.13320162892341614,
0.1932557076215744,
-0.19493162631988525,
0.2898358702659607,
0.17224377393722534,
-0.05186847224831581,
0.6642356514930725,
0.5636507272720337,
-0.7002637386322021,
-0.4234853684902191,
-0.5771667957305908,
-0.28084149956703... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
Falah/sentiments-dataset-381-classes | Falah | 2023-07-05T10:31:19Z | 12 | 1 | null | [
"task_categories:text-classification",
"size_categories:1K<n<10K",
"language:en",
"license:apache-2.0",
"region:us"
] | 2023-07-05T10:31:19Z | 2023-07-05T10:08:25.000Z | 2023-07-05T10:08:25 | ---
dataset_info:
features:
- name: text
dtype: string
- name: sentiment
dtype: string
splits:
- name: train
num_bytes: 104602
num_examples: 1061
download_size: 48213
dataset_size: 104602
license: apache-2.0
task_categories:
- text-classification
language:
- en
pretty_name: sentiments-dataset-381-classes
size_categories:
- 1K<n<10K
---
# Sentiments Dataset (381 Classes)
## Dataset Description
This dataset contains a collection of labeled sentences categorized into 381 different sentiment classes. The dataset provides a wide range of sentiment labels to facilitate fine-grained sentiment analysis tasks. Each sentence is associated with a sentiment class name.
## Dataset Information
- Number of classes: 381
- Features: `text` (string), `sentiment` (string)
- Number of examples: 1,061
## Class Names
The dataset includes the following sentiment class names as examples:
- Positive
- Negative
- Neutral
- Joyful
- Disappointed
- Worried
- Surprised
- Grateful
- Indifferent
- Sad
- Angry
- Relieved
- Sentiment
- Excited
- Hopeful
- Anxious
- Satisfied
- Happy
- Nostalgic
- Inspired
- Impressed
- Amazed
- Touched
- Proud
- Intrigued
- Relaxed
- Content
- Comforted
- Motivated
- Frustrated
- Delighted
- Moved
- Curious
- Fascinated
- Engrossed
- Addicted
- Eager
- Provoked
- Energized
- Controversial
- Significant
- Revolutionary
- Optimistic
- Impactful
- Compelling
- Enchanted
- Peaceful
- Disillusioned
- Thrilled
- Consumed
- Engaged
- Trendy
- Informative
- Appreciative
- Enthralled
- Enthusiastic
- Influenced
- Validated
- Reflective
- Emotional
- Concerned
- Promising
- Empowered
- Memorable
- Transformative
- Inclusive
- Groundbreaking
- Evocative
- Respectful
- Outraged
- Unity
- Enlightening
- Artistic
- Cultural
- Diverse
- Vibrant
- Prideful
- Captivated
- Revealing
- Inspiring
- Admiring
- Empowering
- Connecting
- Challenging
- Symbolic
- Immersed
- Evolving
- Insightful
- Reformative
- Celebratory
- Validating
- Diversity
- Eclectic
- Comprehensive
- Uniting
- Influential
- Honoring
- Transporting
- Resonating
- Chronicle
- Preserving
- Replicated
- Impressive
- Fascinating
- Tributary
- Momentum
- Awe-inspiring
- Unearthing
- Exploratory
- Immersive
- Transportive
- Personal
- Resilient
- Mesmerized
- Legendary
- Awareness
- Evidence-based
- Contemporary
- Connected
- Valuable
- Referencing
- Camaraderie
- Inspirational
- Evoke
- Emotive
- Chronicling
- Educational
- Serene
- Colorful
- Melodious
- Dramatic
- Enlivened
- Wonderstruck
- Enchanting
- Grandiose
- Abundant
- Harmonious
- Captivating
- Mesmerizing
- Dedicated
- Powerful
- Mystical
- Picturesque
- Opulent
- Revitalizing
- Fragrant
- Spellbinding
- Lush
- Breathtaking
- Passionate
- Melodic
- Wonderland
- Invigorating
- Dappled
- Flourishing
- Ethereal
- Elaborate
- Kaleidoscope
- Harmonizing
- Tragic
- Transforming
- Marveling
- Enveloped
- Reverberating
- Sanctuary
- Graceful
- Spectacular
- Golden
- Melancholic
- Transcendent
- Delicate
- Awakening
- Intertwined
- Indelible
- Verdant
- Heartrending
- Fiery
- Inviting
- Majestic
- Lullaby-like
- Kissed
- Behold
- Soulful
- Splendid
- Whispering
- Masterpiece
- Moving
- Crystalline
- Tapestry
- Haunting
- Renewal
- Wisdom-filled
- Stunning
- Sun-kissed
- Symphony
- Awestruck
- Dancing
- Heart-wrenching
- Magical
- Gentle
- Emotion-evoking
- Embracing
- Floating
- Tranquil
- Celestial
- Breathless
- Symphonic
- Stillness
- Delightful
- Flawless
- Commanding
- Embraced
- Heartfelt
- Precise
- Adorned
- Beautiful
- Scattering
- Timeless
- Radiant
- Regal
- Sparkling
- Resilience
- Recognized
- Echoing
- Rebirth
- Cradled
- Tirelessly
- Glowing
- Icy
- Brilliant
- Anticipation
- Awakened
- Blossoming
- Enthralling
- Excitement
- Vivid
- Spellbound
- Mellifluous
- Intricate
- Silent
- Contrasting
- Poignant
- Perfumed
- Pure
- Magnificent
- Exquisite
- Anguished
- Harmonic
- Kaleidoscopic
- Gripping
- Soothing
- Intense
- Poetic
- Fragile
- Unwavering
- Intriguing
- Fairy-tale
- Ephemeral
- Joyous
- Resplendent
- Elegant
- Coaxing
- Illuminating
- Thunderous
- Cool
- Exciting
- Teeming
- Blissful
- Enduring
- Raw
- Adventurous
- Mysterious
- Enrapturing
- Marvelous
- Swirling
- Resonant
- Careful
- Whimsical
- Intertwining
- ...and more
## Usage example
```python
from datasets import load_dataset
import pandas as pd

# Load the dataset
dataset = load_dataset("Falah/sentiments-dataset-381-classes")

# Convert the train split to a pandas DataFrame
df = pd.DataFrame(dataset['train'])

# Get the unique class names from the "sentiment" column
class_names = df['sentiment'].unique()

# Print the unique class names
for name in class_names:
    print(f"Class Name: {name}")
```
## Application
The Sentiments Dataset (381 Classes) can be applied in various NLP applications, such as sentiment analysis and text classification.
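Before training a classifier, it can help to inspect how the 1,061 examples are spread across the 381 classes; a minimal sketch using the `train` split:
```python
from collections import Counter
from datasets import load_dataset

ds = load_dataset("Falah/sentiments-dataset-381-classes", split="train")

# with 381 classes and ~1K examples, most classes sit in a long tail
counts = Counter(ds["sentiment"])
print(counts.most_common(10))
```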
## Citation
If you use this dataset in your research or publication, please cite it as follows. For more information or inquiries about the dataset, please contact the dataset author(s) mentioned in the citation.
```
@dataset{sentiments_dataset_381_classes,
  author = {Falah.G.Salieh},
  title = {Sentiments Dataset (381 Classes)},
  year = {2023},
  publisher = {Hugging Face},
  url = {https://huggingface.co/datasets/Falah/sentiments-dataset-381-classes},
}
``` | [
-0.5930806398391724,
-0.39474794268608093,
0.107637919485569,
0.5748068690299988,
-0.2796781361103058,
0.17621876299381256,
-0.2659318149089813,
-0.19859348237514496,
0.5933912396430969,
0.36789995431900024,
-0.740088701248169,
-0.98158198595047,
-0.6383050680160522,
0.3982381820678711,
... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
pie/tacred | pie | 2023-11-27T10:00:59Z | 12 | 0 | null | [
"region:us"
] | 2023-11-27T10:00:59Z | 2023-07-06T15:44:15.000Z | 2023-07-06T15:44:15 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622263669967651,
0.43461522459983826,
-0.52829909324646,
0.7012971639633179,
0.7915719747543335,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104475975036621,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
TrainingDataPro/generated-usa-passeports-dataset | TrainingDataPro | 2023-09-14T16:57:10Z | 12 | 1 | null | [
"task_categories:image-to-image",
"language:en",
"license:cc-by-nc-nd-4.0",
"region:us"
] | 2023-09-14T16:57:10Z | 2023-07-07T11:32:28.000Z | 2023-07-07T11:32:28 | ---
license: cc-by-nc-nd-4.0
task_categories:
- image-to-image
language:
- en
dataset_info:
features:
- name: original
dtype: image
- name: us_pass_augmentated_1
dtype: image
- name: us_pass_augmentated_2
dtype: image
- name: us_pass_augmentated_3
dtype: image
splits:
- name: train
num_bytes: 224948826
num_examples: 23
download_size: 142865341
dataset_size: 224948826
---
# GENERATED USA Passports Dataset
**Data generation** in machine learning involves creating or manipulating data to train and evaluate machine learning models. The purpose of data generation is to provide diverse and representative examples that cover a wide range of scenarios, ensuring the model's robustness and generalization.
Data augmentation techniques involve applying various transformations to existing data samples to create new ones. These transformations include: *random rotations, translations, scaling, flips, and more*. Augmentation helps in increasing the dataset size, introducing natural variations, and improving model performance by making it more invariant to specific transformations.
The dataset contains **GENERATED** USA passports, which are replicas of official passports but with randomly generated details, such as name, date of birth, etc. The primary intention of generating these fake passports is to demonstrate the structure and content of a typical passport document and to train the neural network to identify this type of document.
Generated passports can assist in conducting research without accessing or compromising real user data that is often sensitive and subject to privacy regulations. Synthetic data generation allows researchers to develop and refine models using simulated passport data without risking privacy leaks.
### The dataset is solely for informational or educational purposes and should not be used for any fraudulent or deceptive activities.
.png?generation=1688719414649908&alt=media)
# Get the dataset
### This is just an example of the data
Leave a request on [**https://trainingdata.pro/data-market**](https://trainingdata.pro/data-market?utm_source=huggingface&utm_medium=cpc&utm_campaign=generated-usa-passeports-dataset) to discuss your requirements, learn about the price and buy the dataset.
# Content
### Folders
- **original**: includes original generated images of USA passports
- **augmentation**: contains subfolders corresponding to the original photos, each including 3 black-and-white generated passport scans with different photo edits.
The augmented photos feature random rotations, noise, and brightness changes. Augmentation varies in the amount of noise and blur applied to the passport images, from slight (**us_pass_augmentated_1**) to significant (**us_pass_augmentated_3**).
### File with the extension .csv
includes the following information for each media file:
- **original**: link to access the image of the generated passport,
- **us_pass_augmentated_1**: link to the first augmented image,
- **us_pass_augmentated_2**: link to the second augmented image,
- **us_pass_augmentated_3**: link to the third augmented image
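A minimal sketch of loading the dataset from the Hub and accessing the original and augmented images, using the feature names declared in the dataset config above:
```python
from datasets import load_dataset

ds = load_dataset("TrainingDataPro/generated-usa-passeports-dataset", split="train")

row = ds[0]
original = row["original"]                # PIL image of the generated passport
heavy_aug = row["us_pass_augmentated_3"]  # the most heavily augmented scan
print(original.size, heavy_aug.size)
```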
# USA Passport Photos might be generated in accordance with your requirements.
## [**TrainingData**](https://trainingdata.pro/data-market?utm_source=huggingface&utm_medium=cpc&utm_campaign=generated-usa-passeports-dataset) provides high-quality data annotation tailored to your needs
More datasets in TrainingData's Kaggle account: **https://www.kaggle.com/trainingdatapro/datasets**
TrainingData's GitHub: **https://github.com/Trainingdata-datamarket/TrainingData_All_datasets** | [
-0.41834700107574463,
-0.48789528012275696,
0.09860342741012573,
-0.2666681110858917,
-0.26856285333633423,
0.22619257867336273,
0.16113707423210144,
-0.38686853647232056,
0.16661131381988525,
0.7640494704246521,
-0.7586113214492798,
-0.5340437889099121,
-0.25106173753738403,
0.18118953704... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
MightyStudent/Egyptian-ASR-MGB-3 | MightyStudent | 2023-08-31T08:30:42Z | 12 | 0 | null | [
"task_categories:automatic-speech-recognition",
"size_categories:1K<n<10K",
"language:ar",
"arabic",
"egypt",
"egyptian",
"ASR",
"automatic speech recognition",
"arxiv:1709.07276",
"region:us"
] | 2023-08-31T08:30:42Z | 2023-07-07T13:56:34.000Z | 2023-07-07T13:56:34 | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
splits:
- name: train
num_bytes: 1094421637.73819888
num_examples: 1138
download_size: 888280535
dataset_size: 1094421637.7381988
language:
- ar
tags:
- arabic
- egypt
- egyptian
- ASR
- automatic speech recognition
pretty_name: 'Egyptian Arabic dialect automatic speech recognition '
size_categories:
- 1K<n<10K
task_categories:
- automatic-speech-recognition
---
# Egyptian Arabic dialect automatic speech recognition
## Dataset Description
- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**
### Dataset Summary
This dataset was collected, cleaned, and adjusted for the Hugging Face Hub, and is ready to be used for Whisper fine-tuning/training.
[From MGB-3 website](http://www.mgb-challenge.org/MGB-3.html):
*The MGB-3 is using 16 hours multi-genre data collected from different YouTube channels. The 16 hours have been manually transcribed.
The chosen Arabic dialect for this year is Egyptian.
Given that dialectal Arabic has no orthographic rules, each program has been transcribed by four different transcribers using this transcription guidelines.*
### Supported Tasks and Leaderboards
ASR: automatic speech recognition
### Languages
Arabic - Egyptian dialect
### Data Fields
* audio: sampled at 16,000 Hz with a max duration of 30 sec (ideal for Whisper and other ASR models)
* sentence: the transcription in Egyptian Arabic
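A minimal sketch of loading the data and verifying that the audio matches what Whisper-style feature extractors expect; the cast is a no-op here, since the audio is already stored at 16 kHz, but it makes the expectation explicit:
```python
from datasets import load_dataset, Audio

ds = load_dataset("MightyStudent/Egyptian-ASR-MGB-3", split="train")
ds = ds.cast_column("audio", Audio(sampling_rate=16000))

sample = ds[0]
print(sample["sentence"])
print(sample["audio"]["array"].shape, sample["audio"]["sampling_rate"])
```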
## Dataset Creation
The YouTube videos that are still available (some have been deleted or made private) were downloaded and synced with the provided transcriptions.
The transcribed 12 minutes of each video were then cut into 30-second segments.
The resulting dataset was uploaded to Hugging Face.
[From MGB-3 website](http://www.mgb-challenge.org/MGB-3.html):
*Egyptian broadcast data collected from YouTube. This year, we collected about 80 programs from different YouTube channels. The first 12 minutes from each program has been transcribed and released. This sums up to roughly 16 hours in total*
### Source Data
* [MGB challenge website](http://www.mgb-challenge.org/MGB-3.html)
* [YouTube](https://www.youtube.com)
#### Initial Data Collection and Normalization
[Available on the MGB website](http://www.mgb-challenge.org/MGB-3.html)
#### Who are the source language producers?
[Available on the MGB website](http://www.mgb-challenge.org/MGB-3.html)
### Annotations
#### Annotation process
[Available on the MGB website](http://www.mgb-challenge.org/MGB-3.html)
#### Who are the annotators?
[Available on the MGB website](http://www.mgb-challenge.org/MGB-3.html)
### Personal and Sensitive Information
[Available on the MGB website](http://www.mgb-challenge.org/MGB-3.html)
### Social Impact of Dataset
[Available on the MGB website](http://www.mgb-challenge.org/MGB-3.html)
### Discussion of Biases
[Available on the MGB website](http://www.mgb-challenge.org/MGB-3.html)
### Other Known Limitations
[Available on the MGB website](http://www.mgb-challenge.org/MGB-3.html)
## Additional Information
### Dataset Curators
[Available on the MGB website](http://www.mgb-challenge.org/MGB-3.html)
### Licensing Information
[Available on the MGB website](http://www.mgb-challenge.org/MGB-3.html)
### Citation Information
[Available on the MGB website](http://www.mgb-challenge.org/MGB-3.html)
[Speech Recognition Challenge in the Wild: Arabic MGB-3](https://arxiv.org/abs/1709.07276)
### Contributions | [
-0.6540611982345581,
-0.5860259532928467,
0.14705461263656616,
0.44132310152053833,
-0.062171194702386856,
0.1158243790268898,
-0.20850428938865662,
-0.5637907385826111,
0.33025091886520386,
0.30930593609809875,
-0.591343343257904,
-0.7779545187950134,
-0.8474504947662354,
0.14045988023281... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
pdearena/NavierStokes-2D | pdearena | 2023-08-07T12:29:08Z | 12 | 0 | null | [
"license:mit",
"region:us"
] | 2023-08-07T12:29:08Z | 2023-07-10T21:34:45.000Z | 2023-07-10T21:34:45 | ---
license: mit
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
aiknight87/sample-finance-qas | aiknight87 | 2023-07-11T08:45:04Z | 12 | 1 | null | [
"region:us"
] | 2023-07-11T08:45:04Z | 2023-07-11T08:02:14.000Z | 2023-07-11T08:02:14 | Entry not found | [
-0.3227645754814148,
-0.22568479180335999,
0.8622264862060547,
0.43461528420448303,
-0.52829909324646,
0.7012971639633179,
0.7915720343589783,
0.07618614286184311,
0.774603009223938,
0.2563217282295227,
-0.7852813005447388,
-0.22573819756507874,
-0.9104477167129517,
0.5715674161911011,
-... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
DynamicSuperb/ChordClassification_AcousticGuitarAndPiano | DynamicSuperb | 2023-07-12T11:14:25Z | 12 | 0 | null | [
"region:us"
] | 2023-07-12T11:14:25Z | 2023-07-12T08:48:17.000Z | 2023-07-12T08:48:17 | ---
dataset_info:
features:
- name: file
dtype: string
- name: audio
dtype: audio
- name: label
dtype: string
- name: instruction
dtype: string
splits:
- name: test
num_bytes: 169780426.0
num_examples: 859
download_size: 148236033
dataset_size: 169780426.0
---
# Dataset Card for "chord_classification_acoustic_guitar_and_piano"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.6824198365211487,
-0.30637016892433167,
0.2187473028898239,
0.19072742760181427,
-0.06007228046655655,
0.18628688156604767,
-0.14727558195590973,
-0.1720750480890274,
0.6031666994094849,
0.3110376000404358,
-0.6145316958427429,
-1.0698920488357544,
-0.26642748713493347,
-0.2522842288017... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
csebuetnlp/dailydialogue_bn | csebuetnlp | 2023-07-22T07:41:50Z | 12 | 2 | null | [
"task_categories:conversational",
"task_categories:text-generation",
"task_categories:text2text-generation",
"annotations_creators:machine-generated",
"language_creators:found",
"multilinguality:monolingual",
"size_categories:100K<n<1M",
"source_datasets:extended",
"language:bn",
"license:cc-by-nc... | 2023-07-22T07:41:50Z | 2023-07-15T08:52:05.000Z | 2023-07-15T08:52:05 | ---
annotations_creators:
- machine-generated
language_creators:
- found
multilinguality:
- monolingual
size_categories:
- 100K<n<1M
source_datasets:
- extended
task_categories:
- conversational
- text-generation
- text2text-generation
language:
- bn
license:
- cc-by-nc-sa-4.0
---
# Dataset Card for `dailydialogue_bn`
## Table of Contents
- [Dataset Card for `dailydialogue_bn`](#dataset-card-for-dailydialogue_bn)
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
- [Dataset Summary](#dataset-summary)
- [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Languages](#languages)
- [Usage](#usage)
- [Dataset Structure](#dataset-structure)
- [Data Instances](#data-instances)
- [Data Fields](#data-fields)
- [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
- [Curation Rationale](#curation-rationale)
- [Source Data](#source-data)
- [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
- [Who are the source language producers?](#who-are-the-source-language-producers)
- [Annotations](#annotations)
- [Annotation process](#annotation-process)
- [Who are the annotators?](#who-are-the-annotators)
- [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
- [Social Impact of Dataset](#social-impact-of-dataset)
- [Discussion of Biases](#discussion-of-biases)
- [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
- [Dataset Curators](#dataset-curators)
- [Licensing Information](#licensing-information)
- [Citation Information](#citation-information)
- [Contributions](#contributions)
## Dataset Description
- **Repository:** [https://github.com/csebuetnlp/BanglaNLG](https://github.com/csebuetnlp/BanglaNLG)
- **Paper:** [**"BanglaNLG and BanglaT5: Benchmarks and Resources for Evaluating Low-Resource Natural Language Generation in Bangla"**](https://aclanthology.org/2023.findings-eacl.54/)
- **Point of Contact:** [Tahmid Hasan](mailto:tahmidhasan@cse.buet.ac.bd)
### Dataset Summary
This is a multi-turn dialogue dataset for Bengali, curated from the original English [DailyDialogue](https://aclanthology.org/N18-1101/) dataset using the state-of-the-art English-to-Bengali translation model introduced **[here](https://aclanthology.org/2020.emnlp-main.207/)**.
### Supported Tasks and Leaderboards
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
### Languages
* `Bengali`
### Usage
```python
from datasets import load_dataset
dataset = load_dataset("csebuetnlp/dailydialogue_bn")
```
## Dataset Structure
### Data Instances
One example from the dataset is given below in JSON format. Each element of the `dialogue` feature represents a single turn of the conversation.
```
{
"id": "130",
"dialogue":
[
"তোমার জন্মদিনের জন্য তুমি কি করবে?",
"আমি আমার বন্ধুদের সাথে পিকনিক করতে চাই, মা।",
"বাড়িতে পার্টি হলে কেমন হয়? এভাবে আমরা একসাথে হয়ে উদযাপন করতে পারি।",
"ঠিক আছে, মা। আমি আমার বন্ধুদের বাড়িতে আমন্ত্রণ জানাবো।"
]
}
```
### Data Fields
The data fields are as follows:
- `id`: a `string` feature.
- `dialogue`: a List of `string` feature.
### Data Splits
| split |count |
|----------|--------|
|`train`| 11118 |
|`validation`| 1000 |
|`test`| 1000 |
## Dataset Creation
For the training set, we translated the complete [DailyDialogue](https://aclanthology.org/N18-1101/) dataset using the English-to-Bangla translation model introduced [here](https://aclanthology.org/2020.emnlp-main.207/). Because automatic translation can introduce errors, we used the [Language-Agnostic BERT Sentence Embeddings (LaBSE)](https://arxiv.org/abs/2007.01852) of the translations and the original sentences to compute their similarity. A datapoint was accepted only if all of its constituent sentences had a similarity score over 0.7; a sketch of this filter is given below.
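A minimal sketch of the similarity filter described above, assuming the `sentence-transformers` LaBSE checkpoint (the authors may have used a different LaBSE implementation):
```python
from sentence_transformers import SentenceTransformer, util

# LaBSE maps sentences from different languages into a shared embedding space
model = SentenceTransformer("sentence-transformers/LaBSE")

def keep_datapoint(english_turns, bangla_turns, threshold=0.7):
    """Accept a dialogue only if every translated turn stays close to its source."""
    src = model.encode(english_turns, convert_to_tensor=True)
    tgt = model.encode(bangla_turns, convert_to_tensor=True)
    sims = util.cos_sim(src, tgt).diagonal()  # turn-level similarities
    return bool((sims > threshold).all())
```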
### Curation Rationale
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
### Source Data
[DailyDialogue](https://aclanthology.org/N18-1101/)
#### Initial Data Collection and Normalization
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
#### Who are the source language producers?
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
### Annotations
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
#### Annotation process
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
#### Who are the annotators?
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
### Personal and Sensitive Information
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
## Considerations for Using the Data
### Social Impact of Dataset
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
### Discussion of Biases
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
### Other Known Limitations
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
## Additional Information
### Dataset Curators
[More information needed](https://github.com/csebuetnlp/BanglaNLG)
### Licensing Information
Contents of this repository are restricted to only non-commercial research purposes under the [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/). Copyright of the dataset contents belongs to the original copyright holders.
### Citation Information
If you use the dataset, please cite the following paper:
```
@inproceedings{bhattacharjee-etal-2023-banglanlg,
title = "{B}angla{NLG} and {B}angla{T}5: Benchmarks and Resources for Evaluating Low-Resource Natural Language Generation in {B}angla",
author = "Bhattacharjee, Abhik and
Hasan, Tahmid and
Ahmad, Wasi Uddin and
Shahriyar, Rifat",
booktitle = "Findings of the Association for Computational Linguistics: EACL 2023",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-eacl.54",
pages = "726--735",
abstract = "This work presents {`}BanglaNLG,{'} a comprehensive benchmark for evaluating natural language generation (NLG) models in Bangla, a widely spoken yet low-resource language. We aggregate six challenging conditional text generation tasks under the BanglaNLG benchmark, introducing a new dataset on dialogue generation in the process. Furthermore, using a clean corpus of 27.5 GB of Bangla data, we pretrain {`}BanglaT5{'}, a sequence-to-sequence Transformer language model for Bangla. BanglaT5 achieves state-of-the-art performance in all of these tasks, outperforming several multilingual models by up to 9{\%} absolute gain and 32{\%} relative gain. We are making the new dialogue dataset and the BanglaT5 model publicly available at https://github.com/csebuetnlp/BanglaNLG in the hope of advancing future research on Bangla NLG.",
}
```
### Contributions
Thanks to [@abhik1505040](https://github.com/abhik1505040) and [@Tahmid](https://github.com/Tahmid04) for adding this dataset. | [
-0.40525349974632263,
-0.8471981883049011,
-0.04779152199625969,
0.45513519644737244,
-0.3477810323238373,
0.06908595561981201,
-0.3972528278827667,
-0.4897589087486267,
0.044120512902736664,
0.47415220737457275,
-0.7738133072853088,
-0.809761106967926,
-0.45535239577293396,
0.432430684566... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
MajdTannous/Test2 | MajdTannous | 2023-07-17T12:27:33Z | 12 | 0 | null | [
"license:other",
"region:us"
] | 2023-07-17T12:27:33Z | 2023-07-15T18:09:25.000Z | 2023-07-15T18:09:25 | ---
license: other
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
MajdTannous/Test3 | MajdTannous | 2023-10-20T17:31:10Z | 12 | 0 | null | [
"license:other",
"region:us"
] | 2023-10-20T17:31:10Z | 2023-07-17T11:53:26.000Z | 2023-07-17T11:53:26 | ---
license: other
---
| [
-0.1285335123538971,
-0.1861683875322342,
0.6529128551483154,
0.49436232447624207,
-0.19319400191307068,
0.23607441782951355,
0.36072009801864624,
0.05056373029947281,
0.5793656706809998,
0.7400146722793579,
-0.650810182094574,
-0.23784008622169495,
-0.7102247476577759,
-0.0478255338966846... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
richardr1126/spider-skeleton-context-instruct | richardr1126 | 2023-07-18T17:55:47Z | 12 | 2 | null | [
"source_datasets:spider",
"language:en",
"license:cc-by-4.0",
"text-to-sql",
"SQL",
"Spider",
"fine-tune",
"region:us"
] | 2023-07-18T17:55:47Z | 2023-07-18T17:53:25.000Z | 2023-07-18T17:53:25 | ---
language:
- en
license:
- cc-by-4.0
source_datasets:
- spider
pretty_name: Spider Skeleton Context Instruct
tags:
- text-to-sql
- SQL
- Spider
- fine-tune
dataset_info:
features:
- name: db_id
dtype: string
- name: text
dtype: string
---
# Dataset Card for Spider Skeleton Context Instruct
### Dataset Summary
Spider is a large-scale, complex, cross-domain semantic parsing and text-to-SQL dataset annotated by 11 Yale students.
The goal of the Spider challenge is to develop natural language interfaces to cross-domain databases.
This dataset was created to fine-tune LLMs in a `### Instruction:` and `### Response:` format with database context.
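A minimal sketch of inspecting that format; the `train` split name is an assumption, since the card does not declare splits:
```python
from datasets import load_dataset

ds = load_dataset("richardr1126/spider-skeleton-context-instruct", split="train")

example = ds[0]
print(example["db_id"])
print(example["text"])  # contains the "### Instruction:" / "### Response:" template
```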
### Yale Lily Spider Leaderboards
The leaderboard can be seen at https://yale-lily.github.io/spider
### Languages
The text in the dataset is in English.
### Licensing Information
The spider dataset is licensed under
the [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/legalcode)
### Citation
```
@article{yu2018spider,
title={Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-sql task},
author={Yu, Tao and Zhang, Rui and Yang, Kai and Yasunaga, Michihiro and Wang, Dongxu and Li, Zifan and Ma, James and Li, Irene and Yao, Qingning and Roman, Shanelle and others},
journal={arXiv preprint arXiv:1809.08887},
year={2018}
}
``` | [
0.08934710919857025,
-0.35434380173683167,
0.22070619463920593,
0.08206841349601746,
-0.3319830298423767,
0.20938323438167572,
0.027006588876247406,
-0.5437268018722534,
0.5653551816940308,
0.2820228040218353,
-0.6630470156669617,
-0.9492812156677246,
-0.5083998441696167,
0.509992659091949... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
jensjorisdecorte/Synthetic-ESCO-skill-sentences | jensjorisdecorte | 2023-07-25T21:40:31Z | 12 | 3 | null | [
"task_categories:text-classification",
"size_categories:100K<n<1M",
"language:en",
"license:cc",
"Skill Extraction",
"Synthetic Data",
"arxiv:2307.10778",
"region:us"
] | 2023-07-25T21:40:31Z | 2023-07-20T10:53:22.000Z | 2023-07-20T10:53:22 | ---
license: cc
task_categories:
- text-classification
language:
- en
tags:
- Skill Extraction
- Synthetic Data
pretty_name: Synthetic ESCO skill sentences
size_categories:
- 100K<n<1M
---
# Synthetic job ads for all ESCO skills
## Dataset Description
- **Homepage:** coming soon
- **Repository:** coming soon
- **Paper:** https://arxiv.org/abs/2307.10778
- **Point of Contact:** jensjoris@techwolf.ai
### Dataset Summary
This dataset contains 10 synthetically generated job ad sentences for almost all (99.5%) skills in ESCO v1.1.0.
### Languages
We use the English version of ESCO, and all generated sentences are in English.
## Dataset Structure
The dataset consists of 138,260 `(sentence, skill)` pairs.
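A minimal sketch of loading and inspecting the pairs; the `train` split name is an assumption, since the card does not declare splits:
```python
from datasets import load_dataset

ds = load_dataset("jensjorisdecorte/Synthetic-ESCO-skill-sentences", split="train")
print(len(ds))  # 138,260 (sentence, skill) pairs
print(ds[0])
```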
### Citation Information
[More Information Needed] | [
-0.016561677679419518,
-0.7072097063064575,
0.4595833718776703,
0.3664584159851074,
0.015908721834421158,
-0.09894219040870667,
0.027588864788413048,
-0.48216021060943604,
0.6902540326118469,
0.7236670255661011,
-0.6436214447021484,
-0.9962788820266724,
-0.2627415955066681,
0.4389002621173... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
AWfaw/ai-hdlcoder-dataset | AWfaw | 2023-07-27T10:46:56Z | 12 | 0 | null | [
"task_categories:text-generation",
"task_ids:language-modeling",
"size_categories:100K<n<1M",
"language:code",
"license:mit",
"region:us"
] | 2023-07-27T10:46:56Z | 2023-07-20T23:01:57.000Z | 2023-07-20T23:01:57 | ---
annotations_creators: []
language:
- code
license:
- mit
pretty_name: github-code
size_categories:
- 100K<n<1M
source_datasets: []
task_categories:
- text-generation
task_ids:
- language-modeling
---
# Dataset Card for AI-HDLCoder
## Dataset Description
The GitHub Code dataset consists of 100M code files from GitHub; this subset contains the files written in the VHDL programming language (extensions `.vhdl` and `.vhd`), totaling 1.94 GB of data. The dataset was created from the public GitHub dataset on Google BigQuery at Anhalt University of Applied Sciences.
## Considerations for Using the Data
The dataset is created for research purposes and consists of source code from a wide range of repositories. As such, it can potentially include harmful or biased code, as well as sensitive information such as passwords or usernames.
### Languages
```python
{
"VHDL": [".vhdl",".vhd" ]
}
```
## Dataset Structure
### Data Instances
```python
{
"repo_name": "sebgod/linguist",
"path": "samples/VHDL/foo.vhd",
"copies": "91",
"size": "217",
"content": "-- VHDL example file\n\nlibrary ieee;\nuse ieee.std_logic_1164.all;\n\nentity inverter is\n\tport(a : in std_logic;\n\t b : out std_logic);\nend entity;\n\narchitecture rtl of inverter is\nbegin\n\tb \u003c\u003d not a;\nend architecture;\n",
"license": "mit"
}
```
### Data Fields
|Field|Type|Description|
|---|---|---|
|content|string|content of source file|
|repo_name|string|name of the GitHub repository|
|path|string|path of file in GitHub repository|
|license|string|license of GitHub repository|
|size|int|size of source file in bytes|
### Data Splits
The dataset contains a train split only.
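A minimal sketch of streaming that split, which avoids downloading the full 1.94 GB up front:
```python
from datasets import load_dataset

# streaming returns an IterableDataset; no full download required
ds = load_dataset("AWfaw/ai-hdlcoder-dataset", split="train", streaming=True)

for row in ds.take(1):
    print(row["repo_name"], row["path"], row["license"], row["size"])
    print(row["content"][:200])
```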
### Licensing Information
```python
[
'agpl-3.0',
'artistic-2.0',
'mpl-2.0',
'cc0-1.0',
'mit',
'gpl-2.0',
'gpl-3.0',
'lgpl-3.0',
'apache-2.0',
'bsd-3-clause'
]
```
### v1.0
- Initial release of dataset
- The query was executed on 21.07.2023, 00:02:38 UTC+2 | [
-0.5522188544273376,
-0.586563229560852,
0.011863553896546364,
-0.14204373955726624,
-0.21544618904590607,
0.12193586677312851,
-0.0004485829558689147,
-0.11442138254642487,
0.13342633843421936,
0.49724283814430237,
-0.5322933793067932,
-0.6965351700782776,
-0.23910120129585266,
-0.0939110... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
emozilla/booksum-summary-analysis_llama-8192 | emozilla | 2023-07-23T18:20:24Z | 12 | 9 | null | [
"region:us"
] | 2023-07-23T18:20:24Z | 2023-07-23T18:20:04.000Z | 2023-07-23T18:20:04 | ---
dataset_info:
features:
- name: chapter
dtype: string
- name: text
dtype: string
- name: type
dtype: string
splits:
- name: train
num_bytes: 181882155.9809025
num_examples: 10201
- name: validation
num_bytes: 33836910.18621307
num_examples: 1724
- name: test
num_bytes: 25274232.87394451
num_examples: 1545
download_size: 84868415
dataset_size: 240993299.0410601
---
# Dataset Card for "booksum-summary-analysis_llama-8192"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) | [
-0.4340275824069977,
-0.013391993008553982,
0.1051766574382782,
0.14884914457798004,
-0.5061832666397095,
-0.04078435152769089,
0.42441293597221375,
0.020638564601540565,
0.9064136743545532,
0.6441218852996826,
-0.694363534450531,
-0.8582445383071899,
-0.7745604515075684,
0.039073605090379... | null | null | null | null | null | null | null | null | null | null | null | null | null | |
fedryanto/UnibQuADV2 | fedryanto | 2023-08-18T14:20:43Z | 12 | 0 | null | [
"region:us"
] | 2023-08-18T14:20:43Z | 2023-07-25T10:17:29.000Z | 2023-07-25T10:17:29 | Entry not found | [
-0.32276487350463867,
-0.22568444907665253,
0.8622263073921204,
0.43461570143699646,
-0.5282988548278809,
0.7012969255447388,
0.7915717363357544,
0.07618642598390579,
0.7746027112007141,
0.25632190704345703,
-0.7852815389633179,
-0.22573848068714142,
-0.910447895526886,
0.5715675354003906,... | null | null | null | null | null | null | null | null | null | null | null | null | null |