Datasets: kor_sarcasm
Tasks: Text Classification
Modalities: Text
Formats: parquet
Languages: Korean
Size: 1K - 10K
Tags: sarcasm-detection
License: mit
Convert dataset to Parquet #5
by albertvillanova (HF Staff) - opened

Files changed:
- README.md (+11, -4)
- data/test-00000-of-00001.parquet (+3, -0)
- data/train-00000-of-00001.parquet (+3, -0)
- kor_sarcasm.py (+0, -77)
README.md CHANGED
@@ -31,13 +31,20 @@ dataset_info:
           '1': sarcasm
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 1012026
     num_examples: 9000
   - name: test
-    num_bytes:
+    num_bytes: 32476
     num_examples: 301
-  download_size:
-  dataset_size:
+  download_size: 719466
+  dataset_size: 1044502
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for Korean Sarcasm Detection
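With the splits materialized as Parquet and declared under `configs`, the dataset loads through the Hub's generic Parquet loader and no longer needs a dataset script. A minimal sketch, assuming the repo id is `kor_sarcasm` (taken from the deleted script's name; the actual Hub namespace may differ):

from datasets import load_dataset

# Repo id assumed from the script name; adjust to the actual Hub namespace.
ds = load_dataset("kor_sarcasm")
print(ds)              # DatasetDict with train (9000 rows) and test (301 rows)
print(ds["train"][0])  # {'tokens': '...', 'label': 0 or 1}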
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:190ea90e6f639c7eab2436f2420b7750a4c713186f2652fbf5ef8a573ffb5fd8
+size 24311
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b72d27688188d43b0dcbedbc9cb8cd94a96643755bcebd69944c833e08734c30
+size 695155
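The two added .parquet files are stored via Git LFS, so the diff shows only pointer files (oid and size) rather than binary contents. Assuming a local clone with the LFS content pulled (git lfs pull), the shards can be read directly; the sketch below uses pandas with pyarrow, both assumed dependencies:

import pandas as pd

# Paths relative to a local checkout of the dataset repo.
train = pd.read_parquet("data/train-00000-of-00001.parquet")
test = pd.read_parquet("data/test-00000-of-00001.parquet")
print(len(train), len(test))  # 9000 301, per the README metadata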
kor_sarcasm.py DELETED
@@ -1,77 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Korean Sarcasm Detection Dataset"""
-
-
-import csv
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_DESCRIPTION = """\
-This is a dataset designed to detect sarcasm in Korean because it distorts the literal meaning of a sentence
-and is highly related to sentiment classification.
-"""
-
-_HOMEPAGE = "https://github.com/SpellOnYou/korean-sarcasm"
-
-_LICENSE = "MIT License"
-
-_TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/SpellOnYou/korean-sarcasm/master/data/jiwon/train.csv"
-_TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/SpellOnYou/korean-sarcasm/master/data/jiwon/test.csv"
-
-
-class KorSarcasm(datasets.GeneratorBasedBuilder):
-    """Korean Sarcasm Detection Dataset"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "tokens": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(names=["no_sarcasm", "sarcasm"]),
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            task_templates=[TextClassification(text_column="tokens", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-
-        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
-        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Generate Korean sarcasm examples"""
-
-        with open(filepath, encoding="utf-8") as csv_file:
-            data = csv.reader(csv_file, delimiter=",")
-            next(data, None)
-            for id_, row in enumerate(data):
-                row = row[1:3]
-                tokens, label = row
-                yield id_, {"tokens": tokens, "label": int(label)}
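The deleted loading script pulled two CSVs from GitHub and yielded the second and third columns as text and label. For reference, a stand-alone sketch of that same parsing logic without the datasets builder machinery; requests is an assumed dependency and the URL is copied from the deleted file:

import csv
import io

import requests

# URL copied from the deleted kor_sarcasm.py.
_TRAIN_URL = "https://raw.githubusercontent.com/SpellOnYou/korean-sarcasm/master/data/jiwon/train.csv"

rows = csv.reader(io.StringIO(requests.get(_TRAIN_URL).text))
next(rows, None)  # skip the header row, as the script did
examples = []
for row in rows:
    tokens, label = row[1:3]  # columns 1-2: sentence text and 0/1 label
    examples.append({"tokens": tokens, "label": int(label)})
print(len(examples))  # 9000 expected for the train split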