holylovenia committed on
Commit
a4633a6
·
1 Parent(s): 08e8dc1

Upload id_hatespeech.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. id_hatespeech.py +123 -0
id_hatespeech.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Dict, List, Tuple
3
+
4
+ import datasets
5
+ import pandas as pd
6
+
7
+ from nusacrowd.utils import schemas
8
+ from nusacrowd.utils.configs import NusantaraConfig
9
+ from nusacrowd.utils.constants import Tasks
10
+
11
# BibTeX citation for the paper that introduced this dataset.
_CITATION = """\
@inproceedings{inproceedings,
author = {Alfina, Ika and Mulia, Rio and Fanany, Mohamad Ivan and Ekanata, Yudo},
year = {2017},
month = {10},
pages = {},
title = {Hate Speech Detection in the Indonesian Language: A Dataset and Preliminary Study},
doi = {10.1109/ICACSIS.2017.8355039}
}
"""

# False: the data is fetched from a public URL, not shipped locally.
_LOCAL = False
_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
_DATASETNAME = "id_hatespeech"

_DESCRIPTION = """\
The ID Hatespeech dataset is collection of 713 tweets related to a political event, the Jakarta Governor Election 2017
designed for hate speech detection NLP task. This dataset is crawled from Twitter, and then filtered
and annotated manually. The dataset labelled into two; HS if the tweet contains hate speech and Non_HS if otherwise
"""

_HOMEPAGE = "https://www.researchgate.net/publication/320131169_Hate_Speech_Detection_in_the_Indonesian_Language_A_Dataset_and_Preliminary_Study"
_LICENSE = "Unknown"
# Single TSV file containing the full (unsplit) dataset.
_URLS = {
    _DATASETNAME: "https://raw.githubusercontent.com/ialfina/id-hatespeech-detection/master/IDHSD_RIO_unbalanced_713_2017.txt",
}
# NOTE(review): this is binary hate-speech classification; SENTIMENT_ANALYSIS is
# presumably the closest task in the project's Tasks enum — confirm against nusacrowd.
_SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]
_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"
40
+
41
+
42
class IdHatespeech(datasets.GeneratorBasedBuilder):
    """The ID Hatespeech dataset is collection of tweets related to a political event, the Jakarta Governor Election 2017
    designed for hate speech detection NLP task."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    # Two views of the same data: the raw "source" schema and the
    # project-wide "nusantara_text" classification schema.
    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="id_hatespeech_source",
            version=SOURCE_VERSION,
            description="ID Hatespeech source schema",
            schema="source",
            subset_id="id_hatespeech",
        ),
        NusantaraConfig(
            name="id_hatespeech_nusantara_text",
            version=NUSANTARA_VERSION,
            description="ID Hatespeech Nusantara schema",
            schema="nusantara_text",
            subset_id="id_hatespeech",
        ),
    ]

    DEFAULT_CONFIG_NAME = "id_hatespeech_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with features matching the selected schema.

        Raises:
            ValueError: if the config's schema is neither "source" nor
                "nusantara_text" (previously this fell through and raised a
                confusing UnboundLocalError on `features`).
        """
        if self.config.schema == "source":
            features = datasets.Features({"tweet": datasets.Value("string"), "label": datasets.Value("string")})
        elif self.config.schema == "nusantara_text":
            features = schemas.text_features(["Non_HS", "HS"])
        else:
            # Mirror the explicit guard used in _generate_examples so an
            # unknown schema fails with a clear message.
            raise ValueError(f"Invalid config: {self.config.name}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        # Dataset does not have predetermined split, putting all as TRAIN
        url = _URLS[_DATASETNAME]
        base_dir = Path(dl_manager.download_and_extract(url))
        data_files = {"train": base_dir}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_files["train"],
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples.

        The source file is a tab-separated "label<TAB>tweet" table; the row
        index serves as the example id since the data carries none.
        """
        # Dataset does not have id, using row index as id
        df = pd.read_csv(filepath, sep="\t", encoding="ISO-8859-1").reset_index()
        df.columns = ["id", "label", "tweet"]

        if self.config.schema == "source":
            for row in df.itertuples():
                ex = {
                    "tweet": row.tweet,
                    "label": row.label,
                }
                yield row.id, ex

        elif self.config.schema == "nusantara_text":
            for row in df.itertuples():
                ex = {
                    "id": str(row.id),
                    "text": row.tweet,
                    "label": row.label,
                }
                yield row.id, ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")