dp011 commited on
Commit
002caeb
·
verified ·
1 Parent(s): e475779

Upload hatexplain.py

Browse files
Files changed (1) hide show
  1. hatexplain.py +126 -0
hatexplain.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Hatexplain: A Benchmark Dataset for Explainable Hate Speech Detection"""
16
+
17
+
18
+ import json
19
+
20
+ import datasets
21
+
22
+
23
# BibTeX entry for the HateXplain paper (arXiv:2012.10289).
_CITATION = """\
@misc{mathew2020hatexplain,
title={HateXplain: A Benchmark Dataset for Explainable Hate Speech Detection},
author={Binny Mathew and Punyajoy Saha and Seid Muhie Yimam and Chris Biemann and Pawan Goyal and Animesh Mukherjee},
year={2020},
eprint={2012.10289},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""

# You can copy an official description
# Human-readable summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
Hatexplain is the first benchmark hate speech dataset covering multiple aspects of the issue. \
Each post in the dataset is annotated from three different perspectives: the basic, commonly used 3-class classification \
(i.e., hate, offensive or normal), the target community (i.e., the community that has been the victim of \
hate speech/offensive speech in the post), and the rationales, i.e., the portions of the post on which their labelling \
decision (as hate, offensive or normal) is based.
"""

# Project homepage; intentionally left empty by the original author.
_HOMEPAGE = ""

# SPDX-style license identifier: Creative Commons Attribution 4.0.
_LICENSE = "cc-by-4.0"

# Base URL for raw files in the upstream GitHub repository.
_URL = "https://raw.githubusercontent.com/hate-alert/HateXplain/master/Data/"
# Files the builder downloads: the full annotated dataset and the
# train/val/test post-id split lists.
_URLS = {
    "dataset": _URL + "dataset.json",
    "post_id_divisions": _URL + "post_id_divisions.json",
}
52
+
53
+
54
+ class HatexplainConfig(datasets.BuilderConfig):
55
+ """BuilderConfig for Hatexplain."""
56
+
57
+ def __init__(self, **kwargs):
58
+ """BuilderConfig for Hatexplain.
59
+ Args:
60
+ **kwargs: keyword arguments forwarded to super.
61
+ """
62
+ super(HatexplainConfig, self).__init__(**kwargs)
63
+
64
+
65
+ class Hatexplain(datasets.GeneratorBasedBuilder):
66
+ """Hatexplain: A Benchmark Dataset for Explainable Hate Speech Detection"""
67
+
68
+ BUILDER_CONFIGS = [
69
+ HatexplainConfig(
70
+ name="plain_text",
71
+ version=datasets.Version("1.0.0", ""),
72
+ description="Plain text",
73
+ ),
74
+ ]
75
+
76
+ def _info(self):
77
+ return datasets.DatasetInfo(
78
+ description=_DESCRIPTION,
79
+ features=datasets.Features(
80
+ {
81
+ "id": datasets.Value("string"),
82
+ "annotators": datasets.features.Sequence(
83
+ {
84
+ "label": datasets.ClassLabel(names=["hatespeech", "normal", "offensive"]),
85
+ "annotator_id": datasets.Value("int32"),
86
+ "target": datasets.Sequence(datasets.Value("string")),
87
+ }
88
+ ),
89
+ "rationales": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("int32"))),
90
+ "post_tokens": datasets.features.Sequence(datasets.Value("string")),
91
+ }
92
+ ),
93
+ supervised_keys=None,
94
+ homepage="",
95
+ citation=_CITATION,
96
+ license=_LICENSE,
97
+ )
98
+
99
+ def _split_generators(self, dl_manager):
100
+ downloaded_files = dl_manager.download_and_extract(_URLS)
101
+
102
+ return [
103
+ datasets.SplitGenerator(
104
+ name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files, "split": "train"}
105
+ ),
106
+ datasets.SplitGenerator(
107
+ name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files, "split": "val"}
108
+ ),
109
+ datasets.SplitGenerator(
110
+ name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files, "split": "test"}
111
+ ),
112
+ ]
113
+
114
+ def _generate_examples(self, filepath, split):
115
+ """This function returns the examples depending on split"""
116
+
117
+ with open(filepath["post_id_divisions"], encoding="utf-8") as f:
118
+ post_id_divisions = json.load(f)
119
+ with open(filepath["dataset"], encoding="utf-8") as f:
120
+ dataset = json.load(f)
121
+
122
+ for id_, tweet_id in enumerate(post_id_divisions[split]):
123
+ info = dataset[tweet_id]
124
+ annotators, rationales, post_tokens = info["annotators"], info["rationales"], info["post_tokens"]
125
+
126
+ yield id_, {"id": tweet_id, "annotators": annotators, "rationales": rationales, "post_tokens": post_tokens}