Vadim Alperovich committed on
Commit
e503d76
·
1 Parent(s): 63a3deb

Create toxic_comments.py

Browse files
Files changed (1) hide show
  1. toxic_comments.py +63 -0
toxic_comments.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Lint as: python3
"""Toxic-comments multi-label text classification dataset."""


import csv
import sys

import datasets
from datasets.tasks import TextClassification

# Some comment fields are very long; lift the csv module's default field-size cap.
csv.field_size_limit(sys.maxsize)

# NOTE(review): the original description here was a copy-paste leftover from the
# TREC question-classification dataset; replaced with one matching this data.
_DESCRIPTION = """\
Multi-label toxic-comment classification dataset. Each example pairs a comment
text with one or more labels drawn from: neutral, toxic, severe_toxic, obscene,
threat, insult, identity_hate.
"""

_CITATION = """"""

_TRAIN_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/toxic_comments/raw/main/train.csv"
_TEST_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/toxic_comments/raw/main/test.csv"
# NOTE(review): "validaton" is misspelled, but it presumably matches the actual
# file name in the dataset repo — confirm before changing the URL.
_VALID_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/toxic_comments/raw/main/validaton.csv"


# Index -> label-name mapping; order must match the integer ids stored in the CSVs.
CATEGORY_MAPPING = ['neutral', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
26
class NG(datasets.GeneratorBasedBuilder):
    """Toxic-comments multi-label classification dataset builder.

    The class name is kept as ``NG`` for backward compatibility; the original
    docstring ("20ng") was a copy-paste leftover from another dataset script.
    """

    def _info(self):
        """Return dataset metadata: description, features, and task template."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    # Multi-label: an example may carry several toxicity labels.
                    "label": datasets.Sequence(datasets.ClassLabel(names=CATEGORY_MAPPING)),
                }
            ),
            homepage="",
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Download the three CSV splits and bind each to the example generator."""
        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
        valid_path = dl_manager.download_and_extract(_VALID_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_path}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, example)`` pairs from a semicolon-delimited CSV file.

        Each data row is ``text;label`` where the label cell holds integer
        indices into CATEGORY_MAPPING.
        """
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(
                csv_file, quotechar='"', delimiter=";", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            _ = next(csv_reader)  # skip the header row
            for id_, row in enumerate(csv_reader):
                text, label = row
                # BUG FIX: csv yields strings, and the original code indexed the
                # list with the raw string (`CATEGORY_MAPPING[label]`), which
                # raises TypeError on every row. Convert each index to int, and
                # yield a list of names to match the Sequence(ClassLabel) feature.
                # assumes multi-label cells are comma-separated indices — TODO
                # confirm against the actual CSV files.
                names = [
                    CATEGORY_MAPPING[int(idx)]
                    for idx in label.split(",")
                    if idx.strip()
                ]
                yield id_, {"text": text, "label": names}