File size: 3,270 Bytes
57baf84
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9188868
57baf84
 
 
bb2464c
 
 
 
 
cc880bb
bb2464c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cc880bb
 
bb2464c
 
 
 
 
 
 
 
 
 
 
 
 
57baf84
 
 
 
 
 
 
 
 
 
cc880bb
57baf84
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
233492b
57baf84
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
# Lint as: python3
"""QC question classification dataset."""


import csv

import datasets
from datasets.tasks import TextClassification


_DESCRIPTION = """\
This data collection contains all the data used in our learning question classification experiments(see [1]), which has question class definitions, the training and testing question sets, examples of preprocessing the questions, feature definition scripts and examples of semantically related word features. 
This work has been done by Xin Li and Dan Roth and supported by [2].
"""

_CITATION = """"""

_TRAIN_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/QC/raw/main/train.csv"
_TEST_DOWNLOAD_URL = "https://huggingface.co/datasets/vmalperovich/QC/raw/main/test.csv"


# Fine-grained QC label names, in label-id order (index == class id).
_CATEGORY_NAMES = (
    "ENTY_cremat",
    "DESC_manner",
    "ENTY_animal",
    "ABBR_exp",
    "HUM_ind",
    "HUM_gr",
    "HUM_title",
    "DESC_def",
    "NUM_date",
    "DESC_reason",
    "ENTY_event",
    "LOC_state",
    "DESC_desc",
    "NUM_count",
    "ENTY_other",
    "ENTY_letter",
    "LOC_other",
    "ENTY_religion",
    "ENTY_food",
    "LOC_country",
    "ENTY_color",
    "ENTY_termeq",
    "LOC_city",
    "ENTY_body",
    "ENTY_dismed",
    "LOC_mount",
    "NUM_money",
    "ENTY_product",
    "NUM_period",
    "ENTY_substance",
    "ENTY_sport",
    "ENTY_plant",
    "ENTY_techmeth",
    "NUM_volsize",
    "HUM_desc",
    "ENTY_instru",
    "ABBR_abb",
    "NUM_other",
    "NUM_speed",
    "ENTY_word",
    "ENTY_lang",
    "NUM_perc",
    "NUM_code",
    "NUM_dist",
    "NUM_temp",
    "ENTY_symbol",
    "NUM_ord",
    "ENTY_veh",
    "NUM_weight",
    "ENTY_currency",
)

# Map each label name to its integer class id (0..49).
CATEGORY_MAPPING = {name: idx for idx, name in enumerate(_CATEGORY_NAMES)}

class AGNews(datasets.GeneratorBasedBuilder):
    """QC (TREC) question classification dataset builder.

    NOTE(review): the class name ``AGNews`` and the original docstrings were
    copied from an AG News template; the data here is the QC question
    classification set. The class name is kept unchanged for backward
    compatibility with any code that imports it, but the docstrings now
    describe the actual dataset.
    """

    def _info(self):
        """Return dataset metadata: features, homepage, citation, tasks."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    # Class names come from CATEGORY_MAPPING in insertion
                    # order, so ClassLabel ids match the mapping's values.
                    "label": datasets.features.ClassLabel(names=list(CATEGORY_MAPPING.keys())),
                }
            ),
            homepage="https://cogcomp.seas.upenn.edu/Data/QA/QC/",
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Download the train/test CSVs and declare the two splits."""
        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
        test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, {"text", "label"})`` examples from a QC CSV file.

        Each row is expected to be ``text,label`` with a header row first;
        the string label is converted to its integer class id via
        CATEGORY_MAPPING (a KeyError here signals an unknown label in the
        data, which should not be silently skipped).
        """
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(
                csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            next(csv_reader)  # skip the header row
            for id_, row in enumerate(csv_reader):
                text, label = row
                yield id_, {"text": text, "label": CATEGORY_MAPPING[label]}