Datasets:

Languages:
Burmese
ArXiv:
License:
holylovenia committed on
Commit
6c241af
·
verified ·
1 Parent(s): 18e82e5

Upload gklmip_sentiment.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. gklmip_sentiment.py +147 -0
gklmip_sentiment.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ import os
18
+ from pathlib import Path
19
+ from typing import Dict, List, Tuple
20
+
21
+ import datasets
22
+
23
+ from seacrowd.utils import schemas
24
+ from seacrowd.utils.configs import SEACrowdConfig
25
+ from seacrowd.utils.constants import Licenses, Tasks
26
+
27
# BibTeX entry for the paper that introduced the dataset (ICONIP 2021).
_CITATION = """\
@InProceedings{,
author="Jiang, Shengyi
and Huang, Xiuwen
and Cai, Xiaonan
and Lin, Nankai",
title="Pre-trained Models and Evaluation Data for the Myanmar Language",
booktitle="The 28th International Conference on Neural Information Processing",
year="2021",
publisher="Springer International Publishing",
address="Cham",
}
"""

# Canonical dataset identifier; also used to build config names and subset ids.
_DATASETNAME = "gklmip_sentiment"
_DESCRIPTION = """\
The GKLMIP Product Sentiment Dataset is a Burmese dataset for sentiment analysis. \
It was created by crawling comments on an e-commerce website. The sentiment labels range \
from 1 to 5, with 1 and 2 being negative, 3 and 4 being neutral, and 5 being positive.
"""

_HOMEPAGE = "https://github.com/GKLMIP/Pretrained-Models-For-Myanmar/tree/main"
# ISO 639-3 code for Burmese.
_LANGUAGES = ["mya"]
# Upstream repository does not state a license.
_LICENSE = Licenses.UNKNOWN.value
# Data is downloadable from a public URL, not a local-only dataset.
_LOCAL = False

# Single zip archive containing the train/dev/test JSON files.
_URLS = {
    _DATASETNAME: "https://github.com/GKLMIP/Pretrained-Models-For-Myanmar/raw/main/Product%20Sentiment%20Dataset.zip",
}

_SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

# Class names passed to the SEACrowd text schema (raw 1-5 rating values).
_LABELS = [1, 2, 3, 4, 5]
64
+
65
+
66
class GklmipSentimentDataset(datasets.GeneratorBasedBuilder):
    """The GKLMIP Product Sentiment Dataset is a Burmese dataset for sentiment analysis.

    Exposes two configs: a "source" schema mirroring the raw JSON fields
    (``bpe``, ``text``, ``label``) and the SEACrowd "text" schema
    (``id``, ``text``, ``label``).
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
    SEACROWD_SCHEMA_NAME = "text"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return the DatasetInfo (features, citation, license) for the active config."""
        if self.config.schema == "source":
            # Raw fields from the upstream JSON; labels are kept as strings here.
            features = datasets.Features(
                {
                    "bpe": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.text_features(_LABELS)
        else:
            # Fail loudly instead of letting `features` be unbound below.
            raise ValueError(f"Invalid config schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download/extract the archive and return train/test/validation split generators."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "product_sentiment_dataset_train.json"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "product_sentiment_dataset_test.json"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "product_sentiment_dataset_dev.json"),
                    "split": "validation",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs from one split's JSON file.

        The file is a JSON array of objects with "bpe", "text" and "label"
        keys (NOTE(review): assumed from the access pattern below — confirm
        against the upstream archive).
        """
        # JSON is UTF-8 per spec and the text is Burmese; an explicit encoding
        # avoids UnicodeDecodeError on platforms with a non-UTF-8 default locale.
        with open(filepath, encoding="utf-8") as file:
            dataset = json.load(file)

        if self.config.schema == "source":
            for i, line in enumerate(dataset):
                yield i, {"bpe": line["bpe"], "text": line["text"], "label": line["label"]}

        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            for i, line in enumerate(dataset):
                yield i, {"id": i, "text": line["text"], "label": line["label"]}