Yaxin committed on
Commit
387d329
·
1 Parent(s): c49e7d4

Create SemEval2016.py

Browse files
Files changed (1) hide show
  1. SemEval2016.py +205 -0
SemEval2016.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""The Multilingual SemEval2016 Task5 Reviews Corpus"""

import datasets

_CITATION = """\
@inproceedings{pontiki2016semeval,
title={Semeval-2016 task 5: Aspect based sentiment analysis},
author={Pontiki, Maria and Galanis, Dimitrios and Papageorgiou, Haris and Androutsopoulos, Ion and Manandhar, Suresh and Al-Smadi, Mohammad and Al-Ayyoub, Mahmoud and Zhao, Yanyan and Qin, Bing and De Clercq, Orph{\'e}e and others},
booktitle={International workshop on semantic evaluation},
pages={19--30},
year={2016}
}
"""

_LICENSE = """\
Please click on the homepage URL for license details.
"""

_DESCRIPTION = """\
A collection of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.
"""

# Config names follow the "<domain>_<language>" pattern; both halves are
# substituted into _DOWNLOAD_URL below.
_CONFIG = [
    # Restaurants domain
    "restaurants_english",
    "restaurants_french",
    "restaurants_spanish",
    "restaurants_russian",
    "restaurants_dutch",
    "restaurants_turkish",

    # Hotels domain
    "hotels_arabic",

    # Consumer electronics domain
    "mobilephones_dutch",
    "mobilephones_chinese",
    "laptops_english",
    "digitalcameras_chinese",
]

_VERSION = "0.0.1"

_HOMEPAGE_URL = "https://alt.qcri.org/semeval2016/task5/index.php?id=data-and-tools/"
# {split} is one of train/trial/test; {domain} and {lang} come from _CONFIG.
_DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2016Task5/{split}/{domain}_{split}_{lang}.xml"
61
+
62
+
63
class SemEval2016MultiConfig(datasets.BuilderConfig):
    """BuilderConfig for SemEval2016Multi.

    Args:
        _CONFIG: list of "<domain>_<language>" config names this builder
            config covers. (The non-conventional parameter name is kept so
            existing keyword callers remain compatible.)
        **kwargs: forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, _CONFIG, **kwargs):
        # Bug fix: the original ended this call with a stray trailing comma,
        # turning the statement into a one-element tuple expression.
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.configs = _CONFIG
69
+
70
+
71
class SemEval2016Multi(datasets.GeneratorBasedBuilder):
    """The Multilingual SemEval2016 Task5 ABSA Reviews Corpus."""
    # Bug fix: the original docstring said "The Multilingual Amazon Reviews
    # Corpus" — a copy-paste error from another dataset script.

    # One "All" config covering every domain/language pair, plus one config
    # per individual "<domain>_<language>" entry.
    BUILDER_CONFIGS = [
        SemEval2016MultiConfig(
            name="All",
            _CONFIG=_CONFIG,
            description="A collection of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.",
        )
    ] + [
        SemEval2016MultiConfig(
            name=config,
            _CONFIG=[config],
            description=f"{config} of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis",
        )
        for config in _CONFIG
    ]

    BUILDER_CONFIG_CLASS = SemEval2016MultiConfig
    DEFAULT_CONFIG_NAME = "All"

    def _info(self):
        """Describe the features of every example produced by this builder."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value(dtype="string"),
                    # Offsets ("from"/"to") are kept as strings, matching the
                    # raw XML attribute values.
                    "opinions": [
                        {
                            "category": datasets.Value(dtype="string"),
                            "from": datasets.Value(dtype="string"),
                            "polarity": datasets.Value(dtype="string"),
                            "target": datasets.Value(dtype="string"),
                            "to": datasets.Value(dtype="string"),
                        }
                    ],
                    "language": datasets.Value(dtype="string"),
                    "domain": datasets.Value(dtype="string"),
                    "reviewId": datasets.Value(dtype="string"),
                    "sentenceId": datasets.Value(dtype="string"),
                }
            ),
            supervised_keys=None,
            license=_LICENSE,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download one XML file per (split, domain, language) and set up splits.

        The validation split is served from the "trial" files of the shared task.
        """
        domain_list = []
        lang_list = []
        for config in self.config.configs:
            # Each config name is "<domain>_<language>"; split once and reuse
            # instead of re-splitting per URL as the original did.
            domain, lang = config.split("_")
            domain_list.append(domain)
            lang_list.append(lang)

        pairs = list(zip(domain_list, lang_list))
        train_urls = [_DOWNLOAD_URL.format(split="train", domain=d, lang=l) for d, l in pairs]
        dev_urls = [_DOWNLOAD_URL.format(split="trial", domain=d, lang=l) for d, l in pairs]
        test_urls = [_DOWNLOAD_URL.format(split="test", domain=d, lang=l) for d, l in pairs]

        train_paths = dl_manager.download_and_extract(train_urls)
        dev_paths = dl_manager.download_and_extract(dev_urls)
        test_paths = dl_manager.download_and_extract(test_urls)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_paths": train_paths, "lang_list": lang_list, "domain_list": domain_list}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"file_paths": dev_paths, "lang_list": lang_list, "domain_list": domain_list}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths, "lang_list": lang_list, "domain_list": domain_list}),
        ]

    def _generate_examples(self, file_paths, lang_list, domain_list):
        """Yield (row_id, example) pairs parsed from the downloaded XML files.

        Raises:
            ValueError: if the three parallel lists have mismatched lengths.
        """
        # Robustness fix: raise explicitly instead of `assert`, which is
        # stripped when Python runs with -O.
        if not (len(file_paths) == len(lang_list) == len(domain_list)):
            raise ValueError("file_paths, lang_list and domain_list must have equal lengths")

        row_count = 0
        for file_path, domain, language in zip(file_paths, domain_list, lang_list):
            sem_eval_dataset = SemEvalXMLDataset(file_path, language, domain)
            for example in sem_eval_dataset.SentenceWithOpinions:
                yield row_count, example
                row_count += 1
151
+
152
+
153
 + # Input: the file path of an XML file.
154
 + # Output: a dataset; each example contains [reviewId, sentenceId, text, opinions].
155
 + # The opinions field of each example is a list with the details of each single Opinion.
156
+
157
+ from xml.dom.minidom import parse
158
+
159
class SemEvalXMLDataset():
    """Parse one SemEval-2016 Task 5 XML file into example dicts.

    After construction, ``self.SentenceWithOpinions`` holds one dict per
    non-empty ``<sentence>`` element with keys: text, opinions, language,
    domain, reviewId, sentenceId. All opinion attribute values are kept
    as raw strings, exactly as they appear in the XML.
    """

    def __init__(self, file_name, language, domain):
        """Read ``file_name`` and populate ``self.SentenceWithOpinions``.

        Args:
            file_name: path to the SemEval XML file.
            language: language label copied into every example.
            domain: domain label copied into every example.
        """
        self.SentenceWithOpinions = []
        self.xml_path = file_name

        self.sentenceXmlList = parse(self.xml_path).getElementsByTagName("sentence")

        def _start_offset(opinion):
            # Numeric sort key for the "from" attribute; missing or
            # non-numeric offsets (getAttribute returns "" when absent)
            # sort first.
            try:
                return int(opinion["from"])
            except ValueError:
                return -1

        for sentenceXml in self.sentenceXmlList:
            # Sentence ids look like "<reviewId>:<index>".
            sentenceId = sentenceXml.getAttribute("id")
            reviewId = sentenceId.split(":")[0]
            if len(sentenceXml.getElementsByTagName("text")[0].childNodes) < 1:
                # Skip sentences whose <text> element is empty (no review text).
                continue
            text = sentenceXml.getElementsByTagName("text")[0].childNodes[0].nodeValue

            # Some sentences legitimately carry no <Opinion> elements.
            Opinions = []
            for opinionXml in sentenceXml.getElementsByTagName("Opinion"):
                Opinions.append(
                    {
                        "target": opinionXml.getAttribute("target"),
                        "category": opinionXml.getAttribute("category"),
                        "polarity": opinionXml.getAttribute("polarity"),
                        "from": opinionXml.getAttribute("from"),
                        "to": opinionXml.getAttribute("to"),
                    }
                )

            # Bug fix: sort opinions by their numeric character offset
            # (ascending). The original sorted on the raw string, so e.g.
            # "10" ordered before "2".
            Opinions.sort(key=_start_offset)

            self.SentenceWithOpinions.append(
                {
                    "text": text,
                    "opinions": Opinions,
                    "language": language,
                    "domain": domain,
                    "reviewId": reviewId,
                    "sentenceId": sentenceId,
                }
            )