Yaxin commited on
Commit
af3e005
·
1 Parent(s): 6e92c2a

Create new file

Browse files
Files changed (1) hide show
  1. SemEval2016Task5Raw.py +203 -0
SemEval2016Task5Raw.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
"""The Multilingual SemEval2016 Task5 Reviews Corpus"""

import datasets

# BibTeX citation for the SemEval-2016 Task 5 paper.
_CITATION = """\
@inproceedings{pontiki2016semeval,
title={Semeval-2016 task 5: Aspect based sentiment analysis},
author={Pontiki, Maria and Galanis, Dimitrios and Papageorgiou, Haris and Androutsopoulos, Ion and Manandhar, Suresh and Al-Smadi, Mohammad and Al-Ayyoub, Mahmoud and Zhao, Yanyan and Qin, Bing and De Clercq, Orph{\'e}e and others},
booktitle={International workshop on semantic evaluation},
pages={19--30},
year={2016}
}
"""

# No license text is bundled with the data; the homepage explains the terms.
_LICENSE = """\
Please click on the homepage URL for license details.
"""

_DESCRIPTION = """\
A collection of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.
"""

# Supported "<domain>_<language>" configuration names.
_CONFIG = [
    # Restaurants domain
    "restaurants_english",
    "restaurants_french",
    "restaurants_spanish",
    "restaurants_russian",
    "restaurants_dutch",
    "restaurants_turkish",

    # Hotels domain
    "hotels_arabic",

    # Consumer electronics domain
    "mobilephones_dutch",
    "mobilephones_chinese",
    "laptops_english",
    "digitalcameras_chinese"
]

_VERSION = "0.0.1"

_HOMEPAGE_URL = "https://alt.qcri.org/semeval2016/task5/index.php?id=data-and-tools/"
# Per-file download pattern; {split} is one of "train" / "trial" / "test".
_DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2016Task5Corrected/{split}/{domain}_{split}_{lang}.xml"
61
+
62
+
63
class SemEval2016Task5RawConfig(datasets.BuilderConfig):
    """BuilderConfig for SemEval2016Task5Raw.

    Args:
        _CONFIG: list of "<domain>_<language>" config names this builder
            config bundles together (a single name for per-pair configs).
        **kwargs: forwarded to ``datasets.BuilderConfig`` (name, description, ...).
    """

    def __init__(self, _CONFIG, **kwargs):
        # Fix: the original call ended with a stray trailing comma, which
        # silently wrapped the statement in a discarded one-element tuple.
        # Also use the Python 3 zero-argument super().
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.configs = _CONFIG
69
+
70
class SemEval2016Task5Raw(datasets.GeneratorBasedBuilder):
    """The Multilingual SemEval2016 ABSA Corpus"""

    # "All" bundles every domain/language pair; each remaining config
    # exposes exactly one "<domain>_<language>" pair.
    BUILDER_CONFIGS = [
        SemEval2016Task5RawConfig(
            name="All",
            _CONFIG=_CONFIG,
            description="A collection of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.",
        )
    ] + [
        SemEval2016Task5RawConfig(
            name=config,
            _CONFIG=[config],
            description=f"{config} of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis",
        )
        for config in _CONFIG
    ]

    BUILDER_CONFIG_CLASS = SemEval2016Task5RawConfig
    DEFAULT_CONFIG_NAME = "restaurants_english"

    def _info(self):
        """Declare the feature schema shared by every configuration."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value(dtype="string"),
                    # One dict per <Opinion> element; offsets are kept as strings.
                    "opinions": [
                        {
                            "category": datasets.Value(dtype="string"),
                            "from": datasets.Value(dtype="string"),
                            "polarity": datasets.Value(dtype="string"),
                            "target": datasets.Value(dtype="string"),
                            "to": datasets.Value(dtype="string"),
                        }
                    ],
                    "language": datasets.Value(dtype="string"),
                    "domain": datasets.Value(dtype="string"),
                    "reviewId": datasets.Value(dtype="string"),
                    "sentenceId": datasets.Value(dtype="string"),
                }
            ),
            supervised_keys=None,
            license=_LICENSE,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download one XML file per (domain, language) pair for each split."""
        # Parse every "<domain>_<language>" config name exactly once;
        # the original re-split each name for each of the three splits.
        pairs = [tuple(config.split("_")) for config in self.config.configs]
        domain_list = [domain for domain, _ in pairs]
        lang_list = [lang for _, lang in pairs]

        def urls_for(split):
            # The validation data is published under the split name "trial".
            return [
                _DOWNLOAD_URL.format(split=split, domain=domain, lang=lang)
                for domain, lang in pairs
            ]

        train_paths = dl_manager.download_and_extract(urls_for("train"))
        dev_paths = dl_manager.download_and_extract(urls_for("trial"))
        test_paths = dl_manager.download_and_extract(urls_for("test"))

        common = {"lang_list": lang_list, "domain_list": domain_list}
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_paths": train_paths, **common}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"file_paths": dev_paths, **common}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths, **common}),
        ]

    def _generate_examples(self, file_paths, lang_list, domain_list):
        """Yield (key, example) pairs parsed from the downloaded XML files."""
        if not (len(file_paths) == len(lang_list) == len(domain_list)):
            # An assert would vanish under `python -O`; raise explicitly instead.
            raise ValueError("file_paths, lang_list and domain_list must have equal lengths")

        row_count = 0
        # zip instead of the original index-based range(len(...)) loop.
        for file_path, domain, language in zip(file_paths, domain_list, lang_list):
            xml_dataset = SemEvalXMLDataset(file_path, language, domain)
            for example in xml_dataset.SentenceWithOpinions:
                yield row_count, example
                row_count += 1
150
+
151
+
152
# Input: path to an XML file.
# Output: a dataset; each example contains [reviewId, sentenceId, text, opinions].
# The opinions of each example form a list holding the details of every single Opinion.
155
+
156
+ from xml.dom.minidom import parse
157
+
158
class SemEvalXMLDataset():
    """Parse one SemEval-2016 Task 5 XML file into review-sentence examples.

    After construction, ``self.SentenceWithOpinions`` is a list of dicts with
    keys: text, opinions, language, domain, reviewId, sentenceId.
    """

    def __init__(self, file_name, language, domain):
        # Build SentenceWithOpinions: a list of
        # (reviewId, sentenceId, text, Opinions) examples as dicts.
        self.SentenceWithOpinions = []
        self.xml_path = file_name

        self.sentenceXmlList = parse(self.xml_path).getElementsByTagName('sentence')

        for sentenceXml in self.sentenceXmlList:
            # Sentence ids look like "<reviewId>:<index>".
            reviewId = sentenceXml.getAttribute("id").split(':')[0]
            sentenceId = sentenceXml.getAttribute("id")
            if len(sentenceXml.getElementsByTagName("text")[0].childNodes) < 1:
                # Skip sentences whose <text> element is empty (no review text).
                continue
            text = sentenceXml.getElementsByTagName("text")[0].childNodes[0].nodeValue
            Opinions = []
            # Some sentences legitimately carry no <Opinion> elements.
            for opinionXml in sentenceXml.getElementsByTagName("Opinion"):
                Opinions.append({
                    "target": opinionXml.getAttribute("target"),
                    "category": opinionXml.getAttribute("category"),
                    "polarity": opinionXml.getAttribute("polarity"),
                    "from": opinionXml.getAttribute("from"),
                    "to": opinionXml.getAttribute("to"),
                })

            # Fix: sort opinions by their numeric character offset, ascending.
            # The "from" attribute is a string, so the original lexicographic
            # sort ordered "10" before "2". Opinions with a missing/empty
            # offset sort first.
            Opinions.sort(key=lambda op: int(op["from"]) if op["from"] else -1)
            self.SentenceWithOpinions.append({
                "text": text,
                "opinions": Opinions,
                "language": language,
                "domain": domain,
                "reviewId": reviewId,
                "sentenceId": sentenceId
            })
203
+