Yaxin committed on
Commit
89b0ef1
·
1 Parent(s): ff5a8a9

Create new file

Browse files
Files changed (1) hide show
  1. SemEval2015Task12NLTK.py +276 -0
SemEval2015Task12NLTK.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """The SemEval2015 Task12 Reviews Corpus"""
17
+
18
+ import datasets
19
+
20
# BibTeX citation for the SemEval-2015 Task 12 paper.
_CITATION = """\
@inproceedings{pontiki2015semeval,
title={Semeval-2015 task 12: Aspect based sentiment analysis},
author={Pontiki, Maria and Galanis, Dimitrios and Papageorgiou, Harris and Manandhar, Suresh and Androutsopoulos, Ion},
booktitle={Proceedings of the 9th international workshop on semantic evaluation (SemEval 2015)},
pages={486--495},
year={2015}
}
"""

_LICENSE = """\
Please click on the homepage URL for license details.
"""

_DESCRIPTION = """\
A collection of SemEval2015 specifically designed to aid research in Aspect Based Sentiment Analysis.
"""

# Review domains covered by the corpus; also the per-domain config names.
_CONFIG = [
# Restaurants Domain
"restaurants",
# Consumer Electronics Domain
"laptops"
]

_VERSION = "0.1.0"

_HOMEPAGE_URL = "https://alt.qcri.org/semeval2015/task12/index.php?id=data-and-tools/"
# Raw XML per split/domain; "trial" is used as the validation split below.
_DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2015Task12Corrected/{split}/{domain}_{split}.xml"
49
+
50
+
51
class SemEval2015Task12NLTKConfig(datasets.BuilderConfig):
    """BuilderConfig for SemEval2015Task12NLTK.

    Args:
        _CONFIG: list of domain names (subset of the module-level
            ``_CONFIG``, e.g. ["restaurants"]) this configuration covers.
        **kwargs: forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, _CONFIG, **kwargs):
        # (The original line ended in a stray comma that built and
        # discarded a one-element tuple.)
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        # Domains this config will download and generate examples for.
        self.configs = _CONFIG
57
+
58
+
59
class SemEval2015Task12NLTK(datasets.GeneratorBasedBuilder):
    """The SemEval2015 Task 12 Aspect Based Sentiment Analysis corpus."""

    # One "All" config covering every domain, plus one config per domain.
    BUILDER_CONFIGS = [
        SemEval2015Task12NLTKConfig(
            name="All",
            _CONFIG=_CONFIG,
            description="A collection of SemEval2015 specifically designed to aid research in lingual Aspect Based Sentiment Analysis.",
        )
    ] + [
        SemEval2015Task12NLTKConfig(
            name=config,
            _CONFIG=[config],
            description=f"{config} of SemEval2015 specifically designed to aid research in lingual Aspect Based Sentiment Analysis",
        )
        for config in _CONFIG
    ]

    BUILDER_CONFIG_CLASS = SemEval2015Task12NLTKConfig
    DEFAULT_CONFIG_NAME = "All"

    def _info(self):
        """Return dataset metadata: feature schema, license, homepage, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {'text': datasets.Value(dtype='string'),
                 # Character-offset aspect annotations; "from"/"to" are
                 # kept as strings, as read from the XML attributes.
                 'opinions': [
                    {'category': datasets.Value(dtype='string'),
                     'from': datasets.Value(dtype='string'),
                     'polarity': datasets.Value(dtype='string'),
                     'target': datasets.Value(dtype='string'),
                     'to': datasets.Value(dtype='string')}
                 ],
                 # NLTK word tokens and the tag sequences derived from them.
                 'tokens': [datasets.Value(dtype='string')],
                 'ATESP_BIEOS_tags': [datasets.Value(dtype='string')],
                 'ATESP_BIO_tags': [datasets.Value(dtype='string')],
                 'ATE_BIEOS_tags': [datasets.Value(dtype='string')],
                 'ATE_BIO_tags': [datasets.Value(dtype='string')],

                 'domain': datasets.Value(dtype='string'),
                 'reviewId': datasets.Value(dtype='string'),
                 'sentenceId': datasets.Value(dtype='string')
                 }
            ),
            supervised_keys=None,
            license=_LICENSE,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download one XML file per configured domain for each split.

        The upstream repository names the validation split "trial".
        """
        train_urls = [_DOWNLOAD_URL.format(split="train", domain=config) for config in self.config.configs]
        dev_urls = [_DOWNLOAD_URL.format(split="trial", domain=config) for config in self.config.configs]
        test_urls = [_DOWNLOAD_URL.format(split="test", domain=config) for config in self.config.configs]

        train_paths = dl_manager.download_and_extract(train_urls)
        dev_paths = dl_manager.download_and_extract(dev_urls)
        test_paths = dl_manager.download_and_extract(test_urls)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_paths": train_paths, "domain_list": self.config.configs}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"file_paths": dev_paths, "domain_list": self.config.configs}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths, "domain_list": self.config.configs}),
        ]

    def _generate_examples(self, file_paths, domain_list):
        """Yield (key, example) pairs parsed from each downloaded XML file.

        Raises:
            ValueError: if `file_paths` and `domain_list` differ in length.
        """
        # Explicit check instead of `assert`, which is stripped under -O.
        if len(file_paths) != len(domain_list):
            raise ValueError("file_paths and domain_list must have the same length")

        row_count = 0
        for file_path, domain in zip(file_paths, domain_list):
            semEvalDataset = SemEvalXMLDataset(file_path, domain)
            for example in semEvalDataset.SentenceWithOpinions:
                yield row_count, example
                row_count += 1
137
+
138
+
139
# Input: path to an XML file
# Output: a dataset whose examples each contain [reviewId, sentenceId, text, UniOpinions]
# The opinions in each example form a list holding the details of each single opinion
142
+
143
+ from xml.dom.minidom import parse
144
+
145
class SemEvalXMLDataset():
    """Parses one SemEval ABSA XML file into a list of annotated sentences.

    After construction, ``SentenceWithOpinions`` holds one dict per
    non-empty sentence with keys text/opinions/domain/reviewId/sentenceId
    plus the token/tag fields added by ``addTokenAndLabel``.
    """

    def __init__(self, file_name, domain):
        # Build SentenceWithOpinions: a list of (reviewId, sentenceId,
        # text, Opinions) examples.

        self.SentenceWithOpinions = []
        self.xml_path = file_name

        self.sentenceXmlList = parse(self.xml_path).getElementsByTagName('sentence')

        for sentenceXml in self.sentenceXmlList:
            # Sentence ids look like "<reviewId>:<index>"; the review id
            # is the part before the colon.
            reviewId = sentenceXml.getAttribute("id").split(':')[0]
            sentenceId = sentenceXml.getAttribute("id")
            if len(sentenceXml.getElementsByTagName("text")[0].childNodes) < 1:
                # skip no reviews part
                continue
            text = sentenceXml.getElementsByTagName("text")[0].childNodes[0].nodeValue
            OpinionXmlList = sentenceXml.getElementsByTagName("Opinion")
            Opinions = []
            for opinionXml in OpinionXmlList:
                # some text maybe have no opinion
                # getAttribute returns "" for attributes that are absent
                # (e.g. laptop-domain opinions carry no target/from/to).
                target = opinionXml.getAttribute("target")
                category = opinionXml.getAttribute("category")
                polarity = opinionXml.getAttribute("polarity")
                from_ = opinionXml.getAttribute("from")
                to = opinionXml.getAttribute("to")

                opinionDict = {
                    "target": target,
                    "category": category,
                    "polarity": polarity,
                    "from": from_,
                    "to": to
                }
                Opinions.append(opinionDict)

            # NOTE(review): this sorts by the *string* offset, so "10" < "2";
            # clearOpinion() later re-sorts numerically for tagging, but the
            # stored `opinions` field keeps this lexicographic order — confirm
            # that is intended before changing it (int() would fail on the
            # empty offsets of laptop-domain opinions).
            Opinions.sort(key=lambda x: x["from"])
            # sort in ascending order
            example = {
                "text": text,
                "opinions": Opinions,
                "domain": domain,
                "reviewId": reviewId,
                "sentenceId": sentenceId
            }
            example = addTokenAndLabel(example)
            self.SentenceWithOpinions.append(example)
191
+
192
+ import nltk
193
+
194
def clearOpinion(example):
    """Filter, sort, and de-duplicate an example's opinion annotations.

    Drops opinions whose target is "NULL"/empty or whose span is empty
    (``from == to``), sorts the remainder numerically by start offset,
    and then keeps an opinion only when both of its span endpoints differ
    from those of the previously kept opinion.
    """
    # Keep only opinions with a real, non-empty target span.
    spanned = [
        op for op in example['opinions']
        if op['target'].lower() != 'null'
        and op['target'] != ''
        and op['from'] != op['to']
    ]

    # Ascending by numeric start offset (offsets are stored as strings).
    spanned.sort(key=lambda op: int(op['from']))

    # De-duplicate: skip any opinion sharing a start or end offset with
    # the last one kept.
    unique = []
    for op in spanned:
        if unique and (op['from'] == unique[-1]['from'] or op['to'] == unique[-1]['to']):
            continue
        unique.append(op)
    return unique
218
+
219
+
220
def addTokenAndLabel(example):
    """Tokenize example['text'] with NLTK and attach four tag sequences.

    Mutates `example` in place (and returns it), adding:
      tokens            -- NLTK word tokens of the full text
      ATESP_BIEOS_tags  -- BIEOS tags with polarity suffix (e.g. B-POS)
      ATESP_BIO_tags    -- same, collapsed to BIO (E- -> I-, S- -> B-)
      ATE_BIEOS_tags    -- BIEOS tags without polarity (first char only)
      ATE_BIO_tags      -- BIO tags without polarity

    Raises:
        ValueError: if an aspect span tokenizes to zero tokens.
        (The original used `assert cond, print(...)`, whose message is
        always None; a real exception message replaces it.)
    """
    tokens = []
    labels = []

    text = example['text']
    # Opinions filtered of NULL targets, numerically sorted, de-duplicated.
    UniOpinions = clearOpinion(example)
    text_begin = 0

    for aspect in UniOpinions:
        # First three letters of the polarity, e.g. POS/NEG/NEU.
        polarity = aspect['polarity'][:3].upper()

        # Tokens between the previous aspect and this one are outside
        # any aspect span.
        pre_O_tokens = nltk.word_tokenize(text[text_begin: int(aspect['from'])])
        tokens.extend(pre_O_tokens)
        labels.extend(['O'] * len(pre_O_tokens))

        BIES_tokens = nltk.word_tokenize(text[int(aspect['from']): int(aspect['to'])])
        tokens.extend(BIES_tokens)

        if len(BIES_tokens) == 0:
            raise ValueError(
                'aspect span [%s, %s) produced no tokens in sentence %r'
                % (aspect['from'], aspect['to'], example.get('sentenceId'))
            )

        if len(BIES_tokens) == 1:
            labels.append('S-' + polarity)
        else:
            # The len==2 case (B then E) falls out of the general form,
            # since the I- run is empty.
            labels.append('B-' + polarity)
            labels.extend(['I-' + polarity] * (len(BIES_tokens) - 2))
            labels.append('E-' + polarity)

        text_begin = int(aspect['to'])

    # Trailing text after the last aspect is all outside.
    pre_O_tokens = nltk.word_tokenize(text[text_begin:])
    labels.extend(['O'] * len(pre_O_tokens))
    tokens.extend(pre_O_tokens)

    example['tokens'] = tokens
    example['ATESP_BIEOS_tags'] = labels

    # Collapse BIEOS to BIO by mapping E- to I- and S- to B-.
    ATESP_BIO_labels = [label.replace('E-', 'I-').replace('S-', 'B-') for label in labels]
    example['ATESP_BIO_tags'] = ATESP_BIO_labels

    # Aspect-term-only variants: drop the polarity, keeping just the
    # leading scheme character (B/I/E/S/O).
    example['ATE_BIEOS_tags'] = [label[0] for label in labels]
    example['ATE_BIO_tags'] = [label[0] for label in ATESP_BIO_labels]

    return example