Yaxin committed on
Commit
b42e433
·
1 Parent(s): 037435b

Create new file

Browse files
Files changed (1) hide show
  1. SemEval2014Task4NLTK.py +283 -0
SemEval2014Task4NLTK.py ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """The lingual SemEval2014 Task5 Reviews Corpus"""
17
+
18
+ import datasets
19
+
20
+ _CITATION = """\
21
+ @article{2014SemEval,
22
+ title={SemEval-2014 Task 4: Aspect Based Sentiment Analysis},
23
+ author={ Pontiki, M. and D Galanis and Pavlopoulos, J. and Papageorgiou, H. and Manandhar, S. },
24
+ journal={Proceedings of International Workshop on Semantic Evaluation at},
25
+ year={2014},
26
+ }
27
+ """
28
+
29
+ _LICENSE = """\
30
+ Please click on the homepage URL for license details.
31
+ """
32
+
33
+ _DESCRIPTION = """\
34
+ A collection of SemEval2014 specifically designed to aid research in Aspect Based Sentiment Analysis.
35
+ """
36
+
37
+ _CONFIG = [
38
+
39
+ # restaurants domain
40
+ "restaurants",
41
+ # laptops domain
42
+ "laptops",
43
+ ]
44
+
45
+ _VERSION = "0.0.1"
46
+
47
+ _HOMEPAGE_URL = "https://alt.qcri.org/semeval2014/task4/index.php?id=data-and-tools"
48
+ _DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2014Task4/{split}/{domain}_{split}.xml"
49
+
50
+
51
class SemEval2014Task4NLTKConfig(datasets.BuilderConfig):
    """BuilderConfig for SemEval2014Task4NLTK.

    Args:
        _CONFIG: list of domain names (e.g. ["restaurants", "laptops"]) that
            this configuration covers; stored on ``self.configs``.
        **kwargs: forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, _CONFIG, **kwargs):
        # Fix: the original line ended with a stray trailing comma, which
        # built and immediately discarded a 1-tuple on every instantiation.
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.configs = _CONFIG
57
+
58
+
59
class SemEval2014Task4NLTK(datasets.GeneratorBasedBuilder):
    """SemEval-2014 Task 4 Aspect Based Sentiment Analysis dataset builder.

    (The original docstring said "Amazon Reviews Corpus" — a copy-paste error.)
    One configuration per domain in _CONFIG, plus an "All" configuration that
    concatenates every domain.
    """

    BUILDER_CONFIGS = [
        SemEval2014Task4NLTKConfig(
            name="All",
            _CONFIG=_CONFIG,
            description="A collection of SemEval2014 specifically designed to aid research in lingual Aspect Based Sentiment Analysis.",
        )
    ] + [
        SemEval2014Task4NLTKConfig(
            name=config,
            _CONFIG=[config],
            description=f"{config} of SemEval2014 specifically designed to aid research in Aspect Based Sentiment Analysis",
        )
        for config in _CONFIG
    ]

    BUILDER_CONFIG_CLASS = SemEval2014Task4NLTKConfig
    DEFAULT_CONFIG_NAME = "All"

    def _info(self):
        """Declare the feature schema shared by every configuration."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    'text': datasets.Value(dtype='string'),
                    # Character-offset spans; 'from'/'to' are kept as strings,
                    # mirroring the raw XML attributes.
                    'aspectTerms': [
                        {
                            'from': datasets.Value(dtype='string'),
                            'polarity': datasets.Value(dtype='string'),
                            'term': datasets.Value(dtype='string'),
                            'to': datasets.Value(dtype='string'),
                        }
                    ],
                    'tokens': [datasets.Value(dtype='string')],
                    # Tag sequences produced by addTokenAndLabel (one tag per token).
                    'ATESP_BIEOS_tags': [datasets.Value(dtype='string')],
                    'ATESP_BIO_tags': [datasets.Value(dtype='string')],
                    'ATE_BIEOS_tags': [datasets.Value(dtype='string')],
                    'ATE_BIO_tags': [datasets.Value(dtype='string')],
                    'aspectCategories': [
                        {
                            'category': datasets.Value(dtype='string'),
                            'polarity': datasets.Value(dtype='string'),
                        }
                    ],
                    'domain': datasets.Value(dtype='string'),
                    'sentenceId': datasets.Value(dtype='string'),
                }
            ),
            supervised_keys=None,
            license=_LICENSE,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download one XML file per selected domain for each split.

        The validation split maps to SemEval's official "trial" data.
        """
        domains = self.config.configs
        train_urls = [_DOWNLOAD_URL.format(split="train", domain=d) for d in domains]
        dev_urls = [_DOWNLOAD_URL.format(split="trial", domain=d) for d in domains]
        test_urls = [_DOWNLOAD_URL.format(split="test", domain=d) for d in domains]

        train_paths = dl_manager.download_and_extract(train_urls)
        dev_paths = dl_manager.download_and_extract(dev_urls)
        test_paths = dl_manager.download_and_extract(test_urls)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_paths": train_paths, "domain_list": domains}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"file_paths": dev_paths, "domain_list": domains}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths, "domain_list": domains}),
        ]

    def _generate_examples(self, file_paths, domain_list):
        """Yield (key, example) pairs from each downloaded XML file."""
        if len(file_paths) != len(domain_list):
            # Was a bare `assert`, which is stripped under `python -O`.
            raise ValueError("file_paths and domain_list must have equal length")

        row_count = 0
        for file_path, domain in zip(file_paths, domain_list):
            sem_eval_dataset = SemEvalXMLDataset(file_path, domain)
            for example in sem_eval_dataset.SentenceWithOpinions:
                yield row_count, example
                row_count += 1
138
+
139
+ from xml.dom.minidom import parse
140
+
141
class SemEvalXMLDataset():
    """Parse one SemEval-2014 Task 4 XML file into labelled sentence dicts.

    After construction, ``SentenceWithOpinions`` holds one dict per
    ``<sentence>`` element with keys text, aspectTerms, aspectCategories,
    domain and sentenceId, plus the token/tag fields added by
    ``addTokenAndLabel``.
    """

    def __init__(self, file_name, domain):
        self.SentenceWithOpinions = []
        self.xml_path = file_name
        self.sentenceXmlList = parse(self.xml_path).getElementsByTagName('sentence')

        for sentence_node in self.sentenceXmlList:
            sentence_id = sentence_node.getAttribute("id")

            text_element = sentence_node.getElementsByTagName("text")[0]
            if len(text_element.childNodes) < 1:
                # Skip sentences whose <text> element holds no review text.
                continue
            review_text = text_element.childNodes[0].nodeValue

            # A sentence may legitimately carry zero aspect terms.
            aspect_terms = [
                {
                    "term": term_node.getAttribute("term"),
                    "polarity": term_node.getAttribute("polarity"),
                    "from": term_node.getAttribute("from"),
                    "to": term_node.getAttribute("to"),
                }
                for term_node in sentence_node.getElementsByTagName("aspectTerm")
            ]
            # Order spans by their start offset, ascending.
            aspect_terms.sort(key=lambda span: int(span["from"]))

            aspect_categories = [
                {
                    "category": category_node.getAttribute("category"),
                    "polarity": category_node.getAttribute("polarity"),
                }
                for category_node in sentence_node.getElementsByTagName("aspectCategory")
            ]

            example = {
                "text": review_text,
                "aspectTerms": aspect_terms,
                "aspectCategories": aspect_categories,
                "domain": domain,
                "sentenceId": sentence_id,
            }
            self.SentenceWithOpinions.append(addTokenAndLabel(example))
197
+
198
+ import nltk
199
+
200
def clearOpinion(example):
    """Return example's aspect terms with NULL targets and duplicate spans removed.

    An opinion is dropped when its target is the literal 'null' (any case),
    empty, or a zero-width span. Survivors are sorted by start offset; of two
    consecutive opinions, the later one is kept only if BOTH its offsets
    differ from the previous kept opinion's.
    """
    kept = []
    for opinion in example['aspectTerms']:
        target = opinion['term']
        # Discard opinions without a real target span.
        if target.lower() == 'null' or target == '' or opinion['from'] == opinion['to']:
            continue
        kept.append(opinion)

    # Sort by span start (ascending), then de-duplicate.
    kept.sort(key=lambda opinion: int(opinion['from']))
    unique = []
    for opinion in kept:
        if not unique:
            unique.append(opinion)
        elif opinion['from'] != unique[-1]['from'] and opinion['to'] != unique[-1]['to']:
            unique.append(opinion)
    return unique
224
+
225
+
226
def addTokenAndLabel(example):
    """Tokenize example['text'] with NLTK and attach ABSA tag sequences.

    Adds four parallel tag lists to ``example`` (one tag per token):
      - 'ATESP_BIEOS_tags': BIEOS span tags with a polarity suffix ('B-POS', ...).
      - 'ATESP_BIO_tags': the same tags collapsed to the BIO scheme.
      - 'ATE_BIEOS_tags': BIEOS tags with polarity stripped (extraction only).
      - 'ATE_BIO_tags': BIO tags with polarity stripped.

    Assumes example['aspectTerms'] is sorted by 'from' with non-overlapping
    spans (as produced by SemEvalXMLDataset). Returns the mutated example.
    """
    tokens = []
    labels = []

    text = example['text']
    # Aspect terms are used as-is; clearOpinion offers an optional
    # NULL/duplicate filter that is deliberately not applied here.
    aspects = example['aspectTerms']
    text_begin = 0

    for aspect in aspects:
        # 'positive' -> 'POS', 'negative' -> 'NEG', 'neutral' -> 'NEU', 'conflict' -> 'CON'
        polarity = aspect['polarity'][:3].upper()

        # Tokens before the aspect span lie outside any aspect: tag 'O'.
        outside_tokens = nltk.word_tokenize(text[text_begin: int(aspect['from'])])
        tokens.extend(outside_tokens)
        labels.extend(['O'] * len(outside_tokens))

        span_tokens = nltk.word_tokenize(text[int(aspect['from']): int(aspect['to'])])
        tokens.extend(span_tokens)
        # Fix: the original wrote `assert cond, print(...)`, whose message
        # expression is None (print's return value); use a plain string.
        assert len(span_tokens) > 0, 'error in BIES_tokens length'

        if len(span_tokens) == 1:
            labels.append('S-' + polarity)
        else:
            # Two tokens yield B/E; longer spans insert I tags in between
            # (the original's separate len==2 branch was the same case).
            labels.append('B-' + polarity)
            labels.extend(['I-' + polarity] * (len(span_tokens) - 2))
            labels.append('E-' + polarity)

        text_begin = int(aspect['to'])

    # Remainder of the sentence after the last aspect span.
    tail_tokens = nltk.word_tokenize(text[text_begin:])
    tokens.extend(tail_tokens)
    labels.extend(['O'] * len(tail_tokens))

    example['tokens'] = tokens
    example['ATESP_BIEOS_tags'] = labels

    # BIEOS -> BIO: E- becomes I-, S- becomes B-.
    atesp_bio = [label.replace('E-', 'I-').replace('S-', 'B-') for label in labels]
    example['ATESP_BIO_tags'] = atesp_bio

    # Strip polarity: keep only the scheme letter (B/I/E/O/S).
    example['ATE_BIEOS_tags'] = [label[0] for label in labels]
    example['ATE_BIO_tags'] = [label[0] for label in atesp_bio]

    return example