Yaxin committed on
Commit
c0b08c3
·
1 Parent(s): b084b7c

Update SemEval2016.py

Browse files
Files changed (1) hide show
  1. SemEval2016.py +104 -15
SemEval2016.py CHANGED
@@ -54,31 +54,31 @@ _CONFIG = [
54
  "digitalcameras_chinese"
55
  ]
56
 
57
- _VERSION = "0.0.1"
58
 
59
  _HOMEPAGE_URL = "https://alt.qcri.org/semeval2016/task5/index.php?id=data-and-tools/"
60
  _DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2016Task5Corrected/{split}/{domain}_{split}_{lang}.xml"
61
 
62
 
63
- class SemEval2016MultiConfig(datasets.BuilderConfig):
64
- """BuilderConfig for SemEval2016MultiConfig."""
65
 
66
  def __init__(self, _CONFIG, **kwargs):
67
- super(SemEval2016MultiConfig, self).__init__(version=datasets.Version(_VERSION, ""), **kwargs),
68
  self.configs = _CONFIG
69
 
70
 
71
- class SemEval2016Multi(datasets.GeneratorBasedBuilder):
72
- """The Multilingual Amazon Reviews Corpus"""
73
 
74
  BUILDER_CONFIGS = [
75
- SemEval2016MultiConfig(
76
  name="All",
77
  _CONFIG=_CONFIG,
78
  description="A collection of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.",
79
  )
80
  ] + [
81
- SemEval2016MultiConfig(
82
  name=config,
83
  _CONFIG=[config],
84
  description=f"{config} of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis",
@@ -86,7 +86,7 @@ class SemEval2016Multi(datasets.GeneratorBasedBuilder):
86
  for config in _CONFIG
87
  ]
88
 
89
- BUILDER_CONFIG_CLASS = SemEval2016MultiConfig
90
  DEFAULT_CONFIG_NAME = "All"
91
 
92
  def _info(self):
@@ -101,7 +101,12 @@ class SemEval2016Multi(datasets.GeneratorBasedBuilder):
101
  'target': datasets.Value(dtype='string'),
102
  'to': datasets.Value(dtype='string')}
103
  ],
104
- 'language': datasets.Value(dtype='string'),
 
 
 
 
 
105
  'domain': datasets.Value(dtype='string'),
106
  'reviewId': datasets.Value(dtype='string'),
107
  'sentenceId': datasets.Value(dtype='string')
@@ -193,13 +198,97 @@ class SemEvalXMLDataset():
193
 
194
  Opinions.sort(key=lambda x: x["from"])
195
  # 从小到大排序
196
-
197
- self.SentenceWithOpinions.append({
198
  "text": text,
199
  "opinions": Opinions,
200
- "language": language,
201
  "domain": domain,
202
  "reviewId": reviewId,
203
  "sentenceId": sentenceId
204
- }
205
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  "digitalcameras_chinese"
55
  ]
56
 
57
+ _VERSION = "0.1.0"
58
 
59
  _HOMEPAGE_URL = "https://alt.qcri.org/semeval2016/task5/index.php?id=data-and-tools/"
60
  _DOWNLOAD_URL = "https://raw.githubusercontent.com/YaxinCui/ABSADataset/main/SemEval2016Task5Corrected/{split}/{domain}_{split}_{lang}.xml"
61
 
62
 
63
class SemEval2016Config(datasets.BuilderConfig):
    """BuilderConfig for the SemEval2016 ABSA dataset.

    Args:
        _CONFIG: list of ``"<domain>_<language>"`` config names this
            builder should load (a single name, or all of them for "All").
        **kwargs: forwarded unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, _CONFIG, **kwargs):
        # _VERSION is the module-level version constant defined above.
        # Fixed: removed a stray trailing comma that built and discarded
        # a 1-tuple around the super() call; use zero-arg super().
        super().__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.configs = _CONFIG
69
 
70
 
71
+ class SemEval2016(datasets.GeneratorBasedBuilder):
72
+ """The Multilingual SemEval2016 ABSA Corpus"""
73
 
74
  BUILDER_CONFIGS = [
75
+ SemEval2016Config(
76
  name="All",
77
  _CONFIG=_CONFIG,
78
  description="A collection of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis.",
79
  )
80
  ] + [
81
+ SemEval2016Config(
82
  name=config,
83
  _CONFIG=[config],
84
  description=f"{config} of SemEval2016 specifically designed to aid research in multilingual Aspect Based Sentiment Analysis",
 
86
  for config in _CONFIG
87
  ]
88
 
89
+ BUILDER_CONFIG_CLASS = SemEval2016Config
90
  DEFAULT_CONFIG_NAME = "All"
91
 
92
  def _info(self):
 
101
  'target': datasets.Value(dtype='string'),
102
  'to': datasets.Value(dtype='string')}
103
  ],
104
+ 'tokens': [datasets.Value(dtype='string')],
105
+ 'ATESP_BIEOS_tags': [datasets.Value(dtype='string')],
106
+ 'ATESP_BIO_tags': [datasets.Value(dtype='string')],
107
+ 'ATE_BIEOS_tags': [datasets.Value(dtype='string')],
108
+ 'ATE_BIO_tags': [datasets.Value(dtype='string')],
109
+
110
  'domain': datasets.Value(dtype='string'),
111
  'reviewId': datasets.Value(dtype='string'),
112
  'sentenceId': datasets.Value(dtype='string')
 
198
 
199
  Opinions.sort(key=lambda x: x["from"])
200
  # 从小到大排序
201
+ example = {
 
202
  "text": text,
203
  "opinions": Opinions,
 
204
  "domain": domain,
205
  "reviewId": reviewId,
206
  "sentenceId": sentenceId
207
+ }
208
+ example = addTokenAndLabel(example)
209
+ self.SentenceWithOpinions.append(example)
210
+
211
+ import nltk
212
+
213
def clearOpinion(example):
    """Filter and order the opinions of one sentence example.

    Drops any opinion whose target is 'NULL' (any case) or empty, or
    whose span is empty (``from == to``). The survivors are sorted by
    their integer start offset; an opinion is then also dropped when it
    shares either its start or its end offset with the previously kept
    opinion, so endpoint-overlapping duplicates are removed.

    Returns the filtered, sorted list (the input dicts are not copied).
    """
    usable = []
    for op in example['opinions']:
        tgt = op['target']
        # skip NULL / empty targets and zero-width spans
        if tgt == '' or tgt.lower() == 'null' or op['from'] == op['to']:
            continue
        usable.append(op)

    # ascending by character start offset
    usable.sort(key=lambda o: int(o['from']))

    unique = []
    for op in usable:
        if unique and (op['from'] == unique[-1]['from'] or op['to'] == unique[-1]['to']):
            continue  # shares an endpoint with the previous kept opinion
        unique.append(op)
    return unique
237
+
238
+
239
def addTokenAndLabel(example):
    """Tokenize example['text'] and attach aspect-term tag sequences.

    Adds to `example` (and returns it, mutated in place):
      - 'tokens':            nltk word tokens of the full text
      - 'ATESP_BIEOS_tags':  B/I/E/S tags with a 3-letter polarity
                             suffix (e.g. 'B-POS'), 'O' elsewhere
      - 'ATESP_BIO_tags':    same, collapsed to BIO (E->I, S->B)
      - 'ATE_BIEOS_tags':    BIEOS tags without polarity (first char)
      - 'ATE_BIO_tags':      BIO tags without polarity (first char)

    Opinions are first cleaned/deduplicated via clearOpinion, so the
    aspect spans processed here are non-empty and non-overlapping.
    """
    tokens = []
    labels = []

    text = example['text']
    UniOpinions = clearOpinion(example)
    text_begin = 0

    for aspect in UniOpinions:
        # 3-letter polarity code, e.g. 'positive' -> 'POS'
        polarity = aspect['polarity'][:3].upper()

        # text before the aspect span is all outside ('O')
        pre_O_tokens = nltk.word_tokenize(text[text_begin: int(aspect['from'])])
        tokens.extend(pre_O_tokens)
        labels.extend(['O'] * len(pre_O_tokens))

        # tokens inside the aspect span get B/I/E/S + polarity tags
        BIES_tokens = nltk.word_tokenize(text[int(aspect['from']): int(aspect['to'])])
        tokens.extend(BIES_tokens)

        # clearOpinion drops zero-width spans, so this should not fire.
        # Fixed: the message was `print(...)`, which made the assertion
        # message None; use the string itself.
        assert len(BIES_tokens) > 0, 'error in BIES_tokens length'

        if len(BIES_tokens) == 1:
            labels.append('S-' + polarity)
        else:
            # B, zero or more I, then E (covers the former len==2 branch)
            labels.append('B-' + polarity)
            labels.extend(['I-' + polarity] * (len(BIES_tokens) - 2))
            labels.append('E-' + polarity)

        text_begin = int(aspect['to'])

    # trailing text after the last aspect span
    pre_O_tokens = nltk.word_tokenize(text[text_begin:])
    labels.extend(['O'] * len(pre_O_tokens))
    tokens.extend(pre_O_tokens)

    example['tokens'] = tokens
    example['ATESP_BIEOS_tags'] = labels

    # collapse BIEOS -> BIO: E becomes I, S becomes B
    ATESP_BIO_labels = [label.replace('E-', 'I-').replace('S-', 'B-') for label in labels]
    example['ATESP_BIO_tags'] = ATESP_BIO_labels

    # aspect-term-only variants: keep just the leading tag character,
    # dropping the polarity suffix ('B-POS' -> 'B', 'O' -> 'O')
    example['ATE_BIEOS_tags'] = [label[0] for label in labels]
    example['ATE_BIO_tags'] = [label[0] for label in ATESP_BIO_labels]

    return example