ju-resplande commited on
Commit
39fe54b
·
1 Parent(s): 586f75b

remove script

Browse files
Files changed (1) hide show
  1. plue.py +0 -677
plue.py DELETED
@@ -1,677 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- # https://github.com/huggingface/datasets/blob/master/datasets/glue/glue.py
18
- # https://github.com/huggingface/datasets/blob/master/datasets/scitail/scitail.py
19
- """The General Language Understanding Evaluation (GLUE) benchmark."""
20
-
21
-
22
- import csv
23
- import os
24
- import textwrap
25
-
26
- import numpy as np
27
-
28
- import datasets
29
-
30
-
31
# Citation appended to every config-specific citation (see Plue._info).
_PLUE_CITATION = """\
@misc{Gomes2020,
  author = {GOMES, J. R. S.},
  title = {Portuguese Language Understanding Evaluation},
  year = {2020},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\\url{https://github.com/jubs12/PLUE}},
  commit = {CURRENT_COMMIT}
}

@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

# Short dataset summary shown to users. ("Evaluationis" typo fixed.)
_PLUE_DESCRIPTION = """\
PLUE: Portuguese Language Understanding Evaluation is a Portuguese translation of
the GLUE benchmark and Scitail using OPUS-MT model and Google Cloud Translation.
"""

# MNLI and SNLI are too large for the main PLUE archive and ship as
# dedicated release assets (see Plue._split_generators).
MNLI_URL = "https://github.com/ju-resplande/PLUE/releases/download/v1.0.0/MNLI.zip"
SNLI_URL = "https://github.com/ju-resplande/PLUE/releases/download/v1.0.0/SNLI.zip"

# Keyword arguments shared by the "mnli", "mnli_matched" and
# "mnli_mismatched" configs; only the description differs per config.
_MNLI_BASE_KWARGS = dict(
    text_features={"premise": "sentence1", "hypothesis": "sentence2"},
    label_classes=["entailment", "neutral", "contradiction"],
    label_column="gold_label",
    data_dir="MNLI",
    citation=textwrap.dedent(
        """\
        @InProceedings{N18-1101,
          author = "Williams, Adina
                    and Nangia, Nikita
                    and Bowman, Samuel",
          title = "A Broad-Coverage Challenge Corpus for
                   Sentence Understanding through Inference",
          booktitle = "Proceedings of the 2018 Conference of
                       the North American Chapter of the
                       Association for Computational Linguistics:
                       Human Language Technologies, Volume 1 (Long
                       Papers)",
          year = "2018",
          publisher = "Association for Computational Linguistics",
          pages = "1112--1122",
          location = "New Orleans, Louisiana",
          url = "http://aclweb.org/anthology/N18-1101"
        }
        @article{bowman2015large,
          title={A large annotated corpus for learning natural language inference},
          author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
          journal={arXiv preprint arXiv:1508.05326},
          year={2015}
        }"""
    ),
    url="http://www.nyu.edu/projects/bowman/multinli/",
)
91
-
92
-
93
class PlueConfig(datasets.BuilderConfig):
    """BuilderConfig describing one PLUE task (one translated GLUE/SciTail task).

    All tasks are read from tab-separated files; each config records which
    TSV columns feed which dataset features and how the label is obtained.
    """

    def __init__(
        self,
        text_features,
        label_column,
        data_dir,
        citation,
        url,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        """Initialize a PLUE task config.

        Args:
            text_features: `dict[string, string]`, map from the name of the feature
                dict for each text field to the name of the column in the tsv file
            label_column: `string`, name of the column in the tsv file corresponding
                to the label
            data_dir: `string`, the path to the folder containing the tsv files in the
                downloaded zip
            citation: `string`, citation for the data set
            url: `string`, url for information about the data set
            label_classes: `list[string]`, the list of classes if the label is
                categorical. If not provided, then the label will be of type
                `datasets.Value('float32')`.
            process_label: `Function[string, any]`, function taking in the raw value
                of the label and processing it to the form required by the label feature
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.3", ""), **kwargs)
        # Every task except MNLI/SNLI is served from this single archive;
        # _split_generators decides which URL is actually used.
        self.data_url = (
            "https://github.com/ju-resplande/PLUE/archive/refs/tags/v1.0.1.zip"
        )
        self.text_features = text_features
        self.label_column = label_column
        self.label_classes = label_classes
        self.data_dir = data_dir
        self.citation = citation
        self.url = url
        self.process_label = process_label
139
-
140
-
141
class Plue(datasets.GeneratorBasedBuilder):
    """PLUE: Portuguese translations of the GLUE benchmark tasks, SNLI and SciTail."""

    BUILDER_CONFIGS = [
        PlueConfig(
            name="cola",
            description=textwrap.dedent(
                """\
                The Corpus of Linguistic Acceptability consists of English
                acceptability judgments drawn from books and journal articles on
                linguistic theory. Each example is a sequence of words annotated
                with whether it is a grammatical English sentence."""
            ),
            text_features={"sentence": "sentence"},
            label_classes=["unacceptable", "acceptable"],
            label_column="is_acceptable",
            data_dir="PLUE-1.0.1/datasets/CoLA",
            citation=textwrap.dedent(
                """\
                @article{warstadt2018neural,
                  title={Neural Network Acceptability Judgments},
                  author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
                  journal={arXiv preprint arXiv:1805.12471},
                  year={2018}
                }"""
            ),
            url="https://nyu-mll.github.io/CoLA/",
        ),
        PlueConfig(
            name="sst2",
            description=textwrap.dedent(
                """\
                The Stanford Sentiment Treebank consists of sentences from movie reviews and
                human annotations of their sentiment. The task is to predict the sentiment of a
                given sentence. We use the two-way (positive/negative) class split, and use only
                sentence-level labels."""
            ),
            text_features={"sentence": "sentence"},
            label_classes=["negative", "positive"],
            label_column="label",
            data_dir="PLUE-1.0.1/datasets/SST-2",
            citation=textwrap.dedent(
                """\
                @inproceedings{socher2013recursive,
                  title={Recursive deep models for semantic compositionality over a sentiment treebank},
                  author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
                  booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
                  pages={1631--1642},
                  year={2013}
                }"""
            ),
            url="https://datasets.stanford.edu/sentiment/index.html",
        ),
        PlueConfig(
            name="mrpc",
            description=textwrap.dedent(
                """\
                The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
                sentence pairs automatically extracted from online news sources, with human annotations
                for whether the sentences in the pair are semantically equivalent."""
            ),  # pylint: disable=line-too-long
            # MRPC columns are mapped by a dedicated generator
            # (_generate_example_mrpc_files), so no column names are needed here.
            text_features={"sentence1": "", "sentence2": ""},
            label_classes=["not_equivalent", "equivalent"],
            label_column="Quality",
            data_dir="PLUE-1.0.1/datasets/MRPC",
            citation=textwrap.dedent(
                """\
                @inproceedings{dolan2005automatically,
                  title={Automatically constructing a corpus of sentential paraphrases},
                  author={Dolan, William B and Brockett, Chris},
                  booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
                  year={2005}
                }"""
            ),
            url="https://www.microsoft.com/en-us/download/details.aspx?id=52398",
        ),
        PlueConfig(
            name="qqp_v2",
            description=textwrap.dedent(
                """\
                The Quora Question Pairs2 dataset is a collection of question pairs from the
                community question-answering website Quora. The task is to determine whether a
                pair of questions are semantically equivalent."""
            ),
            text_features={"question1": "question1", "question2": "question2"},
            label_classes=["not_duplicate", "duplicate"],
            label_column="is_duplicate",
            data_dir="PLUE-1.0.1/datasets/QQP_v2",
            citation=textwrap.dedent(
                """\
                @online{WinNT,
                  author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
                  title = {First Quora Dataset Release: Question Pairs},
                  year = {2017},
                  url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
                  urldate = {2019-04-03}
                }"""
            ),
            url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
        ),
        PlueConfig(
            name="stsb",
            description=textwrap.dedent(
                """\
                The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
                sentence pairs drawn from news headlines, video and image captions, and natural
                language inference data. Each pair is human-annotated with a similarity score
                from 1 to 5."""
            ),
            text_features={"sentence1": "sentence1", "sentence2": "sentence2"},
            # No label_classes: the label feature becomes float32 (regression task).
            label_column="score",
            data_dir="PLUE-1.0.1/datasets/STS-B",
            citation=textwrap.dedent(
                """\
                @article{cer2017semeval,
                  title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
                  author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
                  journal={arXiv preprint arXiv:1708.00055},
                  year={2017}
                }"""
            ),
            url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
            process_label=np.float32,
        ),
        PlueConfig(
            name="snli",
            description=textwrap.dedent(
                """\
                The SNLI corpus (version 1.0) is a collection of 570k human-written English
                sentence pairs manually labeled for balanced classification with the labels
                entailment, contradiction, and neutral, supporting the task of natural language
                inference (NLI), also known as recognizing textual entailment (RTE).
                """
            ),
            text_features={"premise": "sentence1", "hypothesis": "sentence2"},
            label_classes=["entailment", "neutral", "contradiction"],
            label_column="gold_label",
            data_dir="SNLI",
            citation=textwrap.dedent(
                """\
                @inproceedings{snli:emnlp2015,
                  Author = {Bowman, Samuel R. and Angeli, Gabor and Potts, Christopher, and Manning, Christopher D.},
                  Booktitle = {Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
                  Publisher = {Association for Computational Linguistics},
                  Title = {A large annotated corpus for learning natural language inference},
                  Year = {2015}
                }
                """
            ),
            url="https://nlp.stanford.edu/projects/snli/",
        ),
        PlueConfig(
            name="mnli",
            description=textwrap.dedent(
                """\
                The Multi-Genre Natural Language Inference Corpus is a crowdsourced
                collection of sentence pairs with textual entailment annotations. Given a premise sentence
                and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
                (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
                gathered from ten different sources, including transcribed speech, fiction, and government reports.
                We use the standard test set, for which we obtained private labels from the authors, and evaluate
                on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
                the SNLI corpus as 550k examples of auxiliary training data."""
            ),
            **_MNLI_BASE_KWARGS,
        ),
        PlueConfig(
            name="mnli_mismatched",
            description=textwrap.dedent(
                """\
                The mismatched validation and test splits from MNLI.
                See the "mnli" BuilderConfig for additional information."""
            ),
            **_MNLI_BASE_KWARGS,
        ),
        PlueConfig(
            name="mnli_matched",
            description=textwrap.dedent(
                """\
                The matched validation and test splits from MNLI.
                See the "mnli" BuilderConfig for additional information."""
            ),
            **_MNLI_BASE_KWARGS,
        ),
        PlueConfig(
            name="qnli",
            description=textwrap.dedent(
                """\
                The Stanford Question Answering Dataset is a question-answering
                dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
                from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
                convert the task into sentence pair classification by forming a pair between each question and each
                sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
                question and the context sentence. The task is to determine whether the context sentence contains
                the answer to the question. This modified version of the original task removes the requirement that
                the model select the exact answer, but also removes the simplifying assumptions that the answer
                is always present in the input and that lexical overlap is a reliable cue."""
            ),  # pylint: disable=line-too-long
            text_features={"question": "question", "sentence": "sentence"},
            label_classes=["entailment", "not_entailment"],
            label_column="label",
            data_dir="PLUE-1.0.1/datasets/QNLI",
            citation=textwrap.dedent(
                """\
                @article{rajpurkar2016squad,
                  title={Squad: 100,000+ questions for machine comprehension of text},
                  author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
                  journal={arXiv preprint arXiv:1606.05250},
                  year={2016}
                }"""
            ),
            url="https://rajpurkar.github.io/SQuAD-explorer/",
        ),
        PlueConfig(
            name="qnli_v2",
            description=textwrap.dedent(
                """\
                The Stanford Question Answering Dataset is a question-answering
                dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
                from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
                convert the task into sentence pair classification by forming a pair between each question and each
                sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
                question and the context sentence. The task is to determine whether the context sentence contains
                the answer to the question. This modified version of the original task removes the requirement that
                the model select the exact answer, but also removes the simplifying assumptions that the answer
                is always present in the input and that lexical overlap is a reliable cue."""
            ),  # pylint: disable=line-too-long
            text_features={"question": "question", "sentence": "sentence"},
            label_classes=["entailment", "not_entailment"],
            label_column="label",
            data_dir="PLUE-1.0.1/datasets/QNLI_v2",
            citation=textwrap.dedent(
                """\
                @article{rajpurkar2016squad,
                  title={Squad: 100,000+ questions for machine comprehension of text},
                  author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
                  journal={arXiv preprint arXiv:1606.05250},
                  year={2016}
                }"""
            ),
            url="https://rajpurkar.github.io/SQuAD-explorer/",
        ),
        PlueConfig(
            name="rte",
            description=textwrap.dedent(
                """\
                The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
                entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
                et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009).4 Examples are
                constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
                for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
            ),  # pylint: disable=line-too-long
            text_features={"sentence1": "sentence1", "sentence2": "sentence2"},
            label_classes=["entailment", "not_entailment"],
            label_column="label",
            data_dir="PLUE-1.0.1/datasets/RTE",
            citation=textwrap.dedent(
                """\
                @inproceedings{dagan2005pascal,
                  title={The PASCAL recognising textual entailment challenge},
                  author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
                  booktitle={Machine Learning Challenges Workshop},
                  pages={177--190},
                  year={2005},
                  organization={Springer}
                }
                @inproceedings{bar2006second,
                  title={The second pascal recognising textual entailment challenge},
                  author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
                  booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
                  volume={6},
                  number={1},
                  pages={6--4},
                  year={2006},
                  organization={Venice}
                }
                @inproceedings{giampiccolo2007third,
                  title={The third pascal recognizing textual entailment challenge},
                  author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
                  booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
                  pages={1--9},
                  year={2007},
                  organization={Association for Computational Linguistics}
                }
                @inproceedings{bentivogli2009fifth,
                  title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
                  author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
                  booktitle={TAC},
                  year={2009}
                }"""
            ),
            url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
        ),
        PlueConfig(
            name="wnli",
            description=textwrap.dedent(
                """\
                The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
                in which a system must read a sentence with a pronoun and select the referent of that pronoun from
                a list of choices. The examples are manually constructed to foil simple statistical methods: Each
                one is contingent on contextual information provided by a single word or phrase in the sentence.
                To convert the problem into sentence pair classification, we construct sentence pairs by replacing
                the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
                pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
                new examples derived from fiction books that was shared privately by the authors of the original
                corpus. While the included training set is balanced between two classes, the test set is imbalanced
                between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
                hypotheses are sometimes shared between training and development examples, so if a model memorizes the
                training examples, they will predict the wrong label on corresponding development set
                example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
                between a model's score on this task and its score on the unconverted original task. We
                call converted dataset WNLI (Winograd NLI)."""
            ),
            text_features={"sentence1": "sentence1", "sentence2": "sentence2"},
            label_classes=["not_entailment", "entailment"],
            label_column="label",
            data_dir="PLUE-1.0.1/datasets/WNLI",
            citation=textwrap.dedent(
                """\
                @inproceedings{levesque2012winograd,
                  title={The winograd schema challenge},
                  author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
                  booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
                  year={2012}
                }"""
            ),
            url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
        ),
        PlueConfig(
            name="scitail",
            description=textwrap.dedent(
                """\
                The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question
                and the correct answer choice are converted into an assertive statement to form the hypothesis. We use information
                retrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We
                crowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create
                the SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples
                with neutral label"""
            ),
            text_features={"premise": "premise", "hypothesis": "hypothesis"},
            label_classes=["entails", "neutral"],
            label_column="label",
            data_dir="PLUE-1.0.1/datasets/SciTail",
            # Fixed: the original citation was a broken literal (`""""\` left a
            # stray quote and a backslash continuation inside the string) and
            # the BibTeX entry lacked its leading "@".
            citation=textwrap.dedent(
                """\
                @inproceedings{scitail,
                  Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},
                  Booktitle = {AAAI},
                  Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},
                  Year = {2018}
                }
                """
            ),
            # NOTE(review): this URL points at the GLUE diagnostics page, not a
            # SciTail page — looks copied from glue.py's "ax" config; confirm.
            url="https://gluebenchmark.com/diagnostics",
        ),
    ]
495
-
496
- def _info(self):
497
- features = {
498
- text_feature: datasets.Value("string")
499
- for text_feature in self.config.text_features.keys()
500
- }
501
- if self.config.label_classes:
502
- features["label"] = datasets.features.ClassLabel(
503
- names=self.config.label_classes
504
- )
505
- else:
506
- features["label"] = datasets.Value("float32")
507
- features["idx"] = datasets.Value("int32")
508
- return datasets.DatasetInfo(
509
- description=_PLUE_DESCRIPTION,
510
- features=datasets.Features(features),
511
- homepage=self.config.url,
512
- citation=self.config.citation + "\n" + _PLUE_CITATION,
513
- )
514
-
515
- def _split_generators(self, dl_manager):
516
- if self.config.name == "mnli":
517
- data_url = MNLI_URL
518
- elif self.config.name == "snli":
519
- data_url = SNLI_URL
520
- else:
521
- data_url = self.config.data_url
522
-
523
- dl_dir = dl_manager.download_and_extract(data_url)
524
- data_dir = os.path.join(dl_dir, self.config.data_dir)
525
-
526
- train_split = datasets.SplitGenerator(
527
- name=datasets.Split.TRAIN,
528
- gen_kwargs={
529
- "data_file": os.path.join(data_dir or "", "train.tsv"),
530
- "split": "train",
531
- },
532
- )
533
- if self.config.name == "mnli":
534
- return [
535
- train_split,
536
- _mnli_split_generator(
537
- "validation_matched", data_dir, "dev", matched=True
538
- ),
539
- _mnli_split_generator(
540
- "validation_mismatched", data_dir, "dev", matched=False
541
- ),
542
- _mnli_split_generator("test_matched", data_dir, "test", matched=True),
543
- _mnli_split_generator(
544
- "test_mismatched", data_dir, "test", matched=False
545
- ),
546
- ]
547
- elif self.config.name == "mnli_matched":
548
- return [
549
- _mnli_split_generator("validation", data_dir, "dev", matched=True),
550
- _mnli_split_generator("test", data_dir, "test", matched=True),
551
- ]
552
- elif self.config.name == "mnli_mismatched":
553
- return [
554
- _mnli_split_generator("validation", data_dir, "dev", matched=False),
555
- _mnli_split_generator("test", data_dir, "test", matched=False),
556
- ]
557
- else:
558
- return [
559
- train_split,
560
- datasets.SplitGenerator(
561
- name=datasets.Split.VALIDATION,
562
- gen_kwargs={
563
- "data_file": os.path.join(data_dir or "", "dev.tsv"),
564
- "split": "dev",
565
- },
566
- ),
567
- datasets.SplitGenerator(
568
- name=datasets.Split.TEST,
569
- gen_kwargs={
570
- "data_file": os.path.join(data_dir or "", "test.tsv"),
571
- "split": "test",
572
- },
573
- ),
574
- ]
575
-
576
    def _generate_examples(self, data_file, split):
        """Yield (key, example) pairs from one TSV file of the chosen config.

        MRPC and SciTail have bespoke file layouts and are delegated to their
        dedicated generators; everything else is parsed generically below.
        """
        if self.config.name in ["mrpc", "scitail"]:
            if self.config.name == "mrpc":
                examples = self._generate_example_mrpc_files(
                    data_file=data_file, split=split
                )
            elif self.config.name == "scitail":
                examples = self._generate_example_scitail_files(
                    data_file=data_file, split=split
                )

            for example in examples:
                yield example["idx"], example

        else:
            process_label = self.config.process_label
            label_classes = self.config.label_classes

            # The train and dev files for CoLA are the only tsv files without a
            # header.
            is_cola_non_test = self.config.name == "cola" and split != "test"

            with open(data_file, encoding="utf8") as f:
                reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                if is_cola_non_test:
                    # Header-less file: fall back to a positional reader.
                    reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)

                for n, row in enumerate(reader):
                    if is_cola_non_test:
                        # CoLA columns by position: col 3 is the sentence,
                        # col 1 the acceptability judgement.
                        row = {
                            "sentence": row[3],
                            "is_acceptable": row[1],
                        }

                    # Map the config's TSV column names onto feature names.
                    example = {
                        feat: row[col]
                        for feat, col in self.config.text_features.items()
                    }
                    example["idx"] = n

                    if self.config.label_column in row:
                        label = row[self.config.label_column]
                        # For some tasks, the label is represented as 0 and 1 in the tsv
                        # files and needs to be cast to integer to work with the feature.
                        if label_classes and label not in label_classes:
                            label = int(label) if label else None
                        example["label"] = process_label(label)
                    else:
                        # Unlabeled (e.g. test) rows get a -1 placeholder label.
                        example["label"] = process_label(-1)

                    # Filter out corrupted rows.
                    for value in example.values():
                        if value is None:
                            break
                    else:
                        # for/else: only yield when no value was None.
                        yield example["idx"], example
632
-
633
- def _generate_example_mrpc_files(self, data_file, split):
634
- print(data_file)
635
-
636
- with open(data_file, encoding="utf8") as f:
637
- reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
638
- for idx, row in enumerate(reader):
639
- label = row["Quality"] if split != "test" else -1
640
-
641
- yield {
642
- "sentence1": row["#1 String"],
643
- "sentence2": row["#2 String"],
644
- "label": int(label),
645
- "idx": idx,
646
- }
647
-
648
- def _generate_example_scitail_files(self, data_file, split):
649
- with open(data_file, encoding="utf8") as f:
650
- reader = csv.DictReader(
651
- f,
652
- delimiter="\t",
653
- quoting=csv.QUOTE_NONE,
654
- fieldnames=["premise", "hypothesis", "label"],
655
- )
656
- for idx, row in enumerate(reader):
657
- label = row["label"] if split != "test" else -1
658
-
659
- yield {
660
- "premise": row["premise"],
661
- "hypothesis": row["hypothesis"],
662
- "label": label,
663
- "idx": idx,
664
- }
665
-
666
-
667
def _mnli_split_generator(name, data_dir, split, matched):
    """Build a SplitGenerator for one MNLI matched/mismatched TSV file."""
    variant = "matched" if matched else "mismatched"
    file_path = os.path.join(data_dir, "%s_%s.tsv" % (split, variant))
    return datasets.SplitGenerator(
        name=name,
        gen_kwargs={
            "data_file": file_path,
            "split": split,
        },
    )
677
-