matthewspring parquet-converter committed on
Commit 5e555a3 · verified · 0 Parent(s)

Duplicate from nyu-mll/glue


Co-authored-by: Parquet-converter (BOT) <parquet-converter@users.noreply.huggingface.co>

Files changed (36)
  1. .gitattributes +27 -0
  2. README.md +1189 -0
  3. ax/test-00000-of-00001.parquet +3 -0
  4. cola/test-00000-of-00001.parquet +3 -0
  5. cola/train-00000-of-00001.parquet +3 -0
  6. cola/validation-00000-of-00001.parquet +3 -0
  7. mnli/test_matched-00000-of-00001.parquet +3 -0
  8. mnli/test_mismatched-00000-of-00001.parquet +3 -0
  9. mnli/train-00000-of-00001.parquet +3 -0
  10. mnli/validation_matched-00000-of-00001.parquet +3 -0
  11. mnli/validation_mismatched-00000-of-00001.parquet +3 -0
  12. mnli_matched/test-00000-of-00001.parquet +3 -0
  13. mnli_matched/validation-00000-of-00001.parquet +3 -0
  14. mnli_mismatched/test-00000-of-00001.parquet +3 -0
  15. mnli_mismatched/validation-00000-of-00001.parquet +3 -0
  16. mrpc/test-00000-of-00001.parquet +3 -0
  17. mrpc/train-00000-of-00001.parquet +3 -0
  18. mrpc/validation-00000-of-00001.parquet +3 -0
  19. qnli/test-00000-of-00001.parquet +3 -0
  20. qnli/train-00000-of-00001.parquet +3 -0
  21. qnli/validation-00000-of-00001.parquet +3 -0
  22. qqp/test-00000-of-00001.parquet +3 -0
  23. qqp/train-00000-of-00001.parquet +3 -0
  24. qqp/validation-00000-of-00001.parquet +3 -0
  25. rte/test-00000-of-00001.parquet +3 -0
  26. rte/train-00000-of-00001.parquet +3 -0
  27. rte/validation-00000-of-00001.parquet +3 -0
  28. sst2/test-00000-of-00001.parquet +3 -0
  29. sst2/train-00000-of-00001.parquet +3 -0
  30. sst2/validation-00000-of-00001.parquet +3 -0
  31. stsb/test-00000-of-00001.parquet +3 -0
  32. stsb/train-00000-of-00001.parquet +3 -0
  33. stsb/validation-00000-of-00001.parquet +3 -0
  34. wnli/test-00000-of-00001.parquet +3 -0
  35. wnli/train-00000-of-00001.parquet +3 -0
  36. wnli/validation-00000-of-00001.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,1189 @@
---
annotations_creators:
- other
language_creators:
- other
language:
- en
license:
- other
multilinguality:
- monolingual
size_categories:
- 10K<n<100K
source_datasets:
- original
task_categories:
- text-classification
task_ids:
- acceptability-classification
- natural-language-inference
- semantic-similarity-scoring
- sentiment-classification
- text-scoring
paperswithcode_id: glue
pretty_name: GLUE (General Language Understanding Evaluation benchmark)
config_names:
- ax
- cola
- mnli
- mnli_matched
- mnli_mismatched
- mrpc
- qnli
- qqp
- rte
- sst2
- stsb
- wnli
tags:
- qa-nli
- coreference-nli
- paraphrase-identification
dataset_info:
- config_name: ax
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  - name: idx
    dtype: int32
  splits:
  - name: test
    num_bytes: 237694
    num_examples: 1104
  download_size: 80767
  dataset_size: 237694
- config_name: cola
  features:
  - name: sentence
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': unacceptable
          '1': acceptable
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 484869
    num_examples: 8551
  - name: validation
    num_bytes: 60322
    num_examples: 1043
  - name: test
    num_bytes: 60513
    num_examples: 1063
  download_size: 326394
  dataset_size: 605704
- config_name: mnli
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 74619646
    num_examples: 392702
  - name: validation_matched
    num_bytes: 1833783
    num_examples: 9815
  - name: validation_mismatched
    num_bytes: 1949231
    num_examples: 9832
  - name: test_matched
    num_bytes: 1848654
    num_examples: 9796
  - name: test_mismatched
    num_bytes: 1950703
    num_examples: 9847
  download_size: 57168425
  dataset_size: 82202017
- config_name: mnli_matched
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  - name: idx
    dtype: int32
  splits:
  - name: validation
    num_bytes: 1833783
    num_examples: 9815
  - name: test
    num_bytes: 1848654
    num_examples: 9796
  download_size: 2435055
  dataset_size: 3682437
- config_name: mnli_mismatched
  features:
  - name: premise
    dtype: string
  - name: hypothesis
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': neutral
          '2': contradiction
  - name: idx
    dtype: int32
  splits:
  - name: validation
    num_bytes: 1949231
    num_examples: 9832
  - name: test
    num_bytes: 1950703
    num_examples: 9847
  download_size: 2509009
  dataset_size: 3899934
- config_name: mrpc
  features:
  - name: sentence1
    dtype: string
  - name: sentence2
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': not_equivalent
          '1': equivalent
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 943843
    num_examples: 3668
  - name: validation
    num_bytes: 105879
    num_examples: 408
  - name: test
    num_bytes: 442410
    num_examples: 1725
  download_size: 1033400
  dataset_size: 1492132
- config_name: qnli
  features:
  - name: question
    dtype: string
  - name: sentence
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': not_entailment
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 25612443
    num_examples: 104743
  - name: validation
    num_bytes: 1368304
    num_examples: 5463
  - name: test
    num_bytes: 1373093
    num_examples: 5463
  download_size: 19278324
  dataset_size: 28353840
- config_name: qqp
  features:
  - name: question1
    dtype: string
  - name: question2
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': not_duplicate
          '1': duplicate
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 50900820
    num_examples: 363846
  - name: validation
    num_bytes: 5653754
    num_examples: 40430
  - name: test
    num_bytes: 55171111
    num_examples: 390965
  download_size: 73982265
  dataset_size: 111725685
- config_name: rte
  features:
  - name: sentence1
    dtype: string
  - name: sentence2
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': entailment
          '1': not_entailment
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 847320
    num_examples: 2490
  - name: validation
    num_bytes: 90728
    num_examples: 277
  - name: test
    num_bytes: 974053
    num_examples: 3000
  download_size: 1274409
  dataset_size: 1912101
- config_name: sst2
  features:
  - name: sentence
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': negative
          '1': positive
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 4681603
    num_examples: 67349
  - name: validation
    num_bytes: 106252
    num_examples: 872
  - name: test
    num_bytes: 216640
    num_examples: 1821
  download_size: 3331080
  dataset_size: 5004495
- config_name: stsb
  features:
  - name: sentence1
    dtype: string
  - name: sentence2
    dtype: string
  - name: label
    dtype: float32
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 754791
    num_examples: 5749
  - name: validation
    num_bytes: 216064
    num_examples: 1500
  - name: test
    num_bytes: 169974
    num_examples: 1379
  download_size: 766983
  dataset_size: 1140829
- config_name: wnli
  features:
  - name: sentence1
    dtype: string
  - name: sentence2
    dtype: string
  - name: label
    dtype:
      class_label:
        names:
          '0': not_entailment
          '1': entailment
  - name: idx
    dtype: int32
  splits:
  - name: train
    num_bytes: 107109
    num_examples: 635
  - name: validation
    num_bytes: 12162
    num_examples: 71
  - name: test
    num_bytes: 37889
    num_examples: 146
  download_size: 63522
  dataset_size: 157160
configs:
- config_name: ax
  data_files:
  - split: test
    path: ax/test-*
- config_name: cola
  data_files:
  - split: train
    path: cola/train-*
  - split: validation
    path: cola/validation-*
  - split: test
    path: cola/test-*
- config_name: mnli
  data_files:
  - split: train
    path: mnli/train-*
  - split: validation_matched
    path: mnli/validation_matched-*
  - split: validation_mismatched
    path: mnli/validation_mismatched-*
  - split: test_matched
    path: mnli/test_matched-*
  - split: test_mismatched
    path: mnli/test_mismatched-*
- config_name: mnli_matched
  data_files:
  - split: validation
    path: mnli_matched/validation-*
  - split: test
    path: mnli_matched/test-*
- config_name: mnli_mismatched
  data_files:
  - split: validation
    path: mnli_mismatched/validation-*
  - split: test
    path: mnli_mismatched/test-*
- config_name: mrpc
  data_files:
  - split: train
    path: mrpc/train-*
  - split: validation
    path: mrpc/validation-*
  - split: test
    path: mrpc/test-*
- config_name: qnli
  data_files:
  - split: train
    path: qnli/train-*
  - split: validation
    path: qnli/validation-*
  - split: test
    path: qnli/test-*
- config_name: qqp
  data_files:
  - split: train
    path: qqp/train-*
  - split: validation
    path: qqp/validation-*
  - split: test
    path: qqp/test-*
- config_name: rte
  data_files:
  - split: train
    path: rte/train-*
  - split: validation
    path: rte/validation-*
  - split: test
    path: rte/test-*
- config_name: sst2
  data_files:
  - split: train
    path: sst2/train-*
  - split: validation
    path: sst2/validation-*
  - split: test
    path: sst2/test-*
- config_name: stsb
  data_files:
  - split: train
    path: stsb/train-*
  - split: validation
    path: stsb/validation-*
  - split: test
    path: stsb/test-*
- config_name: wnli
  data_files:
  - split: train
    path: wnli/train-*
  - split: validation
    path: wnli/validation-*
  - split: test
    path: wnli/test-*
train-eval-index:
- config: cola
  task: text-classification
  task_id: binary_classification
  splits:
    train_split: train
    eval_split: validation
  col_mapping:
    sentence: text
    label: target
- config: sst2
  task: text-classification
  task_id: binary_classification
  splits:
    train_split: train
    eval_split: validation
  col_mapping:
    sentence: text
    label: target
- config: mrpc
  task: text-classification
  task_id: natural_language_inference
  splits:
    train_split: train
    eval_split: validation
  col_mapping:
    sentence1: text1
    sentence2: text2
    label: target
- config: qqp
  task: text-classification
  task_id: natural_language_inference
  splits:
    train_split: train
    eval_split: validation
  col_mapping:
    question1: text1
    question2: text2
    label: target
- config: stsb
  task: text-classification
  task_id: natural_language_inference
  splits:
    train_split: train
    eval_split: validation
  col_mapping:
    sentence1: text1
    sentence2: text2
    label: target
- config: mnli
  task: text-classification
  task_id: natural_language_inference
  splits:
    train_split: train
    eval_split: validation_matched
  col_mapping:
    premise: text1
    hypothesis: text2
    label: target
- config: mnli_mismatched
  task: text-classification
  task_id: natural_language_inference
  splits:
    train_split: train
    eval_split: validation
  col_mapping:
    premise: text1
    hypothesis: text2
    label: target
- config: mnli_matched
  task: text-classification
  task_id: natural_language_inference
  splits:
    train_split: train
    eval_split: validation
  col_mapping:
    premise: text1
    hypothesis: text2
    label: target
- config: qnli
  task: text-classification
  task_id: natural_language_inference
  splits:
    train_split: train
    eval_split: validation
  col_mapping:
    question: text1
    sentence: text2
    label: target
- config: rte
  task: text-classification
  task_id: natural_language_inference
  splits:
    train_split: train
    eval_split: validation
  col_mapping:
    sentence1: text1
    sentence2: text2
    label: target
- config: wnli
  task: text-classification
  task_id: natural_language_inference
  splits:
    train_split: train
    eval_split: validation
  col_mapping:
    sentence1: text1
    sentence2: text2
    label: target
---

# Dataset Card for GLUE

## Table of Contents
- [Dataset Card for GLUE](#dataset-card-for-glue)
  - [Table of Contents](#table-of-contents)
  - [Dataset Description](#dataset-description)
    - [Dataset Summary](#dataset-summary)
    - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
      - [ax](#ax)
      - [cola](#cola)
      - [mnli](#mnli)
      - [mnli_matched](#mnli_matched)
      - [mnli_mismatched](#mnli_mismatched)
      - [mrpc](#mrpc)
      - [qnli](#qnli)
      - [qqp](#qqp)
      - [rte](#rte)
      - [sst2](#sst2)
      - [stsb](#stsb)
      - [wnli](#wnli)
    - [Languages](#languages)
  - [Dataset Structure](#dataset-structure)
    - [Data Instances](#data-instances)
      - [ax](#ax-1)
      - [cola](#cola-1)
      - [mnli](#mnli-1)
      - [mnli_matched](#mnli_matched-1)
      - [mnli_mismatched](#mnli_mismatched-1)
      - [mrpc](#mrpc-1)
      - [qnli](#qnli-1)
      - [qqp](#qqp-1)
      - [rte](#rte-1)
      - [sst2](#sst2-1)
      - [stsb](#stsb-1)
      - [wnli](#wnli-1)
    - [Data Fields](#data-fields)
      - [ax](#ax-2)
      - [cola](#cola-2)
      - [mnli](#mnli-2)
      - [mnli_matched](#mnli_matched-2)
      - [mnli_mismatched](#mnli_mismatched-2)
      - [mrpc](#mrpc-2)
      - [qnli](#qnli-2)
      - [qqp](#qqp-2)
      - [rte](#rte-2)
      - [sst2](#sst2-2)
      - [stsb](#stsb-2)
      - [wnli](#wnli-2)
    - [Data Splits](#data-splits)
      - [ax](#ax-3)
      - [cola](#cola-3)
      - [mnli](#mnli-3)
      - [mnli_matched](#mnli_matched-3)
      - [mnli_mismatched](#mnli_mismatched-3)
      - [mrpc](#mrpc-3)
      - [qnli](#qnli-3)
      - [qqp](#qqp-3)
      - [rte](#rte-3)
      - [sst2](#sst2-3)
      - [stsb](#stsb-3)
      - [wnli](#wnli-3)
  - [Dataset Creation](#dataset-creation)
    - [Curation Rationale](#curation-rationale)
    - [Source Data](#source-data)
      - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
      - [Who are the source language producers?](#who-are-the-source-language-producers)
    - [Annotations](#annotations)
      - [Annotation process](#annotation-process)
      - [Who are the annotators?](#who-are-the-annotators)
    - [Personal and Sensitive Information](#personal-and-sensitive-information)
  - [Considerations for Using the Data](#considerations-for-using-the-data)
    - [Social Impact of Dataset](#social-impact-of-dataset)
    - [Discussion of Biases](#discussion-of-biases)
    - [Other Known Limitations](#other-known-limitations)
  - [Additional Information](#additional-information)
    - [Dataset Curators](#dataset-curators)
    - [Licensing Information](#licensing-information)
    - [Citation Information](#citation-information)
    - [Contributions](#contributions)

## Dataset Description

- **Homepage:** https://gluebenchmark.com/
- **Repository:** https://github.com/nyu-mll/GLUE-baselines
- **Paper:** https://arxiv.org/abs/1804.07461
- **Leaderboard:** https://gluebenchmark.com/leaderboard
- **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
- **Size of downloaded dataset files:** 1.00 GB
- **Size of the generated dataset:** 240.84 MB
- **Total amount of disk used:** 1.24 GB

### Dataset Summary

GLUE, the General Language Understanding Evaluation benchmark (https://gluebenchmark.com/), is a collection of resources for training, evaluating, and analyzing natural language understanding systems.

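Each of the tasks documented below is exposed as a separate configuration of this dataset. A minimal loading sketch with the `datasets` library (the choice of `cola` here is just illustrative; any config name from the list above works the same way):

```python
from datasets import load_dataset

# Any config name works: "cola", "sst2", "mnli", "stsb", ...
cola = load_dataset("glue", "cola")

print(cola)              # DatasetDict with train/validation/test splits
print(cola["train"][0])  # e.g. {'sentence': ..., 'label': 1, 'idx': 0}
```
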
### Supported Tasks and Leaderboards

The leaderboard for the GLUE benchmark can be found [at this address](https://gluebenchmark.com/). It comprises the following tasks:

#### ax

A manually-curated evaluation dataset for fine-grained analysis of system performance on a broad range of linguistic phenomena. This dataset evaluates sentence understanding through Natural Language Inference (NLI) problems. Use a model trained on MultiNLI to produce predictions for this dataset.

#### cola

The Corpus of Linguistic Acceptability consists of English acceptability judgments drawn from books and journal articles on linguistic theory. Each example is a sequence of words annotated with whether it is a grammatical English sentence.

#### mnli

The Multi-Genre Natural Language Inference Corpus is a crowdsourced collection of sentence pairs with textual entailment annotations. Given a premise sentence and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are gathered from ten different sources, including transcribed speech, fiction, and government reports. The authors of the benchmark use the standard test set, for which they obtained private labels from the RTE authors, and evaluate on both the matched (in-domain) and mismatched (cross-domain) sections. They also use and recommend the SNLI corpus as 550k examples of auxiliary training data.

#### mnli_matched

The matched validation and test splits from MNLI. See the "mnli" BuilderConfig for additional information.

#### mnli_mismatched

The mismatched validation and test splits from MNLI. See the "mnli" BuilderConfig for additional information.

#### mrpc

The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of sentence pairs automatically extracted from online news sources, with human annotations for whether the sentences in the pair are semantically equivalent.

#### qnli

The Stanford Question Answering Dataset is a question-answering dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn from Wikipedia) contains the answer to the corresponding question (written by an annotator). The authors of the benchmark convert the task into sentence pair classification by forming a pair between each question and each sentence in the corresponding context, and filtering out pairs with low lexical overlap between the question and the context sentence. The task is to determine whether the context sentence contains the answer to the question. This modified version of the original task removes the requirement that the model select the exact answer, but also removes the simplifying assumptions that the answer is always present in the input and that lexical overlap is a reliable cue.

#### qqp

The Quora Question Pairs dataset is a collection of question pairs from the community question-answering website Quora. The task is to determine whether a pair of questions are semantically equivalent.

#### rte

The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual entailment challenges. The authors of the benchmark combined the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009). Examples are constructed based on news and Wikipedia text. The authors of the benchmark convert all datasets to a two-class split, where for three-class datasets they collapse neutral and contradiction into not entailment, for consistency.

#### sst2

The Stanford Sentiment Treebank consists of sentences from movie reviews and human annotations of their sentiment. The task is to predict the sentiment of a given sentence. It uses the two-way (positive/negative) class split, with only sentence-level labels.

#### stsb

The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of sentence pairs drawn from news headlines, video and image captions, and natural language inference data. Each pair is human-annotated with a similarity score from 0 to 5.

#### wnli

The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task in which a system must read a sentence with a pronoun and select the referent of that pronoun from a list of choices. The examples are manually constructed to foil simple statistical methods: each one is contingent on contextual information provided by a single word or phrase in the sentence. To convert the problem into sentence pair classification, the authors of the benchmark construct sentence pairs by replacing the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the pronoun substituted is entailed by the original sentence. They use a small evaluation set consisting of new examples derived from fiction books that was shared privately by the authors of the original corpus. While the included training set is balanced between two classes, the test set is imbalanced between them (65% not entailment). Also, due to a data quirk, the development set is adversarial: hypotheses are sometimes shared between training and development examples, so if a model memorizes the training examples, it will predict the wrong label on the corresponding development set example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence between a model's score on this task and its score on the unconverted original task. The authors of the benchmark call the converted dataset WNLI (Winograd NLI).

### Languages

The language data in GLUE is in English (BCP-47 `en`).

## Dataset Structure

### Data Instances

#### ax

- **Size of downloaded dataset files:** 0.22 MB
- **Size of the generated dataset:** 0.24 MB
- **Total amount of disk used:** 0.46 MB

An example of 'test' looks as follows.
```
{
  "premise": "The cat sat on the mat.",
  "hypothesis": "The cat did not sit on the mat.",
  "label": -1,
  "idx": 0
}
```
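
A note on the `label` of `-1` above: the gold labels of the GLUE leaderboard test sets (including all of `ax`) are withheld, and the Hub copies fill them with `-1`. A minimal sketch of screening out these placeholders before computing metrics, assuming the `datasets` library:

```python
from datasets import load_dataset

ax_test = load_dataset("glue", "ax", split="test")

# label == -1 means the gold label is withheld; keep only labeled rows.
labeled = ax_test.filter(lambda example: example["label"] != -1)
print(len(ax_test), len(labeled))  # for ax, every test label is withheld
```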

#### cola

- **Size of downloaded dataset files:** 0.38 MB
- **Size of the generated dataset:** 0.61 MB
- **Total amount of disk used:** 0.99 MB

An example of 'train' looks as follows.
```
{
  "sentence": "Our friends won't buy this analysis, let alone the next one we propose.",
  "label": 1,
  "idx": 0
}
```

#### mnli

- **Size of downloaded dataset files:** 312.78 MB
- **Size of the generated dataset:** 82.47 MB
- **Total amount of disk used:** 395.26 MB

An example of 'train' looks as follows.
```
{
  "premise": "Conceptually cream skimming has two basic dimensions - product and geography.",
  "hypothesis": "Product and geography are what make cream skimming work.",
  "label": 1,
  "idx": 0
}
```

#### mnli_matched

- **Size of downloaded dataset files:** 312.78 MB
- **Size of the generated dataset:** 3.69 MB
- **Total amount of disk used:** 316.48 MB

An example of 'test' looks as follows.
```
{
  "premise": "Hierbas, ans seco, ans dulce, and frigola are just a few names worth keeping a look-out for.",
  "hypothesis": "Hierbas is a name worth looking out for.",
  "label": -1,
  "idx": 0
}
```

#### mnli_mismatched

- **Size of downloaded dataset files:** 312.78 MB
- **Size of the generated dataset:** 3.91 MB
- **Total amount of disk used:** 316.69 MB

An example of 'test' looks as follows.
```
{
  "premise": "What have you decided, what are you going to do?",
  "hypothesis": "So what's your decision?",
  "label": -1,
  "idx": 0
}
```

#### mrpc

- **Size of downloaded dataset files:** ??
- **Size of the generated dataset:** 1.5 MB
- **Total amount of disk used:** ??

An example of 'train' looks as follows.
```
{
  "sentence1": "Amrozi accused his brother, whom he called \"the witness\", of deliberately distorting his evidence.",
  "sentence2": "Referring to him as only \"the witness\", Amrozi accused his brother of deliberately distorting his evidence.",
  "label": 1,
  "idx": 0
}
```

#### qnli

- **Size of downloaded dataset files:** ??
- **Size of the generated dataset:** 28 MB
- **Total amount of disk used:** ??

An example of 'train' looks as follows.
```
{
  "question": "When did the third Digimon series begin?",
  "sentence": "Unlike the two seasons before it and most of the seasons that followed, Digimon Tamers takes a darker and more realistic approach to its story featuring Digimon who do not reincarnate after their deaths and more complex character development in the original Japanese.",
  "label": 1,
  "idx": 0
}
```

#### qqp

- **Size of downloaded dataset files:** ??
- **Size of the generated dataset:** 107 MB
- **Total amount of disk used:** ??

An example of 'train' looks as follows.
```
{
  "question1": "How is the life of a math student? Could you describe your own experiences?",
  "question2": "Which level of prepration is enough for the exam jlpt5?",
  "label": 0,
  "idx": 0
}
```

#### rte

- **Size of downloaded dataset files:** ??
- **Size of the generated dataset:** 1.9 MB
- **Total amount of disk used:** ??

An example of 'train' looks as follows.
```
{
  "sentence1": "No Weapons of Mass Destruction Found in Iraq Yet.",
  "sentence2": "Weapons of Mass Destruction Found in Iraq.",
  "label": 1,
  "idx": 0
}
```

#### sst2

- **Size of downloaded dataset files:** ??
- **Size of the generated dataset:** 4.9 MB
- **Total amount of disk used:** ??

An example of 'train' looks as follows.
```
{
  "sentence": "hide new secretions from the parental units",
  "label": 0,
  "idx": 0
}
```

#### stsb

- **Size of downloaded dataset files:** ??
- **Size of the generated dataset:** 1.2 MB
- **Total amount of disk used:** ??

An example of 'train' looks as follows.
```
{
  "sentence1": "A plane is taking off.",
  "sentence2": "An air plane is taking off.",
  "label": 5.0,
  "idx": 0
}
```

#### wnli

- **Size of downloaded dataset files:** ??
- **Size of the generated dataset:** 0.18 MB
- **Total amount of disk used:** ??

An example of 'train' looks as follows.
```
{
  "sentence1": "I stuck a pin through a carrot. When I pulled the pin out, it had a hole.",
  "sentence2": "The carrot had a hole.",
  "label": 1,
  "idx": 0
}
```

### Data Fields

The data fields are the same among all splits.

#### ax

- `premise`: a `string` feature.
- `hypothesis`: a `string` feature.
- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
- `idx`: an `int32` feature.

#### cola

- `sentence`: a `string` feature.
- `label`: a classification label, with possible values including `unacceptable` (0), `acceptable` (1).
- `idx`: an `int32` feature.

#### mnli

- `premise`: a `string` feature.
- `hypothesis`: a `string` feature.
- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
- `idx`: an `int32` feature.

#### mnli_matched

- `premise`: a `string` feature.
- `hypothesis`: a `string` feature.
- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
- `idx`: an `int32` feature.

#### mnli_mismatched

- `premise`: a `string` feature.
- `hypothesis`: a `string` feature.
- `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
- `idx`: an `int32` feature.

#### mrpc

- `sentence1`: a `string` feature.
- `sentence2`: a `string` feature.
- `label`: a classification label, with possible values including `not_equivalent` (0), `equivalent` (1).
- `idx`: an `int32` feature.

#### qnli

- `question`: a `string` feature.
- `sentence`: a `string` feature.
- `label`: a classification label, with possible values including `entailment` (0), `not_entailment` (1).
- `idx`: an `int32` feature.

#### qqp

- `question1`: a `string` feature.
- `question2`: a `string` feature.
- `label`: a classification label, with possible values including `not_duplicate` (0), `duplicate` (1).
- `idx`: an `int32` feature.

#### rte

- `sentence1`: a `string` feature.
- `sentence2`: a `string` feature.
- `label`: a classification label, with possible values including `entailment` (0), `not_entailment` (1).
- `idx`: an `int32` feature.

#### sst2

- `sentence`: a `string` feature.
- `label`: a classification label, with possible values including `negative` (0), `positive` (1).
- `idx`: an `int32` feature.

#### stsb

- `sentence1`: a `string` feature.
- `sentence2`: a `string` feature.
- `label`: a `float32` regression label, with possible values from 0 to 5.
- `idx`: an `int32` feature.

#### wnli

- `sentence1`: a `string` feature.
- `sentence2`: a `string` feature.
- `label`: a classification label, with possible values including `not_entailment` (0), `entailment` (1).
- `idx`: an `int32` feature.

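The integer class ids above can be mapped back to their string names through the `ClassLabel` feature; a short sketch, assuming the `datasets` library:

```python
from datasets import load_dataset

mnli = load_dataset("glue", "mnli", split="validation_matched")
label_feature = mnli.features["label"]

print(label_feature.names)               # ['entailment', 'neutral', 'contradiction']
print(label_feature.int2str(0))          # 'entailment'
print(label_feature.str2int("neutral"))  # 1
```
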
### Data Splits

#### ax

|   |test|
|---|---:|
|ax |1104|

#### cola

|    |train|validation|test|
|----|----:|---------:|---:|
|cola| 8551|      1043|1063|

#### mnli

|    |train |validation_matched|validation_mismatched|test_matched|test_mismatched|
|----|-----:|-----------------:|--------------------:|-----------:|--------------:|
|mnli|392702|              9815|                 9832|        9796|           9847|

#### mnli_matched

|            |validation|test|
|------------|---------:|---:|
|mnli_matched|      9815|9796|

#### mnli_mismatched

|               |validation|test|
|---------------|---------:|---:|
|mnli_mismatched|      9832|9847|

#### mrpc

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### qnli

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### qqp

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### rte

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### sst2

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### stsb

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### wnli

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

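For the configurations whose tables above still read "More Information Needed", the split sizes can be read directly off the loaded dataset; a quick sketch, assuming the `datasets` library:

```python
from datasets import load_dataset

mrpc = load_dataset("glue", "mrpc")

# A DatasetDict behaves like a dict of splits.
for split_name, split in mrpc.items():
    print(split_name, split.num_rows)  # train 3668, validation 408, test 1725
```
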
## Dataset Creation

### Curation Rationale

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### Who are the source language producers?

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Annotations

#### Annotation process

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

#### Who are the annotators?

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Personal and Sensitive Information

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Discussion of Biases

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Other Known Limitations

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

## Additional Information

### Dataset Curators

[More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)

### Licensing Information

The primary GLUE tasks are built on and derived from existing datasets. We refer users to the original licenses accompanying each dataset.

### Citation Information

If you use GLUE, please cite all the datasets you use.

In addition, we encourage you to use the following BibTeX citation for GLUE itself:
```
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
```

If you evaluate using GLUE, we also highly recommend citing the papers that originally introduced the nine GLUE tasks, both to give the original authors their due credit and because venues will expect papers to describe the data they evaluate on.
The following provides BibTeX for all of the GLUE tasks, except QQP, for which we recommend adding a footnote to this page: https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs
```
@article{warstadt2018neural,
  title={Neural Network Acceptability Judgments},
  author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R.},
  journal={arXiv preprint 1805.12471},
  year={2018}
}
@inproceedings{socher2013recursive,
  title={Recursive deep models for semantic compositionality over a sentiment treebank},
  author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
  booktitle={Proceedings of EMNLP},
  pages={1631--1642},
  year={2013}
}
@inproceedings{dolan2005automatically,
  title={Automatically constructing a corpus of sentential paraphrases},
  author={Dolan, William B and Brockett, Chris},
  booktitle={Proceedings of the International Workshop on Paraphrasing},
  year={2005}
}
@book{agirre2007semantic,
  editor={Agirre, Eneko and M\`{a}rquez, Llu\'{i}s and Wicentowski, Richard},
  title={Proceedings of the Fourth International Workshop on Semantic Evaluations (SemEval-2007)},
  month={June},
  year={2007},
  address={Prague, Czech Republic},
  publisher={Association for Computational Linguistics}
}
@inproceedings{williams2018broad,
  author={Williams, Adina and Nangia, Nikita and Bowman, Samuel R.},
  title={A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference},
  booktitle={Proceedings of NAACL-HLT},
  year={2018}
}
@inproceedings{rajpurkar2016squad,
  author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
  title={{SQ}u{AD}: 100,000+ Questions for Machine Comprehension of Text},
  booktitle={Proceedings of EMNLP},
  year={2016},
  publisher={Association for Computational Linguistics},
  pages={2383--2392},
  location={Austin, Texas}
}
@incollection{dagan2006pascal,
  title={The {PASCAL} recognising textual entailment challenge},
  author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
  booktitle={Machine Learning Challenges. Evaluating Predictive Uncertainty, Visual Object Classification, and Recognising Textual Entailment},
  pages={177--190},
  year={2006},
  publisher={Springer}
}
@article{bar2006second,
  title={The second {PASCAL} recognising textual entailment challenge},
  author={Bar Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
  year={2006}
}
@inproceedings{giampiccolo2007third,
  title={The third {PASCAL} recognizing textual entailment challenge},
  author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
  booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
  pages={1--9},
  year={2007},
  organization={Association for Computational Linguistics}
}
@inproceedings{bentivogli2009fifth,
  title={The Fifth {PASCAL} Recognizing Textual Entailment Challenge},
  author={Bentivogli, Luisa and Dagan, Ido and Dang, Hoa Trang and Giampiccolo, Danilo and Magnini, Bernardo},
  booktitle={TAC},
  year={2009}
}
@inproceedings{levesque2011winograd,
  title={The {W}inograd schema challenge},
  author={Levesque, Hector J and Davis, Ernest and Morgenstern, Leora},
  booktitle={{AAAI} Spring Symposium: Logical Formalizations of Commonsense Reasoning},
  volume={46},
  pages={47},
  year={2011}
}
```

### Contributions

Thanks to [@patpizio](https://github.com/patpizio), [@jeswan](https://github.com/jeswan), [@thomwolf](https://github.com/thomwolf), [@patrickvonplaten](https://github.com/patrickvonplaten), [@mariamabarham](https://github.com/mariamabarham) for adding this dataset.
ax/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a07b802fe2d4968a1f7ccce9406826dc77e0d1dc53fea9491664bd8ebba8571a
size 80767
cola/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3c4d526b6f49f432621de43569f9ecf6af41f639baaf4a9d821b95d745def61d
size 37719
cola/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2e7538afa2000e63f5343f16a758d75c452661a384208399d2035cd2fce45c33
size 251124
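
Each of these three-line files is a Git LFS pointer (a `version` line, the SHA-256 of the stored object, and its size in bytes) rather than the Parquet bytes themselves, which live in LFS storage. Once the files are materialized (e.g. via `git lfs pull`), a shard can be read directly; a sketch with pandas, assuming a Parquet engine such as pyarrow is installed:

```python
import pandas as pd

# Read one resolved Parquet shard into a DataFrame.
df = pd.read_parquet("cola/train-00000-of-00001.parquet")
print(df.columns.tolist())  # expected: ['sentence', 'label', 'idx']
print(len(df))              # 8551 training examples
```
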
cola/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c14b7219a7d9f9fe3dd291fd000f6623ee413805eb108c9c49578ed50873e4ba
size 37551
mnli/test_matched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a330c4f2aeb0bc92f1b4b133fbbaf51bf9c7d0f5cac3d06f49ef63af47dbb822
size 1220119
mnli/test_mismatched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5078398d5c83d183578b1bdafe94e4491ed28ad1cf8d98ee8846afcec651f16
size 1257857
mnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:49a4a5508b89b8fed2c6e81d2c47d00f4759050a7048c6cc5d95d31122ced3c1
size 52224361
mnli/validation_matched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f918c09d9c35446b8e8f06a5672f8ab704e2897fecbf52e2e154141f3d7c421
size 1214936
mnli/validation_mismatched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:04aba92823a954be36fe1b69b61eed334c9eb1009daba0dd79f69d77b87c535c
size 1251152
mnli_matched/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a330c4f2aeb0bc92f1b4b133fbbaf51bf9c7d0f5cac3d06f49ef63af47dbb822
size 1220119
mnli_matched/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f918c09d9c35446b8e8f06a5672f8ab704e2897fecbf52e2e154141f3d7c421
size 1214936
mnli_mismatched/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5078398d5c83d183578b1bdafe94e4491ed28ad1cf8d98ee8846afcec651f16
size 1257857
mnli_mismatched/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:04aba92823a954be36fe1b69b61eed334c9eb1009daba0dd79f69d77b87c535c
size 1251152
mrpc/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a623ed1cbdf445b11f8e249acbf649d7d3a5ee58c918554c40cbd8307e488693
size 308441
mrpc/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:61fd41301e0e244b0420c4350a170c8e7cf64740335fc875a4af2d79af0df0af
size 649281
mrpc/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:33c007dbf5bfa8463d87a13e6226df8c0fcf2596c2cd39d0f3bb79754e00f50f
size 75678
qnli/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f39520cd07925c9784e4a7f1f7aed8f17f136039b8498f7ad07c7bf13d65ba83
size 877345
qnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ebc7cb70a5bbde0b0336c3d51f31bb4df4673e908e8874b090b52169b1365c6c
size 17528917
qnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e69311b81dc65589286091d9905a27617a90436dd215c7a59832fa8f4f336169
size 872062
qqp/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:95d5d1efcfa3ff7e090565e98085770b3497aad8dbcf12996412b23d2fb669e8
size 36694152
qqp/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d6f02e643f7c36e9a4f7d4971a5ee9bd74063a319452fe6c87850c739774cd7
size 33558839
qqp/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:efd86a539c412d74874ee451573d7bd142f56c47fe36de033b9f367d8bb0fa71
size 3729274
rte/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3f44aadbfb8bbb7a64ba0674bd26ff77b66e88fdf7a6d64255a5ba6ae9057383
size 621413
rte/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a6252ab17015d718f6de1effe0980f7b158df63e3d16207cd8bd396b608e5147
size 583976
rte/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fb2aa2e04f551133ba663617a15ae133dc22b0f6a969bc0629b5ea6003ee9cf8
size 69020
sst2/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e9d23cf0067211d2baf018328b507f5153fb6704d75117295a8bda47c7adccb1
size 147793
sst2/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:66a253e67968acfabcbe49dbe9da964b42ac1c851c40ab760e8c8942efdb3229
size 3110468
sst2/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a1371f3b3a7b0bcefa8388799a9359dc3ce76c349cc0079507a7991364fd2a9b
size 72819
stsb/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:04fa2561f1ff3c395cf8980e3eed5d133c194abf636d5e1870d765c861087bd9
size 114296
stsb/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bbd93bbb988fd18437e02185fe3b2bd9a18350376c392e7820de9df1b247ed1f
size 502065
stsb/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:152de7cf1fa34ee4df1c243bd209b02ade21a1d5c4fb3b7da5240f78e4000aa9
size 150622
wnli/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:766d3754c46a80f3275cb81a32ee6b7b49176fa8c1ef85ea92a4a3676510b902
size 13620
wnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:40f4c0c60db68addeda8e9cbe25e6344cd99d5bbb80125535994a9a3141ee0a9
size 38835
wnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:880037e45e03df868d5799ca21dc03f3a6378f0adf3c01c7bfc46b94fa61f1cb
size 11067