Kaveri parquet-converter committed on
Commit
b7a20e2
·
0 Parent(s):

Duplicate from facebook/xnli

Browse files

Co-authored-by: Parquet-converter (BOT) <parquet-converter@users.noreply.huggingface.co>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +27 -0
  2. README.md +842 -0
  3. all_languages/test-00000-of-00001.parquet +3 -0
  4. all_languages/train-00000-of-00004.parquet +3 -0
  5. all_languages/train-00001-of-00004.parquet +3 -0
  6. all_languages/train-00002-of-00004.parquet +3 -0
  7. all_languages/train-00003-of-00004.parquet +3 -0
  8. all_languages/validation-00000-of-00001.parquet +3 -0
  9. ar/test-00000-of-00001.parquet +3 -0
  10. ar/train-00000-of-00001.parquet +3 -0
  11. ar/validation-00000-of-00001.parquet +3 -0
  12. bg/test-00000-of-00001.parquet +3 -0
  13. bg/train-00000-of-00001.parquet +3 -0
  14. bg/validation-00000-of-00001.parquet +3 -0
  15. de/test-00000-of-00001.parquet +3 -0
  16. de/train-00000-of-00001.parquet +3 -0
  17. de/validation-00000-of-00001.parquet +3 -0
  18. el/test-00000-of-00001.parquet +3 -0
  19. el/train-00000-of-00001.parquet +3 -0
  20. el/validation-00000-of-00001.parquet +3 -0
  21. en/test-00000-of-00001.parquet +3 -0
  22. en/train-00000-of-00001.parquet +3 -0
  23. en/validation-00000-of-00001.parquet +3 -0
  24. es/test-00000-of-00001.parquet +3 -0
  25. es/train-00000-of-00001.parquet +3 -0
  26. es/validation-00000-of-00001.parquet +3 -0
  27. fr/test-00000-of-00001.parquet +3 -0
  28. fr/train-00000-of-00001.parquet +3 -0
  29. fr/validation-00000-of-00001.parquet +3 -0
  30. hi/test-00000-of-00001.parquet +3 -0
  31. hi/train-00000-of-00001.parquet +3 -0
  32. hi/validation-00000-of-00001.parquet +3 -0
  33. ru/test-00000-of-00001.parquet +3 -0
  34. ru/train-00000-of-00001.parquet +3 -0
  35. ru/validation-00000-of-00001.parquet +3 -0
  36. sw/test-00000-of-00001.parquet +3 -0
  37. sw/train-00000-of-00001.parquet +3 -0
  38. sw/validation-00000-of-00001.parquet +3 -0
  39. th/test-00000-of-00001.parquet +3 -0
  40. th/train-00000-of-00001.parquet +3 -0
  41. th/validation-00000-of-00001.parquet +3 -0
  42. tr/test-00000-of-00001.parquet +3 -0
  43. tr/train-00000-of-00001.parquet +3 -0
  44. tr/validation-00000-of-00001.parquet +3 -0
  45. ur/test-00000-of-00001.parquet +3 -0
  46. ur/train-00000-of-00001.parquet +3 -0
  47. ur/validation-00000-of-00001.parquet +3 -0
  48. vi/test-00000-of-00001.parquet +3 -0
  49. vi/train-00000-of-00001.parquet +3 -0
  50. vi/validation-00000-of-00001.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,842 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - ar
4
+ - bg
5
+ - de
6
+ - el
7
+ - en
8
+ - es
9
+ - fr
10
+ - hi
11
+ - ru
12
+ - sw
13
+ - th
14
+ - tr
15
+ - ur
16
+ - vi
17
+ - zh
18
+ paperswithcode_id: xnli
19
+ pretty_name: Cross-lingual Natural Language Inference
20
+ dataset_info:
21
+ - config_name: all_languages
22
+ features:
23
+ - name: premise
24
+ dtype:
25
+ translation:
26
+ languages:
27
+ - ar
28
+ - bg
29
+ - de
30
+ - el
31
+ - en
32
+ - es
33
+ - fr
34
+ - hi
35
+ - ru
36
+ - sw
37
+ - th
38
+ - tr
39
+ - ur
40
+ - vi
41
+ - zh
42
+ - name: hypothesis
43
+ dtype:
44
+ translation_variable_languages:
45
+ languages:
46
+ - ar
47
+ - bg
48
+ - de
49
+ - el
50
+ - en
51
+ - es
52
+ - fr
53
+ - hi
54
+ - ru
55
+ - sw
56
+ - th
57
+ - tr
58
+ - ur
59
+ - vi
60
+ - zh
61
+ num_languages: 15
62
+ - name: label
63
+ dtype:
64
+ class_label:
65
+ names:
66
+ '0': entailment
67
+ '1': neutral
68
+ '2': contradiction
69
+ splits:
70
+ - name: train
71
+ num_bytes: 1581471691
72
+ num_examples: 392702
73
+ - name: test
74
+ num_bytes: 19387432
75
+ num_examples: 5010
76
+ - name: validation
77
+ num_bytes: 9566179
78
+ num_examples: 2490
79
+ download_size: 963942271
80
+ dataset_size: 1610425302
81
+ - config_name: ar
82
+ features:
83
+ - name: premise
84
+ dtype: string
85
+ - name: hypothesis
86
+ dtype: string
87
+ - name: label
88
+ dtype:
89
+ class_label:
90
+ names:
91
+ '0': entailment
92
+ '1': neutral
93
+ '2': contradiction
94
+ splits:
95
+ - name: train
96
+ num_bytes: 107399614
97
+ num_examples: 392702
98
+ - name: test
99
+ num_bytes: 1294553
100
+ num_examples: 5010
101
+ - name: validation
102
+ num_bytes: 633001
103
+ num_examples: 2490
104
+ download_size: 59215902
105
+ dataset_size: 109327168
106
+ - config_name: bg
107
+ features:
108
+ - name: premise
109
+ dtype: string
110
+ - name: hypothesis
111
+ dtype: string
112
+ - name: label
113
+ dtype:
114
+ class_label:
115
+ names:
116
+ '0': entailment
117
+ '1': neutral
118
+ '2': contradiction
119
+ splits:
120
+ - name: train
121
+ num_bytes: 125973225
122
+ num_examples: 392702
123
+ - name: test
124
+ num_bytes: 1573034
125
+ num_examples: 5010
126
+ - name: validation
127
+ num_bytes: 774061
128
+ num_examples: 2490
129
+ download_size: 66117878
130
+ dataset_size: 128320320
131
+ - config_name: de
132
+ features:
133
+ - name: premise
134
+ dtype: string
135
+ - name: hypothesis
136
+ dtype: string
137
+ - name: label
138
+ dtype:
139
+ class_label:
140
+ names:
141
+ '0': entailment
142
+ '1': neutral
143
+ '2': contradiction
144
+ splits:
145
+ - name: train
146
+ num_bytes: 84684140
147
+ num_examples: 392702
148
+ - name: test
149
+ num_bytes: 996488
150
+ num_examples: 5010
151
+ - name: validation
152
+ num_bytes: 494604
153
+ num_examples: 2490
154
+ download_size: 55973883
155
+ dataset_size: 86175232
156
+ - config_name: el
157
+ features:
158
+ - name: premise
159
+ dtype: string
160
+ - name: hypothesis
161
+ dtype: string
162
+ - name: label
163
+ dtype:
164
+ class_label:
165
+ names:
166
+ '0': entailment
167
+ '1': neutral
168
+ '2': contradiction
169
+ splits:
170
+ - name: train
171
+ num_bytes: 139753358
172
+ num_examples: 392702
173
+ - name: test
174
+ num_bytes: 1704785
175
+ num_examples: 5010
176
+ - name: validation
177
+ num_bytes: 841226
178
+ num_examples: 2490
179
+ download_size: 74551247
180
+ dataset_size: 142299369
181
+ - config_name: en
182
+ features:
183
+ - name: premise
184
+ dtype: string
185
+ - name: hypothesis
186
+ dtype: string
187
+ - name: label
188
+ dtype:
189
+ class_label:
190
+ names:
191
+ '0': entailment
192
+ '1': neutral
193
+ '2': contradiction
194
+ splits:
195
+ - name: train
196
+ num_bytes: 74444026
197
+ num_examples: 392702
198
+ - name: test
199
+ num_bytes: 875134
200
+ num_examples: 5010
201
+ - name: validation
202
+ num_bytes: 433463
203
+ num_examples: 2490
204
+ download_size: 50627367
205
+ dataset_size: 75752623
206
+ - config_name: es
207
+ features:
208
+ - name: premise
209
+ dtype: string
210
+ - name: hypothesis
211
+ dtype: string
212
+ - name: label
213
+ dtype:
214
+ class_label:
215
+ names:
216
+ '0': entailment
217
+ '1': neutral
218
+ '2': contradiction
219
+ splits:
220
+ - name: train
221
+ num_bytes: 81383284
222
+ num_examples: 392702
223
+ - name: test
224
+ num_bytes: 969813
225
+ num_examples: 5010
226
+ - name: validation
227
+ num_bytes: 478422
228
+ num_examples: 2490
229
+ download_size: 53677157
230
+ dataset_size: 82831519
231
+ - config_name: fr
232
+ features:
233
+ - name: premise
234
+ dtype: string
235
+ - name: hypothesis
236
+ dtype: string
237
+ - name: label
238
+ dtype:
239
+ class_label:
240
+ names:
241
+ '0': entailment
242
+ '1': neutral
243
+ '2': contradiction
244
+ splits:
245
+ - name: train
246
+ num_bytes: 85808779
247
+ num_examples: 392702
248
+ - name: test
249
+ num_bytes: 1029239
250
+ num_examples: 5010
251
+ - name: validation
252
+ num_bytes: 510104
253
+ num_examples: 2490
254
+ download_size: 55968680
255
+ dataset_size: 87348122
256
+ - config_name: hi
257
+ features:
258
+ - name: premise
259
+ dtype: string
260
+ - name: hypothesis
261
+ dtype: string
262
+ - name: label
263
+ dtype:
264
+ class_label:
265
+ names:
266
+ '0': entailment
267
+ '1': neutral
268
+ '2': contradiction
269
+ splits:
270
+ - name: train
271
+ num_bytes: 170593964
272
+ num_examples: 392702
273
+ - name: test
274
+ num_bytes: 2073073
275
+ num_examples: 5010
276
+ - name: validation
277
+ num_bytes: 1023915
278
+ num_examples: 2490
279
+ download_size: 70908548
280
+ dataset_size: 173690952
281
+ - config_name: ru
282
+ features:
283
+ - name: premise
284
+ dtype: string
285
+ - name: hypothesis
286
+ dtype: string
287
+ - name: label
288
+ dtype:
289
+ class_label:
290
+ names:
291
+ '0': entailment
292
+ '1': neutral
293
+ '2': contradiction
294
+ splits:
295
+ - name: train
296
+ num_bytes: 129859615
297
+ num_examples: 392702
298
+ - name: test
299
+ num_bytes: 1603466
300
+ num_examples: 5010
301
+ - name: validation
302
+ num_bytes: 786442
303
+ num_examples: 2490
304
+ download_size: 70702606
305
+ dataset_size: 132249523
306
+ - config_name: sw
307
+ features:
308
+ - name: premise
309
+ dtype: string
310
+ - name: hypothesis
311
+ dtype: string
312
+ - name: label
313
+ dtype:
314
+ class_label:
315
+ names:
316
+ '0': entailment
317
+ '1': neutral
318
+ '2': contradiction
319
+ splits:
320
+ - name: train
321
+ num_bytes: 69285725
322
+ num_examples: 392702
323
+ - name: test
324
+ num_bytes: 871651
325
+ num_examples: 5010
326
+ - name: validation
327
+ num_bytes: 429850
328
+ num_examples: 2490
329
+ download_size: 45564152
330
+ dataset_size: 70587226
331
+ - config_name: th
332
+ features:
333
+ - name: premise
334
+ dtype: string
335
+ - name: hypothesis
336
+ dtype: string
337
+ - name: label
338
+ dtype:
339
+ class_label:
340
+ names:
341
+ '0': entailment
342
+ '1': neutral
343
+ '2': contradiction
344
+ splits:
345
+ - name: train
346
+ num_bytes: 176062892
347
+ num_examples: 392702
348
+ - name: test
349
+ num_bytes: 2147015
350
+ num_examples: 5010
351
+ - name: validation
352
+ num_bytes: 1061160
353
+ num_examples: 2490
354
+ download_size: 77222045
355
+ dataset_size: 179271067
356
+ - config_name: tr
357
+ features:
358
+ - name: premise
359
+ dtype: string
360
+ - name: hypothesis
361
+ dtype: string
362
+ - name: label
363
+ dtype:
364
+ class_label:
365
+ names:
366
+ '0': entailment
367
+ '1': neutral
368
+ '2': contradiction
369
+ splits:
370
+ - name: train
371
+ num_bytes: 71637140
372
+ num_examples: 392702
373
+ - name: test
374
+ num_bytes: 934934
375
+ num_examples: 5010
376
+ - name: validation
377
+ num_bytes: 459308
378
+ num_examples: 2490
379
+ download_size: 48509680
380
+ dataset_size: 73031382
381
+ - config_name: ur
382
+ features:
383
+ - name: premise
384
+ dtype: string
385
+ - name: hypothesis
386
+ dtype: string
387
+ - name: label
388
+ dtype:
389
+ class_label:
390
+ names:
391
+ '0': entailment
392
+ '1': neutral
393
+ '2': contradiction
394
+ splits:
395
+ - name: train
396
+ num_bytes: 96441486
397
+ num_examples: 392702
398
+ - name: test
399
+ num_bytes: 1416241
400
+ num_examples: 5010
401
+ - name: validation
402
+ num_bytes: 699952
403
+ num_examples: 2490
404
+ download_size: 46682785
405
+ dataset_size: 98557679
406
+ - config_name: vi
407
+ features:
408
+ - name: premise
409
+ dtype: string
410
+ - name: hypothesis
411
+ dtype: string
412
+ - name: label
413
+ dtype:
414
+ class_label:
415
+ names:
416
+ '0': entailment
417
+ '1': neutral
418
+ '2': contradiction
419
+ splits:
420
+ - name: train
421
+ num_bytes: 101417430
422
+ num_examples: 392702
423
+ - name: test
424
+ num_bytes: 1190217
425
+ num_examples: 5010
426
+ - name: validation
427
+ num_bytes: 590680
428
+ num_examples: 2490
429
+ download_size: 57690058
430
+ dataset_size: 103198327
431
+ - config_name: zh
432
+ features:
433
+ - name: premise
434
+ dtype: string
435
+ - name: hypothesis
436
+ dtype: string
437
+ - name: label
438
+ dtype:
439
+ class_label:
440
+ names:
441
+ '0': entailment
442
+ '1': neutral
443
+ '2': contradiction
444
+ splits:
445
+ - name: train
446
+ num_bytes: 72224841
447
+ num_examples: 392702
448
+ - name: test
449
+ num_bytes: 777929
450
+ num_examples: 5010
451
+ - name: validation
452
+ num_bytes: 384851
453
+ num_examples: 2490
454
+ download_size: 48269855
455
+ dataset_size: 73387621
456
+ configs:
457
+ - config_name: all_languages
458
+ data_files:
459
+ - split: train
460
+ path: all_languages/train-*
461
+ - split: test
462
+ path: all_languages/test-*
463
+ - split: validation
464
+ path: all_languages/validation-*
465
+ - config_name: ar
466
+ data_files:
467
+ - split: train
468
+ path: ar/train-*
469
+ - split: test
470
+ path: ar/test-*
471
+ - split: validation
472
+ path: ar/validation-*
473
+ - config_name: bg
474
+ data_files:
475
+ - split: train
476
+ path: bg/train-*
477
+ - split: test
478
+ path: bg/test-*
479
+ - split: validation
480
+ path: bg/validation-*
481
+ - config_name: de
482
+ data_files:
483
+ - split: train
484
+ path: de/train-*
485
+ - split: test
486
+ path: de/test-*
487
+ - split: validation
488
+ path: de/validation-*
489
+ - config_name: el
490
+ data_files:
491
+ - split: train
492
+ path: el/train-*
493
+ - split: test
494
+ path: el/test-*
495
+ - split: validation
496
+ path: el/validation-*
497
+ - config_name: en
498
+ data_files:
499
+ - split: train
500
+ path: en/train-*
501
+ - split: test
502
+ path: en/test-*
503
+ - split: validation
504
+ path: en/validation-*
505
+ - config_name: es
506
+ data_files:
507
+ - split: train
508
+ path: es/train-*
509
+ - split: test
510
+ path: es/test-*
511
+ - split: validation
512
+ path: es/validation-*
513
+ - config_name: fr
514
+ data_files:
515
+ - split: train
516
+ path: fr/train-*
517
+ - split: test
518
+ path: fr/test-*
519
+ - split: validation
520
+ path: fr/validation-*
521
+ - config_name: hi
522
+ data_files:
523
+ - split: train
524
+ path: hi/train-*
525
+ - split: test
526
+ path: hi/test-*
527
+ - split: validation
528
+ path: hi/validation-*
529
+ - config_name: ru
530
+ data_files:
531
+ - split: train
532
+ path: ru/train-*
533
+ - split: test
534
+ path: ru/test-*
535
+ - split: validation
536
+ path: ru/validation-*
537
+ - config_name: sw
538
+ data_files:
539
+ - split: train
540
+ path: sw/train-*
541
+ - split: test
542
+ path: sw/test-*
543
+ - split: validation
544
+ path: sw/validation-*
545
+ - config_name: th
546
+ data_files:
547
+ - split: train
548
+ path: th/train-*
549
+ - split: test
550
+ path: th/test-*
551
+ - split: validation
552
+ path: th/validation-*
553
+ - config_name: tr
554
+ data_files:
555
+ - split: train
556
+ path: tr/train-*
557
+ - split: test
558
+ path: tr/test-*
559
+ - split: validation
560
+ path: tr/validation-*
561
+ - config_name: ur
562
+ data_files:
563
+ - split: train
564
+ path: ur/train-*
565
+ - split: test
566
+ path: ur/test-*
567
+ - split: validation
568
+ path: ur/validation-*
569
+ - config_name: vi
570
+ data_files:
571
+ - split: train
572
+ path: vi/train-*
573
+ - split: test
574
+ path: vi/test-*
575
+ - split: validation
576
+ path: vi/validation-*
577
+ - config_name: zh
578
+ data_files:
579
+ - split: train
580
+ path: zh/train-*
581
+ - split: test
582
+ path: zh/test-*
583
+ - split: validation
584
+ path: zh/validation-*
585
+ ---
586
+
587
+ # Dataset Card for "xnli"
588
+
589
+ ## Table of Contents
590
+ - [Dataset Description](#dataset-description)
591
+ - [Dataset Summary](#dataset-summary)
592
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
593
+ - [Languages](#languages)
594
+ - [Dataset Structure](#dataset-structure)
595
+ - [Data Instances](#data-instances)
596
+ - [Data Fields](#data-fields)
597
+ - [Data Splits](#data-splits)
598
+ - [Dataset Creation](#dataset-creation)
599
+ - [Curation Rationale](#curation-rationale)
600
+ - [Source Data](#source-data)
601
+ - [Annotations](#annotations)
602
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
603
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
604
+ - [Social Impact of Dataset](#social-impact-of-dataset)
605
+ - [Discussion of Biases](#discussion-of-biases)
606
+ - [Other Known Limitations](#other-known-limitations)
607
+ - [Additional Information](#additional-information)
608
+ - [Dataset Curators](#dataset-curators)
609
+ - [Licensing Information](#licensing-information)
610
+ - [Citation Information](#citation-information)
611
+ - [Contributions](#contributions)
612
+
613
+ ## Dataset Description
614
+
615
+ - **Homepage:** [https://www.nyu.edu/projects/bowman/xnli/](https://www.nyu.edu/projects/bowman/xnli/)
616
+ - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
617
+ - **Paper:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
618
+ - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
619
+ - **Size of downloaded dataset files:** 7.74 GB
620
+ - **Size of the generated dataset:** 3.23 GB
621
+ - **Total amount of disk used:** 10.97 GB
622
+
623
+ ### Dataset Summary
624
+
625
+ XNLI is a subset of a few thousand examples from MNLI which has been translated
626
+ into 14 different languages (some low-ish resource). As with MNLI, the goal is
627
+ to predict textual entailment (does sentence A imply/contradict/neither sentence
628
+ B) and is a classification task (given two sentences, predict one of three
629
+ labels).
630
+
631
+ ### Supported Tasks and Leaderboards
632
+
633
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
634
+
635
+ ### Languages
636
+
637
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
638
+
639
+ ## Dataset Structure
640
+
641
+ ### Data Instances
642
+
643
+ #### all_languages
644
+
645
+ - **Size of downloaded dataset files:** 483.96 MB
646
+ - **Size of the generated dataset:** 1.61 GB
647
+ - **Total amount of disk used:** 2.09 GB
648
+
649
+ An example of 'train' looks as follows.
650
+ ```
651
+ This example was too long and was cropped:
652
+
653
+ {
654
+ "hypothesis": "{\"language\": [\"ar\", \"bg\", \"de\", \"el\", \"en\", \"es\", \"fr\", \"hi\", \"ru\", \"sw\", \"th\", \"tr\", \"ur\", \"vi\", \"zh\"], \"translation\": [\"احد اع...",
655
+ "label": 0,
656
+ "premise": "{\"ar\": \"واحدة من رقابنا ستقوم بتنفيذ تعليماتك كلها بكل دقة\", \"bg\": \"един от нашите номера ще ви даде инструкции .\", \"de\": \"Eine ..."
657
+ }
658
+ ```
659
+
660
+ #### ar
661
+
662
+ - **Size of downloaded dataset files:** 483.96 MB
663
+ - **Size of the generated dataset:** 109.32 MB
664
+ - **Total amount of disk used:** 593.29 MB
665
+
666
+ An example of 'validation' looks as follows.
667
+ ```
668
+ {
669
+ "hypothesis": "اتصل بأمه حالما أوصلته حافلة الم��رسية.",
670
+ "label": 1,
671
+ "premise": "وقال، ماما، لقد عدت للمنزل."
672
+ }
673
+ ```
674
+
675
+ #### bg
676
+
677
+ - **Size of downloaded dataset files:** 483.96 MB
678
+ - **Size of the generated dataset:** 128.32 MB
679
+ - **Total amount of disk used:** 612.28 MB
680
+
681
+ An example of 'train' looks as follows.
682
+ ```
683
+ This example was too long and was cropped:
684
+
685
+ {
686
+ "hypothesis": "\"губиш нещата на следното ниво , ако хората си припомнят .\"...",
687
+ "label": 0,
688
+ "premise": "\"по време на сезона и предполагам , че на твоето ниво ще ги загубиш на следващото ниво , ако те решат да си припомнят отбора на ..."
689
+ }
690
+ ```
691
+
692
+ #### de
693
+
694
+ - **Size of downloaded dataset files:** 483.96 MB
695
+ - **Size of the generated dataset:** 86.17 MB
696
+ - **Total amount of disk used:** 570.14 MB
697
+
698
+ An example of 'train' looks as follows.
699
+ ```
700
+ This example was too long and was cropped:
701
+
702
+ {
703
+ "hypothesis": "Man verliert die Dinge auf die folgende Ebene , wenn sich die Leute erinnern .",
704
+ "label": 0,
705
+ "premise": "\"Du weißt , während der Saison und ich schätze , auf deiner Ebene verlierst du sie auf die nächste Ebene , wenn sie sich entschl..."
706
+ }
707
+ ```
708
+
709
+ #### el
710
+
711
+ - **Size of downloaded dataset files:** 483.96 MB
712
+ - **Size of the generated dataset:** 142.30 MB
713
+ - **Total amount of disk used:** 626.26 MB
714
+
715
+ An example of 'validation' looks as follows.
716
+ ```
717
+ This example was too long and was cropped:
718
+
719
+ {
720
+ "hypothesis": "\"Τηλεφώνησε στη μαμά του μόλις το σχολικό λεωφορείο τον άφησε.\"...",
721
+ "label": 1,
722
+ "premise": "Και είπε, Μαμά, έφτασα στο σπίτι."
723
+ }
724
+ ```
725
+
726
+ ### Data Fields
727
+
728
+ The data fields are the same among all splits.
729
+
730
+ #### all_languages
731
+ - `premise`: a multilingual `string` variable, with possible languages including `ar`, `bg`, `de`, `el`, `en`.
732
+ - `hypothesis`: a multilingual `string` variable, with possible languages including `ar`, `bg`, `de`, `el`, `en`.
733
+ - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
734
+
735
+ #### ar
736
+ - `premise`: a `string` feature.
737
+ - `hypothesis`: a `string` feature.
738
+ - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
739
+
740
+ #### bg
741
+ - `premise`: a `string` feature.
742
+ - `hypothesis`: a `string` feature.
743
+ - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
744
+
745
+ #### de
746
+ - `premise`: a `string` feature.
747
+ - `hypothesis`: a `string` feature.
748
+ - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
749
+
750
+ #### el
751
+ - `premise`: a `string` feature.
752
+ - `hypothesis`: a `string` feature.
753
+ - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
754
+
755
+ ### Data Splits
756
+
757
+ | name |train |validation|test|
758
+ |-------------|-----:|---------:|---:|
759
+ |all_languages|392702| 2490|5010|
760
+ |ar |392702| 2490|5010|
761
+ |bg |392702| 2490|5010|
762
+ |de |392702| 2490|5010|
763
+ |el |392702| 2490|5010|
764
+
765
+ ## Dataset Creation
766
+
767
+ ### Curation Rationale
768
+
769
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
770
+
771
+ ### Source Data
772
+
773
+ #### Initial Data Collection and Normalization
774
+
775
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
776
+
777
+ #### Who are the source language producers?
778
+
779
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
780
+
781
+ ### Annotations
782
+
783
+ #### Annotation process
784
+
785
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
786
+
787
+ #### Who are the annotators?
788
+
789
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
790
+
791
+ ### Personal and Sensitive Information
792
+
793
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
794
+
795
+ ## Considerations for Using the Data
796
+
797
+ ### Social Impact of Dataset
798
+
799
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
800
+
801
+ ### Discussion of Biases
802
+
803
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
804
+
805
+ ### Other Known Limitations
806
+
807
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
808
+
809
+ ## Additional Information
810
+
811
+ ### Dataset Curators
812
+
813
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
814
+
815
+ ### Licensing Information
816
+
817
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
818
+
819
+ ### Citation Information
820
+
821
+ ```
822
+ @InProceedings{conneau2018xnli,
823
+ author = {Conneau, Alexis
824
+ and Rinott, Ruty
825
+ and Lample, Guillaume
826
+ and Williams, Adina
827
+ and Bowman, Samuel R.
828
+ and Schwenk, Holger
829
+ and Stoyanov, Veselin},
830
+ title = {XNLI: Evaluating Cross-lingual Sentence Representations},
831
+ booktitle = {Proceedings of the 2018 Conference on Empirical Methods
832
+ in Natural Language Processing},
833
+ year = {2018},
834
+ publisher = {Association for Computational Linguistics},
835
+ location = {Brussels, Belgium},
836
+ }
837
+ ```
838
+
839
+
840
+ ### Contributions
841
+
842
+ Thanks to [@lewtun](https://github.com/lewtun), [@mariamabarham](https://github.com/mariamabarham), [@thomwolf](https://github.com/thomwolf), [@lhoestq](https://github.com/lhoestq), [@patrickvonplaten](https://github.com/patrickvonplaten) for adding this dataset.
all_languages/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:599e3a0191403f19cbe802afdf69841152000b41eaed725e4f463d432c0ffb49
3
+ size 6769722
all_languages/train-00000-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:beeb34bbb7779413d78c09f5746baf05b7a79ab37ff063f882f201412c437ddb
3
+ size 237658468
all_languages/train-00001-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f3365b09a00408fa6b1de52e14f915decedab864376a179cc5f65937044a2c0
3
+ size 238583683
all_languages/train-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:340d804e040607dee5c5b0581ccfd390901255129c4bfb4f81e9e7605b76a0d7
3
+ size 238115767
all_languages/train-00003-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d05011ebb98705d0cbc5482fbc555f146dbc4e02890efa28e7774748539f5737
3
+ size 239422128
all_languages/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5e6263b0872a3914c9bc165bfe3883e433aa2066c3fa3b9d142829a9b122518
3
+ size 3392503
ar/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89521231aa5f8404c46fcc5d421a9453819ca48bb99590680fa31fb8c82cf8bd
3
+ size 391980
ar/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a27850bd0e20411f7c08e7c2247413c0050669090ef23cb5263d138be937e89
3
+ size 58630165
ar/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8df098db4682a6a97dc4a08be518b60e58112f0e32df2d4c4c933e34db577cd3
3
+ size 193757
bg/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:333a5e84e09415f6a80dd285c4aa64d164bf721237b3e3d34892ce72066c92c1
3
+ size 447341
bg/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df65135e5813d4a42b3fd8e018ebfaecd981f7c313bbcfd7288e2019f0f4296c
3
+ size 65447048
bg/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfd1a75d8b1c82b97d857aa91cd9bf08e75d1ea58ab43109c2644e82079ac981
3
+ size 223489
de/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c25f5695d21673dbe966bac7b3c6a1e2df369431976928565c3cc56838f5632b
3
+ size 356132
de/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72b2d2adf469509cc6917213feb9ff17bc1919889e3f0dbe848e01660602ec7b
3
+ size 55436761
de/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68f6b30f13ba0fb92acf2567edd7cb1ec26a6b8b38c2274eb6f140fb7075f66e
3
+ size 180990
el/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:971110ccb25026cd75d8f8f0e0444877f8e9da7425b55be5fb97de74f9276e5b
3
+ size 489529
el/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2663e218de5250a1cc62c32121807175a4453a4102b1e99cb13d20399d911d87
3
+ size 73814854
el/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d173137cc27825fb0cb1dea81310141a5206692d78c150e28d23dac6f3f30e8e
3
+ size 246864
en/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f0fd1e105091e0f11cd37f9b2bc382f16de9949aa9471e1366b2605ba037167
3
+ size 308237
en/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6be63cec9ec932a5790de82caee7ee4d00e5a50ef19b79f23b740732da143424
3
+ size 50161923
en/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:42961a7b66dee147da655d78db8cbaac97b3dbcf898009acffec6cda2d80f2bf
3
+ size 157207
es/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0159c9a734d7d0683d20a9850adcf3e214a49179b6517fea5868744bfd99886
3
+ size 341925
es/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f609c655a0c200168235609e7709bcf3ac48a465b4c6e90380093862a53a3a2a
3
+ size 53162139
es/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1913c1256f5b8761fba3f734e260d0b5e228a02ee31a8651ee915261cdf7c631
3
+ size 173093
fr/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bfce918feee19f8070106963281e46ae801d03583dd0786b443c2f200607d62
3
+ size 360233
fr/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e05c0cac22eba1d8c00ea0558b225af5d741671066abc9bcbc98584930f14b90
3
+ size 55425053
fr/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f97d84263ac129e54cdeec7bde66b3da9f8b1ba42cfabd0d0b3c5ac7f442cc2
3
+ size 183394
hi/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e946c50370a8fcc8cc58adb74a2625e5377f55c3f4ba821fd9d305c8d4b14c4e
3
+ size 492625
hi/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4fab2db1b6c29e0ad9b238f0f12f709f878986f8e074379821efd120e13f467
3
+ size 70167282
hi/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4cdc8ab6ab3a086dd7bc624b531a518e25352eb2907fb335643c7545e994ee0
3
+ size 248641
ru/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d939fb4f7f2b61a863fb85dde9f6ef0221dde161fb86170fb10c27ea7d9d4d0
3
+ size 477352
ru/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5db75fb1e1646d7a39dda45cdcd9b7ba95cfa1f2af0009151e8db34c22ca6f5a
3
+ size 69986524
ru/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a75ecd12d4145e2eefe22c08cb4ae4b966d18ce7452bdf26ee04f2d4c499078
3
+ size 238730
sw/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b0bfad4e885a468752a5f17e4c620ac75f64902a2cef2da23f1c1e5ac973a29
3
+ size 312255
sw/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3282ea435eda702e713b958adcc1f053f415f9f57704b21cc441e1d86b834f52
3
+ size 45093826
sw/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3dd236d01144c13829450cfbc12ffcc33ecdcf289a02b4c6a60ca78a557eb80b
3
+ size 158071
th/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45ca847893c31a98757d0aedcffacdcca8246922d5c3782c48c735db7e6137b1
3
+ size 503402
th/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec65ff12beef95f3ee4f59aa115252656d81b41ce4b39816760f88948d138dbc
3
+ size 76466353
th/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09abadf254309e8c145777854f95c15024ab6fe29a630f7cd5aec640bffca254
3
+ size 252290
tr/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e1cffb8f840d8862ab11f4727b975d408bf6f3370585cbb96733b5456902b89a
3
+ size 338133
tr/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:30d0b154e3c8b2a9eb7f91aeeb29c4285c96550c42aa5ce5b4d6523be2cbafa5
3
+ size 47999788
tr/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e98b0590ff9ef4464621690f620f463288c6db079eb6210d84c9ec175f04607
3
+ size 171759
ur/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:780b0b46fe8234fc9d478978d742697b82bd43cb7f34a63d2993e8a65c54352b
3
+ size 427737
ur/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0d6d2105e1433a93096cdc61892c96f93e82e48d6495d1281e656212049e3dc
3
+ size 46038912
ur/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a317147da65a9c9eadbae0328068d7e8f3f7874e8215fba31e2ca2c2f55fbabd
3
+ size 216136
vi/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34918143e8de1172147abd6fd78f369d6b6b6c2f5a016c2a4febc10190803fb1
3
+ size 364126
vi/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0eefcc40a026d25745a8edc6080ab8200a2307f974f333af32552c323a3dddc
3
+ size 57140047
vi/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff6d6aa869a554c38e43be21be8e880e3422cbccc7cc04bd0544ff2931af6f82
3
+ size 185885