parquet-converter committed on
Commit
87c5d08
·
1 Parent(s): f496e25

Update parquet files

Browse files
.gitattributes CHANGED
@@ -14,3 +14,4 @@
14
  *.pb filter=lfs diff=lfs merge=lfs -text
15
  *.pt filter=lfs diff=lfs merge=lfs -text
16
  *.pth filter=lfs diff=lfs merge=lfs -text
 
 
14
  *.pb filter=lfs diff=lfs merge=lfs -text
15
  *.pt filter=lfs diff=lfs merge=lfs -text
16
  *.pth filter=lfs diff=lfs merge=lfs -text
17
+ zh-en/dataset-train.parquet filter=lfs diff=lfs merge=lfs -text
README.md DELETED
@@ -1,7 +0,0 @@
1
- A translation dataset between English and Traditional Chinese
2
-
3
- train : 101497 rows
4
- val : 1000 rows
5
- test : 1000 rows
6
-
7
-
 
 
 
 
 
 
 
 
dataset.py DELETED
@@ -1,94 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """dataset.ipynb
3
-
4
- Automatically generated by Colaboratory.
5
-
6
- Original file is located at
7
- https://colab.research.google.com/drive/1sNXmgV-J4w6JSdtXK-0TckTP5SFkGPtz
8
-
9
- ###Create a file.py
10
- """
11
-
12
- import datasets
13
- import csv
14
- import pandas as pd
15
-
16
- _URLS = {
17
- "zh-en": {
18
- "TRAIN_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1z-reeSB_pAcZEJicRpBJWzrhuwdtJ-d1&export=download",
19
- "VALIDATION_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1f1izEby8pfXZWG7htvcky_FL2iTMnoD5&export=download",
20
- "TEST_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1VGM96MZvMuAPJoFzBeSpyC16IDSiu0vC&export=download"
21
- }
22
- }
23
-
24
class NewDataset(datasets.GeneratorBasedBuilder):
    """Translation dataset between Traditional Chinese and English.

    Exposes a single "zh-en" configuration whose train/validation/test
    splits are downloaded as standalone CSV files (see module-level
    ``_URLS``). Each example is a ``{"translation": {"en": ..., "zh": ...}}``
    record.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="zh-en",
            version=VERSION,
            description="The translation dataset between Traditional Chinese and English",
        )
    ]

    def _info(self):
        """Return the dataset metadata (feature schema) for the active config."""
        if self.config.name == "zh-en":
            features = datasets.Features(
                {"translation": datasets.features.Translation(languages=["en", "zh"])}
            )
        else:
            # Fail loudly with a clear message instead of the opaque NameError
            # on `features` the original code would raise if a new config were
            # ever added without updating this method.
            raise ValueError(f"Unsupported configuration: {self.config.name!r}")

        return datasets.DatasetInfo(
            features=features,
            # No common (input, target) tuple is declared, so as_supervised=True
            # is not supported by builder.as_dataset.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download each split's CSV file and return its SplitGenerator.

        Args:
            dl_manager: datasets download manager used to fetch the files.

        Returns:
            A list of train/validation/test SplitGenerators whose gen_kwargs
            feed ``_generate_examples``.
        """
        my_urls = _URLS[self.config.name]

        train_path = dl_manager.download_and_extract(my_urls["TRAIN_DOWNLOAD_URL"])
        validation_path = dl_manager.download_and_extract(my_urls["VALIDATION_DOWNLOAD_URL"])
        test_path = dl_manager.download_and_extract(my_urls["TEST_DOWNLOAD_URL"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": validation_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_path, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(index, example)`` pairs from one split's CSV file.

        Args:
            filepath: local path of the downloaded CSV file; it must contain
                "en" and "zh" columns.
            split: split name ("train"/"validation"/"test"); unused here but
                part of the gen_kwargs contract.
        """
        # NOTE: original docstring said "Generate Dravidian MT examples" —
        # a copy-paste leftover from an unrelated dataset script.
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for idx, row in enumerate(reader):
                if self.config.name == "zh-en":
                    result = {"translation": {"en": row["en"], "zh": row["zh"]}}
                yield idx, result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
zh-en/dataset-test.parquet ADDED
Binary file (109 kB). View file
 
zh-en/dataset-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:19783084ceddea52aabdf4bfcbc610cc47f02e66583ffa467e930762b43f020e
3
+ size 12541812
zh-en/dataset-validation.parquet ADDED
Binary file (110 kB). View file