holylovenia committed on
Commit
0e3d7aa
·
verified ·
1 Parent(s): e40b6f1

Upload tha_lao_embassy_parcor.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. tha_lao_embassy_parcor.py +126 -0
tha_lao_embassy_parcor.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+ from pathlib import Path
18
+ from typing import Dict, List, Tuple
19
+
20
+ import datasets
21
+ import pandas as pd
22
+
23
+ from seacrowd.utils import schemas
24
+ from seacrowd.utils.configs import SEACrowdConfig
25
+ from seacrowd.utils.constants import Licenses, Tasks
26
+
27
# Bibliographic reference for the corpus (Zenodo record, v0.7).
_CITATION = """
Wannaphong Phatthiyaphaibun. (2021). PyThaiNLP/Thai-Lao-Parallel-Corpus: \
Thai Lao Parallel corpus v0.7 (v0.7). Zenodo \
https://doi.org/10.5281/zenodo.5807093"""

# Canonical dataset identifier; used to build config names and the URL lookup key.
_DATASETNAME = "tha_lao_embassy_parcor"

_DESCRIPTION = """\
Thai-Lao Parallel Corpus contains equivalent Thai and Lao sentence pairs \
derived from the website of the Royal Thai Embassy in Vientiane, Laos.
"""

_HOMEPAGE = "https://github.com/PyThaiNLP/Thai-Lao-Parallel-Corpus/tree/master"
# ISO 639-3 codes for the two sides of the parallel corpus.
_LANGUAGES = ["tha", "lao"]
_LICENSE = Licenses.CC0_1_0.value

# Data is fetched from a public URL, not shipped locally.
_LOCAL = False
# Single CSV file; each row holds one Lao/Thai sentence pair.
_URLS = {_DATASETNAME: "https://github.com/PyThaiNLP/Thai-Lao-Parallel-Corpus/raw/master/vientiane-thaiembassy-sent.csv"}

_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
_SOURCE_VERSION = "0.7.0"
_SEACROWD_VERSION = "2024.06.20"
49
+
50
+
51
class ThaLaoEmbassyParcorDataset(datasets.GeneratorBasedBuilder):
    """Thai-Lao Parallel Corpus loader.

    Contains equivalent Thai and Lao sentence pairs derived from the website
    of the Royal Thai Embassy in Vientiane, Laos. Exposes two configs: the
    raw "source" schema and the SEACrowd text-to-text ("t2t") schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
    SEACROWD_SCHEMA_NAME = "t2t"

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with the feature schema for the active config.

        Raises:
            ValueError: if the config carries a schema this loader does not support
                (previously this fell through and raised UnboundLocalError).
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "lao_sent": datasets.Value("string"),
                    "thai_sent": datasets.Value("string"),
                }
            )
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.text2text_features
        else:
            # Fail fast with a clear message instead of an UnboundLocalError below.
            raise ValueError(f"Unsupported schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the corpus CSV and expose it as a single TRAIN split."""
        urls = _URLS[_DATASETNAME]
        # dl_manager.download already returns a usable local path; the original
        # wrapped it in a single-argument os.path.join, which is a no-op.
        filepath = dl_manager.download(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs from the downloaded CSV.

        Args:
            filepath: local path to the CSV with `lao_sent` / `thai_sent` columns.
            split: split name (always "train"); unused but part of gen_kwargs.
        """
        dataset = pd.read_csv(filepath)

        if self.config.schema == "source":
            for i, row in dataset.iterrows():
                yield i, {"lao_sent": row["lao_sent"], "thai_sent": row["thai_sent"]}

        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            for i, row in dataset.iterrows():
                yield i, {
                    # The t2t schema declares "id" as a string feature, so cast
                    # the integer row index explicitly.
                    "id": str(i),
                    "text_1": row["lao_sent"],
                    "text_2": row["thai_sent"],
                    "text_1_name": "lao",
                    "text_2_name": "tha",
                }