# coding=utf-8
# JParaCrawl Dataset

# Lint as: python3
"""JParaCrawl: A Large Scale Web-Based English-Japanese Parallel Corpus."""

from functools import partial
from pathlib import Path
import unicodedata

import packaging.version
import pandas as pd

import datasets
from datasets.features import Translation, Value

_DESCRIPTION = """\
JParaCrawl is the largest publicly available English-Japanese parallel corpus created by NTT.
It was created by broadly crawling the web and automatically aligning parallel sentences.
"""

_CITATION = """\
@inproceedings{morishita-etal-2020-jparacrawl,
    title = "{JP}ara{C}rawl: A Large Scale Web-Based {E}nglish-{J}apanese Parallel Corpus",
    author = "Morishita, Makoto  and
      Suzuki, Jun  and
      Nagata, Masaaki",
    booktitle = "Proceedings of The 12th Language Resources and Evaluation Conference",
    month = may,
    year = "2020",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://www.aclweb.org/anthology/2020.lrec-1.443",
    pages = "3603--3609",
    ISBN = "979-10-95546-34-4",
}
@misc{morishita2022jparacrawl,
    title={JParaCrawl v3.0: A Large-scale English-Japanese Parallel Corpus},
    author={Makoto Morishita and Katsuki Chousa and Jun Suzuki and Masaaki Nagata},
    year={2022},
    eprint={2202.12607},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_LICENSE = """\
Terms of Use for Bilingual Data, Monolingual Data and Trained Models

Nippon Telegraph and Telephone Corporation (Hereinafter referred to as "our company".) will provide bilingual data, monolingual data and trained models (Hereinafter referred to as "this data.") subject to your acceptance of these Terms of Use. We assume that you have agreed to these Terms of Use when you start using this data (including downloads).

Article 1 (Use conditions)
This data can only be used for research purposes involving information analysis (Including, but not limited to, replication and distribution. Hereinafter the same in this article.). The same applies to the derived data created based on this data. However, this data is not available for commercial use, including the sale of translators trained using this data.

Article 2 (Disclaimer)
Our company does not warrant the quality, performance or any other aspects of this data. We shall not be liable for any direct or indirect damages caused by the use of this data. Our company shall not be liable for any damage to the system caused by the installation of this data.

Article 3 (Other).
This data may be changed in whole or in part, or provision of this data may be interrupted or stopped at our company’s discretion without prior notice.
"""

_VERSION = "3.0.0"  # 2.0 for zh-ja pair

_DATA_URL = "http://www.kecl.ntt.co.jp/icl/lirg/jparacrawl/release/%s/bitext/%s-ja.tar.gz"

_HOMEPAGE = "https://www.kecl.ntt.co.jp/icl/lirg/jparacrawl/"

_LANGUAGE_PAIRS = [("en", "ja"), ("zh", "ja")]
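# Config names are derived from these pairs ("en-ja" and "zh-ja"); see
# JParaCrawlConfig below.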


class JParaCrawlConfig(datasets.BuilderConfig):
    """BuilderConfig for JParaCrawl."""

    def __init__(self, language_pair=(None, None), **kwargs):
        """BuilderConfig for JParaCrawl.

        Args:
          language_pair: pair of languages that will be used for translation. Should
            contain 2-letter coded strings. The first will be used as the source and
            the second as the target in supervised mode. For example: ("en", "ja").
          **kwargs: keyword arguments forwarded to super.
        """
        source, target = language_pair

        super().__init__(
            name=f"{source}-{target}",
            description=f"{source}-{target} Translation dataset",
            version=datasets.Version(_VERSION, ""),
            **kwargs,
        )

        # Validate the language pair: one side must be Japanese, the other
        # one of the released source languages.
        assert "ja" in language_pair, f"Config language pair must contain `ja`, got: {language_pair}"

        non_ja = source if target == "ja" else target
        assert non_ja in ["en", "zh"], f"Invalid non-ja language in pair: {non_ja}"

        self.language_pair = language_pair


class JParaCrawl(datasets.GeneratorBasedBuilder):
    """JParaCrawl machine translation dataset."""

    BUILDER_CONFIGS = [
        JParaCrawlConfig(language_pair=("en", "ja")),
        JParaCrawlConfig(language_pair=("zh", "ja")),
    ]

    def _info(self):
        source, target = self.config.language_pair
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "domain": Value(dtype="string", id=None),
                    "url": Value(dtype="string", id=None),
                    "probability": Value(dtype="float32", id=None),
                    "translation": Translation(languages=self.config.language_pair),
                }
            ),
            supervised_keys=(source, target),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        source, target = self.config.language_pair
        non_ja = source if target == "ja" else target
        v = packaging.version.parse(_VERSION)
        archive = dl_manager.download_and_extract(
            _DATA_URL % (f"{v.major}.{v.minor}", non_ja)
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"extracted_path": archive}
            )
        ]

    def _generate_examples(self, extracted_path):
        """This function returns the examples in the raw form."""
        source, target = self.config.language_pair
        non_ja = source if target == "ja" else target
        df = None
        for path in Path(extracted_path).glob("**/*"):
            if path.name == f"{non_ja}-ja.bicleaner05.txt":
                df = pd.read_csv(
                    path,
                    header=None,
                    index_col=None,
                    sep="\t\t",  # dummy delimiter: each raw line is read into a single column
                    encoding="utf8",
                    engine="python",
                )
                break
        assert df is not None, f"Bitext file not found under: {extracted_path}"

        def _split(line: str, col: int) -> str:
            """Return one of the five tab-separated fields of a raw line."""
            return line.split("\t", 4)[col].strip()

        # Raw line format (tab-separated): domain, URL, alignment probability,
        # non-Japanese sentence, Japanese sentence.
        df["domain"] = df[0].apply(partial(_split, col=0))
        df["url"] = df[0].apply(partial(_split, col=1))
        df["probability"] = df[0].apply(partial(_split, col=2))
        df[non_ja] = df[0].apply(partial(_split, col=3))
        df["ja"] = df[0].apply(partial(_split, col=4))
        df = df.drop_duplicates(subset=[non_ja, 'ja'])

        def _normalize(s: str) -> str:
            """Apply NFKC normalization and replace stray tabs with spaces."""
            return unicodedata.normalize("NFKC", s).replace("\t", " ").strip()

        _id = 0
        for _, row in df.iterrows():
            result = {
                "domain": row["domain"],
                "url": row["url"],
                "probability": float(row["probability"]),
                "translation": {
                    non_ja: _normalize(row[non_ja]),
                    "ja": _normalize(row["ja"]),
                },
            }
            # Yield only pairs where both sides are non-empty after normalization.
            if all(result["translation"].values()):
                yield _id, result
                _id += 1
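
# For reference, a yielded example has this shape (illustrative values only,
# not taken from the corpus):
#   (0, {"domain": "example.com",
#        "url": "http://example.com/page.html",
#        "probability": 0.79,
#        "translation": {"en": "...", "ja": "..."}})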