Refactor wmt_utils to add language_pair to SubDataset
wmt_utils.py  +29 -28
@@ -53,7 +53,7 @@ CWMT_SUBSET_NAMES = ["casia2015", "casict2011", "casict2015", "datum2015", "datu
 class SubDataset:
     """Class to keep track of information on a sub-dataset of WMT."""

-    def __init__(self, name, target, sources, url, path, manual_dl_files=None):
+    def __init__(self, name, url, path="", target=None, sources=None, language_pairs=None, manual_dl_files=None):
         """Sub-dataset of WMT.

         Args:
@@ -83,33 +83,34 @@ class SubDataset:
         self._manual_dl_files = manual_dl_files if manual_dl_files else []
         self.name = name
         self.target = target
-        self.sources = set(sources)
+        self.sources = set(sources) if sources else sources
+        self.language_pairs = language_pairs if language_pairs else {(src, target) for src in self.sources}

-    def _inject_language(self, src, strings):
+    def _inject_language(self, src, tgt, strings):
         """Injects languages into (potentially) template strings."""
-        if src not in self.sources:
-            raise ValueError(f"Invalid source for '{self.name}': {src}")
+        if (src, tgt) not in self.language_pairs:
+            raise ValueError(f"Invalid source for '{self.name}': ({src}-{tgt})")

         def _format_string(s):
-            if "{0}" in s and "{1}" and "{src}" in s:
-                return s.format(*sorted([src, self.target]), src=src)
+            if "{0}" in s and "{1}" in s and "{src}" in s:
+                return s.format(*sorted([src, tgt]), src=src)
             elif "{0}" in s and "{1}" in s:
-                return s.format(*sorted([src, self.target]))
+                return s.format(*sorted([src, tgt]))
             elif "{src}" in s:
-                return s.format(src=src)
+                return s.format(src=src, tgt=tgt)
             else:
                 return s

         return [_format_string(s) for s in strings]

-    def get_url(self, src):
-        return self._inject_language(src, self._urls)
+    def get_url(self, src, tgt):
+        return self._inject_language(src=src, tgt=tgt, strings=self._urls)

-    def get_manual_dl_files(self, src):
-        return self._inject_language(src, self._manual_dl_files)
+    def get_manual_dl_files(self, src, tgt):
+        return self._inject_language(src=src, tgt=tgt, strings=self._manual_dl_files)

-    def get_path(self, src):
-        return self._inject_language(src, self._paths)
+    def get_path(self, src, tgt):
+        return self._inject_language(src=src, tgt=tgt, strings=self._paths)


 # Subsets used in the training sets for various years of WMT.
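Note: a minimal standalone sketch of the template expansion that the new _format_string performs (the helper name and example file names below are illustrative, not taken from the file):

def format_template(s, src, tgt):
    # Mirrors _format_string above: "{0}"/"{1}" take the alphabetically
    # sorted language pair, "{src}"/"{tgt}" take the pair as passed in.
    if "{0}" in s and "{1}" in s and "{src}" in s:
        return s.format(*sorted([src, tgt]), src=src)
    elif "{0}" in s and "{1}" in s:
        return s.format(*sorted([src, tgt]))
    elif "{src}" in s:
        return s.format(src=src, tgt=tgt)
    return s

print(format_template("news-commentary-v14.{0}-{1}.tsv", "en", "cs"))
# news-commentary-v14.cs-en.tsv  (sorted pair, direction-independent)
print(format_template("newstest2019-{src}{tgt}-src.{src}.sgm", "de", "en"))
# newstest2019-deen-src.de.sgm  (direction-sensitive)

Passing tgt explicitly, instead of reading self.target, is what lets a single SubDataset serve more than one translation direction.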
@@ -677,14 +678,14 @@ class Wmt(datasets.GeneratorBasedBuilder):
     @property
     def subsets(self):
         """Subsets that make up each split of the dataset for the language pair."""
-        source, target = self.config.language_pair
+        language_pair = self.config.language_pair
         filtered_subsets = {}
         subsets = self._subsets if self.config.subsets is None else self.config.subsets
         for split, ss_names in subsets.items():
             filtered_subsets[split] = []
             for ss_name in ss_names:
                 dataset = DATASET_MAP[ss_name]
-                if dataset.target != target or source not in dataset.sources:
+                if language_pair not in dataset.language_pairs:
                     logger.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
                 else:
                     filtered_subsets[split].append(ss_name)
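Note: the filter above now keys on dataset.language_pairs rather than the old target/sources check. A small sketch of the two ways that set gets populated (values are illustrative):

# Default: derived from sources and target in __init__, which reproduces
# the old one-directional behaviour.
sources, target = {"cs", "de", "ru"}, "en"
language_pairs = {(src, target) for src in sources}
# {("cs", "en"), ("de", "en"), ("ru", "en")}

# Explicit: a bidirectional corpus, which a single (target, sources)
# SubDataset could not express before this refactor.
language_pairs = {("de", "en"), ("en", "de")}
print(("en", "de") in language_pairs)  # True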
@@ -708,7 +709,7 @@ class Wmt(datasets.GeneratorBasedBuilder):
             yield ex[language]

     def _split_generators(self, dl_manager):
-        source, _ = self.config.language_pair
+        source, target = self.config.language_pair
         manual_paths_dict = {}
         urls_to_download = {}
         for ss_name in itertools.chain.from_iterable(self.subsets.values()):
@@ -716,30 +717,30 @@ class Wmt(datasets.GeneratorBasedBuilder):
                 # CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download
                 # the filtering script so we can parse out which blocks need to be
                 # removed.
-                urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source)
+                urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source, target)

             # get dataset
             dataset = DATASET_MAP[ss_name]
-            if dataset.get_manual_dl_files(source):
+            if dataset.get_manual_dl_files(source, target):
                 # TODO(PVP): following two lines skip configs that are incomplete for now
                 # +++++++++++++++++++++
                 logger.info("Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
                 continue
                 # +++++++++++++++++++++

-                manual_dl_files = dataset.get_manual_dl_files(source)
+                manual_dl_files = dataset.get_manual_dl_files(source, target)
                 manual_paths = [
                     os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
                     for fname in manual_dl_files
                 ]
                 assert all(
                     os.path.exists(path) for path in manual_paths
-                ), f"For {dataset.name}, you must manually download the following file(s) from {dataset.get_url(source)} and place them in {dl_manager.manual_dir}: {', '.join(manual_dl_files)}"
+                ), f"For {dataset.name}, you must manually download the following file(s) from {dataset.get_url(source, target)} and place them in {dl_manager.manual_dir}: {', '.join(manual_dl_files)}"

                 # set manual path for correct subset
                 manual_paths_dict[ss_name] = manual_paths
             else:
-                urls_to_download[ss_name] = dataset.get_url(source)
+                urls_to_download[ss_name] = dataset.get_url(source, target)

         # Download and extract files from URLs.
         downloaded_files = dl_manager.download_and_extract(urls_to_download)
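Note: a minimal sketch of the manual-download branch above, resolving file names against manual_dir (the directory and file name here are made up):

import os

manual_dir = "~/wmt_manual"           # stand-in for dl_manager.manual_dir
manual_dl_files = ["czeng16.tar.gz"]  # hypothetical manual file
manual_paths = [
    os.path.join(os.path.abspath(os.path.expanduser(manual_dir)), fname)
    for fname in manual_dl_files
]
missing = [p for p in manual_paths if not os.path.exists(p)]
if missing:
    print("Download these files manually and place them in", manual_dir, ":", missing)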
@@ -759,10 +760,10 @@ class Wmt(datasets.GeneratorBasedBuilder):

     def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
         """Returns the examples in the raw (text) form."""
-        source, _ = self.config.language_pair
+        source, target = self.config.language_pair

         def _get_local_paths(dataset, extract_dirs):
-            rel_paths = dataset.get_path(source)
+            rel_paths = dataset.get_path(source, target)
             if len(extract_dirs) == 1:
                 extract_dirs = extract_dirs * len(rel_paths)
             return [
@@ -771,8 +772,8 @@ class Wmt(datasets.GeneratorBasedBuilder):
             ]

         def _get_filenames(dataset):
-            rel_paths = dataset.get_path(source)
-            urls = dataset.get_url(source)
+            rel_paths = dataset.get_path(source, target)
+            urls = dataset.get_url(source, target)
             if len(urls) == 1:
                 urls = urls * len(rel_paths)
             return [rel_path if rel_path else os.path.basename(url) for url, rel_path in zip(urls, rel_paths)]
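Note: a quick sketch of the filename fallback in _get_filenames: a single archive URL is broadcast across all relative paths, and an empty rel_path falls back to the URL basename (values are illustrative):

import os

urls = ["http://example.com/training/news.2019.de.gz"]  # hypothetical URL
rel_paths = ["", "extra/news.2019.de.deduped"]          # hypothetical paths
if len(urls) == 1:
    urls = urls * len(rel_paths)
filenames = [rp if rp else os.path.basename(u) for u, rp in zip(urls, rel_paths)]
print(filenames)  # ['news.2019.de.gz', 'extra/news.2019.de.deduped']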
@@ -782,7 +783,7 @@ class Wmt(datasets.GeneratorBasedBuilder):
                 # +++++++++++++++++++++
                 dataset = DATASET_MAP[ss_name]
                 source, _ = self.config.language_pair
-                if dataset.get_manual_dl_files(source):
+                if dataset.get_manual_dl_files(source, target):
                     logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
                     continue
                 # +++++++++++++++++++++