| """Kenyan Languages to Swahili Translation Dataset"""
|
|
|
| import os
|
| import re
|
| from pathlib import Path
|
| from typing import Dict, List, Tuple
|
|
|
| import datasets
|
|
|
|
|
_CITATION = """\
@misc{kenyan_translation,
  title={Kenyan Languages to Swahili Translation Dataset},
  author={Kenya NLP},
  year={2025}
}
"""

_DESCRIPTION = """\
This dataset contains parallel text translations from several Kenyan languages (Dholuo, Lubukusu, Lucheka, Lulogooli)
into Swahili. Each file contains sentence pairs in the format:
- O: <source_sentence> (Original)
- T: <target_sentence> (Translation/Target)

The dataset is organized by source language and can be loaded by specifying the language code.
"""

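# Example of loading this dataset with the `datasets` library (a sketch:
# 'USERNAME/REPO_NAME' is a placeholder for the actual dataset repository, and recent
# versions of `datasets` may require trust_remote_code=True to run loading scripts):
#
#   from datasets import load_dataset
#   ds = load_dataset("USERNAME/REPO_NAME", lang="dho", trust_remote_code=True)
#   print(ds["train"][0])
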
_HOMEPAGE = "https://github.com/yourusername/kenyan-translation"

_LICENSE = "CC-BY-4.0"

_LANGUAGES = ["dho", "lbk", "lch", "llg"]
_LANG_NAMES = {
    "dho": "Dholuo",
    "lbk": "Lubukusu",
    "lch": "Lucheka",
    "llg": "Lulogooli",
}

_TARGET_LANGUAGE = "swa"

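# Expected repository layout, inferred from the path patterns used in
# _split_generators below (a sketch; directory contents are illustrative):
#
#   data/
#     dho/*.txt   # Dholuo-Swahili files
#     lbk/*.txt   # Lubukusu-Swahili files
#     lch/*.txt   # Lucheka-Swahili files
#     llg/*.txt   # Lulogooli-Swahili files
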

class KenyanTranslationConfig(datasets.BuilderConfig):
    """BuilderConfig for Kenyan Translation Dataset."""

    def __init__(self, lang: str = None, **kwargs):
        """
        Args:
            lang: Language code (dho, lbk, lch, or llg)
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.lang = lang


class KenyanTranslation(datasets.GeneratorBasedBuilder):
    """Kenyan Languages to Swahili Translation Dataset"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = KenyanTranslationConfig

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "source": datasets.Value("string"),
                "target": datasets.Value("string"),
                "src_lang": datasets.Value("string"),
                "tgt_lang": datasets.Value("string"),
                "pair": datasets.Value("string"),
                "filename": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        if not self.config.lang:
            raise ValueError(
                "Please specify a language code using the `lang` parameter. "
                f"Available languages: {', '.join(_LANGUAGES)}\n"
                f"Example: load_dataset('USERNAME/REPO_NAME', lang='dho')"
            )

        if self.config.lang not in _LANGUAGES:
            raise ValueError(
                f"Language '{self.config.lang}' is not supported. "
                f"Available languages: {', '.join(_LANGUAGES)}"
            )

        if self.config.data_files:
            # Flatten data_files (a dict of splits, a list, or a single path) and keep
            # only the files that belong to the requested language.
            if isinstance(self.config.data_files, dict):
                all_files = []
                for split_files in self.config.data_files.values():
                    if isinstance(split_files, list):
                        all_files.extend(split_files)
                    else:
                        all_files.append(split_files)
            else:
                all_files = (
                    self.config.data_files
                    if isinstance(self.config.data_files, list)
                    else [self.config.data_files]
                )

            lang_files = [f for f in all_files if f"data/{self.config.lang}/" in str(f)]
        else:
            # No explicit data_files were passed: resolve data/<lang>/*.txt ourselves.
            # Note that this relies on the download manager's private `_base_path` attribute.
            from datasets.data_files import DataFilesList

            base_path = dl_manager._base_path if getattr(dl_manager, "_base_path", None) else ""
            data_pattern = (
                f"{base_path}/data/{self.config.lang}/*.txt"
                if base_path
                else f"data/{self.config.lang}/*.txt"
            )
            lang_files = DataFilesList.from_patterns([data_pattern], base_path=base_path)

        if not lang_files:
            raise ValueError(
                f"No data files found for language '{self.config.lang}'. "
                f"Expected files in data/{self.config.lang}/"
            )

        downloaded_files = dl_manager.download(lang_files)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": downloaded_files,
                    "lang": self.config.lang,
                },
            ),
        ]

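    # Illustrative sketch of the O:/T: file format handled by _parse_ot_format below
    # (hypothetical content, not taken from the actual data files). Consecutive "T:"
    # lines are joined into a single target sentence:
    #
    #   O: <source sentence in the Kenyan language>
    #   T: <first part of the Swahili translation>
    #   T: <second part of the Swahili translation>
    #   O: <next source sentence>
    #   T: <its Swahili translation>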
    def _parse_ot_format(self, content: str) -> List[Tuple[str, str]]:
        """Parse O:/T: format from file content."""
        pairs = []
        lines = content.split('\n')

        current_source = ""
        current_target = ""

        for line in lines:
            line = line.strip()
            # Strip leading line-number artifacts such as "12→".
            line = re.sub(r'^\s*\d+→', '', line)

            if line.startswith('O:'):
                # A new source sentence starts: flush the previous pair if it is complete.
                if current_source and current_target:
                    pairs.append((current_source.strip(), current_target.strip()))
                current_target = ""
                current_source = line[2:].strip()

            elif line.startswith('T:'):
                # Consecutive T: lines are appended to the current target sentence.
                target_text = line[2:].strip()
                if current_target:
                    current_target += " " + target_text
                else:
                    current_target = target_text

        # Flush the final pair.
        if current_source and current_target:
            pairs.append((current_source.strip(), current_target.strip()))

        return pairs

    def _parse_tab_format(self, content: str) -> List[Tuple[str, str]]:
        """Parse tab-separated format."""
        pairs = []
        for line in content.split('\n'):
            line = line.strip()
            if not line:
                continue
            parts = line.split('\t')
            if len(parts) >= 2:
                pairs.append((parts[0].strip(), parts[1].strip()))
        return pairs

    def _parse_pipe_format(self, content: str) -> List[Tuple[str, str]]:
        """Parse '|||'-separated format."""
        pairs = []
        for line in content.split('\n'):
            line = line.strip()
            if not line:
                continue
            parts = line.split('|||')
            if len(parts) >= 2:
                pairs.append((parts[0].strip(), parts[1].strip()))
        return pairs

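    # Illustrative line formats for the two fallback parsers above (hypothetical content):
    #
    #   <source sentence>\t<target sentence>
    #   <source sentence> ||| <target sentence>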
    def _parse_content(self, content: str) -> List[Tuple[str, str]]:
        """Parse content string and return list of (source, target) pairs."""
        # Try the supported formats in order: O:/T: blocks, then tab-separated lines,
        # then '|||'-separated lines.
        if 'O:' in content and 'T:' in content:
            pairs = self._parse_ot_format(content)
            if pairs:
                return pairs

        if '\t' in content:
            pairs = self._parse_tab_format(content)
            if pairs:
                return pairs

        if '|||' in content:
            pairs = self._parse_pipe_format(content)
            if pairs:
                return pairs

        return []

    def _parse_file(self, filepath: Path) -> List[Tuple[str, str]]:
        """Parse a file and return list of (source, target) pairs."""
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                content = f.read()
        except UnicodeDecodeError:
            # Fall back to latin-1 for files that are not valid UTF-8.
            with open(filepath, 'r', encoding='latin-1') as f:
                content = f.read()

        return self._parse_content(content)

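    # A single generated example looks roughly like this (values are illustrative,
    # not taken from the real data files):
    #
    #   {
    #       "id": "dho_<filename>_0",
    #       "source": "<Dholuo sentence>",
    #       "target": "<Swahili sentence>",
    #       "src_lang": "dho",
    #       "tgt_lang": "swa",
    #       "pair": "dho-swa",
    #       "filename": "<filename>",
    #   }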
    def _generate_examples(self, files, lang: str):
        """Yields examples."""
        idx = 0

        if isinstance(files, str):
            files = [files]

        for filepath in sorted(files):
            filepath = Path(filepath)
            filename = filepath.name

            # Reuse _parse_file for the encoding fallback and format detection.
            pairs = self._parse_file(filepath)

            for pair_idx, (source, target) in enumerate(pairs):
                # Skip incomplete pairs.
                if not source or not target:
                    continue

                example_id = f"{lang}_{filename}_{pair_idx}"

                yield idx, {
                    "id": example_id,
                    "source": source,
                    "target": target,
                    "src_lang": lang,
                    "tgt_lang": _TARGET_LANGUAGE,
                    "pair": f"{lang}-{_TARGET_LANGUAGE}",
                    "filename": filename,
                }
                idx += 1
|