"""Kenyan Languages to Swahili Translation Dataset"""
import re
from pathlib import Path
from typing import List, Optional, Tuple
import datasets
import datasets
_CITATION = """\
@misc{kenyan_translation,
title={Kenyan Languages to Swahili Translation Dataset},
author={Kenya NLP},
year={2025}
}
"""
_DESCRIPTION = """\
This dataset contains parallel text translations from several Kenyan languages (Dholuo, Lubukusu, Lucheka, Lulogooli)
into Swahili. Each file contains sentence pairs in the format:
- O: <source_sentence> (Original)
- T: <target_sentence> (Translation/Target)
The dataset is organized by source language and can be loaded by specifying the language code.
"""
_HOMEPAGE = "https://github.com/yourusername/kenyan-translation"
_LICENSE = "CC-BY-4.0"
_LANGUAGES = ["dho", "lbk", "lch", "llg"]
_LANG_NAMES = {
"dho": "Dholuo",
"lbk": "Lubukusu",
"lch": "Lucheka",
"llg": "Lulogooli"
}
_TARGET_LANGUAGE = "swa" # Swahili
class KenyanTranslationConfig(datasets.BuilderConfig):
"""BuilderConfig for Kenyan Translation Dataset."""
    def __init__(self, lang: Optional[str] = None, **kwargs):
"""
Args:
lang: Language code (dho, lbk, lch, or llg)
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(**kwargs)
self.lang = lang
class KenyanTranslation(datasets.GeneratorBasedBuilder):
"""Kenyan Languages to Swahili Translation Dataset"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIG_CLASS = KenyanTranslationConfig
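    # Per-language configs could also be predefined so that
    # load_dataset(repo, "dho") works without the custom `lang` kwarg
    # (a sketch, not enabled here):
    #
    #     BUILDER_CONFIGS = [
    #         KenyanTranslationConfig(
    #             name=code,
    #             lang=code,
    #             description=f"{_LANG_NAMES[code]} to Swahili translation",
    #         )
    #         for code in _LANGUAGES
    #     ]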
def _info(self):
features = datasets.Features(
{
"id": datasets.Value("string"),
"source": datasets.Value("string"),
"target": datasets.Value("string"),
"src_lang": datasets.Value("string"),
"tgt_lang": datasets.Value("string"),
"pair": datasets.Value("string"),
"filename": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Validate that lang was provided
if not self.config.lang:
            raise ValueError(
                "Please specify a language code using the `lang` parameter. "
                f"Available languages: {', '.join(_LANGUAGES)}\n"
                f"Example: load_dataset('USERNAME/REPO_NAME', lang='dho')"
            )
if self.config.lang not in _LANGUAGES:
raise ValueError(
f"Language '{self.config.lang}' not supported. "
f"Available languages: {', '.join(_LANGUAGES)}"
)
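        # Expected repository layout (an assumption implied by the path
        # filters below):
        #     data/dho/*.txt
        #     data/lbk/*.txt
        #     data/lch/*.txt
        #     data/llg/*.txt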
# Get data files - either from config or by listing from the data directory
if self.config.data_files:
# Get all files for this language from provided data_files
if isinstance(self.config.data_files, dict):
all_files = []
for split_files in self.config.data_files.values():
if isinstance(split_files, list):
all_files.extend(split_files)
else:
all_files.append(split_files)
else:
all_files = self.config.data_files if isinstance(self.config.data_files, list) else [self.config.data_files]
# Filter files that belong to the selected language
lang_files = [f for f in all_files if f"data/{self.config.lang}/" in str(f)]
        else:
            # No data_files provided - list files from the repository directly.
            from datasets.data_files import DataFilesList
            # dl_manager._base_path is private API, but it is populated for
            # both local and Hub loading.
            base_path = getattr(dl_manager, "_base_path", None) or ""
            # Glob pattern for this language's data directory.
            data_pattern = (
                f"{base_path}/data/{self.config.lang}/*.txt"
                if base_path
                else f"data/{self.config.lang}/*.txt"
            )
            # Resolve the pattern into the list of matching data files.
            lang_files = DataFilesList.from_patterns([data_pattern], base_path=base_path)
if not lang_files:
raise ValueError(
f"No data files found for language '{self.config.lang}'. "
f"Expected files in data/{self.config.lang}/"
)
# Download all files
downloaded_files = dl_manager.download(lang_files)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"files": downloaded_files,
"lang": self.config.lang,
},
),
]
def _parse_ot_format(self, content: str) -> List[Tuple[str, str]]:
"""Parse O:/T: format from file content."""
pairs = []
lines = content.split('\n')
current_source = ""
current_target = ""
for line in lines:
line = line.strip()
# Remove line numbers (e.g., "1→O:" becomes "O:")
line = re.sub(r'^\s*\d+→', '', line)
if line.startswith('O:'):
# Save previous pair if exists
if current_source and current_target:
pairs.append((current_source.strip(), current_target.strip()))
current_target = ""
# Start new source
current_source = line[2:].strip()
elif line.startswith('T:'):
# Add to target (might span multiple T: lines)
target_text = line[2:].strip()
if current_target:
current_target += " " + target_text
else:
current_target = target_text
# Add last pair
if current_source and current_target:
pairs.append((current_source.strip(), current_target.strip()))
return pairs
def _parse_tab_format(self, content: str) -> List[Tuple[str, str]]:
"""Parse tab-separated format."""
pairs = []
for line in content.split('\n'):
line = line.strip()
if not line:
continue
parts = line.split('\t')
if len(parts) >= 2:
pairs.append((parts[0].strip(), parts[1].strip()))
return pairs
def _parse_pipe_format(self, content: str) -> List[Tuple[str, str]]:
"""Parse ||| separated format."""
pairs = []
for line in content.split('\n'):
line = line.strip()
if not line:
continue
parts = line.split('|||')
if len(parts) >= 2:
pairs.append((parts[0].strip(), parts[1].strip()))
return pairs
def _parse_content(self, content: str) -> List[Tuple[str, str]]:
"""Parse content string and return list of (source, target) pairs."""
# Try O:/T: format first
if 'O:' in content and 'T:' in content:
pairs = self._parse_ot_format(content)
if pairs:
return pairs
# Fall back to tab-separated
if '\t' in content:
pairs = self._parse_tab_format(content)
if pairs:
return pairs
# Fall back to pipe-separated
if '|||' in content:
pairs = self._parse_pipe_format(content)
if pairs:
return pairs
return []
def _parse_file(self, filepath: Path) -> List[Tuple[str, str]]:
"""Parse a file and return list of (source, target) pairs."""
try:
with open(filepath, 'r', encoding='utf-8') as f:
content = f.read()
except UnicodeDecodeError:
            # Fall back to latin-1, which can decode any byte sequence
with open(filepath, 'r', encoding='latin-1') as f:
content = f.read()
return self._parse_content(content)
def _generate_examples(self, files, lang: str):
"""Yields examples."""
idx = 0
# Ensure files is a list
if isinstance(files, str):
files = [files]
# Sort files for consistent ordering
for filepath in sorted(files):
filepath = Path(filepath)
filename = filepath.name
            # Read and parse the file; _parse_file handles the encoding fallback
            pairs = self._parse_file(filepath)
# Yield each pair as an example
for pair_idx, (source, target) in enumerate(pairs):
# Skip empty pairs
if not source or not target:
continue
example_id = f"{lang}_{filename}_{pair_idx}"
yield idx, {
"id": example_id,
"source": source,
"target": target,
"src_lang": lang,
"tgt_lang": _TARGET_LANGUAGE,
"pair": f"{lang}-{_TARGET_LANGUAGE}",
"filename": filename,
}
idx += 1