# mimba/text2text
This dataset provides multilingual parallel sentence pairs for machine translation (text-to-text tasks).
Currently, it includes a single pair, Ngiemboon → French (36,859 examples).
Additional language pairs (e.g., Ngiemboon → English) will be added in the future.
Each example has four fields:

- `source_text`: source sentence
- `target_text`: target sentence
- `source_lang`: ISO 639-3 language code of the source (e.g., `nnh`)
- `target_lang`: ISO 639-3 language code of the target (e.g., `fra`)

```python
from datasets import load_dataset

# Load the dataset
dataset = load_dataset("mimba/text2text")
print(dataset)
```
```
DatasetDict({
    nnh_fra: Dataset({
        features: ['source_text', 'target_text', 'source_lang', 'target_lang'],
        num_rows: 36859
    })
})
```
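Each record is a flat dictionary with the four fields listed above. A quick way to sanity-check the schema is to print the first record (a minimal sketch; the printed contents are whatever the corpus holds):

```python
# Peek at the first record to confirm the field layout
example = dataset["nnh_fra"][0]
print(example["source_lang"], "→", example["target_lang"])  # nnh → fra
print(example["source_text"])
print(example["target_text"])
```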
The dataset is provided as a single split (`nnh_fra`). You can split it into train and validation/test sets using `train_test_split`:
```python
from datasets import DatasetDict

# 90% train / 10% validation
split_dataset = dataset["nnh_fra"].train_test_split(test_size=0.1)
dataset_dict = DatasetDict({
    "train": split_dataset["train"],
    "validation": split_dataset["test"],
})
print(dataset_dict)
```
```
DatasetDict({
    train: Dataset({
        features: ['source_text', 'target_text', 'source_lang', 'target_lang'],
        num_rows: 33173
    })
    validation: Dataset({
        features: ['source_text', 'target_text', 'source_lang', 'target_lang'],
        num_rows: 3686
    })
})
```
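If you also want a held-out test set, one option is to split twice (a sketch; the 80/10/10 ratios and the fixed seed are assumptions, not a project convention):

```python
from datasets import DatasetDict

# 80% train, then halve the remaining 20% into validation and test
first = dataset["nnh_fra"].train_test_split(test_size=0.2, seed=42)
second = first["test"].train_test_split(test_size=0.5, seed=42)

dataset_splits = DatasetDict({
    "train": first["train"],
    "validation": second["train"],
    "test": second["test"],
})
```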
The corpus can be used to fine-tune NLLB-200. Ngiemboon is not among NLLB-200's built-in languages, so a custom language tag is added to the tokenizer; French is built in, so the targets use its `fra_Latn` code:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "facebook/nllb-200-distilled-600M"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Add a custom language tag for Ngiemboon and grow the embedding matrix
tokenizer.add_tokens(["__ngiemboon__"])
model.resize_token_embeddings(len(tokenizer))

# French targets use NLLB-200's built-in language code
tokenizer.tgt_lang = "fra_Latn"

# Preprocessing: prepend the custom tag to each source sentence and
# tokenize the targets in target mode (text_target fills in the labels)
def preprocess_function(examples):
    inputs = [f"__ngiemboon__ {src}" for src in examples["source_text"]]
    model_inputs = tokenizer(
        inputs,
        text_target=examples["target_text"],
        max_length=128,
        truncation=True,
    )
    return model_inputs

tokenized_datasets = dataset_dict.map(preprocess_function, batched=True)
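```

From here, training can follow the standard `Seq2SeqTrainer` recipe. The sketch below makes several assumptions: the output directory, batch size, learning rate, and epoch count are placeholders to adjust, and `fp16=True` requires a CUDA GPU.

```python
from transformers import (
    DataCollatorForSeq2Seq,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

# Pads inputs and labels per batch; label padding uses -100 so padded
# positions are ignored by the loss
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)

training_args = Seq2SeqTrainingArguments(
    output_dir="nllb200-ngiemboon2fr",  # assumption: any local path
    per_device_train_batch_size=16,     # assumption: fit to your GPU
    learning_rate=2e-5,                 # assumption: common seq2seq value
    num_train_epochs=3,                 # assumption
    save_strategy="epoch",
    logging_steps=100,
    fp16=True,                          # assumption: CUDA GPU available
)

trainer = Seq2SeqTrainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator,
    tokenizer=tokenizer,
)

trainer.train()
```

At inference time, `forced_bos_token_id` makes the decoder start with the French language token, mirroring the tagging scheme used during preprocessing (a sketch; it translates one held-out sentence from the validation split):

```python
# Translate one held-out sentence with the fine-tuned model
sample = dataset_dict["validation"][0]["source_text"]
inputs = tokenizer(f"__ngiemboon__ {sample}", return_tensors="pt").to(model.device)
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"),
    max_length=128,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```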
Additional language pairs will be added progressively.
```bibtex
@misc{mimba_text2text,
  title  = {Ngiemboon → French Parallel Corpus},
  author = {Mimba},
  year   = {2026},
  url    = {https://huggingface.co/datasets/mimba/text2text}
}
```