# Usage Examples
This document provides comprehensive examples for using the English-Shona Language Identification Dataset.
## 🚀 Quick Start with MLOps
### 1. Setup Environment
```bash
# Clone and setup
git clone https://huggingface.co/datasets/omanyasa/english-shona-langid
cd english-shona-langid
# Create reproducible environment
conda env create -f environment.yml
conda activate english-shona-langid
```
### 2. Validate Dataset
```bash
# Run comprehensive tests
pytest tests/test_dataset.py -v
# Quick data quality check
python -c "
from datasets import load_dataset
dataset = load_dataset('omanyasa/english-shona-langid')
print(f'✅ Dataset loaded: {len(dataset[\"train\"])} train samples')
print(f'🌍 Languages: {sorted(set(dataset[\"train\"][\"label\"]))}')
"
```
### 3. Benchmark Baselines
```bash
# Run baseline benchmarks
python scripts/benchmark.py --max-samples 10000
# Results saved as:
# - benchmark_results_*.json
# - confusion_matrix_*.png
```
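If you want to work with the saved metrics programmatically, the sketch below simply loads the most recent results file and pretty-prints it. The exact keys inside the JSON depend on what `scripts/benchmark.py` records, so nothing beyond the file-name pattern above is assumed:
```python
import glob
import json

# Pick the most recent benchmark results file written by scripts/benchmark.py
result_files = sorted(glob.glob("benchmark_results_*.json"))
if result_files:
    with open(result_files[-1]) as f:
        results = json.load(f)
    # Pretty-print whatever metrics the benchmark script recorded
    print(json.dumps(results, indent=2))
else:
    print("No benchmark results found - run scripts/benchmark.py first")
```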
## Basic Usage
### Loading the Dataset
```python
from datasets import load_dataset
# Load the entire dataset
dataset = load_dataset("omanyasa/english-shona-langid")
# Load specific splits
train_dataset = load_dataset("omanyasa/english-shona-langid", split="train")
validation_dataset = load_dataset("omanyasa/english-shona-langid", split="validation")
test_dataset = load_dataset("omanyasa/english-shona-langid", split="test")
print(f"Train samples: {len(train_dataset)}")
print(f"Validation samples: {len(validation_dataset)}")
print(f"Test samples: {len(test_dataset)}")
```
### Exploring the Data
```python
# View sample data
print("Sample data:")
for i in range(5):
    print(f"Text: {train_dataset[i]['text'][:100]}...")
    print(f"Label: {train_dataset[i]['label']}")
    print("-" * 50)
# Check label distribution
from collections import Counter
label_counts = Counter(train_dataset['label'])
print("Label distribution in training set:")
for label, count in label_counts.items():
    print(f"{label}: {count} samples")
```
## Machine Learning Examples
### 1. Traditional ML Approach (Scikit-learn)
```python
from datasets import load_dataset
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, accuracy_score
import numpy as np
# Load dataset
dataset = load_dataset("omanyasa/english-shona-langid")
train_texts = dataset['train']['text']
train_labels = dataset['train']['label']
test_texts = dataset['test']['text']
test_labels = dataset['test']['label']
# Feature extraction
vectorizer = TfidfVectorizer(max_features=10000, ngram_range=(1, 3))
X_train = vectorizer.fit_transform(train_texts)
X_test = vectorizer.transform(test_texts)
# Train model
model = LogisticRegression(max_iter=1000, random_state=42)
model.fit(X_train, train_labels)
# Evaluate
y_pred = model.predict(X_test)
print(f"Accuracy: {accuracy_score(test_labels, y_pred):.4f}")
print("\nClassification Report:")
print(classification_report(test_labels, y_pred))
```
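Word-level TF-IDF is a solid baseline, but for language identification character n-grams often capture orthographic cues more robustly, especially on short or noisy texts. A variant worth trying (not part of the baseline above, and reusing the data variables from it) only swaps the vectorizer:
```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# Character n-grams within word boundaries instead of word n-grams
char_vectorizer = TfidfVectorizer(analyzer='char_wb', ngram_range=(1, 4), max_features=50000)
X_train_char = char_vectorizer.fit_transform(train_texts)
X_test_char = char_vectorizer.transform(test_texts)

char_model = LogisticRegression(max_iter=1000, random_state=42)
char_model.fit(X_train_char, train_labels)
print(f"Char n-gram accuracy: {accuracy_score(test_labels, char_model.predict(X_test_char)):.4f}")
```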
### 2. Deep Learning with PyTorch
```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from datasets import load_dataset
from transformers import AutoTokenizer
import numpy as np
class TextDataset(Dataset):
    def __init__(self, texts, labels, tokenizer, max_length=128):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length
        # Create a deterministic label mapping (sorted so that train and test
        # datasets built from the same label set agree on the same ids)
        unique_labels = sorted(set(labels))
        self.label_to_id = {label: idx for idx, label in enumerate(unique_labels)}
        self.id_to_label = {idx: label for label, idx in self.label_to_id.items()}

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        text = self.texts[idx]
        label = self.labels[idx]
        encoding = self.tokenizer(
            text,
            truncation=True,
            padding='max_length',
            max_length=self.max_length,
            return_tensors='pt'
        )
        return {
            'input_ids': encoding['input_ids'].flatten(),
            'attention_mask': encoding['attention_mask'].flatten(),
            'labels': torch.tensor(self.label_to_id[label], dtype=torch.long)
        }
class LanguageClassifier(nn.Module):
    def __init__(self, vocab_size, embed_dim=128, hidden_dim=256, num_classes=5):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, num_classes)
        self.dropout = nn.Dropout(0.2)

    def forward(self, input_ids, attention_mask):
        embedded = self.embedding(input_ids)
        lstm_out, _ = self.lstm(embedded)
        # Mean-pool over real tokens only; taking the last time step would
        # land on a padding token for texts shorter than max_length
        mask = attention_mask.unsqueeze(-1).float()
        pooled = (lstm_out * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1)
        output = self.dropout(pooled)
        return self.fc(output)
# Training setup
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokenizer = AutoTokenizer.from_pretrained('bert-base-multilingual-cased')
# Load and prepare data
dataset = load_dataset("omanyasa/english-shona-langid")
train_dataset = TextDataset(
    dataset['train']['text'],
    dataset['train']['label'],
    tokenizer
)
test_dataset = TextDataset(
    dataset['test']['text'],
    dataset['test']['label'],
    tokenizer
)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32)
# Initialize model
model = LanguageClassifier(
vocab_size=tokenizer.vocab_size,
num_classes=len(train_dataset.label_to_id)
).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = nn.CrossEntropyLoss()
# Training loop
for epoch in range(5):
    model.train()
    total_loss = 0
    for batch in train_loader:
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)
        optimizer.zero_grad()
        outputs = model(input_ids, attention_mask)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"Epoch {epoch+1}, Loss: {total_loss/len(train_loader):.4f}")
```
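The loop above only tracks training loss. A minimal sketch for checking accuracy on the test split, reusing the model, `test_loader`, and `device` defined above (and assuming train and test datasets share the same sorted label mapping):
```python
# Evaluate accuracy on the held-out test split
model.eval()
correct, total = 0, 0
with torch.no_grad():
    for batch in test_loader:
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)
        preds = torch.argmax(model(input_ids, attention_mask), dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print(f"Test accuracy: {correct / total:.4f}")
```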
### 3. Transformers with Hugging Face
```python
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    TrainingArguments,
    Trainer
)
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
# Load dataset
dataset = load_dataset("omanyasa/english-shona-langid")
# Create label mappings
labels = dataset['train'].features['label'].names
label2id = {label: i for i, label in enumerate(labels)}
id2label = {i: label for i, label in enumerate(labels)}
# Initialize tokenizer and model
model_name = "distilbert-base-multilingual-cased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(
    model_name,
    num_labels=len(labels),
    label2id=label2id,
    id2label=id2label
)
# Preprocessing function
def preprocess_function(examples):
    return tokenizer(
        examples["text"],
        truncation=True,
        padding="max_length",
        max_length=128
    )
# Tokenize dataset (drop only the raw text column so the labels are kept for training)
tokenized_dataset = dataset.map(
    preprocess_function,
    batched=True,
    remove_columns=["text"]
)
# Evaluation metrics
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    accuracy = accuracy_score(labels, predictions)
    f1_macro = f1_score(labels, predictions, average='macro')
    f1_weighted = f1_score(labels, predictions, average='weighted')
    return {
        'accuracy': accuracy,
        'f1_macro': f1_macro,
        'f1_weighted': f1_weighted
    }
# Training arguments
training_args = TrainingArguments(
    output_dir="./results",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=3,
    weight_decay=0.01,
    evaluation_strategy="epoch",  # renamed to eval_strategy in recent transformers releases
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="f1_macro",
    greater_is_better=True,
)
# Initialize trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset["train"],
    eval_dataset=tokenized_dataset["validation"],
    compute_metrics=compute_metrics,
)
# Train model
trainer.train()
# Evaluate on test set
test_results = trainer.evaluate(tokenized_dataset["test"])
print("Test Results:", test_results)
# Save model
trainer.save_model("./language_id_model")
tokenizer.save_pretrained("./language_id_model")
```
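Padding every example to `max_length` keeps the code simple but wastes compute on short texts. If you prefer per-batch dynamic padding, one option (not used above) is to tokenize without padding and hand the `Trainer` a `DataCollatorWithPadding`; a sketch reusing the tokenizer, model, `training_args`, and `compute_metrics` from the example:
```python
from transformers import DataCollatorWithPadding, Trainer

# Tokenize without fixed-length padding; the collator pads each batch on the fly
def preprocess_dynamic(examples):
    return tokenizer(examples["text"], truncation=True, max_length=128)

tokenized_dynamic = dataset.map(preprocess_dynamic, batched=True, remove_columns=["text"])

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dynamic["train"],
    eval_dataset=tokenized_dynamic["validation"],
    data_collator=DataCollatorWithPadding(tokenizer=tokenizer),
    compute_metrics=compute_metrics,
)
trainer.train()
```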
## Inference Examples
### 1. Using Trained Model for Prediction
```python
from transformers import pipeline
import torch
# Load the trained model
model_path = "./language_id_model"
classifier = pipeline(
    "text-classification",
    model=model_path,
    tokenizer=model_path,
    device=0 if torch.cuda.is_available() else -1
)
# Sample texts for prediction
texts = [
    "Hello, how are you today?",  # English
    "Mhoro, makadii sei?",        # Shona
    "Sawubona, unjani?",          # Ndebele
    "Shani, mwaposa bwanji?",     # Tonga
    "Moni, muli bwanji?"          # Chewa
]
# Make predictions
predictions = classifier(texts)
for text, pred in zip(texts, predictions):
    print(f"Text: {text}")
    print(f"Predicted Language: {pred['label']} (Confidence: {pred['score']:.4f})")
    print("-" * 50)
```
### 2. Real-time Language Detection
```python
import gradio as gr
from transformers import pipeline
# Load model
classifier = pipeline(
    "text-classification",
    model="./language_id_model",
    tokenizer="./language_id_model"
)

def detect_language(text):
    if not text.strip():
        return "Please enter some text..."
    result = classifier(text)[0]
    language = result['label']
    confidence = result['score']
    return f"Detected Language: {language} (Confidence: {confidence:.2%})"

# Create Gradio interface
iface = gr.Interface(
    fn=detect_language,
    inputs=gr.Textbox(lines=3, placeholder="Enter text to detect language..."),
    outputs="text",
    title="African Language Detector",
    description="Detects English, Shona, Ndebele, Tonga, or Chewa from input text"
)
# Launch interface
iface.launch()
```
## Data Analysis Examples
### 1. Text Length Analysis
```python
from datasets import load_dataset
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# Load dataset
dataset = load_dataset("omanyasa/english-shona-langid")
# Analyze text lengths by language
def analyze_text_lengths(split_name):
    data = dataset[split_name]
    df = pd.DataFrame({
        'text': data['text'],
        'label': data['label'],
        'length': [len(text.split()) for text in data['text']]
    })
    plt.figure(figsize=(12, 6))
    sns.boxplot(data=df, x='label', y='length')
    plt.title(f'Text Length Distribution by Language ({split_name} set)')
    plt.xlabel('Language')
    plt.ylabel('Word Count')
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.show()
    return df.groupby('label')['length'].describe()
# Analyze training set
train_stats = analyze_text_lengths('train')
print("Text Length Statistics:")
print(train_stats)
```
### 2. Vocabulary Analysis
```python
from collections import Counter
import numpy as np
def analyze_vocabulary(split_name):
    data = dataset[split_name]
    vocab_by_language = {}
    for label in set(data['label']):
        texts = [text for text, lang in zip(data['text'], data['label']) if lang == label]
        words = ' '.join(texts).lower().split()
        vocab_by_language[label] = Counter(words)
    # Print vocabulary statistics
    for language, vocab in vocab_by_language.items():
        print(f"\n{language}:")
        print(f"  Unique words: {len(vocab)}")
        print(f"  Total words: {sum(vocab.values())}")
        print(f"  Top 10 words: {vocab.most_common(10)}")
    return vocab_by_language
vocab_stats = analyze_vocabulary('train')
```
## Error Analysis
```python
import torch
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt

def analyze_errors(model, test_loader, device, class_names):
    model.eval()
    all_preds = []
    all_labels = []
    with torch.no_grad():
        for batch in test_loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)
            outputs = model(input_ids, attention_mask)
            preds = torch.argmax(outputs, dim=1)
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())
    # Confusion matrix labelled with the human-readable class names
    cm = confusion_matrix(all_labels, all_preds)
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=class_names, yticklabels=class_names)
    plt.title('Confusion Matrix')
    plt.xlabel('Predicted')
    plt.ylabel('Actual')
    plt.show()
    return cm

# Use after training your model (class names in id order from the TextDataset mapping)
# class_names = [test_dataset.id_to_label[i] for i in range(len(test_dataset.id_to_label))]
# cm = analyze_errors(model, test_loader, device, class_names)
```
---
These examples provide a comprehensive guide for using the English-Shona Language Identification Dataset in various scenarios. Feel free to adapt them to your specific needs!