# Source: psidharth567 — "Upload folder using huggingface_hub" (commit 65bd563, verified)
#!/usr/bin/env python3
"""
Sample 1000 samples per language from merged_transliteration_sampled.jsonl
with constraints:
- Each sample must have < 100 words (counting both input_text and output_text)
- Samples must not exist in sampled_100k_translit.jsonl (train split)
"""
import json
import random
from collections import defaultdict
from typing import Set, Dict, List, Tuple
def count_words(text: str) -> int:
    """Return the number of whitespace-separated tokens in *text*.

    Falsy input (empty string or None) counts as zero words.
    """
    return len(text.split()) if text else 0
def load_train_split(train_file: str) -> Set[Tuple[str, str]]:
    """
    Load the train split and build a set of (input_text, output_text) tuples
    for O(1) duplicate lookup.

    Args:
        train_file: Path to the train-split JSONL file.

    Returns:
        Set of whitespace-stripped (input_text, output_text) pairs.
        Malformed JSON lines are skipped silently.
    """
    train_samples: Set[Tuple[str, str]] = set()
    print(f"Loading train split from {train_file}...")
    with open(train_file, 'r', encoding='utf-8') as f:
        for line_num, line in enumerate(f, 1):
            if line_num % 10000 == 0:
                print(f" Processed {line_num} lines...")
            try:
                data = json.loads(line.strip())
            except json.JSONDecodeError:
                # Best-effort load: skip unparseable lines.
                continue
            # `or ''` guards against explicit JSON nulls — .get's default only
            # applies when the key is absent, so `"input_text": null` would
            # otherwise crash on .strip().
            input_text = (data.get('input_text') or '').strip()
            output_text = (data.get('output_text') or '').strip()
            # Tuple of both sides is the deduplication key.
            train_samples.add((input_text, output_text))
    print(f"Loaded {len(train_samples)} samples from train split")
    return train_samples
def sample_test_split(
    source_file: str,
    train_samples: Set[Tuple[str, str]],
    samples_per_language: int = 1000,
    max_words: int = 100
) -> Dict[str, List[Dict]]:
    """
    Sample test split from source file.

    A record is eligible when it has a non-empty language, input_text and
    output_text, the combined input+output word count is strictly below
    ``max_words``, and its (input_text, output_text) pair does not appear in
    ``train_samples`` (i.e. it is not in the train split).

    Args:
        source_file: Path to the source JSONL file.
        train_samples: (input_text, output_text) pairs already used for training.
        samples_per_language: Target sample count per language; if a language
            has fewer eligible records, all of them are kept (with a warning).
        max_words: Exclusive upper bound on combined word count.

    Returns:
        Dictionary mapping language to list of sampled samples.
    """
    # Group eligible samples by language.
    samples_by_language: Dict[str, List[Dict]] = defaultdict(list)
    print(f"\nReading source file: {source_file}")
    with open(source_file, 'r', encoding='utf-8') as f:
        for line_num, line in enumerate(f, 1):
            if line_num % 100000 == 0:
                print(f" Processed {line_num} lines...")
            try:
                data = json.loads(line.strip())
            except json.JSONDecodeError:
                # Best-effort read: skip unparseable lines.
                continue
            # `or ''` guards against explicit JSON nulls ("input_text": null),
            # which .get's default does not cover and which would crash .strip().
            input_text = (data.get('input_text') or '').strip()
            output_text = (data.get('output_text') or '').strip()
            language = (data.get('language') or '').strip()
            if not language or not input_text or not output_text:
                continue
            # Enforce the word budget over both sides of the pair.
            if len(input_text.split()) + len(output_text.split()) >= max_words:
                continue
            # Exclude anything already present in the train split.
            if (input_text, output_text) in train_samples:
                continue
            samples_by_language[language].append(data)
    print(f"\nFound samples by language:")
    for lang, samples in samples_by_language.items():
        print(f" {lang}: {len(samples)} samples")
    # Draw up to samples_per_language records per language.
    sampled_data: Dict[str, List[Dict]] = {}
    print(f"\nSampling {samples_per_language} samples per language...")
    for language, samples in samples_by_language.items():
        if len(samples) < samples_per_language:
            print(f" WARNING: {language} has only {len(samples)} samples, "
                  f"requested {samples_per_language}. Using all available.")
            sampled_data[language] = samples
        else:
            # random.sample draws without replacement; seeded by the caller.
            sampled_data[language] = random.sample(samples, samples_per_language)
        print(f" {language}: sampled {len(sampled_data[language])} samples")
    return sampled_data
def write_output(sampled_data: Dict[str, List[Dict]], output_file: str):
    """Serialize sampled records to a JSONL file, one record per line.

    Languages are written in sorted order so output is deterministic.
    """
    print(f"\nWriting output to {output_file}...")
    written = 0
    with open(output_file, 'w', encoding='utf-8') as out:
        for _, records in sorted(sampled_data.items()):
            # ensure_ascii=False keeps non-Latin scripts readable in the file.
            lines = [json.dumps(rec, ensure_ascii=False) + '\n' for rec in records]
            out.writelines(lines)
            written += len(lines)
    print(f"Written {written} samples to {output_file}")
def main():
    """Build the transliteration test split: load train pairs, sample, write."""
    source_file = "/projects/data/Embedding/IndicToolkit/datasets_final/data/merged_transliteration_sampled.jsonl"
    train_file = "/projects/data/Embedding/IndicToolkit/datasets_final/data/sampled_100k_translit.jsonl"
    output_file = "/projects/data/Embedding/IndicToolkit/datasets_final/data/test_split_translit.jsonl"

    # Fixed seed keeps random.sample reproducible across runs.
    random.seed(42)

    # Known train pairs are excluded from the test split to prevent leakage.
    existing_pairs = load_train_split(train_file)

    selected = sample_test_split(
        source_file=source_file,
        train_samples=existing_pairs,
        samples_per_language=1000,
        max_words=100
    )

    write_output(selected, output_file)
    print("\nDone!")


if __name__ == "__main__":
    main()