#!/usr/bin/env python3
"""
Sample 1000 samples per language from merged_transliteration_sampled.jsonl
with constraints:
- Each sample must have < 100 words (counting both input_text and output_text)
- Samples must not exist in sampled_100k_translit.jsonl (train split)
"""
import json
import random
from collections import defaultdict
from typing import Set, Dict, List, Tuple
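
# Expected JSONL record shape (an assumption inferred from the fields read in
# the functions below, not a documented schema; the language code format may differ):
#   {"input_text": "...", "output_text": "...", "language": "<lang>"}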

def count_words(text: str) -> int:
    """Count words in a text string."""
    if not text:
        return 0
    return len(text.split())
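
# A quick sanity check (pure whitespace-splitting logic, easy to verify by hand):
#   count_words("two words") == 2
#   count_words("") == 0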

def load_train_split(train_file: str) -> Set[Tuple[str, str]]:
    """
    Load the train split and build a set of (input_text, output_text) tuples
    for fast membership checks, so test samples can be deduplicated against it.
    """
    train_samples = set()
    print(f"Loading train split from {train_file}...")
    with open(train_file, 'r', encoding='utf-8') as f:
        for line_num, line in enumerate(f, 1):
            if line_num % 10000 == 0:
                print(f" Processed {line_num} lines...")
            try:
                data = json.loads(line.strip())
                # Guard against explicit nulls as well as missing keys.
                input_text = (data.get('input_text') or '').strip()
                output_text = (data.get('output_text') or '').strip()
                # The (input, output) pair is the deduplication key.
                train_samples.add((input_text, output_text))
            except json.JSONDecodeError:
                # Skip malformed lines rather than aborting the whole load.
                continue
    print(f"Loaded {len(train_samples)} unique samples from train split")
    return train_samples
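
# Note: the entire train split is held in memory as a set of string pairs. For
# a file on the order of 100k lines (as its name suggests) this is cheap; for
# much larger splits, hashing each pair to a fixed-size digest before adding it
# to the set would be one way to cut memory use.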

def sample_test_split(
    source_file: str,
    train_samples: Set[Tuple[str, str]],
    samples_per_language: int = 1000,
    max_words: int = 100
) -> Dict[str, List[Dict]]:
    """
    Sample a test split from the source file, keeping only samples that fall
    under max_words in total and do not appear in the train split.

    Returns:
        Dictionary mapping language to the list of sampled records.
    """
    # Group candidate samples by language.
    samples_by_language = defaultdict(list)
    print(f"\nReading source file: {source_file}")
    with open(source_file, 'r', encoding='utf-8') as f:
        for line_num, line in enumerate(f, 1):
            if line_num % 100000 == 0:
                print(f" Processed {line_num} lines...")
            try:
                data = json.loads(line.strip())
                # Guard against explicit nulls as well as missing keys.
                input_text = (data.get('input_text') or '').strip()
                output_text = (data.get('output_text') or '').strip()
                language = (data.get('language') or '').strip()
                if not language or not input_text or not output_text:
                    continue
                # Enforce the word-count constraint: strictly fewer than
                # max_words across input and output combined.
                total_words = count_words(input_text) + count_words(output_text)
                if total_words >= max_words:
                    continue
                # Skip any sample that already appears in the train split.
                sample_tuple = (input_text, output_text)
                if sample_tuple in train_samples:
                    continue
                samples_by_language[language].append(data)
            except json.JSONDecodeError:
                continue
    print("\nFound samples by language:")
    for lang, samples in samples_by_language.items():
        print(f" {lang}: {len(samples)} samples")
    # Sample from each language group.
    sampled_data = {}
    print(f"\nSampling {samples_per_language} samples per language...")
    for language, samples in samples_by_language.items():
        if len(samples) < samples_per_language:
            print(f" WARNING: {language} has only {len(samples)} samples, "
                  f"requested {samples_per_language}. Using all available.")
            sampled_data[language] = samples
        else:
            sampled_data[language] = random.sample(samples, samples_per_language)
        print(f" {language}: sampled {len(sampled_data[language])} samples")
    return sampled_data
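
# The returned mapping is keyed by the source file's "language" values
# (illustrative shape only; actual keys depend on the data):
#   {"<lang>": [record, record, ...], ...}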

def write_output(sampled_data: Dict[str, List[Dict]], output_file: str):
    """Write the sampled data to a JSONL output file, grouped by language."""
    print(f"\nWriting output to {output_file}...")
    total_samples = 0
    with open(output_file, 'w', encoding='utf-8') as f:
        # Iterate in sorted language order so the output is deterministic
        # and grouped by language.
        for language, samples in sorted(sampled_data.items()):
            for sample in samples:
                f.write(json.dumps(sample, ensure_ascii=False) + '\n')
                total_samples += 1
    print(f"Wrote {total_samples} samples to {output_file}")

def main():
    source_file = "/projects/data/Embedding/IndicToolkit/datasets_final/data/merged_transliteration_sampled.jsonl"
    train_file = "/projects/data/Embedding/IndicToolkit/datasets_final/data/sampled_100k_translit.jsonl"
    output_file = "/projects/data/Embedding/IndicToolkit/datasets_final/data/test_split_translit.jsonl"
    # Fix the random seed so repeated runs produce the same test split.
    random.seed(42)
    # Load the train split to deduplicate against.
    train_samples = load_train_split(train_file)
    # Sample the test split.
    sampled_data = sample_test_split(
        source_file=source_file,
        train_samples=train_samples,
        samples_per_language=1000,
        max_words=100,
    )
    # Write the output.
    write_output(sampled_data, output_file)
    print("\nDone!")

if __name__ == "__main__":
    main()
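
# Usage (the script name here is illustrative; all paths are hard-coded in main()):
#   python3 sample_test_split.py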