# Datasets conversion script (original file size: 2,292 bytes)
import csv
import json
import re
import random
from pathlib import Path
def extract_pos(hmr_text):
    """Split a trailing parenthesised part-of-speech tag off *hmr_text*.

    For input shaped like ``'word (pos)'`` returns ``('word', 'pos')``;
    when no trailing tag exists, returns the stripped text and ``None``.
    Falsy input (``None`` or ``''``) is passed through unchanged.
    """
    if not hmr_text:
        return hmr_text, None
    tagged = re.search(r'\s*\(([^)]+)\)$', hmr_text)
    if tagged is None:
        return hmr_text.strip(), None
    return hmr_text[:tagged.start()].strip(), tagged.group(1)
def convert():
    """Collect bilingual CSV rows into shuffled train/test JSONL splits.

    Reads every ``*.csv`` under ``data/`` (expecting ``en`` and ``hmr``
    columns), strips a trailing part-of-speech tag from the ``hmr`` field,
    then writes the full dataset to ``hmar_data.json`` and a reproducible
    95/5 split to ``processed/train.jsonl`` / ``processed/test.jsonl``.
    """
    data_dir = Path('data')
    processed_dir = Path('processed')
    processed_dir.mkdir(exist_ok=True)

    all_data = []
    # sorted() already returns a list -- wrapping rglob in list() was redundant.
    for csv_file in sorted(data_dir.rglob('*.csv')):
        try:
            with open(csv_file, mode='r', encoding='utf-8') as f:
                for row in csv.DictReader(f):
                    en = (row.get('en') or '').strip()
                    hmr = (row.get('hmr') or '').strip()
                    if not en and not hmr:
                        continue  # skip fully blank rows
                    hmr_clean, pos = extract_pos(hmr)
                    all_data.append({
                        'hmr': hmr_clean,
                        'en': en,
                        'pos': pos
                    })
        except Exception as e:
            # Best-effort: one malformed file should not abort the whole run.
            print(f"Error processing {csv_file}: {e}")

    # Shuffle for unbiased splitting
    random.seed(42)  # For reproducibility
    random.shuffle(all_data)

    # Split 95/5
    split_idx = int(len(all_data) * 0.95)
    train_data = all_data[:split_idx]
    test_data = all_data[split_idx:]

    # Save full JSON
    with open('hmar_data.json', 'w', encoding='utf-8') as f:
        json.dump(all_data, f, indent=2, ensure_ascii=False)

    # Save JSONL files for training
    def save_jsonl(data, filename):
        # One JSON object per line; writelines batches the many small writes.
        with open(processed_dir / filename, 'w', encoding='utf-8') as f:
            f.writelines(json.dumps(entry, ensure_ascii=False) + '\n'
                         for entry in data)

    save_jsonl(train_data, 'train.jsonl')
    save_jsonl(test_data, 'test.jsonl')

    print(f"Total entries: {len(all_data)}")
    print(f"Saved {len(train_data)} to processed/train.jsonl")
    print(f"Saved {len(test_data)} to processed/test.jsonl")
# Script entry point: run the dataset conversion when executed directly.
if __name__ == '__main__':
    convert()