| from datasets import load_from_disk, Audio | |
| import re | |
# ==========================
# Config
# ==========================
# Local path of the merged dataset previously written with save_to_disk.
DATA_DIR = "data/dataset_hindi_6k"
# Hugging Face Hub repo id (the actual push further down is commented out).
HF_REPO = "PharynxAI/merged_multilingual_tts_6k_each"
# Target audio sampling rate in Hz; the audio column is cast to this below.
SAMPLING_RATE = 24000

# ==========================
# Load merged dataset
# ==========================
print("π Loading merged dataset from disk...")
ds = load_from_disk(DATA_DIR)
print(ds)           # dataset summary for a quick sanity check
print(ds.features)  # schema BEFORE preprocessing, for comparison with the final print
| # ========================== | |
| # Apply SAME preprocessing as example | |
| # ========================== | |
def apply_preprocessing(example):
    """Normalize one example's transcript for TTS training.

    Trims surrounding whitespace from ``example["text"]`` and, when the text
    does not already begin with a "Speaker" tag, prefixes it with
    ``"Speaker 0: "`` so every transcript carries a speaker label.

    Returns a dict with only the updated ``text`` key (datasets.map merges it
    back into the example).
    """
    cleaned = example["text"].strip()
    # Guard clause: texts that already carry a speaker tag pass through as-is.
    if cleaned.startswith("Speaker"):
        return {"text": cleaned}
    return {"text": f"Speaker 0: {cleaned}"}
# Apply the text normalization across the dataset with 4 worker processes.
ds = ds.map(apply_preprocessing, num_proc=4)
# EXACT equivalent of:
# dataset = dataset.cast_column("audio", Audio(sampling_rate=24000))
# Casting to Audio(sampling_rate=...) resamples lazily: audio is decoded and
# resampled to SAMPLING_RATE when an example is accessed, not eagerly here.
ds = ds.cast_column("audio", Audio(sampling_rate=SAMPLING_RATE))
# ==========================
# Final verification
# ==========================
print("β Final features:")
print(ds.features)                               # schema AFTER map + cast
print("π Sample:", ds[0]["text"])                # spot-check the "Speaker" prefix
print("π SR:", ds[0]["audio"]["sampling_rate"])  # should equal SAMPLING_RATE (24000)
# ==========================
# Push to Hub
# ==========================
# NOTE(review): Hub upload is intentionally disabled; the processed dataset is
# only persisted locally below. Re-enable these two lines to publish to HF_REPO.
#print(f"π Pushing dataset to: {HF_REPO}")
#ds.push_to_hub(HF_REPO, max_shard_size="500MB",num_proc=1)
ds.save_to_disk("data/dataset_hindi_6k_processed")
print("Saved dataset locally")
print("π Done.")