# NOTE: removed web-scrape artifact that preceded the script
# (a "File size" banner, a commit hash, and a copy-pasted line-number
# gutter 1-60) — none of it is Python and it broke the file at import.
from datasets import load_from_disk, Audio
import re

# ==========================
# Config
# ==========================

# Where the merged dataset currently lives on disk, and the Hub repo it
# would be pushed to (push is disabled further down in the script).
DATA_DIR = "data/dataset_hindi_6k"
HF_REPO = "PharynxAI/merged_multilingual_tts_6k_each"

# Target sampling rate (Hz) used when casting the audio column below.
SAMPLING_RATE = 24000


# ==========================
# Load merged dataset
# ==========================
print("πŸ“Œ Loading merged dataset from disk...")
ds = load_from_disk(DATA_DIR)

print(ds)
print(ds.features)


# ==========================
# Apply SAME preprocessing as example
# ==========================
def apply_preprocessing(example):
    """Normalize one example's transcript to the speaker-tagged format.

    Strips surrounding whitespace from ``example["text"]`` and, when the
    text does not already begin with ``"Speaker"``, prepends the
    ``"Speaker 0: "`` prefix so every transcript matches the reference
    example's format. Returns a dict with the updated ``"text"`` column.
    """
    cleaned = example["text"].strip()

    # Already carries a speaker tag — leave it untouched.
    if cleaned.startswith("Speaker"):
        return {"text": cleaned}

    return {"text": f"Speaker 0: {cleaned}"}


# Rewrite the "text" column in place across 4 worker processes.
ds = ds.map(apply_preprocessing, num_proc=4)

# EXACT equivalent of:
# dataset = dataset.cast_column("audio", Audio(sampling_rate=24000))
# Casting to Audio(sampling_rate=SAMPLING_RATE) makes the audio column
# decode at 24 kHz when accessed (resampling lazily if the source rate
# differs — per the datasets Audio feature contract).
ds = ds.cast_column("audio", Audio(sampling_rate=SAMPLING_RATE))


# ==========================
# Final verification
# ==========================
print("βœ… Final features:")
print(ds.features)
print("πŸ“ Sample:", ds[0]["text"])
print("πŸ”Š SR:", ds[0]["audio"]["sampling_rate"])


# ==========================
# Push to Hub
# ==========================
#print(f"πŸš€ Pushing dataset to: {HF_REPO}")
#ds.push_to_hub(HF_REPO, max_shard_size="500MB",num_proc=1)
ds.save_to_disk("data/dataset_hindi_6k_processed")
print("Saved dataset locally")
print("πŸŽ‰ Done.")