# %%writefile data_prep.py
import os
import torch
from datasets import load_dataset, Audio, DatasetDict
from nemo.collections.tts.models import AudioCodecModel
from huggingface_hub import login
from torch.nn.utils.rnn import pad_sequence

# -------------------------------
# 1. Settings
# -------------------------------
SOURCE_DATASET = "SayantanJoker/original_data_hindi_tts"
TARGET_REPO = "ArunKr/tts-quantized-dataset"
SAMPLE_RATE = 22050
BATCH_SIZE = 32  # adjust depending on GPU memory

# -------------------------------
# 2. Retrieve HF Token (Colab + local compatible)
# -------------------------------
def get_hf_token():
    """Try to get HF token from Colab secrets or environment."""
    token = None
    try:
        # works only inside Google Colab
        from google.colab import userdata
        token = userdata.get("HF_TOKEN")
        if token:
            print("🔑 Loaded HF_TOKEN from Colab userdata.")
    except Exception:
        pass

    if not token:
        token = os.getenv("HF_TOKEN")
        if token:
            print("🔑 Loaded HF_TOKEN from environment variable.")

    if not token:
        raise EnvironmentError(
            "❌ Missing HF_TOKEN. Please set it in Colab secrets or export it as an environment variable."
        )
    return token

HF_TOKEN = get_hf_token()
os.environ["HF_TOKEN"] = HF_TOKEN
login(HF_TOKEN)

# -------------------------------
# 3. Load dataset
# -------------------------------
print(f"⬇️ Loading dataset {SOURCE_DATASET}")
raw_ds = load_dataset(SOURCE_DATASET, split="train")
# raw_ds = raw_ds.select(range(4))  # uncomment for a quick smoke test on a few clips
# Resample to the rate the 22 kHz nano codec expects.
raw_ds = raw_ds.cast_column("audio", Audio(sampling_rate=SAMPLE_RATE))

# -------------------------------
# 4. Load codec
# -------------------------------
device = "cuda" if torch.cuda.is_available() else "cpu"
codec = AudioCodecModel.from_pretrained(
    "nvidia/nemo-nano-codec-22khz-0.6kbps-12.5fps"
).to(device).eval()
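
# Informational only (a small sketch, not part of the pipeline): report what the
# loaded codec thinks its sample rate is. The attribute is read defensively with
# getattr because the exact attribute name can differ across NeMo versions.
codec_sample_rate = getattr(codec, "sample_rate", None)
print(f"🎛️ Codec loaded on {device}; reported sample rate: {codec_sample_rate}")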

# -------------------------------
# 5. Batched encoding function
# -------------------------------
@torch.no_grad()
def batch_encode(batch):
    """Encode a batch of audio clips into the codec's four token layers."""
    audios = batch["audio"]
    texts = batch["transcription"]
    # Single-speaker dataset: every utterance gets the same speaker label.
    speaker_label = "hindi_female"

    waveforms, lengths = [], []
    for a in audios:
        wav = torch.tensor(a["array"], dtype=torch.float32).to(device)
        waveforms.append(wav)
        lengths.append(len(wav))

    waveforms = pad_sequence(waveforms, batch_first=True)
    lengths = torch.tensor(lengths, device=device)

    encoded_tokens, encoded_len = codec.encode(audio=waveforms, audio_len=lengths)

    results = {
        "text": [],
        "speaker": [],
        "nano_layer_1": [],
        "nano_layer_2": [],
        "nano_layer_3": [],
        "nano_layer_4": [],
        "encoded_len": [],
    }

    for txt, codes, codes_len in zip(texts, encoded_tokens.cpu(), encoded_len.cpu()):
        results["text"].append(txt)
        results["speaker"].append(speaker_label)
        results["nano_layer_1"].append(codes[0].tolist())
        results["nano_layer_2"].append(codes[1].tolist())
        results["nano_layer_3"].append(codes[2].tolist())
        results["nano_layer_4"].append(codes[3].tolist())
        results["encoded_len"].append(int(codes_len))

    return results
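
# -------------------------------
# Optional sanity check (a minimal sketch): round-trip one clip through the codec.
# This assumes AudioCodecModel also exposes a decode(tokens=..., tokens_len=...)
# method symmetrical to encode; verify against your installed NeMo version.
# -------------------------------
@torch.no_grad()
def sanity_check_roundtrip(example):
    wav = torch.tensor(example["audio"]["array"], dtype=torch.float32, device=device).unsqueeze(0)
    wav_len = torch.tensor([wav.shape[1]], device=device)
    tokens, tokens_len = codec.encode(audio=wav, audio_len=wav_len)
    recon, recon_len = codec.decode(tokens=tokens, tokens_len=tokens_len)
    print(f"🔁 original samples: {wav.shape[1]}, reconstructed samples: {int(recon_len[0])}")

# sanity_check_roundtrip(raw_ds[0])  # uncomment to spot-check one clip before the full run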

# -------------------------------
# 6. Apply to dataset
# -------------------------------
print("🔄 Encoding in batches...")
processed_ds = raw_ds.map(
    batch_encode,
    batched=True,
    batch_size=BATCH_SIZE,
    remove_columns=raw_ds.column_names,
)

# Wrap in DatasetDict to make push_to_hub compatible
processed_ds = DatasetDict({"train": processed_ds})

# -------------------------------
# 7. Save + Push
# -------------------------------
processed_ds.save_to_disk("tts_quantized_dataset")
print(f"⬆️ Uploading to {TARGET_REPO}")
processed_ds.push_to_hub(TARGET_REPO, private=False, token=HF_TOKEN)

print("✅ Done.")