# Source: Hugging Face repo "ArunKr/tts-quantized-dataset", file data_prep.py
# (uploaded by ArunKr, commit f781004, verified)
# %%writefile data_prep.py
import os
import torch
from datasets import load_dataset, Audio, DatasetDict
from nemo.collections.tts.models import AudioCodecModel
from huggingface_hub import login
from torch.nn.utils.rnn import pad_sequence
# -------------------------------
# 1. Settings
# -------------------------------
SOURCE_DATASET = "SayantanJoker/original_data_hindi_tts"  # input HF dataset (Hindi TTS audio + transcriptions)
TARGET_REPO = "ArunKr/tts-quantized-dataset"  # destination HF dataset repo for the encoded tokens
SAMPLE_RATE = 22050  # Hz; only referenced by the commented-out cast_column below
BATCH_SIZE = 32 # adjust depending on GPU memory
# -------------------------------
# 2. Retrieve HF Token (Colab + local compatible)
# -------------------------------
def get_hf_token():
    """Locate a Hugging Face access token.

    Lookup order:
      1. Google Colab secret ``HF_TOKEN`` (only when running inside Colab).
      2. The ``HF_TOKEN`` environment variable.

    Returns:
        str: the token.

    Raises:
        EnvironmentError: if no token is found in either location.
    """
    # The import only succeeds inside Google Colab; the broad catch also
    # covers Colab's SecretNotFoundError when the secret is absent.
    try:
        from google.colab import userdata
        colab_token = userdata.get("HF_TOKEN")
    except Exception:
        colab_token = None
    if colab_token:
        print("🔑 Loaded HF_TOKEN from Colab userdata.")
        return colab_token

    env_token = os.getenv("HF_TOKEN")
    if env_token:
        print("🔑 Loaded HF_TOKEN from environment variable.")
        return env_token

    raise EnvironmentError(
        "❌ Missing HF_TOKEN. Please set it in Colab secrets or export it as an environment variable."
    )
# Resolve the token once at import time: export it for libraries that read
# the environment, and authenticate the hub client for push_to_hub below.
HF_TOKEN = get_hf_token()
os.environ["HF_TOKEN"] = HF_TOKEN
login(HF_TOKEN)
# -------------------------------
# 3. Load dataset
# -------------------------------
print(f"⬇️ Loading dataset {SOURCE_DATASET}")
# NOTE(review): the original loaded the full split twice and discarded the
# first result; the redundant load is removed here.
# TODO: drop .select(range(4)) — it keeps only a 4-row debug subset.
raw_ds = load_dataset(SOURCE_DATASET, split="train").select(range(4))
# raw_ds = raw_ds.cast_column("audio", Audio(sampling_rate=SAMPLE_RATE))
# -------------------------------
# 4. Load codec
# -------------------------------
# Prefer the GPU when one is visible; everything below also runs on CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
# Pre-trained NVIDIA nano codec (name indicates 22 kHz, 0.6 kbps, 12.5 fps),
# moved to the chosen device and frozen in eval mode.
codec = (
    AudioCodecModel.from_pretrained("nvidia/nemo-nano-codec-22khz-0.6kbps-12.5fps")
    .to(device)
    .eval()
)
# -------------------------------
# 5. Batched encoding function
# -------------------------------
@torch.no_grad()
def batch_encode(batch):
    """Encode a batch of audio examples into nano-codec token layers.

    Args:
        batch: a ``datasets`` batch dict with columns ``audio`` (each entry a
            dict holding an ``array`` of float samples) and ``transcription``.

    Returns:
        dict of per-example lists: ``text``, ``speaker`` (fixed label),
        ``nano_layer_1`` .. ``nano_layer_4`` (token ids, one list per codec
        codebook), and ``encoded_len`` (valid token count per example).
    """
    texts = batch["transcription"]
    # NOTE(review): the original also derived a speaker id from ``file_name``
    # but unconditionally overwrote it with this constant inside the loop;
    # the dead computation is removed and the constant hoisted.
    speaker_label = "hindi_female"

    waveforms = [
        torch.tensor(a["array"], dtype=torch.float32).to(device)
        for a in batch["audio"]
    ]
    lengths = torch.tensor([len(w) for w in waveforms], device=device)
    # Right-pad to the longest clip so the codec sees a rectangular batch;
    # encoded_len reports each example's valid (unpadded) token count.
    padded = pad_sequence(waveforms, batch_first=True)
    encoded_tokens, encoded_len = codec.encode(audio=padded, audio_len=lengths)

    results = {
        "text": [],
        "speaker": [],
        "nano_layer_1": [],
        "nano_layer_2": [],
        "nano_layer_3": [],
        "nano_layer_4": [],
        "encoded_len": [],
    }
    # Each ``codes`` is (num_codebooks, T); assumes 4 codebooks, matching the
    # four nano_layer_* output columns — TODO confirm against the codec config.
    for txt, codes, n_tokens in zip(texts, encoded_tokens.cpu(), encoded_len.cpu()):
        results["text"].append(txt)
        results["speaker"].append(speaker_label)
        for layer in range(4):
            results[f"nano_layer_{layer + 1}"].append(codes[layer].tolist())
        results["encoded_len"].append(int(n_tokens))
    return results
# -------------------------------
# 6. Apply to dataset
# -------------------------------
print("🔄 Encoding in batches...")
# Batched map: remove_columns drops every original column, so the output
# dataset contains only the fields produced by batch_encode.
processed_ds = raw_ds.map(
    batch_encode,
    batched=True,
    batch_size=BATCH_SIZE,
    remove_columns=raw_ds.column_names,
)
# Wrap in DatasetDict to make push_to_hub compatible
processed_ds = DatasetDict({"train": processed_ds})
# -------------------------------
# 7. Save + Push
# -------------------------------
# Keep a local on-disk copy before uploading, then push publicly to the hub.
processed_ds.save_to_disk("tts_quantized_dataset")
print(f"⬆️ Uploading to {TARGET_REPO}")
processed_ds.push_to_hub(TARGET_REPO, private=False, token=HF_TOKEN)
print("✅ Done.")