# baseten_vibevoice/merge_dataset.py
from glob import glob

from datasets import Audio, concatenate_datasets, load_dataset, load_from_disk
MAX_ROWS = 6000  # cap each source dataset at this many rows
FINAL = []       # datasets to merge at the end
# -------------------------------------------------
# Helper
# -------------------------------------------------
def load_local_parquet(path, text_col="transcription", filter_fn=None):
    """Load parquet shards matching a glob, keep only audio + text, cap at MAX_ROWS."""
    print(f"\n📌 Loading: {path}")
    files = sorted(glob(path, recursive=True))
    assert files, f"No parquet files found in {path}"
    ds = load_dataset(
        "parquet",
        data_files=files,
        split="train"
    )
    if filter_fn:
        ds = ds.filter(filter_fn)
    if text_col != "text":
        ds = ds.rename_column(text_col, "text")
    # 🔥 KEEP ONLY audio + text
    ds = ds.select_columns(["audio", "text"])
    ds = ds.cast_column("audio", Audio(sampling_rate=24000))
    if len(ds) > MAX_ROWS:
        ds = ds.shuffle(seed=42).select(range(MAX_ROWS))
    print(f"✅ Rows used: {len(ds)} | Columns: {ds.column_names}")
    return ds
def load_arrow_dataset(path, text_col="transcription"):
    """Load a saved-to-disk Arrow dataset, keep only audio + text, cap at MAX_ROWS."""
    print(f"\n📌 Loading Arrow dataset: {path}")
    ds = load_from_disk(path)
    if text_col != "text":
        ds = ds.rename_column(text_col, "text")
    # 🔥 KEEP ONLY audio + text
    ds = ds.select_columns(["audio", "text"])
    ds = ds.cast_column("audio", Audio(sampling_rate=24000))
    if len(ds) > MAX_ROWS:
        ds = ds.shuffle(seed=42).select(range(MAX_ROWS))
    print(f"✅ Rows used: {len(ds)} | Columns: {ds.column_names}")
    return ds
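# -------------------------------------------------
# Optional sanity check (illustrative addition, not in the original script):
# decode one row to confirm the 24 kHz audio cast. `preview_example` is a
# hypothetical helper; it only touches fields the loaders above produce.
# -------------------------------------------------
def preview_example(ds, idx=0):
    ex = ds[idx]  # accessing a row decodes the Audio feature on the fly
    audio = ex["audio"]
    print(f"🔎 text: {ex['text'][:80]!r}")
    print(f"🔎 audio: {len(audio['array'])} samples @ {audio['sampling_rate']} Hz")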
# -------------------------------------------------
# 1. Bengali (male only)
# -------------------------------------------------
#bengali = load_local_parquet(
#    "local_data/IndicTTS_Bengali/data/**/*.parquet",
#    text_col="text",
#    filter_fn=lambda x: "train_bengalimale" in x["utterance_id"]
#)
#FINAL.append(bengali)
# -------------------------------------------------
# 2. Arabic
# -------------------------------------------------
#arabic = load_local_parquet(
#    "local_data/arabic_tts/**/*.parquet",
#    text_col="transcription"
#)
#FINAL.append(arabic)
# -------------------------------------------------
# 3. Hindi (female 5hr)
# -------------------------------------------------
hindi = load_local_parquet(
    "local_data/hindi_female_5hr/**/*.parquet",
    text_col="text"
)
FINAL.append(hindi)
# -------------------------------------------------
# 4. English (local)
# -------------------------------------------------
#english = load_arrow_dataset(
#    "local_data/local_eng",
#    text_col="transcription"
#)
#FINAL.append(english)
# -------------------------------------------------
# 5. Punjabi (local)
# -------------------------------------------------
#punjabi = load_arrow_dataset(
#    "local_data/local_punjabi/train",
#    text_col="transcription"
#)
#FINAL.append(punjabi)
# -------------------------------------------------
# Merge ALL
# -------------------------------------------------
print("\nπŸš€ Merging all datasets")
merged = concatenate_datasets(FINAL)
print("\nπŸ“Š FINAL DATASET")
print(merged)
print("Total rows:", len(merged))
print("Columns:", merged.column_names)
# -------------------------------------------------
# Save locally
# -------------------------------------------------
OUT_DIR = "data/dataset_hindi_6k"
merged.save_to_disk(OUT_DIR)
print(f"\nπŸ’Ύ Saved with save_to_disk β†’ {OUT_DIR}")
# Optional: also save as Parquet (Trainer-friendly)
#merged.to_parquet(f"{OUT_DIR}.parquet")
#print(f"💾 Saved Parquet → {OUT_DIR}.parquet")