Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See raw diff.
- .dockerignore +18 -0
- .trussignore +9 -0
- Dockerfile +32 -0
- __pycache__/config.cpython-310.pyc +0 -0
- __pycache__/data_vibevoice.cpython-310.pyc +0 -0
- __pycache__/finetune_vibevoice_lora.cpython-310.pyc +0 -0
- __pycache__/finetune_vibevoice_lora.cpython-311.pyc +0 -0
- config.py +49 -0
- diff_head_layers.txt +26 -0
- finalize_indian_english.py +8 -0
- merge_dataset.py +133 -0
- pre-processing.py +59 -0
- pyproject.toml +41 -0
- requirements.txt +41 -0
- run.sh +6 -0
- src/__pycache__/data_vibevoice.cpython-310.pyc +0 -0
- src/__pycache__/finetune_vibevoice_lora.cpython-310.pyc +0 -0
- src/__pycache__/finetune_vibevoice_lora.cpython-311.pyc +0 -0
- src/data_vibevoice.py +493 -0
- src/finetune_vibevoice_lora.py +949 -0
- src/vibevoice/.DS_Store +0 -0
- src/vibevoice/configs/qwen2.5_1.5b_64k.json +112 -0
- src/vibevoice/configs/qwen2.5_7b_32k.json +113 -0
- src/vibevoice/data_vibevoice.py +0 -0
- src/vibevoice/modular/__init__.py +0 -0
- src/vibevoice/modular/__pycache__/__init__.cpython-310.pyc +0 -0
- src/vibevoice/modular/__pycache__/__init__.cpython-311.pyc +0 -0
- src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-310.pyc +0 -0
- src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-311.pyc +0 -0
- src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-310.pyc +0 -0
- src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-311.pyc +0 -0
- src/vibevoice/modular/__pycache__/modeling_vibevoice_inference.cpython-310.pyc +0 -0
- src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-310.pyc +0 -0
- src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-311.pyc +0 -0
- src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-310.pyc +0 -0
- src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-311.pyc +0 -0
- src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-310.pyc +0 -0
- src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-311.pyc +0 -0
- src/vibevoice/modular/__pycache__/streamer.cpython-310.pyc +0 -0
- src/vibevoice/modular/configuration_vibevoice.py +248 -0
- src/vibevoice/modular/modeling_vibevoice.py +508 -0
- src/vibevoice/modular/modeling_vibevoice_inference.py +715 -0
- src/vibevoice/modular/modular_vibevoice_diffusion_head.py +287 -0
- src/vibevoice/modular/modular_vibevoice_text_tokenizer.py +214 -0
- src/vibevoice/modular/modular_vibevoice_tokenizer.py +1195 -0
- src/vibevoice/modular/streamer.py +264 -0
- src/vibevoice/processor/__init__.py +0 -0
- src/vibevoice/processor/__pycache__/__init__.cpython-310.pyc +0 -0
- src/vibevoice/processor/__pycache__/__init__.cpython-311.pyc +0 -0
- src/vibevoice/processor/__pycache__/vibevoice_processor.cpython-310.pyc +0 -0
.dockerignore
ADDED
@@ -0,0 +1,18 @@
+.env
+.venv
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
+.ipynb_checkpoints/
+.cache/
+.git/
+.gitignore
+wandb/
+nohup.out
+*.log
+data/
+local_data/
+trash/
+models/
+vibevoice-large/
.trussignore
ADDED
@@ -0,0 +1,9 @@
+
+# Ignore large datasets
+data/
+data/**
+
+# Optional: local caches
+__pycache__/
+*.pyc
+.cache/
Dockerfile
ADDED
@@ -0,0 +1,32 @@
+FROM nvidia/cuda:12.2.2-cudnn8-runtime-ubuntu22.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+ENV PYTHONUNBUFFERED=1
+ENV PIP_NO_CACHE_DIR=1
+
+RUN apt-get update && apt-get install -y \
+    python3.10 \
+    python3.10-venv \
+    python3-pip \
+    git \
+    ffmpeg \
+    sox \
+    libsndfile1 \
+    libgl1 \
+    ca-certificates \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN ln -sf /usr/bin/python3.10 /usr/bin/python
+RUN python -m pip install --upgrade pip setuptools wheel
+
+WORKDIR /app
+
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY . .
+
+RUN chmod +x training.sh
+
+CMD ["bash", "training.sh"]
+
__pycache__/config.cpython-310.pyc
ADDED
Binary file (974 Bytes).
__pycache__/data_vibevoice.cpython-310.pyc
ADDED
Binary file (11.5 kB).
__pycache__/finetune_vibevoice_lora.cpython-310.pyc
ADDED
Binary file (30.5 kB).
__pycache__/finetune_vibevoice_lora.cpython-311.pyc
ADDED
Binary file (67.2 kB).
config.py
ADDED
@@ -0,0 +1,49 @@
+from truss_train import definitions
+from truss.base import truss_config
+
+# Base image
+BASE_IMAGE = "pytorch/pytorch:2.4.1-cuda12.1-cudnn9-runtime"
+
+# Runtime configuration
+training_runtime = definitions.Runtime(
+    start_commands=[
+        "chmod +x run.sh",
+        "./run.sh",
+    ],
+    environment_variables={
+        "HF_HOME": "/workspace/.cache/huggingface",
+        "TORCH_HOME": "/workspace/.cache/torch",
+        # Optional: only if using HF private models
+        # "HF_TOKEN": definitions.SecretReference(name="hf_access_token"),
+    },
+    # Truss 0.12 ONLY supports enabled=True (no paths)
+    cache_config=definitions.CacheConfig(
+        enabled=True,
+    ),
+    checkpointing_config=definitions.CheckpointingConfig(
+        enabled=True,
+    ),
+)
+
+# Compute configuration
+training_compute = definitions.Compute(
+    accelerator=truss_config.AcceleratorSpec(
+        accelerator=truss_config.Accelerator.H100,
+        count=1,
+    ),
+    cpu_count=16,
+)
+
+# Training job
+training_job = definitions.TrainingJob(
+    image=definitions.Image(base_image=BASE_IMAGE),
+    compute=training_compute,
+    runtime=training_runtime,
+)
+
+# Training project
+training_project = definitions.TrainingProject(
+    name="vibevoice-finetune",
+    job=training_job,
+)
+
diff_head_layers.txt
ADDED
@@ -0,0 +1,26 @@
+[0] noisy_images_proj.weight (shape: (3584, 64), trainable: True)
+[1] cond_proj.weight (shape: (3584, 3584), trainable: True)
+[2] t_embedder.mlp.0.weight (shape: (3584, 256), trainable: True)
+[3] t_embedder.mlp.2.weight (shape: (3584, 3584), trainable: True)
+[4] layers.0.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
+[5] layers.0.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
+[6] layers.0.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
+[7] layers.0.norm.weight (shape: (3584,), trainable: True)
+[8] layers.0.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
+[9] layers.1.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
+[10] layers.1.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
+[11] layers.1.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
+[12] layers.1.norm.weight (shape: (3584,), trainable: True)
+[13] layers.1.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
+[14] layers.2.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
+[15] layers.2.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
+[16] layers.2.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
+[17] layers.2.norm.weight (shape: (3584,), trainable: True)
+[18] layers.2.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
+[19] layers.3.ffn.gate_proj.weight (shape: (10752, 3584), trainable: True)
+[20] layers.3.ffn.up_proj.weight (shape: (10752, 3584), trainable: True)
+[21] layers.3.ffn.down_proj.weight (shape: (3584, 10752), trainable: True)
+[22] layers.3.norm.weight (shape: (3584,), trainable: True)
+[23] layers.3.adaLN_modulation.1.weight (shape: (10752, 3584), trainable: True)
+[24] final_layer.linear.weight (shape: (64, 3584), trainable: True)
+[25] final_layer.adaLN_modulation.1.weight (shape: (7168, 3584), trainable: True)
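
For context, a listing in this format can be produced by enumerating the diffusion head's parameters; a minimal sketch, assuming `model` is a loaded VibeVoiceForConditionalGeneration and that the head sits at `model.model.prediction_head` (the same dotted path the EmaCallback in src/finetune_vibevoice_lora.py defaults to, resolved under the wrapped model):

    # Sketch: print head weights in the diff_head_layers.txt format.
    for i, (name, p) in enumerate(model.model.prediction_head.named_parameters()):
        print(f"[{i}] {name} (shape: {tuple(p.shape)}, trainable: {p.requires_grad})")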
finalize_indian_english.py
ADDED
@@ -0,0 +1,8 @@
+from datasets import load_dataset
+
+ds = load_dataset("hf_indian_english_raw", split="train")
+print("Loaded:", len(ds))
+
+ds.save_to_disk("local_eng")
+print("Saved to local_eng")
+
merge_dataset.py
ADDED
@@ -0,0 +1,133 @@
+from datasets import load_dataset, concatenate_datasets, Audio
+from glob import glob
+from datasets import load_from_disk
+
+MAX_ROWS = 6000
+FINAL = []
+
+
+# -------------------------------------------------
+# Helper
+# -------------------------------------------------
+def load_local_parquet(path, text_col="transcription", filter_fn=None):
+    print(f"\n📌 Loading: {path}")
+    files = sorted(glob(path, recursive=True))
+    assert files, f"No parquet files found in {path}"
+
+    ds = load_dataset(
+        "parquet",
+        data_files=files,
+        split="train"
+    )
+
+    if filter_fn:
+        ds = ds.filter(filter_fn)
+
+    if text_col != "text":
+        ds = ds.rename_column(text_col, "text")
+
+    # 🔥 KEEP ONLY audio + text
+    ds = ds.select_columns(["audio", "text"])
+
+    ds = ds.cast_column("audio", Audio(sampling_rate=24000))
+
+    if len(ds) > MAX_ROWS:
+        ds = ds.shuffle(seed=42).select(range(MAX_ROWS))
+
+    print(f"✅ Rows used: {len(ds)} | Columns: {ds.column_names}")
+    return ds
+def load_arrow_dataset(path, text_col="transcription"):
+    print(f"\n📌 Loading Arrow dataset: {path}")
+
+    ds = load_from_disk(path)
+
+    if text_col != "text":
+        ds = ds.rename_column(text_col, "text")
+
+    # 🔥 KEEP ONLY audio + text
+    ds = ds.select_columns(["audio", "text"])
+
+    ds = ds.cast_column("audio", Audio(sampling_rate=24000))
+
+    if len(ds) > MAX_ROWS:
+        ds = ds.shuffle(seed=42).select(range(MAX_ROWS))
+
+    print(f"✅ Rows used: {len(ds)} | Columns: {ds.column_names}")
+    return ds
+
+# -------------------------------------------------
+# 1. Bengali (male only)
+# -------------------------------------------------
+#bengali = load_local_parquet(
+#    "local_data/IndicTTS_Bengali/data/**/*.parquet",
+#    text_col="text",
+#    filter_fn=lambda x: "train_bengalimale" in x["utterance_id"]
+#)
+#FINAL.append(bengali)
+
+
+# -------------------------------------------------
+# 2. Arabic
+# -------------------------------------------------
+#arabic = load_local_parquet(
+#    "local_data/arabic_tts/**/*.parquet",
+#    text_col="transcription"
+#)
+#FINAL.append(arabic)
+
+
+# -------------------------------------------------
+# 3. Hindi (female 5hr)
+# -------------------------------------------------
+hindi = load_local_parquet(
+    "local_data/hindi_female_5hr/**/*.parquet",
+    text_col="text"
+)
+FINAL.append(hindi)
+
+
+# -------------------------------------------------
+# 4. English (local)
+# -------------------------------------------------
+#english = load_arrow_dataset(
+#    "local_data/local_eng",
+#    text_col="transcription"
+#)
+#FINAL.append(english)
+
+
+# -------------------------------------------------
+# 5. Punjabi (local)
+# -------------------------------------------------
+#punjabi = load_arrow_dataset(
+#    "local_data/local_punjabi/train",
+#    text_col="transcription"
+#)
+#FINAL.append(punjabi)
+
+
+# -------------------------------------------------
+# Merge ALL
+# -------------------------------------------------
+print("\n🚀 Merging all datasets")
+merged = concatenate_datasets(FINAL)
+
+print("\n📊 FINAL DATASET")
+print(merged)
+print("Total rows:", len(merged))
+print("Columns:", merged.column_names)
+
+
+# -------------------------------------------------
+# Save locally
+# -------------------------------------------------
+OUT_DIR = "data/dataset_hindi_6k"
+merged.save_to_disk(OUT_DIR)
+
+print(f"\n💾 Saved with save_to_disk → {OUT_DIR}")
+
+
+# Optional: also save as Parquet (Trainer-friendly)
+#merged.to_parquet(f"{OUT_DIR}.parquet")
+#print(f"💾 Saved Parquet → {OUT_DIR}.parquet")  # disabled: only valid if to_parquet above is enabled
+
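
For downstream use, the saved dataset reloads with `load_from_disk`; a quick sketch (the path matches OUT_DIR above):

    from datasets import load_from_disk

    merged = load_from_disk("data/dataset_hindi_6k")
    print(len(merged), merged.column_names)        # at most 6000 rows per source; ["audio", "text"]
    print(merged[0]["audio"]["sampling_rate"])     # 24000 after the cast_column above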
pre-processing.py
ADDED
@@ -0,0 +1,59 @@
+from datasets import load_from_disk, Audio
+import re
+
+# ==========================
+# Config
+# ==========================
+DATA_DIR = "data/dataset_hindi_6k"
+HF_REPO = "PharynxAI/merged_multilingual_tts_6k_each"
+SAMPLING_RATE = 24000
+
+
+# ==========================
+# Load merged dataset
+# ==========================
+print("📌 Loading merged dataset from disk...")
+ds = load_from_disk(DATA_DIR)
+
+print(ds)
+print(ds.features)
+
+
+# ==========================
+# Apply SAME preprocessing as example
+# ==========================
+def apply_preprocessing(example):
+    text = example["text"].strip()
+
+    # Match example: add Speaker prefix
+    if not text.startswith("Speaker"):
+        text = f"Speaker 0: {text}"
+
+    return {"text": text}
+
+
+ds = ds.map(apply_preprocessing, num_proc=4)
+
+# EXACT equivalent of:
+# dataset = dataset.cast_column("audio", Audio(sampling_rate=24000))
+ds = ds.cast_column("audio", Audio(sampling_rate=SAMPLING_RATE))
+
+
+# ==========================
+# Final verification
+# ==========================
+print("✅ Final features:")
+print(ds.features)
+print("📝 Sample:", ds[0]["text"])
+print("🔊 SR:", ds[0]["audio"]["sampling_rate"])
+
+
+# ==========================
+# Push to Hub
+# ==========================
+#print(f"🚀 Pushing dataset to: {HF_REPO}")
+#ds.push_to_hub(HF_REPO, max_shard_size="500MB", num_proc=1)
+ds.save_to_disk("data/dataset_hindi_6k_processed")
+print("Saved dataset locally")
+print("🎉 Done.")
+
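
A small sanity check on the processed output, sketched under the assumption the save above succeeded (the path matches the save_to_disk call):

    from datasets import load_from_disk

    ds = load_from_disk("data/dataset_hindi_6k_processed")
    sample = ds.select(range(min(100, len(ds))))
    assert all(t.startswith("Speaker") for t in sample["text"]), "Speaker prefix missing"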
pyproject.toml
ADDED
@@ -0,0 +1,41 @@
+[project]
+name = "vibevoice-finetuning"
+version = "0.1.0"
+description = "Open Source finetuning code for VibeVoice"
+readme = "README.md"
+requires-python = ">=3.8"
+license = {file = "LICENSE"}
+
+authors = [
+    {name = "jpgallegoarvpb", email = "juanpablo.gallego@voicepowered.ai"}
+]
+
+dependencies = [
+    # ---- DO NOT manage torch here ----
+    # torch
+    # torchaudio
+    # torchvision
+
+    "numpy~=1.26.0",
+    "resampy==0.4.3",
+    "librosa==0.11.0",
+    "s3tokenizer",
+    "transformers==4.51.3",
+    "datasets>=2.18.0",
+    "diffusers==0.29.0",
+    "resemble-perth==1.0.1",
+    "omegaconf==2.3.0",
+    "conformer==0.3.2",
+    "safetensors==0.5.3",
+    "peft>=0.11.0",
+    "tensorboard>=2.12",
+    "wandb"
+]
+
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools.packages.find]
+where = ["src"]
+
requirements.txt
ADDED
@@ -0,0 +1,41 @@
+# ================= Core ML =================
+accelerate==0.28.0
+transformers==4.51.3
+datasets==2.19.2
+diffusers==0.29.0
+peft==0.11.1
+einops==0.8.1
+
+# ================= Audio =================
+librosa==0.11.0
+soundfile==0.13.1
+soxr==1.0.0
+resampy==0.4.3
+audioread==3.1.0
+
+# ================= Data & Math =================
+numpy==1.26.4
+scipy==1.15.3
+pandas==2.3.3
+scikit-learn==1.7.2
+pyarrow==15.0.2
+
+# ================= HF =================
+huggingface-hub==0.36.0
+tokenizers==0.21.4
+safetensors==0.5.3
+
+# ================= Utils =================
+omegaconf==2.3.0
+pyyaml==6.0.3
+tqdm==4.67.1
+regex==2024.11.6
+requests==2.32.5
+filelock==3.15.4
+psutil==6.0.0
+protobuf==4.25.3
+
+# ================= Tracking =================
+wandb==0.17.8
+tensorboard==2.17.0
+
run.sh
ADDED
@@ -0,0 +1,6 @@
+#!/bin/bash
+set -eux
+
+chmod +x training.sh
+./training.sh
+
src/__pycache__/data_vibevoice.cpython-310.pyc
ADDED
Binary file (11.5 kB).
src/__pycache__/finetune_vibevoice_lora.cpython-310.pyc
ADDED
Binary file (30.9 kB).
src/__pycache__/finetune_vibevoice_lora.cpython-311.pyc
ADDED
Binary file (67.2 kB).
src/data_vibevoice.py
ADDED
@@ -0,0 +1,493 @@
+import math
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
+
+import numpy as np
+import torch
+import warnings
+import random
+
+try:
+    import librosa  # type: ignore
+except Exception:  # pragma: no cover
+    librosa = None  # Fallback: user must install librosa when using local audio paths
+
+try:
+    import resampy  # type: ignore
+except Exception:  # pragma: no cover
+    resampy = None
+
+
+def _resample_if_needed(wav: np.ndarray, orig_sr: int, target_sr: int) -> np.ndarray:
+    if orig_sr == target_sr:
+        return wav.astype(np.float32, copy=False)
+    if resampy is not None:
+        return resampy.resample(wav.astype(np.float32), orig_sr, target_sr)
+    if librosa is not None:
+        return librosa.resample(y=wav.astype(np.float32), orig_sr=orig_sr, target_sr=target_sr)
+    warnings.warn(
+        "No resampler available; treating audio as target_sr without resampling. Install resampy or librosa.",
+        RuntimeWarning,
+    )
+    return wav.astype(np.float32, copy=False)
+
+
+# Lightweight HF-style dataset wrapper (optional). Trainer can also pass raw HF datasets directly.
+class VibeVoiceDataset:
+    def __init__(
+        self,
+        dataset: Any,
+        text_column: str = "text",
+        audio_column: str = "audio",
+        voice_prompts_column: Optional[str] = "voice_prompts",
+    ) -> None:
+        self.dataset = dataset
+        self.text_column = text_column
+        self.audio_column = audio_column
+        self.voice_prompts_column = voice_prompts_column
+
+    def __len__(self) -> int:
+        return len(self.dataset)
+
+    def __getitem__(self, idx: int) -> Dict[str, Any]:
+        # item = self.dataset[idx]
+        if isinstance(self.dataset, dict):
+            item = self.dataset["train"][idx]
+        else:
+            item = self.dataset[idx]
+
+        data: Dict[str, Any] = {}
+        data["text"] = item[self.text_column]
+        data["audio"] = item[self.audio_column]
+
+        user_provided_prompt = None
+        if self.voice_prompts_column and self.voice_prompts_column in item:
+            user_provided_prompt = item[self.voice_prompts_column]
+
+        if user_provided_prompt:
+            # A prompt was provided in the dataset, so we use it.
+            if not isinstance(user_provided_prompt, list):
+                data["voice_prompts"] = [user_provided_prompt]
+            else:
+                data["voice_prompts"] = user_provided_prompt
+        else:
+            # FALLBACK: No prompt provided, so we auto-generate one from the target audio.
+            try:
+                target_sr = 24000
+                wav_array = _load_audio_to_24k(item[self.audio_column], target_sr=target_sr)
+                audio_len_seconds = len(wav_array) / target_sr
+
+                min_len_sec = min(5.0, audio_len_seconds / 4.0)
+                max_len_sec = min(15.0, audio_len_seconds / 2.0)
+
+                if min_len_sec > max_len_sec:
+                    min_len_sec = max_len_sec
+                    max_len_sec = min(max_len_sec, audio_len_seconds)
+
+                if max_len_sec > 0.1:
+                    prompt_len_sec = random.uniform(min_len_sec, max_len_sec)
+                    prompt_len_samples = int(prompt_len_sec * target_sr)
+
+                    max_start_sample = len(wav_array) - prompt_len_samples
+                    start_sample = random.randint(0, max_start_sample)
+
+                    prompt_crop = wav_array[start_sample : start_sample + prompt_len_samples]
+
+                    data["voice_prompts"] = [prompt_crop]
+                else:
+                    data["voice_prompts"] = None
+
+            except Exception as e:
+                warnings.warn(f"Could not create voice prompt for item {idx}: {e}")
+                data["voice_prompts"] = None
+        return data
+
+
+def _apply_silence_with_crossfade(
+    wav: np.ndarray,
+    *,
+    sample_rate: int,
+    pre_silence_sec: float = 0.25,
+    pre_crossfade_sec: float = 0.25,
+    post_crossfade_sec: float = 0.25,
+    post_silence_sec: float = 0.75,
+) -> np.ndarray:
+    """Pad audio with leading/trailing silence and apply crossfades.
+
+    Structure: [pre_silence][pre_crossfade][audio_body][post_crossfade][post_silence]
+    Crossfades blend the audio with silence linearly to avoid hard edges.
+    """
+
+    wav = np.asarray(wav, dtype=np.float32).reshape(-1)
+
+    start_sil_samples = int(round(pre_silence_sec * sample_rate))
+    end_sil_samples = int(round(post_silence_sec * sample_rate))
+    pre_crossfade_samples = int(round(pre_crossfade_sec * sample_rate))
+    post_crossfade_samples = int(round(post_crossfade_sec * sample_rate))
+
+    total_len = wav.shape[0]
+    if total_len == 0:
+        pieces: List[np.ndarray] = []
+        if start_sil_samples > 0:
+            pieces.append(np.zeros(start_sil_samples, dtype=np.float32))
+        if end_sil_samples > 0:
+            pieces.append(np.zeros(end_sil_samples, dtype=np.float32))
+        return np.concatenate(pieces) if pieces else wav
+
+    start_len = min(pre_crossfade_samples, total_len)
+    remaining_after_start = max(total_len - start_len, 0)
+    end_len = min(post_crossfade_samples, remaining_after_start)
+    middle_end_idx = total_len - end_len
+
+    start_segment = wav[:start_len]
+    middle_segment = wav[start_len:middle_end_idx]
+    end_segment = wav[middle_end_idx:]
+
+    def _linear_fade(num_samples: int, start: float, end: float) -> np.ndarray:
+        if num_samples <= 0:
+            return np.zeros((0,), dtype=np.float32)
+        return np.linspace(start, end, num_samples, endpoint=True, dtype=np.float32)
+
+    start_crossfade = start_segment * _linear_fade(start_len, 0.0, 1.0)
+    end_crossfade = end_segment * _linear_fade(end_segment.shape[0], 1.0, 0.0)
+
+    pieces: List[np.ndarray] = []
+    if start_sil_samples > 0:
+        pieces.append(np.zeros(start_sil_samples, dtype=np.float32))
+    if start_crossfade.size > 0:
+        pieces.append(start_crossfade.astype(np.float32, copy=False))
+    if middle_segment.size > 0:
+        pieces.append(middle_segment.astype(np.float32, copy=False))
+    if end_crossfade.size > 0:
+        pieces.append(end_crossfade.astype(np.float32, copy=False))
+    if end_sil_samples > 0:
+        pieces.append(np.zeros(end_sil_samples, dtype=np.float32))
+
+    return np.concatenate(pieces)
+
+
+# def _load_audio_to_24k(
+#     audio: Union[str, np.ndarray, torch.Tensor, Dict[str, Any]],
+#     *,
+#     target_sr: int = 24000,
+#     augment_with_silence: bool = False,
+# ) -> np.ndarray:
+#     if isinstance(audio, np.ndarray):
+#         wav_out = audio.astype(np.float32)
+#     elif isinstance(audio, torch.Tensor):
+#         wav_out = audio.detach().cpu().float().numpy()
+#     elif isinstance(audio, str):
+#         if librosa is None:
+#             raise RuntimeError("librosa is required to load audio file paths. Please pip install librosa.")
+#         wav, sr = librosa.load(audio, sr=None, mono=True)
+#         wav_out = _resample_if_needed(wav, int(sr), target_sr)
+#     elif isinstance(audio, dict) and "array" in audio and "sampling_rate" in audio:
+#         arr = np.asarray(audio["array"], dtype=np.float32)
+#         sr = int(audio["sampling_rate"])
+#         wav_out = _resample_if_needed(arr, sr, target_sr)
+#     else:
+#         raise ValueError(f"Unsupported audio type: {type(audio)}")
+
+#     wav_out = np.asarray(wav_out, dtype=np.float32)
+
+#     if augment_with_silence:
+#         wav_out = _apply_silence_with_crossfade(wav_out, sample_rate=target_sr)
+
+#     return wav_out
+def _load_audio_to_24k(
+    audio,
+    *,
+    target_sr: int = 24000,
+    augment_with_silence: bool = False,
+):
+
+    # 🔥 FIX: unwrap HF AudioDecoder
+    if hasattr(audio, "__class__") and audio.__class__.__name__ == "AudioDecoder":
+        audio = audio["array"]
+
+    wav_out = None
+
+    try:
+        wav_out = np.asarray(audio, dtype=np.float32)
+    except Exception:
+        wav_out = None
+
+    if wav_out is None and isinstance(audio, torch.Tensor):
+        wav_out = audio.detach().cpu().float().numpy()
+
+    elif wav_out is None and isinstance(audio, dict) and "array" in audio:
+        wav_out = np.asarray(audio["array"], dtype=np.float32)
+
+    elif wav_out is None and isinstance(audio, str):
+        wav, sr = librosa.load(audio, sr=None, mono=True)
+        wav_out = _resample_if_needed(wav, int(sr), target_sr)
+
+    if wav_out is None:
+        raise ValueError(f"Unsupported audio type: {type(audio)}")
+
+    # Safety clamp
+    MAX_AUDIO = target_sr * 30
+    if wav_out.ndim != 1 or len(wav_out) == 0:
+        raise ValueError("Invalid audio")
+
+    if len(wav_out) > MAX_AUDIO:
+        wav_out = wav_out[:MAX_AUDIO]
+
+    if augment_with_silence:
+        wav_out = _apply_silence_with_crossfade(wav_out, sample_rate=target_sr)
+
+    return wav_out
+
+@dataclass
+class VibeVoiceCollator:
+    processor: Any  # VibeVoiceProcessor
+    max_length: Optional[int] = None
+    speech_compress_ratio: int = 3200
+    semantic_vae_dim: int = 128
+    compute_semantics: bool = False
+    debug_checks: bool = False
+
+    text_field: str = "text"
+    audio_field: str = "audio"
+    voice_prompts_field: str = "voice_prompts"
+    voice_prompt_drop_rate: float = 0.0
+
+    def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, Any]:
+        batch_size = len(features)
+
+        sample_input_ids: List[List[int]] = []
+        sample_attention_masks: List[List[int]] = []
+        sample_acoustic_input_masks: List[List[bool]] = []
+        sample_acoustic_loss_masks: List[List[bool]] = []
+
+        all_speech_waveforms: List[np.ndarray] = []
+        all_speech_latent_lengths: List[int] = []
+        per_segment_is_target: List[bool] = []
+
+        for ex in features:
+            text: str = ex.get(self.text_field, "")
+            voice_prompts: Optional[List[Union[str, np.ndarray, torch.Tensor]]] = ex.get(self.voice_prompts_field)
+            target_audio: Union[str, np.ndarray, torch.Tensor, Dict[str, Any]] = ex.get(self.audio_field)
+
+            # Clamp drop rate for safety
+            _drop_rate = self.voice_prompt_drop_rate
+            if _drop_rate < 0.0:
+                _drop_rate = 0.0
+            elif _drop_rate > 1.0:
+                _drop_rate = 1.0
+
+            proc = self.processor(
+                text=[text],
+                voice_samples=[voice_prompts] if voice_prompts is not None and random.random() >= _drop_rate else None,
+                padding=False,
+                truncation=False,
+                max_length=self.max_length,
+                return_tensors="pt",
+            )
+
+            ids = proc["input_ids"][0].tolist()
+            attn = proc.get("attention_mask", torch.ones_like(proc["input_ids"]))[0].tolist()
+            speech_input_mask = proc.get("speech_input_mask")
+            if speech_input_mask is None:
+                speech_input_mask = torch.zeros_like(proc["input_ids"], dtype=torch.bool)
+            speech_input_mask_list = speech_input_mask[0].tolist()
+
+            wav_target = _load_audio_to_24k(target_audio, target_sr=24000, augment_with_silence=True)
+            # Prefer exact frame count from acoustic tokenizer if available; fallback to compress ratio
+            target_latent_len = None
+            try:
+                acoustic_tok = getattr(self.processor, "acoustic_tokenizer", None)
+                if acoustic_tok is not None and hasattr(acoustic_tok, "encode"):
+                    enc_out = acoustic_tok.encode(wav_target)
+                    # Normalize various possible return formats to get time dimension
+                    T = None
+                    try:
+                        # Direct array-like with shape (T, D) or (T,)
+                        if hasattr(enc_out, "shape") and len(getattr(enc_out, "shape", [])) >= 1:
+                            T = int(enc_out.shape[0])
+                        else:
+                            # Nested lists/tuples or ModelOutput-like
+                            cand = enc_out
+                            # Drill down a couple of levels safely
+                            for _ in range(2):
+                                if isinstance(cand, (list, tuple)) and len(cand) > 0:
+                                    cand = cand[0]
+                            if hasattr(cand, "shape") and len(getattr(cand, "shape", [])) >= 1:
+                                T = int(cand.shape[0])
+                    except Exception:
+                        T = None
+                    if T is not None and T > 0:
+                        target_latent_len = T
+            except Exception:
+                target_latent_len = None
+            if target_latent_len is None:
+                target_latent_len = max(1, int(math.ceil(len(wav_target) / float(self.speech_compress_ratio))))
+
+            speech_diff_id = self.processor.tokenizer.speech_diffusion_id
+            target_placeholders = [speech_diff_id] * target_latent_len
+
+            ids_extended = ids + target_placeholders
+            attn_extended = attn + [1] * target_latent_len
+
+            acoustic_input_mask = speech_input_mask_list + [True] * target_latent_len
+            acoustic_loss_mask = ([False] * len(speech_input_mask_list)) + [True] * target_latent_len
+
+            speech_end_id = self.processor.tokenizer.speech_end_id
+            ids_extended.append(speech_end_id)
+            attn_extended.append(1)
+            acoustic_input_mask.append(False)
+            acoustic_loss_mask.append(False)
+
+            # Ensure text decoding sees an explicit end-of-sequence token after speech output.
+            eos_token_id = getattr(self.processor.tokenizer, "eos_id", None)
+            if eos_token_id is None:
+                eos_token_id = getattr(self.processor.tokenizer, "eos_token_id", None)
+            if eos_token_id is not None and eos_token_id >= 0:
+                ids_extended.append(eos_token_id)
+                attn_extended.append(1)
+                acoustic_input_mask.append(False)
+                acoustic_loss_mask.append(False)
+
+            if self.max_length is not None and len(ids_extended) > self.max_length:
+                cut = len(ids_extended) - int(self.max_length)
+                leading_non_acoustic = 0
+                for v in acoustic_input_mask:
+                    if v:
+                        break
+                    leading_non_acoustic += 1
+                if cut > leading_non_acoustic:
+                    raise ValueError(
+                        f"--max_length={self.max_length} would truncate into acoustic tokens. "
+                        f"Needed cut={cut}, but only {leading_non_acoustic} leading non-acoustic tokens available. "
+                        "Increase max_length or shorten text/voice-prompt preamble."
+                    )
+                ids_extended = ids_extended[cut:]
+                attn_extended = attn_extended[cut:]
+                acoustic_input_mask = acoustic_input_mask[cut:]
+                acoustic_loss_mask = acoustic_loss_mask[cut:]
+
+            sample_input_ids.append(ids_extended)
+            sample_attention_masks.append(attn_extended)
+            sample_acoustic_input_masks.append(acoustic_input_mask)
+            sample_acoustic_loss_masks.append(acoustic_loss_mask)
+
+            voice_speeches = []
+            voice_latent_lengths = []
+            if proc.get("speech_tensors") is not None:
+                voice_np = proc["speech_tensors"].cpu().numpy()
+                voice_masks = proc["speech_masks"].cpu().numpy().astype(bool)
+                for seg_idx in range(voice_np.shape[0]):
+                    voice_speeches.append(voice_np[seg_idx])
+                    voice_latent_lengths.append(int(voice_masks[seg_idx].sum()))
+
+            all_speech_waveforms.extend(voice_speeches)
+            all_speech_latent_lengths.extend(voice_latent_lengths)
+            per_segment_is_target.extend([False] * len(voice_speeches))
+
+            all_speech_waveforms.append(wav_target)
+            all_speech_latent_lengths.append(target_latent_len)
+            per_segment_is_target.append(True)
+
+        max_seq_len = max(len(x) for x in sample_input_ids)
+        padded_input_ids = []
+        padded_attention_masks = []
+        padded_acoustic_input_masks = []
+        padded_acoustic_loss_masks = []
+        tok = self.processor.tokenizer
+        pad_token_id = getattr(tok, "pad_token_id", None)
+        if pad_token_id is None or pad_token_id < 0:
+            pad_token_id = getattr(tok, "eos_token_id", None)
+        if pad_token_id is None or pad_token_id < 0:
+            raise ValueError(
+                "Tokenizer has no pad_token_id or eos_token_id; please set one or pass a valid pad id."
+            )
+        for ids, attn, ain_mask, aloss_mask in zip(
+            sample_input_ids, sample_attention_masks, sample_acoustic_input_masks, sample_acoustic_loss_masks
+        ):
+            pad_len = max_seq_len - len(ids)
+            padded_input_ids.append(ids + [pad_token_id] * pad_len)
+            padded_attention_masks.append(attn + [0] * pad_len)
+            padded_acoustic_input_masks.append(ain_mask + [False] * pad_len)
+            padded_acoustic_loss_masks.append(aloss_mask + [False] * pad_len)
+
+        input_ids_tensor = torch.tensor(padded_input_ids, dtype=torch.long)
+        attention_mask_tensor = torch.tensor(padded_attention_masks, dtype=torch.long)
+        acoustic_input_mask_tensor = torch.tensor(padded_acoustic_input_masks, dtype=torch.bool)
+        acoustic_loss_mask_tensor = torch.tensor(padded_acoustic_loss_masks, dtype=torch.bool)
+
+        if all_speech_waveforms:
+            max_wave_len = max(w.shape[0] for w in all_speech_waveforms)
+            padded_speeches = np.zeros((len(all_speech_waveforms), max_wave_len), dtype=np.float32)
+            for i, w in enumerate(all_speech_waveforms):
+                L = w.shape[0]
+                padded_speeches[i, :L] = w
+
+            max_latent_len = max(all_speech_latent_lengths) if all_speech_latent_lengths else 1
+            speech_masks_np = np.zeros((len(all_speech_waveforms), max_latent_len), dtype=np.bool_)
+            for i, L_lat in enumerate(all_speech_latent_lengths):
+                speech_masks_np[i, :L_lat] = True
+
+            speech_tensors_tensor = torch.tensor(padded_speeches, dtype=torch.float32)
+            speech_masks_tensor = torch.tensor(speech_masks_np, dtype=torch.bool)
+
+            speeches_loss_input_np = np.zeros_like(speech_masks_np, dtype=np.bool_)
+            for i, is_target in enumerate(per_segment_is_target):
+                if is_target:
+                    speeches_loss_input_np[i] = speech_masks_np[i]
+            speeches_loss_input_tensor = torch.tensor(speeches_loss_input_np, dtype=torch.bool)
+
+            # Semantic features
+            if self.compute_semantics and hasattr(self.processor, "semantic_tokenizer") and self.processor.semantic_tokenizer is not None:
+                sem_feats: List[np.ndarray] = []
+                for w in all_speech_waveforms:
+                    try:
+                        # Expect [T, D] where T ≈ ceil(len(w)/compress_ratio)
+                        sem = self.processor.semantic_tokenizer.encode(w)
+                        sem = np.asarray(sem, dtype=np.float32)
+                    except Exception:
+                        sem = np.zeros((0, self.semantic_vae_dim), dtype=np.float32)
+                    if sem.ndim != 2:
+                        raise RuntimeError(f"Semantic tokenizer returned unexpected shape {sem.shape}. Expect [T, D].")
+                    L = sem.shape[0]
+                    D = sem.shape[1]
+                    if D != self.semantic_vae_dim:
+                        if D < self.semantic_vae_dim:
+                            pad_d = np.zeros((L, self.semantic_vae_dim - D), dtype=np.float32)
+                            sem = np.concatenate([sem, pad_d], axis=1)
+                        else:
+                            sem = sem[:, : self.semantic_vae_dim]
+                    if L < max_latent_len:
+                        pad = np.zeros((max_latent_len - L, self.semantic_vae_dim), dtype=np.float32)
+                        sem = np.concatenate([sem, pad], axis=0)
+                    elif L > max_latent_len:
+                        sem = sem[:max_latent_len]
+                    sem_feats.append(sem.astype(np.float32))
+                speech_semantic_tensors = torch.tensor(np.stack(sem_feats, axis=0), dtype=torch.float32)
+            else:
+                # Semantic tokenizer unavailable while semantics are required for training.
+                # Raise to avoid silently degrading alignment with zeroed features.
+                raise RuntimeError(
+                    "Semantic features are required but could not be computed. "
+                    "Ensure processor.semantic_tokenizer is available or precompute and provide features."
+                )
+        else:
+            speech_tensors_tensor = None
+            speech_masks_tensor = None
+            speeches_loss_input_tensor = None
+            speech_semantic_tensors = None  # No segments in batch
+
+        if self.debug_checks:
+            assert (input_ids_tensor >= 0).all(), "input_ids contains negative indices"
+            if speech_tensors_tensor is not None:
+                assert speech_tensors_tensor.dim() == 2, "Expected speech_tensors 2D [segments, samples]"
+
+        return {
+            "input_ids": input_ids_tensor,
+            "attention_mask": attention_mask_tensor,
+            "speech_tensors": speech_tensors_tensor,
+            "speech_masks": speech_masks_tensor,
+            "speech_semantic_tensors": speech_semantic_tensors,
+            "acoustic_input_mask": acoustic_input_mask_tensor,
+            "acoustic_loss_mask": acoustic_loss_mask_tensor,
+            "speeches_loss_input": speeches_loss_input_tensor,
+        }
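
To make the collator's contract concrete, a minimal wiring sketch follows. The import paths mirror src/finetune_vibevoice_lora.py; the checkpoint path is a placeholder, and `from_pretrained` on the processor is an assumption about its API. Note that when speech segments are present the collator raises unless semantic features can be computed, so `compute_semantics=True` and a processor exposing `tokenizer`, `acoustic_tokenizer`, and `semantic_tokenizer` are effectively required:

    from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
    from data_vibevoice import VibeVoiceCollator

    processor = VibeVoiceProcessor.from_pretrained("path/to/vibevoice-model")  # placeholder path
    collator = VibeVoiceCollator(
        processor=processor,
        max_length=4096,           # illustrative value
        compute_semantics=True,    # required: the collator raises if semantics can't be computed
    )

    # One example; voice_prompts=None means no prompt conditioning for this sample.
    batch = collator([{"text": "Speaker 0: hello there", "audio": "clip.wav", "voice_prompts": None}])
    print(batch["input_ids"].shape, batch["speech_tensors"].shape)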
src/finetune_vibevoice_lora.py
ADDED
|
@@ -0,0 +1,949 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# train_vibevoice_lora.py
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
from dataclasses import dataclass, field
|
| 5 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
import torch.nn.functional as F
from datasets import load_dataset, DatasetDict, VerificationMode, load_from_disk

from transformers import (
    HfArgumentParser,
    Trainer,
    set_seed,
    TrainerCallback,
)
from transformers import TrainingArguments as HfTrainingArguments

from peft import LoraConfig, get_peft_model, TaskType

from vibevoice.modular.modeling_vibevoice import VibeVoiceForConditionalGeneration
from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor

from data_vibevoice import VibeVoiceDataset, VibeVoiceCollator

logger = logging.getLogger(__name__)

# ================== SAMPLE CALLBACK UTILS ==================

import copy
import torch
from transformers import TrainerCallback


class EmaCallback(TrainerCallback):
    def __init__(self, attr_path="model.prediction_head", decay=0.999, device="cpu"):
        """
        attr_path: where the head lives under self.model (Trainer wraps your VibeVoiceForConditionalGeneration)
        decay: EMA decay (0.999 ~ stable, 0.9999 ~ very smooth, slower to adapt)
        """
        self.attr_path = attr_path
        self.decay = float(decay)
        self.device = torch.device(device)
        self.shadow = None
        self._orig = None  # store non-EMA weights when we swap

    def _get_module(self, model):
        # Resolve a dotted path like "model.prediction_head"
        mod = model
        for name in self.attr_path.split('.'):
            mod = getattr(mod, name)
        return mod

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        head = self._get_module(model)
        self.shadow = {k: p.detach().to(self.device).clone()
                       for k, p in head.state_dict().items()}

    def on_step_end(self, args, state, control, model=None, **kwargs):
        if self.shadow is None:
            return
        head = self._get_module(model)
        with torch.no_grad():
            for k, v in head.state_dict().items():
                self.shadow[k].mul_(self.decay).add_(v.detach().to(self.device), alpha=(1.0 - self.decay))

    # ---- Swap helpers ----
    def _swap_in_ema(self, model):
        head = self._get_module(model)
        self._orig = copy.deepcopy(head.state_dict())
        head.load_state_dict(self.shadow, strict=False)

    def _swap_back(self, model):
        if self._orig is None:
            return
        head = self._get_module(model)
        head.load_state_dict(self._orig, strict=False)
        self._orig = None

    def on_evaluate(self, args, state, control, model=None, **kwargs):
        # use EMA during eval
        # NOTE: Trainer fires `on_evaluate` AFTER metrics are computed, so this
        # swap does not affect the evaluation itself; it only leaves EMA
        # weights loaded afterwards.
        self._swap_in_ema(model)

    def on_evaluate_end(self, args, state, control, model=None, **kwargs):
        # NOTE: `on_evaluate_end` is not a standard TrainerCallback hook, so
        # Trainer never invokes this method.
        self._swap_back(model)

    def on_save(self, args, state, control, model=None, **kwargs):
        # temporarily swap to EMA, let Trainer save, then swap back
        # NOTE: Trainer fires `on_save` after the checkpoint is written, so the
        # swap here happens post-save.
        self._swap_in_ema(model)

    def on_save_end(self, args, state, control, model=None, **kwargs):
        # NOTE: `on_save_end` is likewise not part of the TrainerCallback API.
        self._swap_back(model)

    def on_train_end(self, args, state, control, model=None, **kwargs):
        # final checkpoint: persist EMA
        self._swap_in_ema(model)

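# Minimal usage sketch (illustrative only): each optimizer step the shadow copy
# follows
#     shadow <- decay * shadow + (1 - decay) * param
# i.e. an exponential moving average whose effective horizon is roughly
# 1 / (1 - decay) steps (~1000 steps at decay=0.999). Attaching it mirrors what
# main() does further below:
#
#     trainer = Trainer(model=model, args=training_args,
#                       callbacks=[EmaCallback(attr_path="model.prediction_head",
#                                              decay=0.999, device="cpu")])
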
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to VibeVoice base model with config.json"}
    )
    processor_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to processor dir (preprocessor_config.json). Defaults to model path."}
    )
    cache_dir: Optional[str] = field(default=None)
    freeze_acoustic_tokenizer: bool = field(default=True)
    freeze_semantic_tokenizer: bool = field(default=True)
    lora_r: int = field(default=8)
    lora_alpha: int = field(default=32)
    lora_dropout: float = field(default=0.05)
    lora_target_modules: str = field(
        default="q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj",
        metadata={"help": "Comma-separated list of target module names in the LLM blocks"},
    )
    lora_wrap_diffusion_head: bool = field(default=False, metadata={"help": "Wrap diffusion head with PEFT LoRA"})
    train_diffusion_head: bool = field(default=False, metadata={"help": "Train diffusion prediction head (full fine-tune)"})
    train_connectors: bool = field(default=False, metadata={"help": "Train acoustic/semantic connectors (full fine-tune)"})
    layers_to_freeze: Optional[str] = field(
        default=None,
        metadata={"help": "Comma-separated indices of diffusion head layers to freeze (e.g., '0,1,5,7,8')."}
    )

@dataclass
class DataArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "HF dataset name or 'json' with --train_jsonl for local files"})
    dataset_config_name: Optional[str] = field(default=None)
    train_split_name: str = field(default="train")
    eval_split_name: Optional[str] = field(default="validation")
    text_column_name: str = field(default="text")
    audio_column_name: str = field(default="audio")
    voice_prompts_column_name: Optional[str] = field(default="voice_prompts")
    eval_split_size: float = field(default=0.0)
    ignore_verifications: bool = field(default=False)
    max_length: Optional[int] = field(default=None)
    train_jsonl: Optional[str] = field(default=None, metadata={"help": "Path to local train JSONL with {text, audio, [voice_prompts]}"})
    validation_jsonl: Optional[str] = field(default=None, metadata={"help": "Optional path to local validation JSONL"})
    voice_prompt_drop_rate: float = field(
        default=0.0,
        metadata={"help": "Probability of dropping the conditioning voice prompt during training (0.0 = always keep, 1.0 = always drop)."},
    )

@dataclass
class CustomTrainingArguments(HfTrainingArguments):
    ddpm_batch_mul: int = field(default=1)
    ce_loss_weight: float = field(default=1.0)
    diffusion_loss_weight: float = field(default=1.0)
    debug_ce_details: bool = field(default=False)
    debug_ce_topk: int = field(default=5)
    debug_ce_max_examples: int = field(default=1)
    debug_ce_every_n_steps: int = field(default=200)
    gradient_clipping: bool = field(
        default=False,
        metadata={"help": "Enable gradient clipping using max_grad_norm (set via --max_grad_norm, default 1.0). When False, disables clipping by forcing max_grad_norm=0.0."},
    )
    debug_save: bool = field(
        default=False,
        metadata={"help": "If set, saves model components BEFORE training starts, into output_dir/debug_initial."},
    )

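# Illustrative launch (paths and values below are placeholders, not part of this
# repo):
#
#   python src/finetune_vibevoice_lora.py \
#     --model_name_or_path /path/to/vibevoice-large \
#     --train_jsonl data/train.jsonl \
#     --output_dir outputs/run1 \
#     --do_train --bf16 True \
#     --lora_r 8 --lora_alpha 32 \
#     --train_diffusion_head True \
#     --ce_loss_weight 1.0 --diffusion_loss_weight 1.0
#
# HfArgumentParser exposes every field of the three dataclasses above as a CLI
# flag of the same name, so no extra argparse wiring is needed.
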
def build_lora_config(args: ModelArguments) -> LoraConfig:
    target_modules = [s.strip() for s in args.lora_target_modules.split(",") if s.strip()]
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.CAUSAL_LM,
        target_modules=target_modules,
    )


def build_head_lora_config(args: ModelArguments) -> LoraConfig:
    target_modules = ["noisy_images_proj", "cond_proj", "gate_proj", "up_proj", "down_proj", "linear"]
    return LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        bias="none",
        task_type=TaskType.FEATURE_EXTRACTION,
        target_modules=target_modules,
    )

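# For reference (standard PEFT LoRA semantics, not code specific to this repo):
# each targeted nn.Linear with weight W gains a trainable low-rank delta, so the
# effective forward pass becomes
#     y = W x + (lora_alpha / r) * B (A x)
# where A is (r x d_in), B is (d_out x r), B starts at zero (training begins at
# the base model's behavior), and only A and B receive gradients.
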
def mask_for_ce(labels: torch.Tensor, attention_mask: torch.Tensor, acoustic_input_mask: torch.Tensor, pad_id: int = -100) -> torch.Tensor:
    # Shift labels left by one (next-token prediction), then mask out padding
    # positions and positions whose target is an acoustic token.
    shifted = labels[:, 1:].contiguous()
    base_mask = attention_mask[:, 1:].contiguous().eq(1) if (attention_mask is not None and attention_mask.numel() > 0) else torch.ones_like(shifted, dtype=torch.bool)
    label_is_acoustic = acoustic_input_mask[:, 1:].contiguous()
    final_mask = base_mask & (~label_is_acoustic)
    out = shifted.clone()
    out[~final_mask] = pad_id
    return out

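# Toy illustration (values invented): for labels [[5, 6, 7]], a full attention
# mask, and acoustic_input_mask [[0, 0, 1]], the shifted targets are [6, 7];
# the second target is an acoustic position, so mask_for_ce returns [[6, -100]]
# and CrossEntropyLoss(ignore_index=-100) scores only the prediction of "6".
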
def _patch_acoustic_encode_for_legacy_indexing(model_obj, logger_):
    try:
        acoustic = getattr(getattr(model_obj, "model", model_obj), "acoustic_tokenizer", None)
        if acoustic is None or not hasattr(acoustic, "encode"):
            logger_.warning("No acoustic_tokenizer.encode() found to patch.")
            return
        base_encode = acoustic.encode

        def encode_wrapped(*args, **kwargs):
            out = base_encode(*args, **kwargs)
            try:
                _ = out[0][0]
                return out
            except Exception:
                pass
            if isinstance(out, dict):
                for k in ("frames", "codes", "tokens", "latents", "hidden_states"):
                    if k in out:
                        return [[out[k]]]
                if len(out) > 0:
                    return [[next(iter(out.values()))]]
            for attr in ("frames", "codes", "tokens", "latents", "hidden_states"):
                if hasattr(out, attr):
                    return [[getattr(out, attr)]]
            try:
                if isinstance(out, torch.Tensor):
                    return [[out]]
            except Exception:
                pass
            return [[out]]

        acoustic.encode = encode_wrapped
        logger_.info("Patched acoustic_tokenizer.encode() to return [[...]] for legacy indexing.")
    except Exception as e:
        logger_.warning(f"Failed to patch acoustic_tokenizer.encode(): {e}")

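# Rationale: legacy call sites index the encoder output as encode(wav)[0][0]
# (hence the `_ = out[0][0]` probe above); the wrapper coerces dict-, attribute-,
# and bare-tensor-shaped returns into that nested [[...]] shape so both the old
# and new calling conventions keep working.
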
def main() -> None:
    parser = HfArgumentParser((ModelArguments, DataArguments, CustomTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.info("Training/evaluation parameters %s", training_args)
    set_seed(training_args.seed)

    # Configure gradient clipping
    if not getattr(training_args, "gradient_clipping", False):
        if hasattr(training_args, "max_grad_norm"):
            training_args.max_grad_norm = 0.0
            logger.info("Gradient clipping disabled (set max_grad_norm=0.0). Use --gradient_clipping to enable.")
    else:
        if (not hasattr(training_args, "max_grad_norm")) or training_args.max_grad_norm is None or training_args.max_grad_norm <= 0:
            training_args.max_grad_norm = 1.0
        logger.info(f"Gradient clipping enabled: max_grad_norm={training_args.max_grad_norm}")

    # Load processor
    processor_path = model_args.processor_name_or_path or model_args.model_name_or_path
    if processor_path is None:
        raise ValueError("--model_name_or_path (or --processor_name_or_path) must be provided")
    processor: VibeVoiceProcessor = VibeVoiceProcessor.from_pretrained(processor_path)

    # Required special tokens
    tok = processor.tokenizer
    for required in ["speech_start_id", "speech_diffusion_id", "speech_end_id"]:
        if not hasattr(tok, required) or getattr(tok, required) is None:
            raise RuntimeError(f"Tokenizer missing required special id: {required}")

    # Load model
    if model_args.model_name_or_path is None:
        raise ValueError("--model_name_or_path is required to load VibeVoice base model")
    dtype = torch.float32
    if training_args.bf16:
        dtype = torch.bfloat16
    elif getattr(training_args, "fp16", False):
        dtype = torch.float16
    model = VibeVoiceForConditionalGeneration.from_pretrained(
        model_args.model_name_or_path,
        torch_dtype=dtype,
    )
    _patch_acoustic_encode_for_legacy_indexing(model, logger)
    processor.semantic_tokenizer = getattr(model.model, "semantic_tokenizer", None)

    # Diagnostics: LM head tie
    try:
        in_emb_mod = model.get_input_embeddings()
        out_emb_mod = model.get_output_embeddings()
        in_w = getattr(in_emb_mod, "weight", None)
        out_w = getattr(out_emb_mod, "weight", None)
        shared_ptr = bool(in_w is not None and out_w is not None and in_w.data_ptr() == out_w.data_ptr())
        values_equal = False
        if in_w is not None and out_w is not None and in_w.shape == out_w.shape:
            try:
                values_equal = bool(torch.allclose(in_w, out_w))
            except Exception:
                values_equal = False
        try:
            tie_cfg = getattr(getattr(model.config, "decoder_config", model.config), "tie_word_embeddings", None)
        except Exception:
            tie_cfg = getattr(model.config, "tie_word_embeddings", None)
        logger.info(f"LM head diagnostics -> shared_params={shared_ptr}, values_equal={values_equal}, tie_word_embeddings={tie_cfg}")
        if out_w is not None:
            logger.info(f"LM head requires_grad before freeze: {bool(out_w.requires_grad)}")
    except Exception as e:
        logger.warning(f"LM head tie diagnostics failed: {e}")

    # Hard-tie LM head
    try:
        emb_module = model.get_input_embeddings()
        head_module = model.get_output_embeddings()
        if hasattr(emb_module, "weight") and hasattr(head_module, "weight"):
            if emb_module.weight.shape == head_module.weight.shape and emb_module.weight.data_ptr() != head_module.weight.data_ptr():
                with torch.no_grad():
                    head_module.weight = emb_module.weight
                logger.info("Force-tied LM head weight to input embeddings (pointer share).")
    except Exception as e:
        logger.warning(f"Force-tie of LM head failed: {e}")

    # Validate special IDs (info logs only)
    try:
        special_names = ["speech_start_id", "speech_diffusion_id", "speech_end_id"]
        try:
            vocab_size = int(getattr(model.config.decoder_config, "vocab_size", 0))
        except Exception:
            vocab_size = 0
        in_emb_mod = model.get_input_embeddings()
        out_emb_mod = model.get_output_embeddings()
        in_w = getattr(in_emb_mod, "weight", None)
        out_w = getattr(out_emb_mod, "weight", None)
        for name in special_names:
            val = getattr(tok, name, None)
            exists = (val is not None)
            in_range = (exists and isinstance(val, int) and 0 <= val < vocab_size)
            equal_row = None
            if in_range and in_w is not None and out_w is not None and in_w.shape == out_w.shape and in_w.size(0) > val:
                try:
                    equal_row = bool(torch.allclose(in_w[val], out_w[val]))
                except Exception:
                    equal_row = False
            decoded_str = None
            if exists and isinstance(val, int):
                try:
                    decoded_str = tok.decode([val])
                except Exception:
                    try:
                        decoded_str = tok.convert_ids_to_tokens(val)
                    except Exception:
                        decoded_str = "<decode_failed>"
            logger.info(f"Special token check -> {name}={val}, decoded='{decoded_str}', exists={exists}, in_vocab_range={in_range}, emb_vs_head_row_equal={equal_row}")
    except Exception as e:
        logger.warning(f"Special token ID/row validation failed: {e}")

    # Quick tokenizer diagnostics (optional)
    try:
        logger.info("=== TOKENIZER DIAGNOSTICS ===")
        logger.info(f"Tokenizer class: {type(tok).__name__}")
        logger.info(f"Tokenizer vocab_size: {tok.vocab_size}")
        # tiny CE smoke test
        with torch.no_grad():
            simple_text = "The cat sat on the mat."
            simple_ids = torch.tensor([tok.encode(simple_text, add_special_tokens=True)], device=model.device)
            simple_mask = torch.ones_like(simple_ids)
            x = model.get_input_embeddings()(simple_ids)
            outputs = model.model(inputs_embeds=x, attention_mask=simple_mask, return_dict=True)
            logits = model.lm_head(outputs.last_hidden_state)
            shift_logits = logits[:, :-1, :].contiguous()
            shift_labels = simple_ids[:, 1:].contiguous()
            ce_loss = F.cross_entropy(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='mean')
            logger.info(f"Simple text CE loss: {ce_loss.item():.4f}")
    except Exception as e:
        logger.warning(f"Tokenizer diagnostics failed: {e}")

    # Disable KV cache during training (unused for loss computation and it
    # conflicts with gradient checkpointing)
    if hasattr(model.config, "use_cache") and training_args.do_train:
        model.config.use_cache = False

    # Freeze tokenizers
    if model_args.freeze_acoustic_tokenizer and hasattr(model.model, "acoustic_tokenizer"):
        for p in model.model.acoustic_tokenizer.parameters():
            p.requires_grad = False
    if model_args.freeze_semantic_tokenizer and hasattr(model.model, "semantic_tokenizer"):
        for p in model.model.semantic_tokenizer.parameters():
            p.requires_grad = False

    # LoRA wrap LLM (optional)
    lora_cfg = build_lora_config(model_args)
    tm_lower = [s.strip().lower() for s in model_args.lora_target_modules.split(",") if s.strip()]
    skip_lm_lora = (len(tm_lower) == 0) or all(t in ("none", "off", "disable", "disabled") for t in tm_lower)
    if not skip_lm_lora:
        model.model.language_model = get_peft_model(model.model.language_model, lora_cfg)
    else:
        logger.info("Skipping LLM LoRA wrapping (lora_target_modules indicates none).")

    try:
        model.tie_weights()
    except Exception:
        pass

    # Freeze everything, then re-enable the trainable subsets below
    for _, p in model.named_parameters():
        p.requires_grad = False

    try:
        for n, p in model.model.language_model.named_parameters():
            if "lora_A" in n or "lora_B" in n:
                p.requires_grad = True
    except Exception:
        logger.warning("Could not re-enable LoRA params on language_model.")

    # Diffusion head LoRA wrapping (optional)
    if getattr(model_args, "lora_wrap_diffusion_head", False) and hasattr(model.model, "prediction_head"):
        class _HeadForwardShim(nn.Module):
            def __init__(self, base: nn.Module):
                super().__init__()
                self.base = base

            def forward(self, *args, **kwargs):
                if len(args) >= 3:
                    noisy_images, timesteps, condition = args[:3]
                else:
                    noisy_images = kwargs.get("noisy_images")
                    timesteps = kwargs.get("timesteps")
                    condition = kwargs.get("condition")
                return self.base(noisy_images, timesteps, condition)

        try:
            shim = _HeadForwardShim(model.model.prediction_head)
            model.model.prediction_head = get_peft_model(shim, build_head_lora_config(model_args))
            for n, p in model.model.prediction_head.named_parameters():
                if "lora_A" in n or "lora_B" in n:
                    p.requires_grad = True
        except Exception as e:
            logger.warning(f"Could not LoRA-wrap diffusion head: {e}")

    # Train full diffusion head (optional)
    if getattr(model_args, "train_diffusion_head", False) and hasattr(model.model, "prediction_head"):
        for p in model.model.prediction_head.parameters():
            p.requires_grad = True

    # Freeze diffusion head layers (optional)
    if model_args.layers_to_freeze is not None and hasattr(model.model, "prediction_head"):
        head_params = list(model.model.prediction_head.named_parameters())
        try:
            indices_to_freeze = {int(x.strip()) for x in model_args.layers_to_freeze.split(',') if x.strip()}
            frozen_count = 0
            for i, (name, param) in enumerate(head_params):
                if i in indices_to_freeze:
                    param.requires_grad = False
                    frozen_count += 1
                    logger.info(f"Froze layer [{i}]: {name}")
            logger.info(f"Successfully froze {frozen_count} parameter groups in the diffusion head.")
        except Exception as e:
            logger.error(f"Could not parse --layers_to_freeze: {e}")
            raise

    # Connectors
    if getattr(model_args, "train_connectors", False):
        if hasattr(model.model, "acoustic_connector"):
            for p in model.model.acoustic_connector.parameters():
                p.requires_grad = True
        if hasattr(model.model, "semantic_connector"):
            for p in model.model.semantic_connector.parameters():
                p.requires_grad = True
    else:
        if hasattr(model.model, "acoustic_connector"):
            for p in model.model.acoustic_connector.parameters():
                p.requires_grad = False
        if hasattr(model.model, "semantic_connector"):
            for p in model.model.semantic_connector.parameters():
                p.requires_grad = False

    # Keep embedding + LM head trainable (they share storage after the hard tie above)
    try:
        emb = model.get_input_embeddings()
        if hasattr(emb, "weight"):
            emb.weight.requires_grad_(True)
        head = model.get_output_embeddings()
        if head is not None and hasattr(head, "weight"):
            head.weight.requires_grad_(True)
    except Exception:
        pass

    # Diagnostics
    def _sum_params(named_iter):
        return sum(p.numel() for _, p in named_iter if p.requires_grad)

    try:
        lm_lora = _sum_params(model.model.language_model.named_parameters()) if hasattr(model.model, "language_model") else 0
        pred_head_train = _sum_params(model.model.prediction_head.named_parameters()) if hasattr(model.model, "prediction_head") else 0
        ac_conn_train = _sum_params(model.model.acoustic_connector.named_parameters()) if hasattr(model.model, "acoustic_connector") else 0
        se_conn_train = _sum_params(model.model.semantic_connector.named_parameters()) if hasattr(model.model, "semantic_connector") else 0
        total_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
        logger.info(f"Trainable by block -> LLM-LoRA: {lm_lora:,} | diff_head: {pred_head_train:,} | ac_conn: {ac_conn_train:,} | se_conn: {se_conn_train:,}")
        logger.info("TOTAL trainable: %s", f"{total_trainable:,}")
    except Exception:
        pass

    # Datasets
    verification_mode = VerificationMode.NO_CHECKS if data_args.ignore_verifications else VerificationMode.BASIC_CHECKS
    if data_args.train_jsonl is not None:
        data_files: Dict[str, str] = {"train": data_args.train_jsonl}
        if data_args.validation_jsonl is not None:
            data_files["validation"] = data_args.validation_jsonl
        raw = load_dataset("json", data_files=data_files, verification_mode=verification_mode, cache_dir=model_args.cache_dir)
    else:
        if data_args.dataset_name is None:
            raise ValueError(
                "Provide --dataset_name (HF datasets or local path) "
                "or use --train_jsonl/--validation_jsonl for local files."
            )
        if os.path.isdir(data_args.dataset_name):
            raw = load_from_disk(data_args.dataset_name)
        else:
            raw = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                verification_mode=verification_mode,
                cache_dir=model_args.cache_dir,
            )

    # train_ds = raw[data_args.train_split_name]
    # eval_ds = None
    # if training_args.do_eval:
    #     if data_args.eval_split_name and data_args.eval_split_name in raw:
    #         eval_ds = raw[data_args.eval_split_name]
    #     elif data_args.eval_split_size and data_args.eval_split_size > 0 and len(train_ds) > 1:
    #         split = train_ds.train_test_split(test_size=data_args.eval_split_size, seed=training_args.seed)
    #         train_ds, eval_ds = split["train"], split["test"]

    train_dataset = VibeVoiceDataset(
        raw,
        text_column=data_args.text_column_name,
        audio_column=data_args.audio_column_name,
        voice_prompts_column=data_args.voice_prompts_column_name,
    )
    eval_dataset = None
    # if eval_ds is not None:
    #     eval_dataset = VibeVoiceDataset(
    #         eval_ds,
    #         text_column=data_args.text_column_name,
    #         audio_column=data_args.audio_column_name,
    #         voice_prompts_column=data_args.voice_prompts_column_name,
    #     )

    # Ratios/dims from processor+model
    speech_compress_ratio = getattr(processor, "speech_tok_compress_ratio", 3200)  # audio samples per speech-token frame
    semantic_dim = getattr(model.config, "semantic_vae_dim", None)
    if semantic_dim is None:
        try:
            semantic_dim = int(getattr(model.config.semantic_tokenizer_config, "vae_dim", 128))
        except Exception:
            semantic_dim = 128

    compute_semantics_flag = hasattr(processor, "semantic_tokenizer") and processor.semantic_tokenizer is not None

    data_collator = VibeVoiceCollator(
        processor=processor,
        max_length=data_args.max_length,
        speech_compress_ratio=speech_compress_ratio,
        semantic_vae_dim=semantic_dim,
        compute_semantics=compute_semantics_flag,
        debug_checks=False,
        voice_prompt_drop_rate=data_args.voice_prompt_drop_rate,
    )

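    # The collator emits the batch dict consumed by compute_loss below:
    # input_ids / attention_mask (text stream), speech_tensors / speech_masks
    # (acoustic latents + validity), speech_semantic_tensors,
    # acoustic_input_mask (which token positions are speech), plus
    # acoustic_loss_mask and speeches_loss_input (which of those positions and
    # latents count toward the diffusion loss).
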
    class LoRADebugCallback(TrainerCallback):
        def __init__(self, log_every_n_steps: int = 50):
            self.log_every_n_steps = max(1, int(log_every_n_steps))
            self.prev_param_norms: Dict[str, float] = {}
            self.lora_param_names: List[str] = []

        def on_train_begin(self, args, state, control, model=None, **kwargs):
            try:
                if model is None:
                    return
                named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
                self.lora_param_names = [n for n in named.keys() if ("lora_A" in n or "lora_B" in n)]
                for n in self.lora_param_names:
                    p = named[n]
                    self.prev_param_norms[n] = float(p.data.norm().item())
                total = len(self.lora_param_names)
                req_grad = sum(1 for n in self.lora_param_names if named[n].requires_grad)
                num_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
                num_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
                zero_B = sum(1 for n in self.lora_param_names if ("lora_B" in n and float(named[n].data.norm().item()) == 0.0))
                logger.info(f"LoRA debug: found {total} LoRA params (A={num_A}, B={num_B}); trainable={req_grad}. Initial lora_B_zero={zero_B}.")
                if total == 0:
                    logger.warning("LoRA debug: No LoRA parameters found. Check lora_target_modules.")
                if req_grad != total:
                    logger.warning("LoRA debug: Some LoRA params are frozen. They should be trainable.")
            except Exception as e:
                logger.warning(f"LoRA debug (on_train_begin) failed: {e}")

        def on_step_end(self, args, state, control, model=None, **kwargs):
            try:
                if model is None or len(self.lora_param_names) == 0:
                    return
                step = int(getattr(state, "global_step", 0) or 0)
                if step % self.log_every_n_steps != 0 and step != 1:
                    return
                named: Dict[str, torch.nn.Parameter] = dict(model.named_parameters())
                changed_A = 0
                changed_B = 0
                zero_B = 0
                eps = 1e-12
                for n in self.lora_param_names:
                    p = named.get(n, None)
                    if p is None:
                        continue
                    prev = self.prev_param_norms.get(n, 0.0)
                    curr = float(p.data.norm().item())
                    if "lora_A" in n and abs(curr - prev) > eps:
                        changed_A += 1
                    if "lora_B" in n:
                        if abs(curr - prev) > eps:
                            changed_B += 1
                        if curr == 0.0:
                            zero_B += 1
                    self.prev_param_norms[n] = curr
                total_A = sum(1 for n in self.lora_param_names if "lora_A" in n)
                total_B = sum(1 for n in self.lora_param_names if "lora_B" in n)
                logger.info(f"LoRA debug step {step}: changed A {changed_A}/{total_A}, changed B {changed_B}/{total_B}, lora_B_zero_now={zero_B}.")
            except Exception as e:
                logger.warning(f"LoRA debug (on_step_end) failed: {e}")

    class VibeVoiceTrainer(Trainer):
        def compute_loss(self, model: VibeVoiceForConditionalGeneration, inputs: Dict[str, Any], return_outputs=False, num_items_in_batch: Optional[int] = None):
            labels = inputs.get("input_ids")
            attention_mask = inputs.get("attention_mask")
            acoustic_input_mask = inputs.get("acoustic_input_mask")

            # Ensure semantic tensors exist and have correct dtype/device
            sem = inputs.get("speech_semantic_tensors", None)
            # unwrap DDP safely
            base_model = model.module if hasattr(model, "module") else model

            try:
                target_dtype = next(base_model.model.semantic_connector.parameters()).dtype
            except Exception:
                target_dtype = base_model.get_input_embeddings().weight.dtype

            if sem is None:
                sm = inputs.get("speech_masks")
                if sm is not None:
                    zeros = torch.zeros(
                        sm.size(0), sm.size(1),
                        semantic_dim,
                        dtype=target_dtype,
                        device=sm.device,
                    ).detach()
                    inputs["speech_semantic_tensors"] = zeros
            else:
                if isinstance(sem, torch.Tensor):
                    inputs["speech_semantic_tensors"] = sem.to(dtype=target_dtype)
            outputs = model(
                input_ids=inputs.get("input_ids"),
                attention_mask=attention_mask,
                speech_tensors=inputs.get("speech_tensors"),
                speech_masks=inputs.get("speech_masks"),
                speech_semantic_tensors=inputs.get("speech_semantic_tensors"),
                acoustic_input_mask=acoustic_input_mask,
                acoustic_loss_mask=inputs.get("acoustic_loss_mask"),
                speeches_loss_input=inputs.get("speeches_loss_input"),
                ddpm_batch_mul=training_args.ddpm_batch_mul,
            )

            # Invariants: token/latent selection equality across views (warn, don't assert)
            try:
                al_mask = inputs.get("acoustic_loss_mask")
                sp_masks = inputs.get("speech_masks")
                sp_loss_sel = inputs.get("speeches_loss_input")
                num_tok_total = int(acoustic_input_mask.sum().item()) if acoustic_input_mask is not None else 0
                num_tok_loss = int(al_mask.sum().item()) if al_mask is not None else 0
                num_lat_total = int(sp_masks.sum().item()) if sp_masks is not None else 0
                num_lat_loss = int(((sp_loss_sel & sp_masks).sum().item())) if (sp_loss_sel is not None and sp_masks is not None) else 0
                self.log({
                    "debug/num_tok_total": float(num_tok_total),
                    "debug/num_tok_loss": float(num_tok_loss),
                    "debug/num_lat_total": float(num_lat_total),
                    "debug/num_lat_loss": float(num_lat_loss),
                })
                if sp_loss_sel is not None and sp_masks is not None and al_mask is not None:
                    if num_tok_loss != num_lat_loss:
                        logger.warning(f"Loss selection mismatch: acoustic_loss_mask={num_tok_loss} vs speeches_loss_input={num_lat_loss}")
            except Exception:
                pass

            # CE Loss
            logits = outputs.logits
            ce_labels = mask_for_ce(labels, attention_mask, acoustic_input_mask, pad_id=-100)
            shift_logits = logits[:, :-1, :].contiguous()
            loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
            ce_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), ce_labels.view(-1))

            # Optional CE diagnostics
            try:
                self._debug_ce(shift_logits, ce_labels, attention_mask, acoustic_input_mask)
            except Exception as e:
                logger.warning(f"Failed invoking CE debug: {e}")

            # Diffusion loss
            diffusion_loss = outputs.diffusion_loss if outputs.diffusion_loss is not None else torch.tensor(0.0, device=ce_loss.device)
            total = training_args.ce_loss_weight * ce_loss + training_args.diffusion_loss_weight * diffusion_loss

            # Logs
            try:
                prefix = "train" if model.training else "eval"
                self.log({
                    f"{prefix}/ce_loss": ce_loss.detach().item(),
                    f"{prefix}/diffusion_loss": diffusion_loss.detach().item() if isinstance(diffusion_loss, torch.Tensor) else float(diffusion_loss),
                })
                if hasattr(self, "optimizer") and self.optimizer is not None and len(self.optimizer.param_groups) > 0:
                    lr_val = self.optimizer.param_groups[0].get("lr", None)
                    if lr_val is not None:
                        self.log({"train/learning_rate_real": float(lr_val)})
            except Exception:
                pass

            if return_outputs:
                return total, outputs
            return total

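        # For reference, the scalar optimized above is
        #     total = ce_loss_weight * CE(non-acoustic tokens)
        #           + diffusion_loss_weight * diffusion_loss(selected latents)
        # CE covers only positions kept by mask_for_ce; the diffusion term is
        # produced inside the model from the latents flagged by speeches_loss_input.
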
        def prediction_step(
            self,
            model,
            inputs,
            prediction_loss_only=False,
            ignore_keys=None,
        ):
            model.eval()
            with torch.no_grad():
                loss = self.compute_loss(model, inputs)

            # HuggingFace expects (loss, logits, labels); we need neither
            # logits nor labels here, so return None for both.
            return loss, None, None

        def _debug_ce(self, shift_logits: torch.Tensor, ce_labels: torch.Tensor, attention_mask: Optional[torch.Tensor], acoustic_input_mask: Optional[torch.Tensor]):
            try:
                if not getattr(training_args, "debug_ce_details", False):
                    return
                step = int(getattr(self.state, "global_step", 0) or 0)
                every_n = max(1, int(getattr(training_args, "debug_ce_every_n_steps", 200) or 200))
                if not (step <= 1 or (step % every_n == 0)):
                    return

                with torch.no_grad():
                    vocab = shift_logits.size(-1)
                    per_token_loss = F.cross_entropy(
                        shift_logits.view(-1, vocab),
                        ce_labels.view(-1),
                        reduction="none",
                        ignore_index=-100,
                    ).view_as(ce_labels)

                    valid_mask = ce_labels.ne(-100)
                    num_valid = int(valid_mask.sum().item())
                    avg_loss = float(per_token_loss[valid_mask].mean().item()) if num_valid > 0 else float("nan")

                    per_ex_avgs = []
                    max_examples = max(1, int(getattr(training_args, "debug_ce_max_examples", 1) or 1))
                    B = ce_labels.size(0)
                    for b in range(min(B, max_examples)):
                        vb = valid_mask[b]
                        if int(vb.sum().item()) > 0:
                            per_ex_avgs.append(float(per_token_loss[b][vb].mean().item()))
                        else:
                            per_ex_avgs.append(float("nan"))
                    logger.info(f"CE debug: tokens_in_loss={num_valid}, avg_loss={avg_loss:.4f}, per_example_avgs={[round(x, 4) if x == x else None for x in per_ex_avgs]}")
            except Exception as e:
                logger.warning(f"CE detailed debug failed: {e}")

        # --------- CRITICAL SAVE OVERRIDES: also dump FULL head/connectors for inference ---------
        def _save(self, output_dir: Optional[str] = None, state_dict=None) -> None:
            try:
                target_dir = output_dir or self.args.output_dir
                lora_out = os.path.join(target_dir, "lora")
                os.makedirs(lora_out, exist_ok=True)

                # --- LLM PEFT adapters (if LoRA-wrapped) ---
                language_model = getattr(self.model.model, "language_model", None)
                if hasattr(language_model, "save_pretrained"):
                    language_model.save_pretrained(lora_out)

                # --- Diffusion head PEFT adapters (if LoRA-wrapped) ---
                pred_head = getattr(self.model.model, "prediction_head", None)
                if hasattr(pred_head, "save_pretrained"):
                    ph_dir = os.path.join(lora_out, "diffusion_head")
                    os.makedirs(ph_dir, exist_ok=True)
                    pred_head.save_pretrained(ph_dir)

                # --- ALWAYS save FULL diffusion head state_dict for fallback ---
                if pred_head is not None and hasattr(pred_head, "state_dict"):
                    sd = pred_head.state_dict()
                    torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
                    ph_dir = os.path.join(lora_out, "diffusion_head")
                    os.makedirs(ph_dir, exist_ok=True)
                    torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))

                # --- Connectors (plain state_dicts) ---
                ac = getattr(self.model.model, "acoustic_connector", None)
                if ac is not None:
                    ac_dir = os.path.join(lora_out, "acoustic_connector")
                    os.makedirs(ac_dir, exist_ok=True)
                    torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))

                se = getattr(self.model.model, "semantic_connector", None)
                if se is not None:
                    se_dir = os.path.join(lora_out, "semantic_connector")
                    os.makedirs(se_dir, exist_ok=True)
                    torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))

            except Exception as e:
                logger.warning(f"Failed to save LoRA assets: {e}")

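    # Resulting checkpoint layout (sketch; pieces are written only when present):
    #
    #   <output_dir>/lora/
    #       adapter_config.json + adapter weights   (LLM PEFT adapters)
    #       diffusion_head/                         (head adapters, if LoRA-wrapped,
    #                                                plus diffusion_head_full.bin)
    #       diffusion_head_full.bin                 (full head state_dict fallback)
    #       acoustic_connector/pytorch_model.bin
    #       semantic_connector/pytorch_model.bin
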
    # ------------- Build the Trainer -------------

    # Resolve which adapters to apply in samples
    ema_cb = EmaCallback(attr_path="model.prediction_head", decay=0.999, device="cpu")

    trainer = VibeVoiceTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=data_collator,
        callbacks=[ema_cb, LoRADebugCallback(log_every_n_steps=int(getattr(training_args, "logging_steps", 50) or 50))],
    )

    # Optional debug pre-training save
    if getattr(training_args, "debug_save", False):
        try:
            debug_dir = os.path.join(training_args.output_dir, "debug_initial")
            lora_out = os.path.join(debug_dir, "lora")
            os.makedirs(lora_out, exist_ok=True)
            logger.info(f"[debug_save] Saving initial (pre-training) model components to: {debug_dir}")
            # language model adapters / base
            try:
                if hasattr(model.model.language_model, "save_pretrained"):
                    model.model.language_model.save_pretrained(lora_out)
            except Exception as e_lm:
                logger.warning(f"[debug_save] Failed to save language_model: {e_lm}")
            # diffusion head
            try:
                if hasattr(model.model, "prediction_head") and hasattr(model.model.prediction_head, "save_pretrained"):
                    model.model.prediction_head.save_pretrained(os.path.join(lora_out, "diffusion_head"))
            except Exception as e_head:
                logger.warning(f"[debug_save] Failed to save prediction_head: {e_head}")
            # full diffusion head state_dict as fallback
            try:
                ph = getattr(model.model, "prediction_head", None)
                if ph is not None and hasattr(ph, "state_dict"):
                    sd = ph.state_dict()
                    torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
                    os.makedirs(os.path.join(lora_out, "diffusion_head"), exist_ok=True)
                    torch.save(sd, os.path.join(lora_out, "diffusion_head", "diffusion_head_full.bin"))
            except Exception as e:
                logger.warning(f"[debug_save] Failed to save FULL diffusion head: {e}")
            # connectors
            try:
                ac_conn = getattr(model.model, "acoustic_connector", None)
                if ac_conn is not None:
                    ac_dir = os.path.join(lora_out, "acoustic_connector")
                    os.makedirs(ac_dir, exist_ok=True)
                    torch.save(ac_conn.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
            except Exception as e_ac:
                logger.warning(f"[debug_save] Failed to save acoustic_connector: {e_ac}")
            try:
                se_conn = getattr(model.model, "semantic_connector", None)
                if se_conn is not None:
                    se_dir = os.path.join(lora_out, "semantic_connector")
                    os.makedirs(se_dir, exist_ok=True)
                    torch.save(se_conn.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
            except Exception as e_se:
                logger.warning(f"[debug_save] Failed to save semantic_connector: {e_se}")
        except Exception as e:
            logger.warning(f"[debug_save] Unexpected failure saving initial components: {e}")

    if getattr(training_args, "gradient_checkpointing", True):
        try:
            model.gradient_checkpointing_enable()
        except Exception:
            logger.warning("Failed to enable gradient checkpointing on the model.")

    if training_args.do_train:
        trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)

        lora_out = os.path.join(training_args.output_dir, "lora")
        os.makedirs(lora_out, exist_ok=True)

        # LLM PEFT (if any)
        lm = getattr(model.model, "language_model", None)
        if hasattr(lm, "save_pretrained"):
            lm.save_pretrained(lora_out)

        # Diffusion head PEFT (if any)
        ph = getattr(model.model, "prediction_head", None)
        if hasattr(ph, "save_pretrained"):
            ph_dir = os.path.join(lora_out, "diffusion_head")
            os.makedirs(ph_dir, exist_ok=True)
            ph.save_pretrained(ph_dir)

        # ALWAYS: full diffusion head state_dict fallback
        try:
            if ph is not None and hasattr(ph, "state_dict"):
                sd = ph.state_dict()
                torch.save(sd, os.path.join(lora_out, "diffusion_head_full.bin"))
                ph_dir = os.path.join(lora_out, "diffusion_head")
                os.makedirs(ph_dir, exist_ok=True)
                torch.save(sd, os.path.join(ph_dir, "diffusion_head_full.bin"))
        except Exception as e:
            logger.warning(f"Failed to save FULL diffusion head at end: {e}")

        # Connectors (if trained)
        try:
            ac = getattr(model.model, "acoustic_connector", None)
            if ac is not None:
                ac_dir = os.path.join(lora_out, "acoustic_connector")
                os.makedirs(ac_dir, exist_ok=True)
                torch.save(ac.state_dict(), os.path.join(ac_dir, "pytorch_model.bin"))
        except Exception as e:
            logger.warning(f"Failed to save acoustic_connector: {e}")

        try:
            se = getattr(model.model, "semantic_connector", None)
            if se is not None:
                se_dir = os.path.join(lora_out, "semantic_connector")
                os.makedirs(se_dir, exist_ok=True)
                torch.save(se.state_dict(), os.path.join(se_dir, "pytorch_model.bin"))
        except Exception as e:
            logger.warning(f"Failed to save semantic_connector: {e}")

    if training_args.do_eval and eval_dataset is not None:
        trainer.evaluate()


if __name__ == "__main__":
    main()
src/vibevoice/.DS_Store
ADDED
Binary file (6.15 kB).
src/vibevoice/configs/qwen2.5_1.5b_64k.json
ADDED
@@ -0,0 +1,112 @@
{
  "_attn_implementation_autoset": true,
  "acoustic_vae_dim": 64,
  "acoustic_tokenizer_config": {
    "causal": true,
    "channels": 1,
    "conv_bias": true,
    "conv_norm": "none",
    "corpus_normalize": 0.0,
    "decoder_depths": null,
    "decoder_n_filters": 32,
    "decoder_ratios": [
      8,
      5,
      5,
      4,
      2,
      2
    ],
    "disable_last_norm": true,
    "encoder_depths": "3-3-3-3-3-3-8",
    "encoder_n_filters": 32,
    "encoder_ratios": [
      8,
      5,
      5,
      4,
      2,
      2
    ],
    "fix_std": 0.5,
    "layer_scale_init_value": 1e-06,
    "layernorm": "RMSNorm",
    "layernorm_elementwise_affine": true,
    "layernorm_eps": 1e-05,
    "mixer_layer": "depthwise_conv",
    "model_type": "vibepod_acoustic_tokenizer",
    "pad_mode": "constant",
    "std_dist_type": "gaussian",
    "vae_dim": 64,
    "weight_init_value": 0.01
  },
  "decoder_config": {
    "attention_dropout": 0.0,
    "hidden_act": "silu",
    "hidden_size": 1536,
    "initializer_range": 0.02,
    "intermediate_size": 8960,
    "max_position_embeddings": 65536,
    "max_window_layers": 28,
    "model_type": "qwen2",
    "num_attention_heads": 12,
    "num_hidden_layers": 28,
    "num_key_value_heads": 2,
    "rms_norm_eps": 1e-06,
    "rope_scaling": null,
    "rope_theta": 1000000.0,
    "sliding_window": null,
    "tie_word_embeddings": true,
    "torch_dtype": "bfloat16",
    "use_cache": true,
    "use_sliding_window": false,
    "vocab_size": 151936
  },
  "diffusion_head_config": {
    "ddpm_batch_mul": 4,
    "ddpm_beta_schedule": "cosine",
    "ddpm_num_inference_steps": 20,
    "ddpm_num_steps": 1000,
    "diffusion_type": "ddpm",
    "head_ffn_ratio": 3.0,
    "head_layers": 4,
    "hidden_size": 1536,
    "latent_size": 64,
    "model_type": "vibepod_diffusion_head",
    "prediction_type": "v_prediction",
    "rms_norm_eps": 1e-05,
    "speech_vae_dim": 64
  },
  "model_type": "vibepod",
  "semantic_tokenizer_config": {
    "causal": true,
    "channels": 1,
    "conv_bias": true,
    "conv_norm": "none",
    "corpus_normalize": 0.0,
    "disable_last_norm": true,
    "encoder_depths": "3-3-3-3-3-3-8",
    "encoder_n_filters": 32,
    "encoder_ratios": [
      8,
      5,
      5,
      4,
      2,
      2
    ],
    "fix_std": 0,
    "layer_scale_init_value": 1e-06,
    "layernorm": "RMSNorm",
    "layernorm_elementwise_affine": true,
    "layernorm_eps": 1e-05,
    "mixer_layer": "depthwise_conv",
    "model_type": "vibepod_semantic_tokenizer",
    "pad_mode": "constant",
    "std_dist_type": "none",
    "vae_dim": 128,
    "weight_init_value": 0.01
  },
  "semantic_vae_dim": 128,
  "torch_dtype": "bfloat16"
}
src/vibevoice/configs/qwen2.5_7b_32k.json
ADDED
@@ -0,0 +1,113 @@
{
  "_attn_implementation_autoset": true,
  "acoustic_vae_dim": 64,
  "acoustic_tokenizer_config": {
    "causal": true,
    "channels": 1,
    "conv_bias": true,
    "conv_norm": "none",
    "corpus_normalize": 0.0,
    "decoder_depths": null,
    "decoder_n_filters": 32,
    "decoder_ratios": [
      8,
      5,
      5,
      4,
      2,
      2
    ],
    "disable_last_norm": true,
    "encoder_depths": "3-3-3-3-3-3-8",
    "encoder_n_filters": 32,
    "encoder_ratios": [
      8,
      5,
      5,
      4,
      2,
      2
    ],
    "fix_std": 0.5,
    "layer_scale_init_value": 1e-06,
    "layernorm": "RMSNorm",
    "layernorm_elementwise_affine": true,
    "layernorm_eps": 1e-05,
    "mixer_layer": "depthwise_conv",
    "model_type": "vibepod_acoustic_tokenizer",
    "pad_mode": "constant",
    "std_dist_type": "gaussian",
    "vae_dim": 64,
    "weight_init_value": 0.01
  },
  "decoder_config": {
    "attention_dropout": 0.0,
    "hidden_act": "silu",
    "hidden_size": 3584,
    "initializer_range": 0.02,
    "intermediate_size": 18944,
    "max_position_embeddings": 32768,
    "max_window_layers": 28,
    "model_type": "qwen2",
    "num_attention_heads": 28,
    "num_hidden_layers": 28,
    "num_key_value_heads": 4,
    "rms_norm_eps": 1e-06,
    "rope_theta": 1000000.0,
    "sliding_window": null,
    "tie_word_embeddings": false,
    "torch_dtype": "bfloat16",
    "transformers_version": "4.40.1",
    "use_cache": true,
    "use_mrope": false,
    "use_sliding_window": false,
    "vocab_size": 152064
  },
  "diffusion_head_config": {
    "ddpm_batch_mul": 4,
    "ddpm_beta_schedule": "cosine",
    "ddpm_num_inference_steps": 20,
    "ddpm_num_steps": 1000,
    "diffusion_type": "ddpm",
    "head_ffn_ratio": 3.0,
    "head_layers": 4,
    "hidden_size": 3584,
    "latent_size": 64,
    "model_type": "vibepod_diffusion_head",
    "prediction_type": "v_prediction",
    "rms_norm_eps": 1e-05,
    "speech_vae_dim": 64
  },
  "model_type": "vibepod",
  "semantic_tokenizer_config": {
    "causal": true,
    "channels": 1,
    "conv_bias": true,
    "conv_norm": "none",
    "corpus_normalize": 0.0,
    "disable_last_norm": true,
    "encoder_depths": "3-3-3-3-3-3-8",
    "encoder_n_filters": 32,
    "encoder_ratios": [
      8,
      5,
      5,
      4,
      2,
      2
    ],
    "fix_std": 0,
    "layer_scale_init_value": 1e-06,
    "layernorm": "RMSNorm",
    "layernorm_elementwise_affine": true,
    "layernorm_eps": 1e-05,
    "mixer_layer": "depthwise_conv",
    "model_type": "vibepod_semantic_tokenizer",
    "pad_mode": "constant",
    "std_dist_type": "none",
    "vae_dim": 128,
    "weight_init_value": 0.01
  },
  "semantic_vae_dim": 128,
  "torch_dtype": "bfloat16"
}
src/vibevoice/data_vibevoice.py
ADDED
File without changes

src/vibevoice/modular/__init__.py
ADDED
File without changes

src/vibevoice/modular/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (160 Bytes).

src/vibevoice/modular/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (175 Bytes).

src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-310.pyc
ADDED
Binary file (5.8 kB).

src/vibevoice/modular/__pycache__/configuration_vibevoice.cpython-311.pyc
ADDED
Binary file (9.25 kB).

src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-310.pyc
ADDED
Binary file (14.9 kB).

src/vibevoice/modular/__pycache__/modeling_vibevoice.cpython-311.pyc
ADDED
Binary file (29.3 kB).

src/vibevoice/modular/__pycache__/modeling_vibevoice_inference.cpython-310.pyc
ADDED
Binary file (20.9 kB).

src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-310.pyc
ADDED
Binary file (9.32 kB).

src/vibevoice/modular/__pycache__/modular_vibevoice_diffusion_head.cpython-311.pyc
ADDED
Binary file (15.8 kB).

src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-310.pyc
ADDED
Binary file (6.22 kB).

src/vibevoice/modular/__pycache__/modular_vibevoice_text_tokenizer.cpython-311.pyc
ADDED
Binary file (8.29 kB).

src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-310.pyc
ADDED
Binary file (33.5 kB).

src/vibevoice/modular/__pycache__/modular_vibevoice_tokenizer.cpython-311.pyc
ADDED
Binary file (65.4 kB).

src/vibevoice/modular/__pycache__/streamer.cpython-310.pyc
ADDED
Binary file (8.84 kB).
src/vibevoice/modular/configuration_vibevoice.py
ADDED
@@ -0,0 +1,248 @@
""" VibeVoice_AcousticTokenizer model configuration"""
|
| 2 |
+
|
| 3 |
+
from typing import Dict, List, Optional, Tuple
|
| 4 |
+
|
| 5 |
+
from transformers.configuration_utils import PretrainedConfig
|
| 6 |
+
from transformers.utils import logging
|
| 7 |
+
|
| 8 |
+
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
|
| 9 |
+
|
| 10 |
+
logger = logging.get_logger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class VibeVoiceAcousticTokenizerConfig(PretrainedConfig):
|
| 14 |
+
model_type = "vibevoice_acoustic_tokenizer"
|
| 15 |
+
|
| 16 |
+
def __init__(
|
| 17 |
+
self,
|
| 18 |
+
channels: int = 1,
|
| 19 |
+
corpus_normalize: float = 0.0,
|
| 20 |
+
causal: bool = True,
|
| 21 |
+
vae_dim: int = 64,
|
| 22 |
+
fix_std: float = 0.5,
|
| 23 |
+
std_dist_type: str = 'gaussian',
|
| 24 |
+
# common
|
| 25 |
+
mixer_layer: str = 'depthwise_conv',
|
| 26 |
+
conv_norm: str = 'none',
|
| 27 |
+
pad_mode: str = 'constant',
|
| 28 |
+
disable_last_norm: bool = True,
|
| 29 |
+
layernorm: str = 'RMSNorm',
|
| 30 |
+
layernorm_eps: float = 1e-5,
|
| 31 |
+
layernorm_elementwise_affine: bool = True,
|
| 32 |
+
conv_bias: bool = True,
|
| 33 |
+
layer_scale_init_value: float = 1e-6,
|
| 34 |
+
weight_init_value: float = 1e-2,
|
| 35 |
+
# encoder specific
|
| 36 |
+
encoder_n_filters: int = 32,
|
| 37 |
+
encoder_ratios: Optional[List[int]] = [8,5,5,4,2,2],
|
| 38 |
+
encoder_depths: str = "3-3-3-3-3-3-8",
|
| 39 |
+
# decoder specific
|
| 40 |
+
decoder_n_filters: int = 32,
|
| 41 |
+
decoder_ratios: Optional[List[int]] = None, # if None, same as encoder
|
| 42 |
+
decoder_depths: Optional[str] = None,
|
| 43 |
+
**kwargs
|
| 44 |
+
):
|
| 45 |
+
super().__init__(**kwargs)
|
| 46 |
+
self.channels = channels
|
| 47 |
+
self.corpus_normalize = corpus_normalize
|
| 48 |
+
self.causal = causal
|
| 49 |
+
self.vae_dim = vae_dim
|
| 50 |
+
self.fix_std = fix_std
|
| 51 |
+
self.std_dist_type = std_dist_type
|
| 52 |
+
|
| 53 |
+
# common parameters
|
| 54 |
+
self.conv_norm = conv_norm
|
| 55 |
+
self.pad_mode = pad_mode
|
| 56 |
+
self.layernorm_eps = layernorm_eps
|
| 57 |
+
self.disable_last_norm = disable_last_norm
|
| 58 |
+
self.layernorm = layernorm
|
| 59 |
+
self.layernorm_elementwise_affine = layernorm_elementwise_affine
|
| 60 |
+
self.conv_bias = conv_bias
|
| 61 |
+
self.layer_scale_init_value = layer_scale_init_value
|
| 62 |
+
self.weight_init_value = weight_init_value
|
| 63 |
+
self.mixer_layer = mixer_layer
|
| 64 |
+
|
| 65 |
+
# encoder specific parameters
|
| 66 |
+
self.encoder_n_filters = encoder_n_filters
|
| 67 |
+
self.encoder_ratios = encoder_ratios
|
| 68 |
+
self.encoder_depths = encoder_depths
|
| 69 |
+
|
| 70 |
+
# decoder specific parameters
|
| 71 |
+
self.decoder_ratios = decoder_ratios if decoder_ratios is not None else encoder_ratios
|
| 72 |
+
self.decoder_n_filters = decoder_n_filters
|
| 73 |
+
self.decoder_depths = decoder_depths
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class VibeVoiceSemanticTokenizerConfig(PretrainedConfig):
|
| 77 |
+
model_type = "vibevoice_semantic_tokenizer"
|
| 78 |
+
|
| 79 |
+
def __init__(
|
| 80 |
+
self,
|
| 81 |
+
channels: int = 1,
|
| 82 |
+
corpus_normalize: float = 0.0,
|
| 83 |
+
causal: bool = True,
|
| 84 |
+
vae_dim: int = 64,
|
| 85 |
+
fix_std: float = 0,
|
| 86 |
+
std_dist_type: str = 'none',
|
| 87 |
+
# common
|
| 88 |
+
mixer_layer: str = 'depthwise_conv',
|
| 89 |
+
conv_norm: str = 'none',
|
| 90 |
+
pad_mode: str = 'constant',
|
| 91 |
+
disable_last_norm: bool = True,
|
| 92 |
+
layernorm: str = 'RMSNorm',
|
| 93 |
+
layernorm_eps: float = 1e-5,
|
| 94 |
+
layernorm_elementwise_affine: bool = True,
|
| 95 |
+
conv_bias: bool = True,
|
| 96 |
+
layer_scale_init_value: float = 1e-6,
|
| 97 |
+
weight_init_value: float = 1e-2,
|
| 98 |
+
# encoder specific
|
| 99 |
+
encoder_n_filters: int = 32,
|
| 100 |
+
encoder_ratios: Optional[List[int]] = [8,5,5,4,2,2],
|
| 101 |
+
encoder_depths: str = "3-3-3-3-3-3-8",
|
| 102 |
+
**kwargs
|
| 103 |
+
):
|
| 104 |
+
super().__init__(**kwargs)
|
| 105 |
+
self.channels = channels
|
| 106 |
+
self.corpus_normalize = corpus_normalize
|
| 107 |
+
self.causal = causal
|
| 108 |
+
self.vae_dim = vae_dim
|
| 109 |
+
self.fix_std = fix_std
|
| 110 |
+
self.std_dist_type = std_dist_type
|
| 111 |
+
|
| 112 |
+
# common parameters
|
| 113 |
+
self.conv_norm = conv_norm
|
| 114 |
+
self.pad_mode = pad_mode
|
| 115 |
+
self.layernorm_eps = layernorm_eps
|
| 116 |
+
self.disable_last_norm = disable_last_norm
|
| 117 |
+
self.layernorm = layernorm
|
| 118 |
+
self.layernorm_elementwise_affine = layernorm_elementwise_affine
|
| 119 |
+
self.conv_bias = conv_bias
|
| 120 |
+
self.layer_scale_init_value = layer_scale_init_value
|
| 121 |
+
self.weight_init_value = weight_init_value
|
| 122 |
+
self.mixer_layer = mixer_layer
|
| 123 |
+
|
| 124 |
+
# encoder specific parameters
|
| 125 |
+
self.encoder_n_filters = encoder_n_filters
|
| 126 |
+
self.encoder_ratios = encoder_ratios
|
| 127 |
+
self.encoder_depths = encoder_depths
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class VibeVoiceDiffusionHeadConfig(PretrainedConfig):
|
| 131 |
+
model_type = "vibevoice_diffusion_head"
|
| 132 |
+
|
| 133 |
+
def __init__(
|
| 134 |
+
self,
|
| 135 |
+
hidden_size=768,
|
| 136 |
+
head_layers=4,
|
| 137 |
+
head_ffn_ratio=3.0,
|
| 138 |
+
rms_norm_eps=1e-5,
|
| 139 |
+
latent_size=64,
|
| 140 |
+
speech_vae_dim=None,
|
| 141 |
+
prediction_type="v_prediction",
|
| 142 |
+
diffusion_type="ddpm",
|
| 143 |
+
ddpm_num_steps=1000,
|
| 144 |
+
ddpm_num_inference_steps=20,
|
| 145 |
+
ddpm_beta_schedule="cosine",
|
| 146 |
+
ddpm_batch_mul=4,
|
| 147 |
+
**kwargs
|
| 148 |
+
):
|
| 149 |
+
self.hidden_size = hidden_size
|
| 150 |
+
self.head_layers = head_layers
|
| 151 |
+
self.head_ffn_ratio = head_ffn_ratio
|
| 152 |
+
self.rms_norm_eps = rms_norm_eps
|
| 153 |
+
self.latent_size = latent_size
|
| 154 |
+
self.speech_vae_dim = speech_vae_dim
|
| 155 |
+
self.prediction_type = prediction_type
|
| 156 |
+
self.diffusion_type = diffusion_type
|
| 157 |
+
self.ddpm_num_steps = ddpm_num_steps
|
| 158 |
+
self.ddpm_num_inference_steps = ddpm_num_inference_steps
|
| 159 |
+
self.ddpm_beta_schedule = ddpm_beta_schedule
|
| 160 |
+
self.ddpm_batch_mul = ddpm_batch_mul
|
| 161 |
+
|
| 162 |
+
super().__init__(**kwargs)
|
| 163 |
+
|
| 164 |
+
class VibeVoiceConfig(PretrainedConfig):
|
| 165 |
+
model_type = "vibevoice"
|
| 166 |
+
is_composition = True
|
| 167 |
+
sub_configs = {
|
| 168 |
+
"acoustic_tokenizer_config": VibeVoiceAcousticTokenizerConfig,
|
| 169 |
+
"semantic_tokenizer_config": VibeVoiceSemanticTokenizerConfig,
|
| 170 |
+
"decoder_config": Qwen2Config,
|
| 171 |
+
"diffusion_head_config": VibeVoiceDiffusionHeadConfig,
|
| 172 |
+
}
|
| 173 |
+
# keys_to_ignore_at_inference = ["past_key_values"]
|
| 174 |
+
# Default tensor parallel plan for base model `Qwen2`
|
| 175 |
+
base_model_tp_plan = {
|
| 176 |
+
"layers.*.self_attn.q_proj": "colwise",
|
| 177 |
+
"layers.*.self_attn.k_proj": "colwise",
|
| 178 |
+
"layers.*.self_attn.v_proj": "colwise",
|
| 179 |
+
"layers.*.self_attn.o_proj": "rowwise",
|
| 180 |
+
"layers.*.mlp.gate_proj": "colwise",
|
| 181 |
+
"layers.*.mlp.up_proj": "colwise",
|
| 182 |
+
"layers.*.mlp.down_proj": "rowwise",
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
def __init__(
|
| 186 |
+
self,
|
| 187 |
+
acoustic_tokenizer_config=None,
|
| 188 |
+
semantic_tokenizer_config=None,
|
| 189 |
+
decoder_config=None,
|
| 190 |
+
diffusion_head_config=None,
|
| 191 |
+
**kwargs
|
| 192 |
+
):
|
| 193 |
+
|
| 194 |
+
# kwargs["_attn_implementation"] = "flash_attention_2"
|
| 195 |
+
kwargs["_attn_implementation_autoset"] = False
|
| 196 |
+
|
| 197 |
+
if acoustic_tokenizer_config is None:
|
| 198 |
+
self.acoustic_tokenizer_config = self.sub_configs["acoustic_tokenizer_config"]()
|
| 199 |
+
elif isinstance(acoustic_tokenizer_config, dict):
|
| 200 |
+
acoustic_tokenizer_config["model_type"] = "vibevoice_acoustic_tokenizer"
|
| 201 |
+
self.acoustic_tokenizer_config = self.sub_configs["acoustic_tokenizer_config"](**acoustic_tokenizer_config)
|
| 202 |
+
elif isinstance(acoustic_tokenizer_config, VibeVoiceAcousticTokenizerConfig):
|
| 203 |
+
# If an instance of the config class is provided
|
| 204 |
+
self.acoustic_tokenizer_config = acoustic_tokenizer_config
|
| 205 |
+
|
| 206 |
+
if semantic_tokenizer_config is None:
|
| 207 |
+
self.semantic_tokenizer_config = self.sub_configs["semantic_tokenizer_config"]()
|
| 208 |
+
elif isinstance(semantic_tokenizer_config, dict):
|
| 209 |
+
semantic_tokenizer_config["model_type"] = "vibevoice_semantic_tokenizer"
|
| 210 |
+
self.semantic_tokenizer_config = self.sub_configs["semantic_tokenizer_config"](**semantic_tokenizer_config)
|
| 211 |
+
elif isinstance(semantic_tokenizer_config, VibeVoiceSemanticTokenizerConfig):
|
| 212 |
+
# If an instance of the config class is provided
|
| 213 |
+
self.semantic_tokenizer_config = semantic_tokenizer_config
|
| 214 |
+
|
| 215 |
+
if decoder_config is None:
|
| 216 |
+
self.decoder_config = self.sub_configs["decoder_config"]()
|
| 217 |
+
elif isinstance(decoder_config, dict):
|
| 218 |
+
# If a dictionary is provided, instantiate the config class with it
|
| 219 |
+
# self.decoder_config = self.sub_configs["decoder_config"](**decoder_config)
|
| 220 |
+
if decoder_config.get("model_type", '') == "qwen2":
|
| 221 |
+
self.decoder_config = Qwen2Config(**decoder_config)
|
| 222 |
+
else:
|
| 223 |
+
raise ValueError(f"Unsupported decoder model type: {decoder_config.get('model_type', '')}")
|
| 224 |
+
elif isinstance(decoder_config, (Qwen2Config,)):
|
| 225 |
+
# If an instance of the config class is provided
|
| 226 |
+
self.decoder_config = decoder_config
|
| 227 |
+
|
| 228 |
+
if diffusion_head_config is None:
|
| 229 |
+
self.diffusion_head_config = self.sub_configs["diffusion_head_config"]()
|
| 230 |
+
elif isinstance(diffusion_head_config, dict):
|
| 231 |
+
diffusion_head_config["model_type"] = "vibevoice_diffusion_head"
|
| 232 |
+
self.diffusion_head_config = self.sub_configs["diffusion_head_config"](**diffusion_head_config)
|
| 233 |
+
elif isinstance(diffusion_head_config, VibeVoiceDiffusionHeadConfig):
|
| 234 |
+
# If an instance of the config class is provided
|
| 235 |
+
self.diffusion_head_config = diffusion_head_config
|
| 236 |
+
|
| 237 |
+
# other parameters
|
| 238 |
+
self.acoustic_vae_dim = getattr(self.acoustic_tokenizer_config, 'vae_dim', 64)
|
| 239 |
+
self.semantic_vae_dim = getattr(self.semantic_tokenizer_config, 'vae_dim', 128)
|
| 240 |
+
|
| 241 |
+
super().__init__(**kwargs)
|
| 242 |
+
|
| 243 |
+
__all__ = [
|
| 244 |
+
"VibeVoiceAcousticTokenizerConfig",
|
| 245 |
+
"VibeVoiceSemanticTokenizerConfig",
|
| 246 |
+
"VibeVoiceDiffusionHeadConfig",
|
| 247 |
+
"VibeVoiceConfig"
|
| 248 |
+
]
|
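As a quick sketch of how `VibeVoiceConfig` composes its sub-configurations (dict sub-configs are promoted to the matching config classes, and a decoder dict whose `model_type` is not "qwen2" raises a `ValueError`), the following minimal example may help; the override values are illustrative assumptions, not the shipped defaults, and the import path assumes `src/` is on `PYTHONPATH`:

from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig

# Dict sub-configs are promoted to their config classes; the constructor
# forces `model_type` to the matching value before instantiation.
config = VibeVoiceConfig(
    acoustic_tokenizer_config={"vae_dim": 64},
    decoder_config={"model_type": "qwen2", "hidden_size": 1536},  # must report "qwen2"
    diffusion_head_config={"ddpm_num_steps": 1000},
)

# The composed config mirrors the tokenizer latent size at the top level.
assert config.acoustic_vae_dim == config.acoustic_tokenizer_config.vae_dim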
src/vibevoice/modular/modeling_vibevoice.py
ADDED
@@ -0,0 +1,508 @@
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Tuple, Union, Callable
+from tqdm import tqdm
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.distributed as dist
+
+from transformers.models.auto import AutoModel, AutoModelForCausalLM
+
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import CausalLMOutput, BaseModelOutputWithPast, ModelOutput
+from transformers.models.llama.modeling_llama import LlamaRMSNorm
+from transformers import modeling_utils
+from transformers.modeling_utils import PreTrainedModel
+from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+from transformers.utils import logging
+
+
+from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceAcousticTokenizerModel, VibeVoiceSemanticTokenizerModel
+from .modular_vibevoice_diffusion_head import VibeVoiceDiffusionHead
+from vibevoice.schedule.dpm_solver import DPMSolverMultistepScheduler
+
+from .configuration_vibevoice import VibeVoiceConfig
+
+
+logger = logging.get_logger(__name__)
+
+if not hasattr(modeling_utils, "ALL_PARALLEL_STYLES") or modeling_utils.ALL_PARALLEL_STYLES is None:
+    modeling_utils.ALL_PARALLEL_STYLES = ["tp", "none", "colwise", "rowwise"]
+
+@dataclass
+class VibeVoiceCausalLMOutputWithPast(ModelOutput):
+    loss: Optional[torch.FloatTensor] = None
+    diffusion_loss: Optional[torch.FloatTensor] = None
+    speech_token_num: Optional[int] = None
+    logits: torch.FloatTensor = None
+    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class VibeVoiceGenerationOutput(ModelOutput):
+    """
+    Output type for VibeVoice generation.
+
+    Args:
+        sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            The generated sequences.
+        speech_outputs (`List[torch.FloatTensor]`, *optional*):
+            List of generated speech waveforms or latents for each speech segment.
+    """
+    sequences: torch.LongTensor = None
+    speech_outputs: Optional[List[torch.FloatTensor]] = None
+
+
+class SpeechConnector(nn.Module):
+    def __init__(self, input_dim, output_dim):
+        super().__init__()
+        self.fc1 = nn.Linear(input_dim, output_dim)
+        self.norm = LlamaRMSNorm(output_dim, eps=1e-6)
+        self.fc2 = nn.Linear(output_dim, output_dim)
+
+    def forward(self, features, **kwargs):
+        x = self.fc1(features)
+        x = self.norm(x)
+        x = self.fc2(x)
+        return x
+
+
+# @auto_docstring
+class VibeVoicePreTrainedModel(PreTrainedModel):
+    config_class = VibeVoiceConfig
+    base_model_prefix = "model"
+    supports_gradient_checkpointing = True
+    _skip_keys_device_placement = "past_key_values"
+    _supports_cache_class = True
+    _supports_flash_attn_2 = True
+    _supports_sdpa = True
+    _supports_quantized_cache = True
+    _supports_static_cache = True
+    _supports_attention_backend = True
+
+    def _init_weights(self, module):
+        if isinstance(module, VibeVoiceDiffusionHead):
+            module.initialize_weights()
+            return
+
+        # Use the language model's initializer_range if available
+        if hasattr(self.config, 'language_model_config') and hasattr(self.config.language_model_config, 'initializer_range'):
+            std = self.config.language_model_config.initializer_range
+        elif hasattr(self.config, 'decoder_config') and hasattr(self.config.decoder_config, 'initializer_range'):
+            std = self.config.decoder_config.initializer_range
+        else:
+            std = 0.02  # Default value
+
+        if isinstance(module, nn.Linear):
+            module.weight.data.normal_(mean=0.0, std=std)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.LayerNorm):
+            module.weight.data.fill_(1.0)
+            module.bias.data.zero_()
+
+# @auto_docstring
+class VibeVoiceModel(VibeVoicePreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+
+        if hasattr(config, 'torch_dtype') and config.torch_dtype is not None:
+            if isinstance(config.torch_dtype, str):
+                dtype = getattr(torch, config.torch_dtype)
+            else:
+                dtype = config.torch_dtype
+        else:
+            dtype = torch.float32
+
+        # Initialize Qwen2 model for language modeling
+        lm_config = config.decoder_config
+        self.language_model = AutoModel.from_config(lm_config)
+
+        # Initialize speech components if needed
+        self.acoustic_tokenizer = AutoModel.from_config(config.acoustic_tokenizer_config).to(dtype)
+        self.semantic_tokenizer = AutoModel.from_config(config.semantic_tokenizer_config).to(dtype)
+
+        self.acoustic_connector = SpeechConnector(config.acoustic_vae_dim, lm_config.hidden_size).to(dtype)
+        self.semantic_connector = SpeechConnector(config.semantic_vae_dim, lm_config.hidden_size).to(dtype)
+
+        # Register scaling factors as buffers - use 1D tensors for FSDP compatibility
+        self.register_buffer('speech_scaling_factor', torch.tensor(float('nan')))
+        self.register_buffer('speech_bias_factor', torch.tensor(float('nan')))
+
+        # Initialize prediction head for speech generation
+        self.prediction_head = AutoModel.from_config(config.diffusion_head_config).to(dtype)
+
+        # Initialize noise scheduler
+        self.noise_scheduler = DPMSolverMultistepScheduler(
+            num_train_timesteps=config.diffusion_head_config.ddpm_num_steps,
+            beta_schedule=config.diffusion_head_config.ddpm_beta_schedule,
+            prediction_type=config.diffusion_head_config.prediction_type
+        )
+
+    def get_input_embeddings(self):
+        if hasattr(self.language_model, 'embed_tokens'):
+            # If the language model has an embed_tokens attribute, return it
+            return self.language_model.embed_tokens
+
+        for name, attr in self.language_model.fullmap.items():  # parallelized by nnscaler, so attribute names are remapped
+            if attr.orig_name == 'embed_tokens.weight':
+                return getattr(self.language_model, name)
+        assert False, 'should not arrive here'
+
+    def set_input_embeddings(self, value):
+        self.language_model.embed_tokens = value
+
+    def set_speech_tokenizers(self, acoustic_tokenizer=None, semantic_tokenizer=None):
+        """Set the speech tokenizers used for encoding and decoding speech."""
+        self.acoustic_tokenizer = acoustic_tokenizer
+        self.semantic_tokenizer = semantic_tokenizer
+
+        # Reset the encoder to evaluation mode
+        if self.acoustic_tokenizer is not None:
+            self.acoustic_tokenizer.eval()
+
+        if self.semantic_tokenizer is not None:
+            self.semantic_tokenizer.eval()
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        **kwargs,
+    ) -> Union[Tuple, BaseModelOutputWithPast]:
+
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # Forward through language model
+        outputs = self.language_model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            cache_position=cache_position,
+            **kwargs,
+        )
+
+        if not return_dict:
+            return outputs
+
+        return BaseModelOutputWithPast(
+            last_hidden_state=outputs.last_hidden_state,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+
+class VibeVoiceForConditionalGeneration(VibeVoicePreTrainedModel):
+    _tied_weights_keys = ["lm_head.weight"]
+    _tp_plan = {"lm_head": "colwise_rep"}
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = VibeVoiceModel(config)
+        self.vocab_size = config.decoder_config.vocab_size
+        self.lm_head = nn.Linear(config.decoder_config.hidden_size, self.vocab_size, bias=False)
+
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.get_input_embeddings()
+
+    def set_input_embeddings(self, value):
+        self.model.set_input_embeddings(value)
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_decoder(self, decoder):
+        self.model.language_model = decoder
+
+    def get_decoder(self):
+        return self.model.language_model
+
+    def tie_weights(self):
+        """
+        Tie the weights between the input embeddings and the output embeddings.
+        """
+        if getattr(self.config.decoder_config, 'tie_word_embeddings', False):
+            # The standard PreTrainedModel method handles the tying.
+            # It does a simple parameter object assignment, which is only
+            # correct to do BEFORE FSDP wraps the model.
+            output_embeddings = self.get_output_embeddings()
+            input_embeddings = self.get_input_embeddings()
+            if hasattr(input_embeddings, 'weight'):
+                output_embeddings.weight = input_embeddings.weight
+            else:
+                # get_input_embeddings may have returned a tensor directly
+                output_embeddings.weight = input_embeddings
+
+            if getattr(output_embeddings, "bias", None) is not None:
+                output_embeddings.bias.data = nn.functional.pad(
+                    output_embeddings.bias.data,
+                    (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),
+                    "constant",
+                    0,
+                )
+            print("✅ Tied input and output embeddings using standard assignment.")
+        else:
+            print("ℹ️ tie_word_embeddings is False, not tying weights.")
+
+    # Note: set_output_embeddings is safe as implemented;
+    # the key is to avoid calling it after accelerator.prepare().
+    def set_output_embeddings(self, new_embeddings):
+        # Direct assignment is fine here; the important thing is
+        # to not call this after prepare().
+        self.lm_head = new_embeddings
+
+    def forward_speech_features(
+        self,
+        speech_tensors=None,
+        speech_masks=None,
+        speech_type="audio",
+        return_unmask=False
+    ):
+        if speech_tensors is None:
+            # Use config to get vae_dim instead of non-existent self.args
+            vae_dim = self.config.acoustic_tokenizer_config.vae_dim
+            audio_features = torch.zeros(1, 1, vae_dim).to(self.get_input_embeddings().weight)
+            connect_features = self.model.acoustic_connector(audio_features)
+            return audio_features, connect_features
+        else:
+            with torch.no_grad():
+                if speech_type == "audio":
+                    with torch.no_grad():
+                        frames = self.model.acoustic_tokenizer.encode(speech_tensors.unsqueeze(1))[0][0]
+                        audio_tokens = frames.sample(self.model.acoustic_tokenizer.std_dist_type)[0]
+
+                elif speech_type == "vae":
+                    # Use config to get vae_dim instead of non-existent self.args
+                    vae_dim = self.config.acoustic_tokenizer_config.vae_dim
+                    speech_mode = speech_tensors.reshape(speech_tensors.size(0), -1, vae_dim)
+
+                    # gaussian sample from the speech_mode
+                    batch_size = speech_mode.size(0)
+                    value = self.model.acoustic_tokenizer.fix_std / 0.8
+                    std = torch.randn(batch_size, dtype=speech_mode.dtype, device=speech_mode.device) * value
+                    std = std.view(-1, *[1] * (speech_mode.dim() - 1))
+                    audio_tokens = speech_mode + std * torch.randn(speech_mode.shape).to(speech_mode)
+                else:
+                    raise NotImplementedError(f"Speech type {speech_type} not implemented")
+
+                if torch.isnan(self.model.speech_scaling_factor) or torch.isnan(self.model.speech_bias_factor):
+                    scaling_factor = 1. / audio_tokens[speech_masks].flatten().std()
+                    bias_factor = -audio_tokens[speech_masks].flatten().mean()
+
+                    # Only use distributed operations if the process group is initialized
+                    if dist.is_available() and dist.is_initialized():
+                        dist.all_reduce(scaling_factor, op=dist.ReduceOp.SUM)
+                        dist.all_reduce(bias_factor, op=dist.ReduceOp.SUM)
+                        world_size = dist.get_world_size()
+                        self.model.speech_scaling_factor.copy_(scaling_factor / world_size)
+                        self.model.speech_bias_factor.copy_(bias_factor / world_size)
+                        print(f"Speech scaling factor (distributed): {self.model.speech_scaling_factor}, bias factor: {self.model.speech_bias_factor}", flush=True)
+                    else:
+                        # Single process case
+                        self.model.speech_scaling_factor.copy_(scaling_factor)
+                        self.model.speech_bias_factor.copy_(bias_factor)
+                        print(f"Speech scaling factor (single process): {self.model.speech_scaling_factor}, bias factor: {self.model.speech_bias_factor}", flush=True)
+
+                audio_features = (audio_tokens + self.model.speech_bias_factor) * self.model.speech_scaling_factor
+
+            connect_features = self.model.acoustic_connector(audio_features)
+            if return_unmask:
+                return audio_features, connect_features
+            return audio_features[speech_masks], connect_features[speech_masks]
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = False,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        # New arguments for speech processing and loss calculation
+        speech_tensors: Optional[torch.FloatTensor] = None,
+        speech_masks: Optional[torch.BoolTensor] = None,
+        speeches_loss_input: Optional[torch.FloatTensor] = None,
+        speech_semantic_tensors: Optional[torch.FloatTensor] = None,
+        acoustic_input_mask: Optional[torch.BoolTensor] = None,
+        acoustic_loss_mask: Optional[torch.BoolTensor] = None,
+        ddpm_batch_mul: int = 1,
+        **kwargs: Optional[Dict[str, Union[torch.Tensor, str]]],
+    ) -> Union[Tuple, VibeVoiceCausalLMOutputWithPast]:
+
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        x = self.get_input_embeddings()(input_ids)
+
+        semantic_speech_all_connect_features = self.model.semantic_connector(speech_semantic_tensors)
+        if speeches_loss_input is not None:
+            # only part of the audio needs the diffusion loss
+            speech_all_features, speech_all_connect_features = self.forward_speech_features(
+                speech_tensors=speech_tensors.type_as(x) if speech_tensors is not None else None,
+                speech_masks=speech_masks,
+                speech_type=kwargs.get("speech_type", "audio"),
+                return_unmask=True
+            )
+            if speech_tensors is not None:
+                if semantic_speech_all_connect_features is not None:
+                    x[acoustic_input_mask] = speech_all_connect_features[speech_masks] + semantic_speech_all_connect_features[speech_masks]
+                else:
+                    x[acoustic_input_mask] = speech_all_connect_features[speech_masks]
+            speech_features = speech_all_features[speeches_loss_input & speech_masks]  # only part of the audio needs the diffusion loss
+            speech_connect_features = speech_all_connect_features[speeches_loss_input & speech_masks]
+            # Forward-time consistency check: selected latent count should match number of acoustic placeholders
+            try:
+                if acoustic_input_mask is not None:
+                    assert speech_connect_features.shape[0] == int(acoustic_input_mask.sum().item()), (
+                        f"Mismatch between selected speech connectors ({speech_connect_features.shape[0]}) and acoustic_input_mask sum ({int(acoustic_input_mask.sum().item())})"
+                    )
+            except Exception:
+                pass
+        else:
+            speech_features, speech_connect_features = self.forward_speech_features(
+                speech_tensors=speech_tensors.type_as(x) if speech_tensors is not None else None,
+                speech_masks=speech_masks,
+                speech_type=kwargs.get("speech_type", "audio"),
+            )
+            if speech_tensors is not None:
+                x[acoustic_input_mask] = speech_connect_features
+
+        outputs = self.model(
+            input_ids=None,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=x,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=False,
+            return_dict=return_dict,
+            cache_position=cache_position,
+        )
+
+        hidden_states = outputs.last_hidden_state
+        logits = self.lm_head(hidden_states)
+        # logits = logits.float()
+
+        loss = None
+        if labels is not None:
+            # The custom CE loss with masking is calculated in the training script.
+            # We leave the standard loss calculation here as None.
+            pass
+
+        # --- Diffusion Loss Calculation ---
+        diffusion_loss, speech_len = None, 0  # speech_len stays 0 unless speech latents are diffused below
+        # This block is executed only if we are in a context that involves speech.
+        if speech_tensors is not None and acoustic_loss_mask.sum().item() > 0:
+            # Build conditioning mask from positions whose NEXT token is a speech latent (shift left by 1)
+            cond_mask = torch.zeros_like(acoustic_loss_mask, dtype=torch.bool)
+            cond_mask[:, :-1] = acoustic_loss_mask[:, 1:]
+            cond_mask[:, 0] = False
+            condition_features = hidden_states[cond_mask]
+
+            speech_len, latent_size = speech_features.shape
+            # Sanity check: ensure 1:1 alignment between selected conditions and latents
+            try:
+                assert condition_features.shape[0] == speech_len, (
+                    f"Mismatch: condition_features={condition_features.shape[0]} vs speech_features={speech_len}"
+                )
+            except Exception:
+                pass
+
+            noise = torch.randn(
+                (speech_len * ddpm_batch_mul, latent_size),
+                device=hidden_states.device,
+                dtype=hidden_states.dtype
+            )
+
+            timesteps = torch.multinomial(
+                torch.ones(self.config.diffusion_head_config.ddpm_num_steps),
+                speech_len * ddpm_batch_mul,
+                replacement=True,
+            ).to(hidden_states.device)
+
+            speech_features_repeated = speech_features.repeat_interleave(ddpm_batch_mul, dim=0)
+            condition_features_repeated = condition_features.repeat_interleave(ddpm_batch_mul, dim=0)
+
+            noisy_speech_features = self.model.noise_scheduler.add_noise(
+                speech_features_repeated, noise, timesteps
+            )
+
+            model_output = self.model.prediction_head(
+                noisy_speech_features,
+                timesteps.type_as(x),
+                condition_features_repeated
+            )
+
+            prediction_type = self.config.diffusion_head_config.prediction_type
+            if prediction_type == "epsilon":
+                target_for_loss = noise
+            elif prediction_type == "v_prediction":
+                target_for_loss = self.model.noise_scheduler.get_velocity(
+                    speech_features_repeated, noise, timesteps
+                )
+            else:
+                raise NotImplementedError(f"Prediction type {prediction_type} not implemented")
+
+            diffusion_loss = F.mse_loss(model_output.float(), target_for_loss.float(), reduction='sum')
+            if latent_size > 0 and ddpm_batch_mul > 0:
+                # Normalize by latent dim, number of sampled diffusion steps per latent, and number of speech tokens
+                diffusion_loss = diffusion_loss / latent_size / ddpm_batch_mul / max(speech_len, 1)
+            else:
+                diffusion_loss = torch.tensor(0.0, device=diffusion_loss.device)
+
+        else:
+            # Dummy loss for DDP to work when there are no speech samples in a batch,
+            # but we are in a speech context.
+            diffusion_loss = sum(p.sum() for p in self.model.prediction_head.parameters()) * 0.0
+            diffusion_loss += sum(p.sum() for p in self.model.acoustic_connector.parameters()) * 0.0
+            diffusion_loss += sum(p.sum() for p in self.model.semantic_connector.parameters()) * 0.0
+        # --- End Diffusion Loss Calculation ---
+
+        if not return_dict:
+            output = (logits, speech_len) + outputs.to_tuple()[1:]
+            return (loss, diffusion_loss) + output
+
+        return VibeVoiceCausalLMOutputWithPast(
+            loss=loss,
+            diffusion_loss=diffusion_loss,
+            speech_token_num=speech_len if speech_tensors is not None else 0,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+AutoModel.register(VibeVoiceConfig, VibeVoiceModel)
+AutoModelForCausalLM.register(VibeVoiceConfig, VibeVoiceForConditionalGeneration)
+
+__all__ = [
+    "VibeVoiceModel",
+    "VibeVoicePreTrainedModel",
+    "VibeVoiceForConditionalGeneration",
+    "VibeVoiceCausalLMOutputWithPast",
+    "VibeVoiceGenerationOutput",
+]
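Before moving on to the inference wrapper, here is a small self-contained sketch of the conditioning-mask shift used in the diffusion-loss block above; the tensor values are toy inputs chosen only to show the alignment, not real model data:

import torch

# Positions whose NEXT token is a speech latent provide the diffusion conditioning,
# mirroring the cond_mask construction in VibeVoiceForConditionalGeneration.forward.
acoustic_loss_mask = torch.tensor([[False, False, True, True, False]])

cond_mask = torch.zeros_like(acoustic_loss_mask, dtype=torch.bool)
cond_mask[:, :-1] = acoustic_loss_mask[:, 1:]  # shift left by one position
cond_mask[:, 0] = False                        # position 0 has no predecessor

print(cond_mask)  # tensor([[False,  True,  True, False, False]])
# Each True in cond_mask selects the hidden state immediately preceding a speech
# latent, so the number of selected conditions equals the number of latents,
# which is exactly the 1:1 alignment the sanity check asserts.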
src/vibevoice/modular/modeling_vibevoice_inference.py
ADDED
|
@@ -0,0 +1,715 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from typing import Dict, List, Optional, Tuple, Union, Callable
|
| 3 |
+
from tqdm import tqdm
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
|
| 7 |
+
from transformers.models.auto import AutoModel, AutoModelForCausalLM
|
| 8 |
+
|
| 9 |
+
from transformers.generation import GenerationMixin, GenerationConfig, LogitsProcessor, LogitsProcessorList, StoppingCriteriaList
|
| 10 |
+
from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput
|
| 11 |
+
from transformers import modeling_utils
|
| 12 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 13 |
+
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
|
| 14 |
+
from transformers.utils import logging
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceAcousticTokenizerModel, VibeVoiceSemanticTokenizerModel
|
| 18 |
+
from .modular_vibevoice_tokenizer import VibeVoiceTokenizerStreamingCache, VibeVoiceTokenizerEncoderOutput
|
| 19 |
+
from .modular_vibevoice_diffusion_head import VibeVoiceDiffusionHead
|
| 20 |
+
from vibevoice.schedule.dpm_solver import DPMSolverMultistepScheduler
|
| 21 |
+
|
| 22 |
+
from .configuration_vibevoice import VibeVoiceConfig
|
| 23 |
+
|
| 24 |
+
from .modular_vibevoice_text_tokenizer import VibeVoiceTextTokenizer, VibeVoiceTextTokenizerFast
|
| 25 |
+
|
| 26 |
+
from .modeling_vibevoice import VibeVoiceModel, VibeVoicePreTrainedModel
|
| 27 |
+
from .streamer import AudioStreamer, AsyncAudioStreamer
|
| 28 |
+
|
| 29 |
+
logger = logging.get_logger(__name__)
|
| 30 |
+
|
| 31 |
+
if not hasattr(modeling_utils, "ALL_PARALLEL_STYLES") or modeling_utils.ALL_PARALLEL_STYLES is None:
|
| 32 |
+
modeling_utils.ALL_PARALLEL_STYLES = ["tp", "none", "colwise", "rowwise"]
|
| 33 |
+
|
| 34 |
+
@dataclass
|
| 35 |
+
class VibeVoiceCausalLMOutputWithPast(BaseModelOutputWithPast):
|
| 36 |
+
logits: Optional[torch.FloatTensor] = None
|
| 37 |
+
|
| 38 |
+
@dataclass
|
| 39 |
+
class VibeVoiceGenerationOutput(ModelOutput):
|
| 40 |
+
"""
|
| 41 |
+
Output type for VibeVoice generation.
|
| 42 |
+
|
| 43 |
+
Args:
|
| 44 |
+
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| 45 |
+
The generated sequences.
|
| 46 |
+
speech_outputs (`List[torch.FloatTensor]`, *optional*):
|
| 47 |
+
List of generated speech waveforms or latents for each speech segment.
|
| 48 |
+
"""
|
| 49 |
+
sequences: torch.LongTensor = None
|
| 50 |
+
speech_outputs: Optional[List[torch.FloatTensor]] = None
|
| 51 |
+
reach_max_step_sample: Optional[torch.BoolTensor] = None
|
| 52 |
+
|
| 53 |
+
class VibeVoiceTokenConstraintProcessor(LogitsProcessor):
|
| 54 |
+
"""Constrains token generation to only valid tokens during speech generation."""
|
| 55 |
+
|
| 56 |
+
def __init__(self, valid_token_ids: List[int], device: torch.device = None):
|
| 57 |
+
self.valid_token_ids = torch.tensor(valid_token_ids, dtype=torch.long, device=device)
|
| 58 |
+
|
| 59 |
+
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
|
| 60 |
+
# Create a mask for valid tokens
|
| 61 |
+
mask = torch.full_like(scores, float('-inf'))
|
| 62 |
+
mask[:, self.valid_token_ids] = 0
|
| 63 |
+
|
| 64 |
+
# Apply mask to scores
|
| 65 |
+
scores = scores + mask
|
| 66 |
+
return scores
|
| 67 |
+
|
| 68 |
+
class VibeVoiceForConditionalGenerationInference(VibeVoicePreTrainedModel, GenerationMixin):
|
| 69 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 70 |
+
_tp_plan = {"lm_head": "colwise_rep"}
|
| 71 |
+
|
| 72 |
+
def __init__(self, config):
|
| 73 |
+
super().__init__(config)
|
| 74 |
+
|
| 75 |
+
# Initialize the base model
|
| 76 |
+
self.model = VibeVoiceModel(config)
|
| 77 |
+
|
| 78 |
+
# LM head for text generation
|
| 79 |
+
self.lm_head = nn.Linear(config.decoder_config.hidden_size, config.decoder_config.vocab_size, bias=False)
|
| 80 |
+
|
| 81 |
+
# inference configuration
|
| 82 |
+
self.ddpm_inference_steps = config.diffusion_head_config.ddpm_num_inference_steps
|
| 83 |
+
|
| 84 |
+
# Initialize weights and apply final processing
|
| 85 |
+
self.post_init()
|
| 86 |
+
|
| 87 |
+
@property
|
| 88 |
+
def noise_scheduler(self):
|
| 89 |
+
return self.model.noise_scheduler
|
| 90 |
+
|
| 91 |
+
@property
|
| 92 |
+
def prediction_head(self):
|
| 93 |
+
return self.model.prediction_head
|
| 94 |
+
|
| 95 |
+
@property
|
| 96 |
+
def speech_scaling_factor(self):
|
| 97 |
+
return self.model.speech_scaling_factor
|
| 98 |
+
|
| 99 |
+
@property
|
| 100 |
+
def speech_bias_factor(self):
|
| 101 |
+
return self.model.speech_bias_factor
|
| 102 |
+
|
| 103 |
+
@property
|
| 104 |
+
def acoustic_tokenizer(self):
|
| 105 |
+
return self.model.acoustic_tokenizer
|
| 106 |
+
|
| 107 |
+
@property
|
| 108 |
+
def semantic_tokenizer(self):
|
| 109 |
+
return self.model.semantic_tokenizer
|
| 110 |
+
|
| 111 |
+
@property
|
| 112 |
+
def acoustic_connector(self):
|
| 113 |
+
return self.model.acoustic_connector
|
| 114 |
+
|
| 115 |
+
@property
|
| 116 |
+
def semantic_connector(self):
|
| 117 |
+
return self.model.semantic_connector
|
| 118 |
+
|
| 119 |
+
def tie_weights(self):
|
| 120 |
+
"""
|
| 121 |
+
Tie the weights between the input embeddings and the output embeddings.
|
| 122 |
+
"""
|
| 123 |
+
# Tie lm_head.weight to language_model.embed_tokens.weight
|
| 124 |
+
if not getattr(self.config, 'tie_word_embeddings', False):
|
| 125 |
+
return
|
| 126 |
+
|
| 127 |
+
if hasattr(self, 'lm_head') and hasattr(self.model.language_model, 'embed_tokens'):
|
| 128 |
+
self.lm_head.weight = self.model.language_model.embed_tokens.weight
|
| 129 |
+
|
| 130 |
+
def get_input_embeddings(self):
|
| 131 |
+
return self.model.get_input_embeddings()
|
| 132 |
+
|
| 133 |
+
def set_input_embeddings(self, value):
|
| 134 |
+
self.model.set_input_embeddings(value)
|
| 135 |
+
|
| 136 |
+
def get_output_embeddings(self):
|
| 137 |
+
return self.lm_head
|
| 138 |
+
|
| 139 |
+
def set_output_embeddings(self, new_embeddings):
|
| 140 |
+
self.lm_head = new_embeddings
|
| 141 |
+
|
| 142 |
+
def set_speech_tokenizers(self, acoustic_tokenizer=None, semantic_tokenizer=None):
|
| 143 |
+
"""Set the speech tokenizers used for encoding and decoding speech."""
|
| 144 |
+
self.model.set_speech_tokenizers(acoustic_tokenizer, semantic_tokenizer)
|
| 145 |
+
|
| 146 |
+
def set_ddpm_inference_steps(self, num_steps=None):
|
| 147 |
+
self.ddpm_inference_steps = num_steps or self.config.diffusion_head_config.ddpm_num_inference_steps
|
| 148 |
+
|
| 149 |
+
def _process_speech_inputs(self, speech_tensors, speech_masks, speech_type="audio"):
|
| 150 |
+
"""Process speech inputs through tokenizers and connectors."""
|
| 151 |
+
with torch.no_grad():
|
| 152 |
+
if speech_type == "audio":
|
| 153 |
+
# Encode audio to acoustic latents
|
| 154 |
+
encoder_output = self.model.acoustic_tokenizer.encode(speech_tensors.unsqueeze(1))
|
| 155 |
+
acoustic_latents = encoder_output.sample(dist_type=self.model.acoustic_tokenizer.std_dist_type)[0]
|
| 156 |
+
|
| 157 |
+
# Apply scaling and bias
|
| 158 |
+
acoustic_features = (acoustic_latents + self.model.speech_bias_factor.to(acoustic_latents.device)) * self.model.speech_scaling_factor.to(acoustic_latents.device)
|
| 159 |
+
|
| 160 |
+
# Connect to language model space
|
| 161 |
+
acoustic_connected = self.model.acoustic_connector(acoustic_features)[speech_masks.cpu()]
|
| 162 |
+
|
| 163 |
+
return acoustic_features, acoustic_connected
|
| 164 |
+
elif speech_type == "pt":
|
| 165 |
+
encoder_output = VibeVoiceTokenizerEncoderOutput(mean=speech_tensors, std=self.acoustic_tokenizer.config.fix_std)
|
| 166 |
+
acoustic_latents = encoder_output.sample(dist_type=self.model.acoustic_tokenizer.std_dist_type)[0]
|
| 167 |
+
|
| 168 |
+
# Apply scaling and bias
|
| 169 |
+
acoustic_features = (acoustic_latents + self.model.speech_bias_factor.to(acoustic_latents.device)) * self.model.speech_scaling_factor.to(acoustic_latents.device)
|
| 170 |
+
|
| 171 |
+
# Connect to language model space
|
| 172 |
+
acoustic_connected = self.model.acoustic_connector(acoustic_features)[speech_masks.cpu()]
|
| 173 |
+
|
| 174 |
+
return acoustic_features, acoustic_connected
|
| 175 |
+
else:
|
| 176 |
+
raise NotImplementedError(f"Speech type {speech_type} not implemented")
|
| 177 |
+
|
| 178 |
+
# @can_return_tuple
|
| 179 |
+
def forward(
|
| 180 |
+
self,
|
| 181 |
+
input_ids: torch.LongTensor = None,
|
| 182 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 183 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 184 |
+
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
|
| 185 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 186 |
+
labels: Optional[torch.LongTensor] = None,
|
| 187 |
+
use_cache: Optional[bool] = None,
|
| 188 |
+
output_attentions: Optional[bool] = None,
|
| 189 |
+
output_hidden_states: Optional[bool] = None,
|
| 190 |
+
return_dict: Optional[bool] = None,
|
| 191 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 192 |
+
speech_tensors: Optional[torch.FloatTensor] = None,
|
| 193 |
+
speech_masks: Optional[torch.BoolTensor] = None,
|
| 194 |
+
speech_input_mask: Optional[torch.BoolTensor] = None,
|
| 195 |
+
logits_to_keep: Union[int, slice] = 0,
|
| 196 |
+
**kwargs,
|
| 197 |
+
) -> Union[Tuple, VibeVoiceCausalLMOutputWithPast]:
|
| 198 |
+
"""
|
| 199 |
+
Args:
|
| 200 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 201 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| 202 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| 203 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
| 204 |
+
speech_tensors (`torch.FloatTensor`, *optional*):
|
| 205 |
+
Input speech waveforms for voice cloning or speech understanding.
|
| 206 |
+
speech_masks (`torch.BoolTensor`, *optional*):
|
| 207 |
+
Masks indicating valid speech frames.
|
| 208 |
+
speech_input_mask (`torch.BoolTensor`, *optional*):
|
| 209 |
+
Positions in the input sequence where speech embeddings should be inserted.
|
| 210 |
+
|
| 211 |
+
Returns:
|
| 212 |
+
`VibeVoiceCausalLMOutputWithPast` or tuple
|
| 213 |
+
"""
|
| 214 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 215 |
+
|
| 216 |
+
# Get embeddings
|
| 217 |
+
if inputs_embeds is None:
|
| 218 |
+
inputs_embeds = self.model.get_input_embeddings()(input_ids)
|
| 219 |
+
|
| 220 |
+
# Process speech inputs if provided
|
| 221 |
+
if speech_tensors is not None and speech_masks is not None:
|
| 222 |
+
acoustic_features, speech_embeds = self._process_speech_inputs(speech_tensors.to(self.dtype), speech_masks)
|
| 223 |
+
if speech_input_mask is not None:
|
| 224 |
+
inputs_embeds[speech_input_mask] = speech_embeds
|
| 225 |
+
|
| 226 |
+
outputs = self.model(
|
| 227 |
+
inputs_embeds=inputs_embeds,
|
| 228 |
+
attention_mask=attention_mask,
|
| 229 |
+
position_ids=position_ids,
|
| 230 |
+
past_key_values=past_key_values,
|
| 231 |
+
use_cache=use_cache,
|
| 232 |
+
output_attentions=output_attentions,
|
| 233 |
+
output_hidden_states=output_hidden_states,
|
| 234 |
+
return_dict=return_dict,
|
| 235 |
+
cache_position=cache_position,
|
| 236 |
+
**kwargs,
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
hidden_states = outputs[0] if not return_dict else outputs.last_hidden_state
|
| 240 |
+
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
|
| 241 |
+
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
|
| 242 |
+
logits = self.lm_head(hidden_states[:, slice_indices, :])
|
| 243 |
+
|
| 244 |
+
if labels is not None:
|
| 245 |
+
raise NotImplementedError("Loss computation is not implemented in this version.")
|
| 246 |
+
|
| 247 |
+
return VibeVoiceCausalLMOutputWithPast(
|
| 248 |
+
logits=logits,
|
| 249 |
+
past_key_values=outputs.past_key_values,
|
| 250 |
+
last_hidden_state=hidden_states,
|
| 251 |
+
attentions=outputs.attentions,
|
| 252 |
+
)
|
| 253 |
+
|
| 254 |
+
    def _build_generate_config_model_kwargs(self, generation_config, inputs, tokenizer, return_processors=False, **kwargs):
        if generation_config is None:
            generation_config = GenerationConfig(
                bos_token_id=tokenizer.bos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                pad_token_id=tokenizer.pad_token_id,
            )
        else:
            generation_config = GenerationConfig(
                **generation_config,
                bos_token_id=tokenizer.bos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                pad_token_id=tokenizer.pad_token_id,
            )

        generation_config, model_kwargs = self._prepare_generation_config(
            generation_config,
            True,
            speech_start_id=tokenizer.speech_start_id,
            speech_end_id=tokenizer.speech_end_id,
            speech_diffusion_id=tokenizer.speech_diffusion_id,
            **kwargs
        )
        generation_config.speech_start_id = tokenizer.speech_start_id
        generation_config.speech_end_id = tokenizer.speech_end_id
        generation_config.speech_diffusion_id = tokenizer.speech_diffusion_id

        inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(inputs, generation_config.bos_token_id, model_kwargs)
        batch_size = inputs_tensor.shape[0]
        device = self.device

        self._prepare_special_tokens(generation_config, True, device=device)
        generation_config.use_cache = True
        model_kwargs["use_cache"] = generation_config.use_cache
        input_ids = inputs_tensor.to(self.device)

        input_ids_length = input_ids.shape[1]
        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
        generation_config = self._prepare_generated_length(
            generation_config=generation_config,
            has_default_max_length=has_default_max_length,
            has_default_min_length=has_default_min_length,
            model_input_name=model_input_name,
            inputs_tensor=inputs_tensor,
            input_ids_length=input_ids_length,
        )

        max_cache_length = generation_config.max_length - 1
        self._prepare_cache_for_generation(generation_config, model_kwargs, None, batch_size, max_cache_length, device)
        model_kwargs['cache_position'] = torch.arange(input_ids_length, device=device, dtype=torch.long)
        for k, v in model_kwargs.items():
            if isinstance(v, torch.Tensor):
                model_kwargs[k] = v.to(device=device)

        if return_processors:
            logits_processor = self._get_logits_processor(
                generation_config=generation_config,
                input_ids_seq_length=input_ids_length,
                encoder_input_ids=inputs_tensor,
                prefix_allowed_tokens_fn=None,
                logits_processor=LogitsProcessorList(),
                device=inputs_tensor.device,
                model_kwargs=model_kwargs,
            )

            stopping_criteria = self._get_stopping_criteria(generation_config=generation_config, stopping_criteria=StoppingCriteriaList())

            return generation_config, model_kwargs, input_ids, logits_processor, stopping_criteria
        else:
            return generation_config, model_kwargs, input_ids
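The helper above records the speech marker ids twice: once through `_prepare_generation_config` kwargs, and once pinned as plain attributes so the decoding loop can branch on them without a tokenizer in scope. A minimal sketch of that bookkeeping; the ids below are hypothetical stand-ins, not the shipped VibeVoice vocabulary values:

# Minimal sketch of the special-token bookkeeping above. The ids are
# hypothetical placeholders, not the real vocabulary values.
from transformers import GenerationConfig

gen_cfg = GenerationConfig(bos_token_id=None, eos_token_id=151643, pad_token_id=151643)
gen_cfg.speech_start_id = 151652      # plain attributes survive on the config object
gen_cfg.speech_end_id = 151653
gen_cfg.speech_diffusion_id = 151654
assert gen_cfg.speech_start_id == 151652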
    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
        synced_gpus: Optional[bool] = None,
        assistant_model: Optional["PreTrainedModel"] = None,
        audio_streamer: Optional[Union[AudioStreamer, AsyncAudioStreamer]] = None,
        negative_prompt_ids: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        speech_tensors: Optional[torch.FloatTensor] = None,
        speech_masks: Optional[torch.BoolTensor] = None,
        speech_input_mask: Optional[torch.BoolTensor] = None,
        return_speech: bool = True,
        cfg_scale: float = 1.0,
        stop_check_fn: Optional[Callable[[], bool]] = None,
        **kwargs,
    ) -> Union[torch.LongTensor, VibeVoiceGenerationOutput]:
        """
        Generates sequences of token ids and optionally speech outputs.

        Args:
            All standard generation arguments from GenerationMixin.
            negative_prompt_ids: Negative prompt for CFG in speech generation
            negative_prompt_attention_mask: Attention mask for negative prompt
            speech_tensors: Input speech for voice cloning
            speech_masks: Masks for speech tensors
            speech_input_mask: Positions to insert speech embeddings
            return_speech: Whether to decode and return speech outputs
            cfg_scale: CFG scale for speech generation
            stop_check_fn: Optional callable that returns True if generation should stop

        Returns:
            Generated token sequences and optionally speech outputs
        """
        # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
        tokenizer = kwargs.pop("tokenizer", None)  # Pull this out first; we only use it for stopping criteria
        parsed_scripts = kwargs.pop("parsed_scripts", None)
        all_speakers_list = kwargs.pop("all_speakers_list", None)
        max_length_times = kwargs.pop("max_length_times", 2)

        if kwargs.get('max_new_tokens', None) is None:
            kwargs['max_new_tokens'] = self.config.decoder_config.max_position_embeddings - kwargs['input_ids'].shape[-1]

        generation_config, model_kwargs, input_ids, logits_processor, stopping_criteria = self._build_generate_config_model_kwargs(
            generation_config, inputs, tokenizer, return_processors=True, **kwargs
        )

        negative_kwargs = {
            'input_ids': torch.full((kwargs['input_ids'].shape[0], 1), tokenizer.speech_start_id, dtype=torch.long, device=kwargs['input_ids'].device),
            'attention_mask': torch.ones((kwargs['input_ids'].shape[0], 1), dtype=torch.long, device=kwargs['input_ids'].device),
            'max_new_tokens': kwargs.get('max_new_tokens', 100)
        }
        negative_generation_config, negative_model_kwargs, negative_input_ids = self._build_generate_config_model_kwargs(
            None, None, tokenizer, return_processors=False, **negative_kwargs
        )

        acoustic_cache = VibeVoiceTokenizerStreamingCache()
        semantic_cache = VibeVoiceTokenizerStreamingCache()

        batch_size = input_ids.shape[0]
        device = input_ids.device
        finished_tags = torch.zeros(batch_size, dtype=torch.bool, device=device)
        correct_cnt = torch.zeros(batch_size, dtype=torch.long, device=device)
        is_prefill = True
        inputs_embeds = None
        verbose = kwargs.get("verbose", False)

        # Initialize audio chunk storage for each sample
        audio_chunks = [[] for _ in range(batch_size)]

        initial_length = input_ids.shape[-1]
        initial_length_per_sample = model_kwargs['attention_mask'].sum(dim=-1)

        # Define all valid tokens that can be generated
        valid_tokens = [
            generation_config.speech_start_id,
            generation_config.speech_end_id,
            generation_config.speech_diffusion_id,
            generation_config.eos_token_id,
        ]
        # Add bos_token_id if it exists
        if hasattr(generation_config, 'bos_token_id') and generation_config.bos_token_id is not None:
            valid_tokens.append(generation_config.bos_token_id)

        # Add a custom processor to constrain generation to the valid tokens
        token_constraint_processor = VibeVoiceTokenConstraintProcessor(valid_tokens, device=device)
        if logits_processor is None:
            logits_processor = LogitsProcessorList()
        logits_processor.append(token_constraint_processor)

        max_steps = min(generation_config.max_length - initial_length, int(max_length_times * initial_length))
        max_step_per_sample = torch.min(generation_config.max_length - initial_length_per_sample, (max_length_times * initial_length_per_sample).long())
        reach_max_step_sample = torch.zeros(batch_size, dtype=torch.bool, device=device)

        # Create a progress iterator if requested
        if kwargs.get("show_progress_bar", True):
            progress_bar = tqdm(range(max_steps), desc="Generating", leave=False)
        else:
            progress_bar = range(max_steps)

        for step in progress_bar:
            # Check for an external stop signal
            if stop_check_fn is not None and stop_check_fn():
                if verbose:
                    print(f"Generation stopped externally at step {step + 1}")
                # End the audio streamer if it exists
                if audio_streamer is not None:
                    audio_streamer.end()
                break

            # Check whether the audio_streamer has been ended (stopped externally)
            if audio_streamer is not None and hasattr(audio_streamer, 'finished_flags'):
                if any(audio_streamer.finished_flags):
                    if verbose:
                        print(f"Audio generation stopped externally at step {step + 1}")
                    break

            if finished_tags.all():
                if hasattr(progress_bar, 'set_description'):
                    progress_bar.set_description("Generation complete")
                break

            if input_ids.shape[-1] >= generation_config.max_length:
                print(f"Reached maximum generation length {generation_config.max_length}; stopping generation.")
                reached_samples = torch.arange(batch_size, device=device)[~finished_tags]
                if reached_samples.numel() > 0:
                    reach_max_step_sample[reached_samples] = True
                break

            # Update the progress bar description with the number of active samples
            if hasattr(progress_bar, 'set_description'):
                active_samples = (~finished_tags).sum().item()
                progress_bar.set_description(f"Generating (active: {active_samples}/{batch_size})")

            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
            if is_prefill:
                # We process the speech inputs only during the first generation step
                prefill_inputs = {
                    "speech_tensors": speech_tensors.to(device=device),
                    "speech_masks": speech_masks.to(device),
                    "speech_input_mask": speech_input_mask.to(device),
                }
                is_prefill = False
            else:
                _ = model_inputs.pop('inputs_embeds', None)
                prefill_inputs = {'inputs_embeds': inputs_embeds}

            # Forward pass through the model
            outputs = self(
                **model_inputs, **prefill_inputs, logits_to_keep=1, return_dict=True, output_attentions=False, output_hidden_states=False,
            )
            model_kwargs = self._update_model_kwargs_for_generation(
                outputs, model_kwargs, is_encoder_decoder=False,
            )

            # Get logits and apply the logits processors
            next_token_logits = outputs.logits[:, -1, :].to(copy=True, dtype=torch.float32, device=input_ids.device)
            next_token_scores = logits_processor(input_ids, next_token_logits)

            # Token selection
            if generation_config.do_sample:
                probs = nn.functional.softmax(next_token_scores, dim=-1)
                # TODO (joao): this op throws "skipping cudagraphs due to ['incompatible ops']", find a solution
                next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
            else:
                next_tokens = torch.argmax(next_token_scores, dim=-1)

            next_tokens[finished_tags] = generation_config.eos_token_id
            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)

            if not kwargs.get('refresh_negative', True):
                negative_model_inputs = self.prepare_inputs_for_generation(negative_input_ids, **negative_model_kwargs)
                # Forward negative pass through the model
                if negative_model_inputs['inputs_embeds'] is None and inputs_embeds is not None:
                    negative_model_inputs['inputs_embeds'] = inputs_embeds
                    negative_model_inputs['input_ids'] = None

                negative_outputs = self(
                    **negative_model_inputs, logits_to_keep=0, return_dict=True, output_attentions=False, output_hidden_states=False,
                )
                negative_model_kwargs = self._update_model_kwargs_for_generation(
                    negative_outputs, negative_model_kwargs, is_encoder_decoder=False,
                )
                negative_input_ids = torch.cat([negative_input_ids, next_tokens[:, None]], dim=-1)

            # Reached end of generation
            if (next_tokens == generation_config.eos_token_id).any():
                eos_indices = (next_tokens == generation_config.eos_token_id).nonzero(as_tuple=False).squeeze(1)
                # Only report samples that are newly finished (not already marked as finished)
                new_eos_indices = eos_indices[~finished_tags[eos_indices]]
                if new_eos_indices.numel() > 0:
                    finished_tags[new_eos_indices] = True
                    if verbose:
                        print(f"Samples {new_eos_indices.tolist()} reached EOS token at step {step + 1}.", flush=True)
                    if audio_streamer is not None:
                        audio_streamer.end(new_eos_indices)

            # Check whether any sample reached its per-sample maximum generation length
            max_length_reached = step >= max_step_per_sample
            new_max_length_indices = torch.nonzero(max_length_reached & ~finished_tags, as_tuple=False).squeeze(1)
            if new_max_length_indices.numel() > 0:
                finished_tags[new_max_length_indices] = True
                reach_max_step_sample[new_max_length_indices] = True
                if verbose:
                    print(f"Samples {new_max_length_indices.tolist()} reached max generation length at step {step + 1}.", flush=True)
                if audio_streamer is not None:
                    audio_streamer.end(new_max_length_indices)

            # speech_end
            diffusion_end_indices = (next_tokens == generation_config.speech_end_id).nonzero(as_tuple=False).squeeze(1)
            if diffusion_end_indices.numel() > 0:
                # Clear tokenizer caches for samples that reached speech end
                acoustic_cache.set_to_zero(diffusion_end_indices)
                semantic_cache.set_to_zero(diffusion_end_indices)

            # speech_begin
            diffusion_start_indices = torch.arange(batch_size, device=device)[~finished_tags & (next_tokens == generation_config.speech_start_id)]
            if diffusion_start_indices.numel() > 0 and kwargs.get('refresh_negative', True):
                # Update the attention mask
                for i, sample_idx in enumerate(diffusion_start_indices.tolist()):
                    negative_model_kwargs['attention_mask'][sample_idx, :] = 0
                    negative_model_kwargs['attention_mask'][sample_idx, -1] = 1
                # Update the past key values
                for layer_idx, (k_cache, v_cache) in enumerate(zip(negative_model_kwargs['past_key_values'].key_cache,
                                                                   negative_model_kwargs['past_key_values'].value_cache)):
                    for sample_idx in diffusion_start_indices.tolist():
                        # Shift the cache for this sample
                        k_cache[sample_idx, :, -1, :] = k_cache[sample_idx, :, 0, :].clone()
                        v_cache[sample_idx, :, -1, :] = v_cache[sample_idx, :, 0, :].clone()
                # Update negative_input_ids
                for sample_idx in diffusion_start_indices.tolist():
                    negative_input_ids[sample_idx, -1] = generation_config.speech_start_id

            # Prepare inputs_embeds for the next iteration,
            # initialized with the default embeddings for all tokens
            next_inputs_embeds = self.model.get_input_embeddings()(next_tokens).unsqueeze(1)  # [batch_size, 1, hidden_size]

            # Forward diffusion: diffusion indices are the samples that are not
            # finished and whose next token is the speech diffusion token
            diffusion_indices = torch.arange(batch_size, device=device)[~finished_tags & (next_tokens == generation_config.speech_diffusion_id)]

            if diffusion_indices.numel() > 0:
                if kwargs.get('refresh_negative', True):
                    negative_model_inputs = self.prepare_inputs_for_generation(negative_input_ids, **negative_model_kwargs)
                    # Forward negative pass through the model
                    if negative_model_inputs['inputs_embeds'] is None and inputs_embeds is not None:
                        negative_model_inputs['inputs_embeds'] = inputs_embeds
                        negative_model_inputs['input_ids'] = None

                    negative_outputs = self(
                        **negative_model_inputs, logits_to_keep=0, return_dict=True, output_attentions=False, output_hidden_states=False,
                    )
                    negative_model_kwargs = self._update_model_kwargs_for_generation(
                        negative_outputs, negative_model_kwargs, is_encoder_decoder=False,
                    )
                    negative_input_ids = torch.cat([negative_input_ids, next_tokens[:, None]], dim=-1)
                    # Correct the non-diffusion samples: we forward the negative pass for
                    # all samples, even those not in diffusion mode, to keep the cache
                    # consistent, so we must roll back the KV cache of non-diffusion samples.
                    non_diffusion_mask = ~finished_tags & (next_tokens != generation_config.speech_diffusion_id)
                    if non_diffusion_mask.any():
                        non_diffusion_indices = torch.arange(batch_size, device=device)[non_diffusion_mask]
                        start_indices = correct_cnt[non_diffusion_indices]

                        # 1. Update attention_mask - each sample is handled separately
                        seq_len = negative_model_kwargs['attention_mask'].shape[1]
                        for i, (sample_idx, start_idx) in enumerate(zip(non_diffusion_indices.tolist(), start_indices.tolist())):
                            # Shift the attention mask for this sample
                            if start_idx + 1 < seq_len - 1:
                                negative_model_kwargs['attention_mask'][sample_idx, start_idx+1:] = \
                                    negative_model_kwargs['attention_mask'][sample_idx, start_idx:-1].clone()
                                negative_model_kwargs['attention_mask'][sample_idx, start_idx] = 0

                        # 2. Update past_key_values
                        for layer_idx, (k_cache, v_cache) in enumerate(zip(negative_model_kwargs['past_key_values'].key_cache,
                                                                           negative_model_kwargs['past_key_values'].value_cache)):
                            # Process each non-diffusion sample
                            for sample_idx, start_idx in zip(non_diffusion_indices.tolist(), start_indices.tolist()):
                                if start_idx + 1 < k_cache.shape[2] - 1:
                                    # Shift the cache for this sample
                                    k_cache[sample_idx, :, start_idx+1:, :] = k_cache[sample_idx, :, start_idx:-1, :].clone()
                                    v_cache[sample_idx, :, start_idx+1:, :] = v_cache[sample_idx, :, start_idx:-1, :].clone()

                        # 3. Update negative_input_ids
                        for sample_idx, start_idx in zip(non_diffusion_indices.tolist(), start_indices.tolist()):
                            if start_idx + 1 < negative_input_ids.shape[1] - 1:
                                negative_input_ids[sample_idx, start_idx+1:] = \
                                    negative_input_ids[sample_idx, start_idx:-1].clone()

                        correct_cnt[non_diffusion_indices] += 1

                positive_condition = outputs.last_hidden_state[diffusion_indices, -1, :]
                negative_condition = negative_outputs.last_hidden_state[diffusion_indices, -1, :]

                speech_latent = self.sample_speech_tokens(
                    positive_condition,
                    negative_condition,
                    cfg_scale=cfg_scale,
                ).unsqueeze(1)

                # Decode the acoustic latent to audio using the acoustic streaming cache
                scaled_latent = speech_latent / self.model.speech_scaling_factor.to(speech_latent.device) - self.model.speech_bias_factor.to(speech_latent.device)
                audio_chunk = self.model.acoustic_tokenizer.decode(
                    scaled_latent.to(self.model.acoustic_tokenizer.device),
                    cache=acoustic_cache,  # Use the acoustic-specific cache
                    sample_indices=diffusion_indices.to(self.model.acoustic_tokenizer.device),
                    use_cache=True,
                    debug=False
                )

                # Store audio chunks for each sample
                for i, sample_idx in enumerate(diffusion_indices):
                    idx = sample_idx.item()
                    # Only append the audio chunk if the sample is not finished
                    if not finished_tags[idx]:
                        audio_chunks[idx].append(audio_chunk[i])

                # Stream the audio chunks immediately if a streamer is attached
                if audio_streamer is not None:
                    audio_streamer.put(audio_chunk, diffusion_indices)

                # Encode the audio to semantic features using the semantic streaming cache
                semantic_features = self.model.semantic_tokenizer.encode(
                    audio_chunk,
                    cache=semantic_cache,  # Use the semantic-specific cache
                    sample_indices=diffusion_indices,
                    use_cache=True,
                    debug=False
                ).mean  # the semantic tokenizer has no VAE

                # Combine acoustic and semantic features for the next input
                acoustic_embed = self.model.acoustic_connector(speech_latent)
                semantic_embed = self.model.semantic_connector(semantic_features)
                diffusion_embeds = acoustic_embed + semantic_embed

                # Update embeddings for the diffusion indices
                next_inputs_embeds[diffusion_indices] = diffusion_embeds

            # Set inputs_embeds for the next iteration
            inputs_embeds = next_inputs_embeds

        if audio_streamer is not None:
            audio_streamer.end()

        # Concatenate the audio chunks for each sample
        final_audio_outputs = []
        for sample_chunks in audio_chunks:
            if sample_chunks:
                # Concatenate all chunks along the time dimension (assumed to be the last dimension)
                concatenated_audio = torch.cat(sample_chunks, dim=-1)
                final_audio_outputs.append(concatenated_audio)
            else:
                # If no audio was generated for this sample, append None
                final_audio_outputs.append(None)

        return VibeVoiceGenerationOutput(
            sequences=input_ids,
            speech_outputs=final_audio_outputs if return_speech else None,
            reach_max_step_sample=reach_max_step_sample,
        )
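For orientation, a hedged usage sketch of the `generate` method above; the checkpoint path, the processor call signature, and the input keys are assumptions for illustration, not verified against this repo:

# Hedged usage sketch; the checkpoint path and processor call are illustrative.
import torch
from vibevoice.modular.modeling_vibevoice_inference import VibeVoiceForConditionalGenerationInference
from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor  # assumed module path

model = VibeVoiceForConditionalGenerationInference.from_pretrained(
    "path/to/vibevoice-checkpoint", torch_dtype=torch.bfloat16
)
processor = VibeVoiceProcessor.from_pretrained("path/to/vibevoice-checkpoint")

inputs = processor(text=["Speaker 1: Hello there."], voice_samples=[["ref_voice.wav"]], return_tensors="pt")
out = model.generate(
    **inputs,
    tokenizer=processor.tokenizer,
    cfg_scale=1.3,
    return_speech=True,
)
audio = out.speech_outputs[0]  # concatenated audio chunks for the first sample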
    @torch.no_grad()
    def sample_speech_tokens(self, condition, neg_condition, cfg_scale=3.0):
        self.model.noise_scheduler.set_timesteps(self.ddpm_inference_steps)
        condition = torch.cat([condition, neg_condition], dim=0).to(self.model.prediction_head.device)
        speech = torch.randn(condition.shape[0], self.config.acoustic_vae_dim).to(condition)
        for t in self.model.noise_scheduler.timesteps:
            half = speech[: len(speech) // 2]
            combined = torch.cat([half, half], dim=0)
            eps = self.model.prediction_head(combined, t.repeat(combined.shape[0]).to(combined), condition=condition)
            cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
            half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)
            eps = torch.cat([half_eps, half_eps], dim=0)
            speech = self.model.noise_scheduler.step(eps, t, speech).prev_sample
        return speech[: len(speech) // 2]

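The classifier-free guidance blend in `sample_speech_tokens` is `uncond + cfg_scale * (cond - uncond)`, computed once per denoising step on a doubled batch. A standalone check of the arithmetic (shapes are illustrative): at `cfg_scale = 1.0` the blend is exactly the conditional prediction, and larger scales push away from the unconditional one.

# Standalone check of the CFG blend used above.
import torch

cond_eps = torch.randn(4, 64)
uncond_eps = torch.randn(4, 64)

def cfg_blend(cond, uncond, s):
    return uncond + s * (cond - uncond)

assert torch.allclose(cfg_blend(cond_eps, uncond_eps, 1.0), cond_eps)
print(cfg_blend(cond_eps, uncond_eps, 3.0).shape)  # torch.Size([4, 64])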
AutoModelForCausalLM.register(VibeVoiceConfig, VibeVoiceForConditionalGenerationInference)

__all__ = [
    "VibeVoiceForConditionalGenerationInference",
]
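`AutoModelForCausalLM.register` adds an in-process mapping from `VibeVoiceConfig` to this class, so once the module has been imported the Auto API can resolve a VibeVoice checkpoint. A hedged sketch; the path is a placeholder:

# Hedged sketch: importing the module triggers the register() call above.
from transformers import AutoModelForCausalLM
import vibevoice.modular.modeling_vibevoice_inference  # noqa: F401

model = AutoModelForCausalLM.from_pretrained("path/to/vibevoice-checkpoint")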
src/vibevoice/modular/modular_vibevoice_diffusion_head.py
ADDED
@@ -0,0 +1,287 @@
import math
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers.models.auto import AutoModel
from transformers.modeling_utils import PreTrainedModel
# from transformers.modeling_layers import GradientCheckpointingLayer
from transformers.activations import ACT2FN
from transformers.utils import logging

from .configuration_vibevoice import VibeVoiceDiffusionHeadConfig


logger = logging.get_logger(__name__)


class RMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6, elementwise_affine=True, memory_efficient=False):
        super().__init__()
        self.dim = dim
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            self.weight = nn.Parameter(torch.ones(dim))
        else:
            self.register_parameter('weight', None)

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float()).type_as(x)
        if self.weight is not None:
            output = output * self.weight
        return output

    def extra_repr(self) -> str:
        return f'dim={self.dim}, eps={self.eps}, elementwise_affine={self.elementwise_affine}'


def modulate(x, shift, scale):
    """Apply an adaLN-style affine modulation: x * (1 + scale) + shift."""
    return x * (1 + scale) + shift

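`modulate` is the standard adaLN affine: with zero shift and scale it is the identity, which matters because the modulation projections in this file are zero-initialized. A quick check:

# modulate(x, 0, 0) == x: the zero-initialized adaLN projections below make
# every modulated block start out as an identity map.
import torch

x = torch.randn(2, 8)
zero = torch.zeros_like(x)
assert torch.equal(x * (1 + zero) + zero, x)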
class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.

    Args:
        hidden_size (`int`): Size of the output embedding
        frequency_embedding_size (`int`, optional): Size of the intermediate frequency embedding
    """
    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=False),
            # nn.SiLU(),
            ACT2FN['silu'],
            nn.Linear(hidden_size, hidden_size, bias=False),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.

        Args:
            t (`torch.Tensor`): A 1-D Tensor of N indices, one per batch element.
                These may be fractional.
            dim (`int`): The dimension of the output.
            max_period (`int`, optional): Controls the minimum frequency of the embeddings.

        Returns:
            `torch.Tensor`: An [N, D] Tensor of positional embeddings.
        """
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        ).to(t.device)
        args = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding.to(t.dtype)

    def forward(self, t):
        t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
        t_emb = self.mlp(t_freq)
        return t_emb

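`timestep_embedding` is the usual sinusoidal scheme: half the channels are cosines and half sines of `t` scaled by geometrically spaced frequencies. A standalone reproduction for dim=8:

# Standalone reproduction of timestep_embedding above for dim=8.
import math
import torch

t = torch.tensor([0.0, 1.0, 250.0])
dim, max_period = 8, 10000
half = dim // 2
freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
args = t[:, None] * freqs[None]
emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
print(emb.shape)  # torch.Size([3, 8])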
class FeedForwardNetwork(nn.Module):
    """
    Standard feed-forward network with SwiGLU activation.

    Args:
        embed_dim (`int`): Input dimension
        ffn_dim (`int`): Hidden dimension
    """
    def __init__(
        self,
        embed_dim,
        ffn_dim,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.gate_proj = nn.Linear(self.embed_dim, ffn_dim, bias=False)
        self.up_proj = nn.Linear(self.embed_dim, ffn_dim, bias=False)
        self.down_proj = nn.Linear(ffn_dim, self.embed_dim, bias=False)
        self.act_fn = ACT2FN['silu']  # SiLU is the activation used in SwiGLU

    def forward(self, x):
        gate = self.gate_proj(x)
        up = self.up_proj(x)

        # SwiGLU activation
        # gate = F.silu(gate)
        gate = self.act_fn(gate)
        return self.down_proj(gate * up)

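The forward pass above is SwiGLU: `down_proj(silu(gate_proj(x)) * up_proj(x))`. The same computation with bare functional calls (dimensions are illustrative):

# Equivalent one-liner for the SwiGLU computation in FeedForwardNetwork.
import torch
import torch.nn.functional as F

x = torch.randn(2, 32)
w_gate, w_up = torch.randn(64, 32), torch.randn(64, 32)
w_down = torch.randn(32, 64)
out = F.linear(F.silu(F.linear(x, w_gate)) * F.linear(x, w_up), w_down)
print(out.shape)  # torch.Size([2, 32])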
class HeadLayer(nn.Module):
    """
    A layer in the diffusion head.

    Args:
        embed_dim (`int`): Input dimension
        ffn_dim (`int`): Hidden dimension
        cond_dim (`int`): Condition embedding dimension
        norm_eps (`float`, optional): Epsilon for normalization
    """
    def __init__(
        self,
        embed_dim,
        ffn_dim,
        cond_dim,
        norm_eps=1e-5,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.cond_dim = cond_dim
        self.ffn_dim = ffn_dim
        self.ffn = FeedForwardNetwork(
            self.embed_dim,
            self.ffn_dim,
        )
        self.norm = RMSNorm(self.embed_dim, eps=norm_eps)
        self.adaLN_modulation = nn.Sequential(
            # nn.SiLU(),
            ACT2FN['silu'],
            nn.Linear(cond_dim, 3 * self.embed_dim, bias=False)
        )

    def forward(self, x, c):
        shift_ffn, scale_ffn, gate_ffn = self.adaLN_modulation(c).chunk(3, dim=-1)
        x = x + gate_ffn * self.ffn(modulate(self.norm(x), shift_ffn, scale_ffn))
        return x

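The residual update `x + gate_ffn * ffn(...)` contributes nothing when the gate is zero; `initialize_weights` in the head class below zeroes the adaLN projection precisely so that training starts from this identity map. A toy check:

# Gated residual: with gate == 0 the layer passes x through unchanged.
import torch

x = torch.randn(2, 16)
ffn_out = torch.randn(2, 16)
gate = torch.zeros(2, 16)
assert torch.equal(x + gate * ffn_out, x)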
class FinalLayer(nn.Module):
    """
    Final layer in the diffusion head.

    Args:
        hidden_size (`int`): Input dimension
        output_size (`int`): Output dimension
        cond_size (`int`): Condition embedding dimension
        norm_eps (`float`, optional): Epsilon for normalization
    """
    def __init__(self, hidden_size, output_size, cond_size, norm_eps=1e-5):
        super().__init__()
        self.norm_final = RMSNorm(hidden_size, eps=norm_eps, elementwise_affine=False)
        self.linear = nn.Linear(hidden_size, output_size, bias=False)
        self.adaLN_modulation = nn.Sequential(
            # nn.SiLU(),
            ACT2FN['silu'],
            nn.Linear(cond_size, 2 * hidden_size, bias=False)
        )

    def forward(self, x, c):
        shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1)
        x = modulate(self.norm_final(x), shift, scale)
        x = self.linear(x)
        return x

class VibeVoiceDiffusionHead(PreTrainedModel):
    """
    Diffusion head model for vibevoice.

    Args:
        config (`VibeVoiceDiffusionHeadConfig`): Model configuration
        latent_size (`int`, optional): Size of the latent space. If not provided, uses `config.latent_size`.
    """
    config_class = VibeVoiceDiffusionHeadConfig
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def __init__(
        self,
        config,
    ):
        super().__init__(config)
        self.config = config
        self.cond_dim = config.hidden_size
        latent_size = config.latent_size

        self.noisy_images_proj = nn.Linear(latent_size, config.hidden_size, bias=False)
        self.cond_proj = nn.Linear(config.hidden_size, self.cond_dim, bias=False)
        self.t_embedder = TimestepEmbedder(self.cond_dim)

        ffn_dim = int(config.hidden_size * config.head_ffn_ratio)

        # Create the intermediate layers
        self.layers = nn.ModuleList([
            HeadLayer(
                embed_dim=config.hidden_size,
                ffn_dim=ffn_dim,
                cond_dim=self.cond_dim,
                norm_eps=config.rms_norm_eps
            )
            for _ in range(config.head_layers)
        ])

        # Final layer for output
        self.final_layer = FinalLayer(
            hidden_size=config.hidden_size,
            output_size=latent_size,
            cond_size=self.cond_dim,
            norm_eps=config.rms_norm_eps
        )

        self.initialize_weights()

    def initialize_weights(self):
        """Initialize the weights of the model."""
        # Initialize the timestep embedder
        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

        # Zero out the adaLN modulation layers
        for layer in self.layers:
            nn.init.constant_(layer.adaLN_modulation[-1].weight, 0)

        # Zero out the output layers
        nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0)
        nn.init.constant_(self.final_layer.linear.weight, 0)

    def forward(
        self,
        noisy_images,
        timesteps,
        condition,
    ):
        """
        Forward pass of the prediction head.

        Args:
            noisy_images (`torch.Tensor`): Noisy images/latents to denoise
            timesteps (`torch.Tensor`): Timesteps for diffusion
            condition (`torch.Tensor`): Conditioning information

        Returns:
            `torch.Tensor`: The predicted noise/velocity
        """
        x = self.noisy_images_proj(noisy_images)
        t = self.t_embedder(timesteps)
        condition = self.cond_proj(condition)
        c = condition + t

        for layer in self.layers:
            x = layer(x, c)

        x = self.final_layer(x, c)
        return x


AutoModel.register(VibeVoiceDiffusionHeadConfig, VibeVoiceDiffusionHead)

__all__ = [
    "VibeVoiceDiffusionHead",
]
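A hedged smoke test of the head's forward pass; the config sizes are small illustrative numbers, and whether `VibeVoiceDiffusionHeadConfig` exposes exactly these keyword arguments is an assumption inferred from the attributes used above:

# Hedged smoke test; config sizes are illustrative and the exact
# VibeVoiceDiffusionHeadConfig kwargs are assumed from the attributes used above.
import torch
from vibevoice.modular.configuration_vibevoice import VibeVoiceDiffusionHeadConfig
from vibevoice.modular.modular_vibevoice_diffusion_head import VibeVoiceDiffusionHead

config = VibeVoiceDiffusionHeadConfig(hidden_size=128, head_layers=2, latent_size=64)
head = VibeVoiceDiffusionHead(config)

noisy = torch.randn(4, 64)             # noisy acoustic latents
steps = torch.randint(0, 1000, (4,))   # diffusion timesteps
cond = torch.randn(4, 128)             # conditioning from the LLM hidden state
pred = head(noisy, steps, cond)
print(pred.shape)  # torch.Size([4, 64]); all zeros at init due to the zeroed final layer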
src/vibevoice/modular/modular_vibevoice_text_tokenizer.py
ADDED
@@ -0,0 +1,214 @@
"""Tokenization classes for vibevoice."""
|
| 2 |
+
|
| 3 |
+
from typing import List, Optional, Union
|
| 4 |
+
|
| 5 |
+
from transformers.utils import logging
|
| 6 |
+
from transformers.models.qwen2.tokenization_qwen2 import Qwen2Tokenizer
|
| 7 |
+
from transformers.models.qwen2.tokenization_qwen2_fast import Qwen2TokenizerFast
|
| 8 |
+
|
| 9 |
+
logger = logging.get_logger(__name__)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class VibeVoiceTextTokenizer(Qwen2Tokenizer):
|
| 13 |
+
"""
|
| 14 |
+
Construct a VibeVoice tokenizer. Based on the Qwen2 tokenizer with additional special tokens for speech.
|
| 15 |
+
|
| 16 |
+
Args:
|
| 17 |
+
vocab_file (`str`):
|
| 18 |
+
Path to the vocabulary file.
|
| 19 |
+
merges_file (`str`):
|
| 20 |
+
Path to the merges file.
|
| 21 |
+
errors (`str`, *optional*, defaults to `"replace"`):
|
| 22 |
+
Paradigm to follow when decoding bytes to UTF-8.
|
| 23 |
+
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
|
| 24 |
+
The unknown token.
|
| 25 |
+
bos_token (`str`, *optional*):
|
| 26 |
+
The beginning of sequence token. Not used for vibevoice.
|
| 27 |
+
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
|
| 28 |
+
The end of sequence token.
|
| 29 |
+
pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
|
| 30 |
+
The token used for padding.
|
| 31 |
+
add_special_tokens (`bool`, *optional*, defaults to `True`):
|
| 32 |
+
Whether or not to add special tokens when encoding.
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
model_input_names = ["input_ids", "attention_mask"]
|
| 36 |
+
|
| 37 |
+
def __init__(
|
| 38 |
+
self,
|
| 39 |
+
vocab_file,
|
| 40 |
+
merges_file,
|
| 41 |
+
errors="replace",
|
| 42 |
+
unk_token="<|endoftext|>",
|
| 43 |
+
bos_token=None,
|
| 44 |
+
eos_token="<|endoftext|>",
|
| 45 |
+
pad_token="<|endoftext|>",
|
| 46 |
+
add_prefix_space=False,
|
| 47 |
+
add_special_tokens=True,
|
| 48 |
+
**kwargs,
|
| 49 |
+
):
|
| 50 |
+
super().__init__(
|
| 51 |
+
vocab_file=vocab_file,
|
| 52 |
+
merges_file=merges_file,
|
| 53 |
+
errors=errors,
|
| 54 |
+
unk_token=unk_token,
|
| 55 |
+
bos_token=bos_token,
|
| 56 |
+
eos_token=eos_token,
|
| 57 |
+
pad_token=pad_token,
|
| 58 |
+
add_prefix_space=add_prefix_space,
|
| 59 |
+
add_special_tokens=add_special_tokens,
|
| 60 |
+
**kwargs,
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
+
# Add VibeVoice-specific special tokens
|
| 64 |
+
self._add_vibevoice_special_tokens()
|
| 65 |
+
|
| 66 |
+
def _add_vibevoice_special_tokens(self):
|
| 67 |
+
"""Add VibeVoice-specific special tokens."""
|
| 68 |
+
special_tokens = {
|
| 69 |
+
"additional_special_tokens": [
|
| 70 |
+
"<|vision_start|>", # Speech start (reusing vision tokens)
|
| 71 |
+
"<|vision_end|>", # Speech end
|
| 72 |
+
"<|vision_pad|>", # Speech diffusion pad
|
| 73 |
+
]
|
| 74 |
+
}
|
| 75 |
+
num_added = self.add_special_tokens(special_tokens)
|
| 76 |
+
|
| 77 |
+
# Cache special token IDs
|
| 78 |
+
self._speech_start_id = self.convert_tokens_to_ids("<|vision_start|>")
|
| 79 |
+
self._speech_end_id = self.convert_tokens_to_ids("<|vision_end|>")
|
| 80 |
+
self._speech_diffusion_id = self.convert_tokens_to_ids("<|vision_pad|>")
|
| 81 |
+
|
| 82 |
+
self._eos_id = self.convert_tokens_to_ids('<|endoftext|>')
|
| 83 |
+
|
| 84 |
+
return num_added
|
| 85 |
+
|
| 86 |
+
@property
|
| 87 |
+
def eos_id(self) -> int:
|
| 88 |
+
"""Id of the end of sequence token."""
|
| 89 |
+
return self._eos_id
|
| 90 |
+
|
| 91 |
+
@property
|
| 92 |
+
def speech_start_id(self) -> int:
|
| 93 |
+
"""Id of the speech start token."""
|
| 94 |
+
return self._speech_start_id
|
| 95 |
+
|
| 96 |
+
@property
|
| 97 |
+
def speech_end_id(self) -> int:
|
| 98 |
+
"""Id of the speech end token."""
|
| 99 |
+
return self._speech_end_id
|
| 100 |
+
|
| 101 |
+
@property
|
| 102 |
+
def speech_diffusion_id(self) -> int:
|
| 103 |
+
"""Id of the speech diffusion token."""
|
| 104 |
+
return self._speech_diffusion_id
|
| 105 |
+
|
| 106 |
+
@property
|
| 107 |
+
def pad_id(self) -> int:
|
| 108 |
+
"""Id used for padding (returns -100 for loss masking)."""
|
| 109 |
+
return -100
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
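Both tokenizer classes in this file repurpose Qwen2's existing vision tokens as speech markers, so no new vocabulary rows are added and the embedding table keeps its size. A hedged sketch of the id lookup; "Qwen/Qwen2.5-1.5B" is an illustrative base model, not necessarily the one this repo trains on:

# Hedged sketch: the three vision tokens already exist in the Qwen2.5 vocab,
# so convert_tokens_to_ids resolves them without growing the embedding table.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-1.5B")
for name in ("<|vision_start|>", "<|vision_end|>", "<|vision_pad|>"):
    print(name, tok.convert_tokens_to_ids(name))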
class VibeVoiceTextTokenizerFast(Qwen2TokenizerFast):
    """
    Construct a "fast" VibeVoice tokenizer (backed by HuggingFace's *tokenizers* library).
    Based on the Qwen2 tokenizer with additional special tokens for speech.

    Args:
        vocab_file (`str`, *optional*):
            Path to the vocabulary file.
        merges_file (`str`, *optional*):
            Path to the merges file.
        tokenizer_file (`str`, *optional*):
            Path to [tokenizers](https://github.com/huggingface/tokenizers) file.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token.
        bos_token (`str`, *optional*):
            The beginning of sequence token. Not used for vibevoice.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding.
    """

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token=None,
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Add VibeVoice-specific special tokens
        self._add_vibevoice_special_tokens()

    def _add_vibevoice_special_tokens(self):
        """Add VibeVoice-specific special tokens."""
        special_tokens = {
            "additional_special_tokens": [
                "<|vision_start|>",  # Speech start (reusing vision tokens)
                "<|vision_end|>",    # Speech end
                "<|vision_pad|>",    # Speech diffusion pad
            ]
        }
        num_added = self.add_special_tokens(special_tokens)

        # Cache special token IDs
        self._speech_start_id = self.convert_tokens_to_ids("<|vision_start|>")
        self._speech_end_id = self.convert_tokens_to_ids("<|vision_end|>")
        self._speech_diffusion_id = self.convert_tokens_to_ids("<|vision_pad|>")

        # self._eos_id = self.convert_tokens_to_ids('<|endoftext|>')
        self._eos_id = self.eos_token_id  # qwen2 / qwen3
        self._pad_id = self.convert_tokens_to_ids('<|image_pad|>')

        return num_added

    @property
    def eos_id(self) -> int:
        """Id of the end of sequence token."""
        return self._eos_id

    @property
    def speech_start_id(self) -> int:
        """Id of the speech start token."""
        return self._speech_start_id

    @property
    def speech_end_id(self) -> int:
        """Id of the speech end token."""
        return self._speech_end_id

    @property
    def speech_diffusion_id(self) -> int:
        """Id of the speech diffusion token."""
        return self._speech_diffusion_id

    @property
    def pad_id(self) -> int:
        """Id used for padding (the `<|image_pad|>` token id)."""
        return self._pad_id


__all__ = [
    "VibeVoiceTextTokenizer",
    "VibeVoiceTextTokenizerFast",
]
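A hedged usage sketch for the fast tokenizer above; the checkpoint path is a placeholder:

# Hedged usage sketch; the checkpoint path is a placeholder.
from vibevoice.modular.modular_vibevoice_text_tokenizer import VibeVoiceTextTokenizerFast

tok = VibeVoiceTextTokenizerFast.from_pretrained("path/to/vibevoice-checkpoint")
print(tok.speech_start_id, tok.speech_end_id, tok.speech_diffusion_id)
ids = tok("Speaker 1: Hello.", return_tensors="pt").input_ids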
src/vibevoice/modular/modular_vibevoice_tokenizer.py
ADDED
@@ -0,0 +1,1195 @@
import math
import typing as tp
from functools import partial
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple, Union
import copy
import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers.models.auto import AutoModel
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
from transformers.modeling_utils import PreTrainedModel
from transformers.activations import ACT2FN

from .configuration_vibevoice import VibeVoiceAcousticTokenizerConfig, VibeVoiceSemanticTokenizerConfig

logger = logging.get_logger(__name__)

# Try to import the APEX FusedRMSNorm
try:
    from apex.normalization.fused_layer_norm import fused_rms_norm_affine
    APEX_AVAILABLE = True
    logger.info("APEX FusedRMSNorm is available and will be used for optimization")
    if int(os.getenv("OPTIMIZE_FOR_SPEED", "0")) == 0:
        APEX_AVAILABLE = False
        logger.warning("APEX FusedRMSNorm is disabled by environment variable OPTIMIZE_FOR_SPEED=0")
except ImportError:
    APEX_AVAILABLE = False
    logger.warning("APEX FusedRMSNorm not available, using native implementation")
# APEX_AVAILABLE = False

# Normalization modules
class ConvLayerNorm(nn.LayerNorm):
    """
    Convolution-friendly LayerNorm that moves channels to the last dimension
    before running the normalization, and moves them back right after.
    """
    def __init__(self, normalized_shape: tp.Union[int, tp.List[int], torch.Size], **kwargs):
        super().__init__(normalized_shape, **kwargs)

    def forward(self, x):
        x = x.transpose(1, 2)  # b ... t -> b t ...
        x = nn.functional.layer_norm(x.float(), self.normalized_shape, self.weight.float(), self.bias.float(), self.eps).type_as(x)
        x = x.transpose(1, 2)  # b t ... -> b ... t
        return x


class RMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-5, elementwise_affine=True, weight_shape=None):
        super().__init__()
        self.dim = dim
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if self.elementwise_affine:
            weight_shape = (dim,) if weight_shape is None else weight_shape
            self.weight = nn.Parameter(torch.ones(weight_shape))
        else:
            self.register_parameter('weight', None)

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float()).type_as(x)
        if self.weight is not None:
            output = output * self.weight
        return output

    def extra_repr(self) -> str:
        return f'dim={self.dim}, eps={self.eps}, elementwise_affine={self.elementwise_affine}'


class ConvRMSNorm(RMSNorm):
    def __init__(self, dim: int, eps: float = 1e-5, elementwise_affine=True, weight_shape=None):
        super().__init__(dim, eps, elementwise_affine, weight_shape)

    def forward(self, x):
        x = x.transpose(1, 2)  # b ... t -> b t ...
        if (not APEX_AVAILABLE) or (not self.elementwise_affine):
            # Fall back to the native implementation
            output = self._norm(x.float()).type_as(x)
            if self.weight is not None:
                output = output * self.weight
        else:
            output = fused_rms_norm_affine(x, self.weight, self.weight.shape, self.eps)
        output = output.transpose(1, 2)  # b t ... -> b ... t
        return output

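A quick numeric check of the RMS normalization these classes implement: after `x * rsqrt(mean(x^2) + eps)`, every vector along the normalized dimension has an RMS of approximately 1, regardless of the input scale.

# Numeric check of the RMS normalization used above.
import torch

x = torch.randn(2, 10, 16) * 5.0
eps = 1e-5
y = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
rms = y.pow(2).mean(-1).sqrt()
print(torch.allclose(rms, torch.ones_like(rms), atol=1e-3))  # True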
# Convolutional layers and utilities
CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm',
                                 'time_layer_norm', 'layer_norm', 'time_group_norm'])


def apply_parametrization_norm(module: nn.Module, norm: str = 'none') -> nn.Module:
    assert norm in CONV_NORMALIZATIONS
    if norm == 'weight_norm':
        return nn.utils.weight_norm(module)
    elif norm == 'spectral_norm':
        return nn.utils.spectral_norm(module)
    else:
        # We already checked that norm is in CONV_NORMALIZATIONS, so any other
        # choice doesn't need reparametrization.
        return module


def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs) -> nn.Module:
    """Return the proper normalization module. If causal is True, this will ensure the returned
    module is causal, or raise an error if the normalization doesn't support causal evaluation.
    """
    assert norm in CONV_NORMALIZATIONS
    if norm == 'layer_norm':
        assert isinstance(module, nn.modules.conv._ConvNd)
        return ConvLayerNorm(module.out_channels, **norm_kwargs)
    elif norm == 'time_group_norm':
        if causal:
            raise ValueError("GroupNorm doesn't support causal evaluation.")
        assert isinstance(module, nn.modules.conv._ConvNd)
        return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
    else:
        return nn.Identity()

def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int,
                                 padding_total: int = 0) -> int:
    """Calculate the extra padding needed so the convolution windows cover the full input length."""
    length = x.shape[-1]
    n_frames = (length - kernel_size + padding_total) / stride + 1
    ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total)
    return ideal_length - length


def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'zero', value: float = 0.):
    """Pad a 1D input, with special handling for inputs shorter than the padding in reflect mode."""
    length = x.shape[-1]
    padding_left, padding_right = paddings
    assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
    if mode == 'reflect':
        max_pad = max(padding_left, padding_right)
        extra_pad = 0
        if length <= max_pad:
            extra_pad = max_pad - length + 1
            x = F.pad(x, (0, extra_pad))
        padded = F.pad(x, paddings, mode, value)
        end = padded.shape[-1] - extra_pad
        return padded[..., :end]
    else:
        return F.pad(x, paddings, mode, value)


def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]):
    """Remove padding from x, handling zero padding properly. Only for 1D inputs!"""
    padding_left, padding_right = paddings
    assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
    assert (padding_left + padding_right) <= x.shape[-1]
    end = x.shape[-1] - padding_right
    return x[..., padding_left: end]

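# Worked example (illustrative numbers) for get_extra_padding_for_conv1d:
# with length=9, kernel_size=4, stride=2, padding_total=0:
#   n_frames     = (9 - 4 + 0) / 2 + 1 = 3.5  -> the last half-frame would be dropped
#   ideal_length = (ceil(3.5) - 1) * 2 + 4 = 10
#   extra        = 10 - 9 = 1 sample of right padding, so every input sample
#                  falls inside some convolution window.
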
class NormConv1d(nn.Module):
    """Wrapper around Conv1d and normalization applied to this conv"""
    def __init__(self, *args, causal: bool = False, norm: str = 'none',
                 norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
        super().__init__()
        self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm)
        self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs)
        self.norm_type = norm

    def forward(self, x):
        x = self.conv(x)
        x = self.norm(x)
        return x


class NormConvTranspose1d(nn.Module):
    """Wrapper around ConvTranspose1d and normalization applied to this conv"""
    def __init__(self, *args, causal: bool = False, norm: str = 'none',
                 norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs):
        super().__init__()
        self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm)
        self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs)
        self.norm_type = norm

    def forward(self, x):
        x = self.convtr(x)
        x = self.norm(x)
        return x

class VibeVoiceTokenizerStreamingCache:
    """Cache for streaming convolution, similar to KV cache in attention"""
    def __init__(self):
        self.cache = {}  # Dict mapping (layer_id, sample_idx) to state tensor

    def get(self, layer_id: str, sample_indices: torch.Tensor) -> Optional[torch.Tensor]:
        """Get cached states for given layer and sample indices"""
        states = []
        max_length = 0

        # First pass: collect states and find max length
        for idx in sample_indices.tolist():
            key = (layer_id, idx)
            if key not in self.cache:
                return None  # If any sample is missing, return None
            state = self.cache[key]
            states.append(state)
            max_length = max(max_length, state.shape[-1])

        # Second pass: pad states to max length if needed
        if len(states) > 0 and states[0].dim() >= 2:
            padded_states = []
            for state in states:
                if state.shape[-1] < max_length:
                    # Pad on the time dimension (last dimension)
                    pad_size = max_length - state.shape[-1]
                    # Pad with zeros on the LEFT to align the most recent samples
                    padded_state = F.pad(state, (pad_size, 0), mode='constant', value=0)
                    padded_states.append(padded_state)
                else:
                    padded_states.append(state)
            return torch.stack(padded_states, dim=0)
        else:
            return torch.stack(states, dim=0)

    def set(self, layer_id: str, sample_indices: torch.Tensor, states: torch.Tensor):
        """Set cached states for given layer and sample indices"""
        for i, idx in enumerate(sample_indices.tolist()):
            key = (layer_id, idx)
            self.cache[key] = states[i].detach()

    def set_to_zero(self, sample_indices: torch.Tensor):
        """Set all cached states to zero for given sample indices"""
        for key in list(self.cache.keys()):
            layer_id, sample_idx = key
            if sample_idx in sample_indices.tolist():
                # Create zero tensor with same shape and dtype as cached tensor
                cached_tensor = self.cache[key]
                self.cache[key] = torch.zeros_like(cached_tensor)

    def clear(self, layer_id: Optional[str] = None, sample_indices: Optional[torch.Tensor] = None):
        """Clear cache for specific layer/samples or everything"""
        if layer_id is None and sample_indices is None:
            self.cache.clear()
        elif layer_id is not None and sample_indices is None:
            # Clear all samples for a specific layer
            keys_to_remove = [k for k in self.cache.keys() if k[0] == layer_id]
            for k in keys_to_remove:
                del self.cache[k]
        elif layer_id is not None and sample_indices is not None:
            # Clear specific samples for a specific layer
            for idx in sample_indices.tolist():
                key = (layer_id, idx)
                self.cache.pop(key, None)

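# Sketch of the cache contract (illustrative): state is keyed per layer and per
# batch sample, so one cache object can serve every conv layer in the model
# while different samples advance at different rates.
#
#   cache = VibeVoiceTokenizerStreamingCache()
#   indices = torch.tensor([0, 1])             # two independent streams
#   cache.set("layer_a", indices, torch.zeros(2, 8, 6))
#   state = cache.get("layer_a", indices)      # -> [2, 8, 6]
#   cache.clear(layer_id="layer_a")            # drop one layer's state
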
class SConv1d(nn.Module):
    """Conv1d with built-in handling of asymmetric or causal padding and normalization."""
    def __init__(self, in_channels: int, out_channels: int,
                 kernel_size: int, stride: int = 1, dilation: int = 1,
                 groups: int = 1, bias: bool = True, causal: bool = False,
                 norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {},
                 pad_mode: str = 'reflect'):
        super().__init__()
        self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride,
                               dilation=dilation, groups=groups, bias=bias, causal=causal,
                               norm=norm, norm_kwargs=norm_kwargs)
        self.causal = causal
        self.pad_mode = pad_mode

        # Store configuration
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.stride = stride
        self.in_channels = in_channels
        self.out_channels = out_channels

        # For causal convolution, we need to maintain kernel_size - 1 samples as context.
        # TODO: verify which context size is more suitable:
        # self.context_size = (kernel_size - 1) * dilation
        self.context_size = (kernel_size - 1) * dilation - (stride - 1)

        # For non-streaming mode, calculate padding
        self.padding_total = (kernel_size - 1) * dilation - (stride - 1)

        # Create a unique layer ID for cache management
        self._layer_id = None

    @property
    def layer_id(self):
        if self._layer_id is None:
            self._layer_id = f"sconv1d_{id(self)}"
        return self._layer_id

    def forward(self, x: torch.Tensor,
                cache: Optional[VibeVoiceTokenizerStreamingCache] = None,
                sample_indices: Optional[torch.Tensor] = None,
                use_cache: bool = False,
                debug: bool = False) -> torch.Tensor:
        """
        Forward pass with optional streaming support via cache.

        Args:
            x: Input tensor [batch_size, channels, time]
            cache: VibeVoiceTokenizerStreamingCache object for maintaining states
            sample_indices: Indices identifying each sample for cache management
            use_cache: Whether to use cached states for streaming
            debug: Whether to print debug information

        Returns:
            Output tensor
        """
        B, C, T = x.shape

        # Non-streaming mode
        if not use_cache or cache is None:
            return self._forward_non_streaming(x, debug=debug)

        # Streaming mode
        assert self.causal, "Streaming mode is only supported for causal convolutions"
        assert sample_indices is not None, "sample_indices must be provided for streaming mode"
        assert len(sample_indices) == B, "sample_indices must match batch size"

        return self._forward_streaming(x, cache, sample_indices, debug)

    def _forward_streaming(self, x: torch.Tensor,
                           cache: VibeVoiceTokenizerStreamingCache,
                           sample_indices: torch.Tensor,
                           debug: bool = False) -> torch.Tensor:
        """Streaming forward pass with cache operations kept separate from compiled code"""
        B, C, T = x.shape

        # Cache operations (not compiled)
        cached_states = cache.get(self.layer_id, sample_indices)

        if cached_states is None:
            # First chunk - initialize with zeros for context
            if self.context_size > 0:
                cached_states = torch.zeros(B, C, self.context_size, device=x.device, dtype=x.dtype)
                if debug:
                    print(f"[DEBUG] Initialized cache with shape: {cached_states.shape}, context_size={self.context_size}")
            else:
                cached_states = torch.zeros(B, C, 0, device=x.device, dtype=x.dtype)
                if debug:
                    print(f"[DEBUG] No context needed (kernel_size == stride)")

        # Concatenate cached states with the input
        if cached_states.shape[2] > 0:
            input_with_context = torch.cat([cached_states, x], dim=2)
        else:
            input_with_context = x

        if debug:
            print(f"[DEBUG] Input shape: {x.shape}, Cache shape: {cached_states.shape}, Combined: {input_with_context.shape}")

        # Apply the convolution directly - no extra padding in streaming mode.
        # The conv layer will handle its own padding internally.
        output = self.conv(input_with_context)

        if debug:
            print(f"[DEBUG] Output shape: {output.shape}")

        # Update the cache for the next chunk
        if self.context_size > 0:
            # Calculate how many samples to keep
            total_input_length = input_with_context.shape[2]

            # Keep the last context_size samples
            if total_input_length >= self.context_size:
                new_cache_start = total_input_length - self.context_size
                new_cache = input_with_context[:, :, new_cache_start:]
            else:
                # If we have fewer than context_size samples, keep everything
                new_cache = input_with_context

            if debug:
                print(f"[DEBUG] New cache shape: {new_cache.shape}")

            cache.set(self.layer_id, sample_indices, new_cache)

        return output

    def _forward_non_streaming(self, x: torch.Tensor, debug: bool = False) -> torch.Tensor:
        """Standard forward pass without streaming"""
        B, C, T = x.shape
        kernel_size = self.kernel_size
        stride = self.stride
        dilation = self.dilation
        padding_total = self.padding_total

        # Compute extra padding for stride alignment
        extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total)

        if debug:
            print(f"[DEBUG NON-STREAMING] Input shape: {x.shape}, padding_total={padding_total}, extra_padding={extra_padding}")

        if self.causal:
            # Left padding for causal
            if self.pad_mode == 'constant':
                x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode, value=0)
            else:
                x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode)
        else:
            # Symmetric padding for non-causal
            padding_right = padding_total // 2
            padding_left = padding_total - padding_right
            x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode)

        if debug:
            print(f"[DEBUG NON-STREAMING] After padding: {x.shape}")

        output = self.conv(x)

        if debug:
            print(f"[DEBUG NON-STREAMING] Output shape: {output.shape}")

        return output

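# Streaming usage sketch (illustrative): feeding a long signal in chunks with a
# shared cache should match the single-shot causal forward, because each call
# prepends the last `context_size` input samples before convolving. The chunk
# length below is an arbitrary example value.
#
#   conv = SConv1d(1, 8, kernel_size=7, stride=1, causal=True, pad_mode='constant')
#   cache = VibeVoiceTokenizerStreamingCache()
#   indices = torch.tensor([0])
#   x = torch.randn(1, 1, 160)
#   full = conv(x)                                      # non-streaming reference
#   parts = [conv(c, cache=cache, sample_indices=indices, use_cache=True)
#            for c in x.split(40, dim=-1)]              # four 40-sample chunks
#   streamed = torch.cat(parts, dim=-1)                 # should match `full`
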
class SConvTranspose1d(nn.Module):
    """ConvTranspose1d with built-in handling of asymmetric or causal padding and normalization."""
    def __init__(self, in_channels: int, out_channels: int,
                 kernel_size: int, stride: int = 1, causal: bool = False,
                 norm: str = 'none', trim_right_ratio: float = 1.,
                 norm_kwargs: tp.Dict[str, tp.Any] = {}, bias: bool = True):
        super().__init__()
        self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride,
                                          causal=causal, norm=norm, norm_kwargs=norm_kwargs, bias=bias)
        self.causal = causal
        self.trim_right_ratio = trim_right_ratio
        assert self.causal or self.trim_right_ratio == 1., \
            "`trim_right_ratio` != 1.0 only makes sense for causal convolutions"
        assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1.

        # Store configuration
        self.kernel_size = kernel_size
        self.stride = stride
        self.in_channels = in_channels
        self.out_channels = out_channels

        # For transposed convolution, the padding calculation is different
        self.padding_total = kernel_size - stride

        # For streaming, we need to keep track of the input history:
        # a transposed conv needs to see multiple input samples to produce correct output
        self.context_size = kernel_size - 1

        # Create a unique layer ID for cache management
        self._layer_id = None

    @property
    def layer_id(self):
        if self._layer_id is None:
            self._layer_id = f"sconvtr1d_{id(self)}"
        return self._layer_id

    def forward(self, x: torch.Tensor,
                cache: Optional[VibeVoiceTokenizerStreamingCache] = None,
                sample_indices: Optional[torch.Tensor] = None,
                use_cache: bool = False,
                debug: bool = False) -> torch.Tensor:
        """
        Forward pass with optional streaming support via cache.
        """
        B, C, T = x.shape

        # Non-streaming mode
        if not use_cache or cache is None:
            return self._forward_non_streaming(x, debug=debug)

        # Streaming mode
        assert sample_indices is not None, "sample_indices must be provided for streaming mode"
        assert len(sample_indices) == B, "sample_indices must match batch size"

        return self._forward_streaming(x, cache, sample_indices, debug)

    def _forward_streaming(self, x: torch.Tensor,
                           cache: VibeVoiceTokenizerStreamingCache,
                           sample_indices: torch.Tensor,
                           debug: bool = False) -> torch.Tensor:
        """Streaming forward pass with cache operations kept separate from compiled code"""
        B, C, T = x.shape

        # Cache operations (not compiled)
        cached_input = cache.get(self.layer_id, sample_indices)

        if cached_input is None:
            # First chunk - no history yet
            cached_input = torch.zeros(B, C, 0, device=x.device, dtype=x.dtype)
            if debug:
                print(f"[DEBUG] Initialized empty cache for transposed conv")

        # Concatenate the cached input with the new input
        full_input = torch.cat([cached_input, x], dim=2)

        if debug:
            print(f"[DEBUG] Input shape: {x.shape}, Cache shape: {cached_input.shape}, Combined: {full_input.shape}")

        # Run the transposed conv over the full (history + new) input
        full_output = self.convtr(full_input)

        if debug:
            print(f"[DEBUG] Full transposed conv output shape: {full_output.shape}")

        # Calculate the padding to remove
        if self.causal:
            padding_right = math.ceil(self.padding_total * self.trim_right_ratio)
            padding_left = self.padding_total - padding_right
        else:
            padding_right = self.padding_total // 2
            padding_left = self.padding_total - padding_right

        # Remove the padding
        if padding_left + padding_right > 0:
            full_output = unpad1d(full_output, (padding_left, padding_right))

        if debug:
            print(f"[DEBUG] After unpadding: {full_output.shape}")

        # Determine which part of the output corresponds to the new input
        if cached_input.shape[2] == 0:
            # First chunk - return all output
            output = full_output
        else:
            # Subsequent chunks - return only the new output
            expected_new_output = T * self.stride

            # Take the last expected_new_output samples
            if full_output.shape[2] >= expected_new_output:
                output = full_output[:, :, -expected_new_output:]
            else:
                output = full_output

        if debug:
            print(f"[DEBUG] Final streaming output shape: {output.shape}")

        # Update the cache
        if full_input.shape[2] > self.context_size:
            new_cache = full_input[:, :, -self.context_size:]
        else:
            new_cache = full_input

        if debug:
            print(f"[DEBUG] New cache shape: {new_cache.shape}")

        cache.set(self.layer_id, sample_indices, new_cache)

        return output

    def _forward_non_streaming(self, x: torch.Tensor, debug: bool = False) -> torch.Tensor:
        """Standard forward pass without streaming"""
        if debug:
            print(f"[DEBUG NON-STREAMING] Input shape: {x.shape}")

        # Apply the transposed convolution
        y = self.convtr(x)

        if debug:
            print(f"[DEBUG NON-STREAMING] After transposed conv: {y.shape}")

        # Calculate and remove the padding
        if self.causal:
            padding_right = math.ceil(self.padding_total * self.trim_right_ratio)
            padding_left = self.padding_total - padding_right
        else:
            padding_right = self.padding_total // 2
            padding_left = self.padding_total - padding_right

        if padding_left + padding_right > 0:
            y = unpad1d(y, (padding_left, padding_right))

        if debug:
            print(f"[DEBUG NON-STREAMING] Final output shape: {y.shape}")

        return y

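# Output-length sketch for the streaming path (illustrative numbers): with
# kernel_size=8 and stride=4, padding_total = 8 - 4 = 4 and context_size = 7.
# A chunk of T new input frames must yield exactly T * stride = 4T new audio
# samples, so after unpadding, the streaming path keeps only the trailing 4T
# samples of the full (history + new) output; everything earlier was already
# emitted on previous calls.
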
# FFN
class FFN(nn.Module):
    def __init__(
        self,
        embed_dim,
        ffn_dim,
        bias=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.linear1 = nn.Linear(self.embed_dim, ffn_dim, bias=bias)
        self.gelu = ACT2FN["gelu"]
        self.linear2 = nn.Linear(ffn_dim, self.embed_dim, bias=bias)

    def forward(self, x):
        x = self.linear1(x)
        x = self.gelu(x)
        x = self.linear2(x)
        return x

class Convlayer(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        dilation=1,
        groups=1,
        bias=True,
        pad_mode='zeros',
        norm='weight_norm',
        causal=True,
    ):
        super().__init__()
        self.conv = SConv1d(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation,
                            groups=groups, bias=bias, pad_mode=pad_mode, norm=norm, causal=causal)

    def forward(self, x):
        return self.conv(x)

class Block1D(nn.Module):
    def __init__(self, dim, kernel_size=7, drop_path=0., mixer_layer='conv',
                 layer_scale_init_value=1e-6, **kwargs):
        super().__init__()

        layernorm = kwargs.get('layernorm', 'LN')
        if layernorm == 'LN':
            self.norm = ConvLayerNorm(dim, eps=kwargs.get('eps', 1e-6))
            self.ffn_norm = ConvLayerNorm(dim, eps=kwargs.get('eps', 1e-6))
        elif layernorm == 'RMSNorm':
            self.norm = ConvRMSNorm(dim, eps=kwargs.get('eps', 1e-6))
            self.ffn_norm = ConvRMSNorm(dim, eps=kwargs.get('eps', 1e-6))
        else:
            raise ValueError(f"Unsupported layernorm type: {layernorm}")

        if mixer_layer == 'conv':
            self.mixer = Convlayer(dim, dim, groups=kwargs.get('groups', 1),
                                   kernel_size=kernel_size,
                                   pad_mode=kwargs.get('pad_mode', 'reflect'),
                                   norm=kwargs.get('norm', 'none'),
                                   causal=kwargs.get('causal', True),
                                   bias=kwargs.get('bias', True),
                                   )
        elif mixer_layer == 'depthwise_conv':
            self.mixer = Convlayer(dim, dim, groups=dim,
                                   kernel_size=kernel_size,
                                   pad_mode=kwargs.get('pad_mode', 'reflect'),
                                   norm=kwargs.get('norm', 'none'),
                                   causal=kwargs.get('causal', True),
                                   bias=kwargs.get('bias', True),
                                   )
        else:
            raise ValueError(f"Unsupported mixer layer: {mixer_layer}")

        self.ffn = FFN(
            dim,
            kwargs.get('ffn_expansion', 4) * dim,
            bias=kwargs.get('bias', False),
        )
        # NOTE: torch.nn has no DropPath module; a stochastic-depth implementation
        # (e.g. timm's DropPath) must be available when drop_path > 0.
        self.drop_path = nn.Identity() if drop_path <= 0. else nn.modules.DropPath(drop_path)

        if layer_scale_init_value > 0:
            self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim), requires_grad=True)
            self.ffn_gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim), requires_grad=True)
        else:
            self.gamma = None
            self.ffn_gamma = None

    def forward(self, x):
        # mixer
        residual = x
        x = self.norm(x)
        x = self.mixer(x)
        if self.gamma is not None:
            x = x * self.gamma.unsqueeze(-1)
        x = residual + self.drop_path(x)

        # ffn
        residual = x
        x = self.ffn_norm(x)
        x = x.permute(0, 2, 1)
        x = self.ffn(x)
        x = x.permute(0, 2, 1)
        if self.ffn_gamma is not None:
            x = x * self.ffn_gamma.unsqueeze(-1)
        x = residual + self.drop_path(x)

        return x

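# Block1D data-flow sketch (comments only): inputs are channel-first [B, C, T].
#   x -> norm -> mixer (causal conv over time) -> layer scale -> + residual
#   x -> ffn_norm -> permute to [B, T, C] -> linear/GELU/linear -> permute back
#        -> layer scale -> + residual
# The permutes exist because the FFN's nn.Linear layers act on the last
# dimension, while the convolutional mixer expects channels-first input.
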
class TokenizerEncoder(nn.Module):
    """
    Encoder component for the VibeVoice tokenizer that converts audio to latent representations.

    Args:
        config: Configuration object with model parameters
    """
    def __init__(self, config):
        super().__init__()

        # Extract parameters from the config
        self.channels = config.channels
        self.dimension = config.dimension
        self.n_filters = config.n_filters
        self.ratios = list(reversed(config.ratios))
        self.depths = config.depths
        self.n_residual_layers = getattr(config, "n_residual_layers", 1)
        self.hop_length = np.prod(self.ratios)
        self.causal = config.causal

        # Additional config parameters with defaults
        kernel_size = getattr(config, "kernel_size", 7)
        last_kernel_size = getattr(config, "last_kernel_size", 7)
        norm = getattr(config, "norm", "none")
        norm_params = getattr(config, "norm_params", {})
        pad_mode = getattr(config, "pad_mode", "reflect")
        bias = getattr(config, "bias", True)
        layernorm = getattr(config, "layernorm", "LN")
        layernorm_eps = getattr(config, "layernorm_eps", 1e-6)
        layernorm_elementwise_affine = getattr(config, "layernorm_elementwise_affine", True)
        drop_path_rate = getattr(config, "drop_path_rate", 0.0)
        mixer_layer = getattr(config, "mixer_layer", "conv")
        layer_scale_init_value = getattr(config, "layer_scale_init_value", 0)
        disable_last_norm = getattr(config, "disable_last_norm", False)

        # Determine the norm type based on `layernorm`
        if layernorm == 'LN':
            norm_type = ConvLayerNorm
        elif layernorm == 'RMSNorm':
            norm_type = partial(ConvRMSNorm, elementwise_affine=layernorm_elementwise_affine)
        else:
            raise ValueError(f"Unsupported norm type: {layernorm}")

        # Stem and intermediate downsampling conv layers
        stem = nn.Sequential(
            SConv1d(self.channels, self.n_filters, kernel_size, norm=norm, norm_kwargs=norm_params, causal=self.causal, pad_mode=pad_mode, bias=bias),
        )

        self.downsample_layers = nn.ModuleList()
        self.downsample_layers.append(stem)
        for i in range(len(self.ratios)):
            in_ch = self.n_filters * (2 ** i)
            out_ch = self.n_filters * (2 ** (i + 1))
            downsample_layer = nn.Sequential(
                SConv1d(in_ch, out_ch, kernel_size=self.ratios[i] * 2, stride=self.ratios[i], causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias)
            )
            self.downsample_layers.append(downsample_layer)

        # Configure the convolutional blocks
        layer_type = partial(
            Block1D,
            mixer_layer=mixer_layer,
            layernorm=layernorm,
            eps=layernorm_eps,
            causal=self.causal,
            pad_mode=pad_mode,
            norm=norm,
            bias=bias,
            layer_scale_init_value=layer_scale_init_value,
        )

        self.stages = nn.ModuleList()
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
        cur = 0

        for i in range(len(self.depths)):
            in_ch = self.n_filters * (2 ** i)
            stage = nn.Sequential(
                *[layer_type(dim=in_ch, drop_path=dp_rates[cur + j]) for j in range(self.depths[i])]
            )
            self.stages.append(stage)
            cur += self.depths[i]

        if not disable_last_norm:
            self.norm = norm_type(in_ch, eps=layernorm_eps)
        else:
            self.norm = nn.Identity()
        self.head = SConv1d(in_ch, self.dimension, kernel_size=last_kernel_size, causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias)

    def forward_features(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
        for i in range(len(self.depths)):
            # Apply downsampling
            for layer in self.downsample_layers[i]:
                if isinstance(layer, SConv1d):
                    x = layer(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
                else:
                    x = layer(x)

            # Apply the stage (Block1D contains a Convlayer, which contains an SConv1d)
            for block in self.stages[i]:
                if hasattr(block, 'mixer') and hasattr(block.mixer, 'conv') and isinstance(block.mixer.conv, SConv1d):
                    # Block1D forward with cache support
                    residual = x
                    x = block.norm(x)
                    x = block.mixer.conv(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
                    if block.gamma is not None:
                        x = x * block.gamma.unsqueeze(-1)
                    x = residual + x

                    # FFN part
                    residual = x
                    x = block.ffn_norm(x)
                    x = x.permute(0, 2, 1)
                    x = block.ffn(x)
                    x = x.permute(0, 2, 1)
                    if block.ffn_gamma is not None:
                        x = x * block.ffn_gamma.unsqueeze(-1)
                    x = residual + x
                else:
                    x = block(x)

        return self.norm(x)

    def forward(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
        x = self.forward_features(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        x = self.head(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        return x

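# Rate-reduction sketch (hypothetical numbers, not taken from a shipped config):
# each downsampling stage doubles the channel count and divides time by its
# ratio, and hop_length = prod(ratios). For example, with n_filters=32 and
# ratios [8, 5, 5, 4] (hop_length = 800), 24 kHz audio would be encoded at
# 24000 / 800 = 30 latent frames per second, with channel widths
# 32 -> 64 -> 128 -> 256 -> 512 before the projection head to `dimension`.
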
class TokenizerDecoder(nn.Module):
    """
    Decoder component for the VibeVoice tokenizer that converts latent representations back to audio.

    Args:
        config: Configuration object with model parameters
    """
    def __init__(self, config):
        super().__init__()

        # Extract parameters from the config
        self.dimension = config.dimension
        self.channels = config.channels
        self.n_filters = config.n_filters
        self.ratios = config.ratios

        # IMPORTANT: don't reverse `depths` again; they are already reversed in
        # VibeVoiceAcousticTokenizerModel.
        self.depths = config.depths  # Changed from list(reversed(config.depths))

        self.n_residual_layers = getattr(config, "n_residual_layers", 1)
        self.hop_length = np.prod(self.ratios)
        self.causal = config.causal

        # Additional config parameters with defaults
        kernel_size = getattr(config, "kernel_size", 7)
        last_kernel_size = getattr(config, "last_kernel_size", 7)
        norm = getattr(config, "norm", "none")
        norm_params = getattr(config, "norm_params", {})
        pad_mode = getattr(config, "pad_mode", "reflect")
        bias = getattr(config, "bias", True)
        layernorm = getattr(config, "layernorm", "LN")
        layernorm_eps = getattr(config, "layernorm_eps", 1e-6)
        trim_right_ratio = getattr(config, "trim_right_ratio", 1.0)
        layernorm_elementwise_affine = getattr(config, "layernorm_elementwise_affine", True)
        drop_path_rate = getattr(config, "drop_path_rate", 0.0)
        mixer_layer = getattr(config, "mixer_layer", "conv")
        layer_scale_init_value = getattr(config, "layer_scale_init_value", 0)
        disable_last_norm = getattr(config, "disable_last_norm", False)

        # Determine the norm type based on `layernorm`
        if layernorm == 'LN':
            norm_type = ConvLayerNorm
        elif layernorm == 'RMSNorm':
            norm_type = partial(ConvRMSNorm, elementwise_affine=layernorm_elementwise_affine)
        else:
            raise ValueError(f"Unsupported norm type: {layernorm}")

        # Stem and upsampling layers
        stem = nn.Sequential(
            SConv1d(self.dimension, self.n_filters * 2 ** (len(self.depths) - 1), kernel_size, norm=norm,
                    norm_kwargs=norm_params, causal=self.causal, pad_mode=pad_mode, bias=bias),
        )

        self.upsample_layers = nn.ModuleList()
        self.upsample_layers.append(stem)
        for i in range(len(self.ratios)):
            in_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i))
            out_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i - 1))
            upsample_layer = nn.Sequential(
                SConvTranspose1d(in_ch, out_ch,
                                 kernel_size=self.ratios[i] * 2, stride=self.ratios[i],
                                 norm=norm, norm_kwargs=norm_params, bias=bias,
                                 causal=self.causal, trim_right_ratio=trim_right_ratio),
            )
            self.upsample_layers.append(upsample_layer)

        # Configure the convolutional blocks
        layer_type = partial(
            Block1D,
            mixer_layer=mixer_layer,
            layernorm=layernorm,
            eps=layernorm_eps,
            causal=self.causal,
            pad_mode=pad_mode,
            norm=norm,
            bias=bias,
            layer_scale_init_value=layer_scale_init_value,
        )

        self.stages = nn.ModuleList()
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
        cur = 0

        # Create the stages in the same order as the original model
        for i in range(len(self.depths)):
            in_ch = self.n_filters * (2 ** (len(self.depths) - 1 - i))
            stage = nn.Sequential(
                *[layer_type(dim=in_ch, drop_path=dp_rates[cur + j]) for j in range(self.depths[i])]
            )
            self.stages.append(stage)
            cur += self.depths[i]

        if not disable_last_norm:
            self.norm = norm_type(in_ch, eps=layernorm_eps)
        else:
            self.norm = nn.Identity()
        self.head = SConv1d(in_ch, self.channels, kernel_size=last_kernel_size, causal=self.causal, pad_mode=pad_mode, norm=norm, bias=bias)

    def forward_features(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
        for i in range(len(self.depths)):
            # Apply upsampling
            for layer in self.upsample_layers[i]:
                if isinstance(layer, (SConv1d, SConvTranspose1d)):
                    x = layer(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
                else:
                    x = layer(x)

            # Apply the stage (Block1D contains a Convlayer, which contains an SConv1d)
            for block in self.stages[i]:
                if hasattr(block, 'mixer') and hasattr(block.mixer, 'conv') and isinstance(block.mixer.conv, SConv1d):
                    # Block1D forward with cache support
                    residual = x
                    x = block.norm(x)
                    x = block.mixer.conv(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
                    if block.gamma is not None:
                        x = x * block.gamma.unsqueeze(-1)
                    x = residual + x

                    # FFN part
                    residual = x
                    x = block.ffn_norm(x)
                    x = x.permute(0, 2, 1)
                    x = block.ffn(x)
                    x = x.permute(0, 2, 1)
                    if block.ffn_gamma is not None:
                        x = x * block.ffn_gamma.unsqueeze(-1)
                    x = residual + x
                else:
                    x = block(x)

        return self.norm(x)

    def forward(self, x, cache=None, sample_indices=None, use_cache=False, debug=False):
        x = self.forward_features(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        x = self.head(x, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        return x

@dataclass
class VibeVoiceTokenizerEncoderOutput:
    """
    Output of the VibeVoice tokenizer encoder, representing a Gaussian distribution with fixed variance.

    Args:
        mean (`torch.FloatTensor`): The mean parameters of the distribution.
        std (`float` or `torch.FloatTensor`): Fixed standard deviation value.
    """
    mean: torch.Tensor
    std: Optional[Union[float, torch.Tensor]] = None

    def sample(self, dist_type='fix'):
        """
        Sample from the distribution.

        Args:
            dist_type (`str`): Sampling method, either 'fix' or 'gaussian'.
                Any other value returns the mean without sampling.

        Returns:
            `torch.FloatTensor`: Sampled values.
            `torch.FloatTensor` (optional): Standard deviation used (only when dist_type='gaussian').
        """
        if dist_type == 'fix':
            x = self.mean + self.std * torch.randn_like(self.mean)
            return x, self.std
        elif dist_type == 'gaussian':
            # Draw a per-sample std around the fixed value, then sample with it
            batch_size = self.mean.size(0)
            value = self.std / 0.8
            std = torch.randn(batch_size, device=self.mean.device, dtype=self.mean.dtype) * value

            while std.dim() < self.mean.dim():
                std = std.unsqueeze(-1)

            x = self.mean + std * torch.randn_like(self.mean)
            return x, std
        else:
            return self.mean, self.std

    def kl(self):
        """Regularization toward a standard normal: an elementwise squared-error
        penalty on the mean (the KL term up to constants when std is fixed)."""
        target = torch.zeros_like(self.mean)
        return F.mse_loss(self.mean, target, reduction='none')

    def mode(self):
        """Return the distribution mode (which is the mean for a Gaussian)."""
        return self.mean

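# Reparameterization sketch (illustrative): both sampling branches compute
# x = mean + std * eps with eps ~ N(0, I). 'fix' uses the configured scalar
# std everywhere; 'gaussian' first draws one std per batch element
# (~ N(0, (fix_std / 0.8)^2)), broadcasts it over the latent dims, and then
# samples, so different items in the batch get different noise magnitudes.
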
class VibeVoiceAcousticTokenizerModel(PreTrainedModel):
    """VibeVoice speech tokenizer model combining an encoder and a decoder for acoustic tokens"""

    config_class = VibeVoiceAcousticTokenizerConfig
    base_model_prefix = "vibevoice_acoustic_tokenizer"
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _no_split_modules = ["TokenizerEncoder", "TokenizerDecoder"]

    def __init__(self, config):
        super().__init__(config)

        self.register_buffer('fix_std', torch.tensor(config.fix_std), persistent=False)
        self.std_dist_type = getattr(config, "std_dist_type", "fix")

        # Parse encoder depths
        if isinstance(config.encoder_depths, str):
            encoder_depths = [int(d) for d in config.encoder_depths.split('-')]
        else:
            encoder_depths = config.encoder_depths

        # Parse decoder depths if provided
        if config.decoder_depths is not None and isinstance(config.decoder_depths, str):
            decoder_depths = [int(d) for d in config.decoder_depths.split('-')]
        else:
            # Default: use the reversed encoder depths if decoder_depths is None
            decoder_depths = list(reversed(encoder_depths))

        # Create the encoder config
        encoder_config = copy.deepcopy(config)
        encoder_config.dimension = config.vae_dim
        encoder_config.n_filters = config.encoder_n_filters
        encoder_config.ratios = config.encoder_ratios
        encoder_config.depths = encoder_depths
        encoder_config.norm = config.conv_norm
        encoder_config.pad_mode = config.pad_mode
        encoder_config.bias = config.conv_bias
        encoder_config.layernorm_eps = config.layernorm_eps
        encoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine
        encoder_config.mixer_layer = config.mixer_layer
        encoder_config.layer_scale_init_value = config.layer_scale_init_value
        encoder_config.disable_last_norm = config.disable_last_norm

        # Create the decoder config
        decoder_config = copy.deepcopy(config)
        decoder_config.dimension = config.vae_dim
        decoder_config.n_filters = config.decoder_n_filters
        decoder_config.ratios = config.decoder_ratios
        decoder_config.depths = decoder_depths
        decoder_config.norm = config.conv_norm
        decoder_config.pad_mode = config.pad_mode
        decoder_config.bias = config.conv_bias
        decoder_config.layernorm_eps = config.layernorm_eps
        decoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine
        decoder_config.mixer_layer = config.mixer_layer
        decoder_config.layer_scale_init_value = config.layer_scale_init_value
        decoder_config.disable_last_norm = config.disable_last_norm

        # Initialize the encoder and decoder
        self.encoder = TokenizerEncoder(encoder_config)
        self.decoder = TokenizerDecoder(decoder_config)

        # Initialize weights
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Initialize weights for the model"""
        if isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, std=self.config.weight_init_value)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            nn.init.ones_(module.weight)
            nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Conv1d):
            nn.init.normal_(module.weight, std=self.config.weight_init_value)
            if module.bias is not None:
                nn.init.zeros_(module.bias)

    @torch.no_grad()
    def encode(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
        """Convert audio to latent representations"""
        latents = self.encoder(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        return VibeVoiceTokenizerEncoderOutput(mean=latents.permute(0, 2, 1), std=self.fix_std)

    @torch.no_grad()
    def sampling(self, encoder_output, dist_type=None):
        """Sample from the encoder output distribution"""
        dist_type = dist_type or self.std_dist_type

        if dist_type == 'fix':
            return encoder_output.sample(dist_type='fix')
        elif dist_type == 'gaussian':
            return encoder_output.sample(dist_type='gaussian')
        else:
            raise ValueError(f"Unsupported dist_type: {dist_type}, expected 'fix' or 'gaussian'")

    @torch.no_grad()
    def decode(self, latents, cache=None, sample_indices=None, use_cache=False, debug=False):
        """Convert latent representations back to audio"""
        # Accept either [B, vae_dim, T] or [B, T, vae_dim] input
        if latents.shape[1] != self.config.vae_dim:
            latents = latents.permute(0, 2, 1)

        audio = self.decoder(latents, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        return audio

    def forward(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
        """Full forward pass: encode audio to latents, then decode back to audio"""
        encoder_output = self.encode(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        sampled_latents, _ = self.sampling(encoder_output)
        reconstructed = self.decode(sampled_latents, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        return reconstructed, sampled_latents

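# Round-trip usage sketch (illustrative; the config values are placeholders,
# not a shipped checkpoint's settings):
#
#   model = VibeVoiceAcousticTokenizerModel(VibeVoiceAcousticTokenizerConfig())
#   model.eval()
#   audio = torch.randn(1, 1, 48000)           # [B, channels, samples]
#   enc = model.encode(audio)                  # distribution over latents
#   z, _ = model.sampling(enc)                 # [B, T_latent, vae_dim]
#   recon = model.decode(z)                    # back to [B, 1, ~48000]
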
class VibeVoiceSemanticTokenizerModel(PreTrainedModel):
    """VibeVoice speech tokenizer model with only an encoder, for semantic tokens"""

    config_class = VibeVoiceSemanticTokenizerConfig
    base_model_prefix = "vibevoice_semantic_tokenizer"
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _no_split_modules = ["TokenizerEncoder"]

    def __init__(self, config):
        super().__init__(config)

        # Parse encoder depths
        if isinstance(config.encoder_depths, str):
            encoder_depths = [int(d) for d in config.encoder_depths.split('-')]
        else:
            encoder_depths = config.encoder_depths

        # Create the encoder config
        encoder_config = copy.deepcopy(config)
        encoder_config.dimension = config.vae_dim
        encoder_config.n_filters = config.encoder_n_filters
        encoder_config.ratios = config.encoder_ratios
        encoder_config.depths = encoder_depths
        encoder_config.norm = config.conv_norm
        encoder_config.pad_mode = config.pad_mode
        encoder_config.bias = config.conv_bias
        encoder_config.layernorm_eps = config.layernorm_eps
        encoder_config.layernorm_elementwise_affine = config.layernorm_elementwise_affine
        encoder_config.mixer_layer = config.mixer_layer
        encoder_config.layer_scale_init_value = config.layer_scale_init_value
        encoder_config.disable_last_norm = config.disable_last_norm

        # Initialize the encoder (the semantic tokenizer has no decoder)
        self.encoder = TokenizerEncoder(encoder_config)

        # Initialize weights
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """Initialize weights for the model"""
        if isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, std=self.config.weight_init_value)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            nn.init.ones_(module.weight)
            nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Conv1d):
            nn.init.normal_(module.weight, std=self.config.weight_init_value)
            if module.bias is not None:
                nn.init.zeros_(module.bias)

    @torch.no_grad()
    def encode(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
        """Convert audio to latent representations"""
        latents = self.encoder(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        return VibeVoiceTokenizerEncoderOutput(mean=latents.permute(0, 2, 1))

    @torch.no_grad()
    def sampling(self, encoder_output, dist_type=None):
        """Return the deterministic encoder output (no sampling for semantic tokens)"""
        return encoder_output.sample(dist_type='none')

    def forward(self, audio, cache=None, sample_indices=None, use_cache=False, debug=False):
        """Forward pass: encode audio to latents (this model has no decoder)"""
        encoder_output = self.encode(audio, cache=cache, sample_indices=sample_indices, use_cache=use_cache, debug=debug)
        sampled_latents, _ = self.sampling(encoder_output, dist_type='none')
        return None, sampled_latents

AutoModel.register(VibeVoiceAcousticTokenizerConfig, VibeVoiceAcousticTokenizerModel)
AutoModel.register(VibeVoiceSemanticTokenizerConfig, VibeVoiceSemanticTokenizerModel)

__all__ = [
    "VibeVoiceTokenizerStreamingCache",
    "VibeVoiceAcousticTokenizerModel",
    "VibeVoiceSemanticTokenizerModel",
]
src/vibevoice/modular/streamer.py
ADDED
@@ -0,0 +1,264 @@
from __future__ import annotations

import asyncio
import time
from queue import Queue, Empty
from typing import Any, Optional

import torch
from transformers.generation import BaseStreamer


class AudioStreamer(BaseStreamer):
    """
    Audio streamer that stores audio chunks in queues for each sample in the batch.
    This allows streaming audio generation for multiple samples simultaneously.

    Parameters:
        batch_size (`int`):
            The batch size for generation
        stop_signal (`Any`, *optional*):
            The signal to put in the queue when generation ends. Defaults to None.
        timeout (`float`, *optional*):
            The timeout for the audio queue. If `None`, the queue will block indefinitely.
    """

    def __init__(
        self,
        batch_size: int,
        stop_signal: Optional[Any] = None,
        timeout: Optional[float] = None,
    ):
        self.batch_size = batch_size
        self.stop_signal = stop_signal
        self.timeout = timeout

        # Create a queue for each sample in the batch
        self.audio_queues = [Queue() for _ in range(batch_size)]
        self.finished_flags = [False for _ in range(batch_size)]
        self.sample_indices_map = {}  # Maps from sample index to queue index

    def put(self, audio_chunks: torch.Tensor, sample_indices: torch.Tensor):
        """
        Receives audio chunks and puts them in the appropriate queues.

        Args:
            audio_chunks: Tensor of shape (num_samples, ...) containing audio chunks
            sample_indices: Tensor indicating which samples these chunks belong to
        """
        for i, sample_idx in enumerate(sample_indices):
            idx = sample_idx.item()
            if idx < self.batch_size and not self.finished_flags[idx]:
                # Detach and move to CPU before handing the chunk to consumers
                audio_chunk = audio_chunks[i].detach().cpu()
                self.audio_queues[idx].put(audio_chunk, timeout=self.timeout)

    def end(self, sample_indices: Optional[torch.Tensor] = None):
        """
        Signals the end of generation for the specified samples, or all samples.

        Args:
            sample_indices: Optional tensor of sample indices to end. If None, ends all.
        """
        if sample_indices is None:
            # End all samples
            for idx in range(self.batch_size):
                if not self.finished_flags[idx]:
                    self.audio_queues[idx].put(self.stop_signal, timeout=self.timeout)
                    self.finished_flags[idx] = True
        else:
            # End specific samples
            for sample_idx in sample_indices:
                idx = sample_idx.item() if torch.is_tensor(sample_idx) else sample_idx
                if idx < self.batch_size and not self.finished_flags[idx]:
                    self.audio_queues[idx].put(self.stop_signal, timeout=self.timeout)
                    self.finished_flags[idx] = True

    def __iter__(self):
        """Returns an iterator over the batch of audio streams."""
        return AudioBatchIterator(self)

    def get_stream(self, sample_idx: int):
        """Get the audio stream for a specific sample."""
        if sample_idx >= self.batch_size:
            raise ValueError(f"Sample index {sample_idx} exceeds batch size {self.batch_size}")
        return AudioSampleIterator(self, sample_idx)

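# Consumption sketch (hedged: the generate(...) call and its audio_streamer
# keyword are assumptions about the inference API, not shown in this file):
#
#   import threading
#   streamer = AudioStreamer(batch_size=1)
#   thread = threading.Thread(
#       target=lambda: model.generate(**inputs, audio_streamer=streamer))
#   thread.start()
#   for chunk in streamer.get_stream(0):    # blocks until chunks arrive
#       play_or_buffer(chunk)               # hypothetical consumer
#   thread.join()
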
class AudioSampleIterator:
    """Iterator for a single audio stream from the batch."""

    def __init__(self, streamer: AudioStreamer, sample_idx: int):
        self.streamer = streamer
        self.sample_idx = sample_idx

    def __iter__(self):
        return self

    def __next__(self):
        value = self.streamer.audio_queues[self.sample_idx].get(timeout=self.streamer.timeout)
        # Identity check: the stop signal is a sentinel object, and `==` on
        # tensor chunks would do an elementwise comparison.
        if value is self.streamer.stop_signal:
            raise StopIteration()
        return value


class AudioBatchIterator:
    """Iterator that yields audio chunks for all samples in the batch."""

    def __init__(self, streamer: AudioStreamer):
        self.streamer = streamer
        self.active_samples = set(range(streamer.batch_size))

    def __iter__(self):
        return self

    def __next__(self):
        if not self.active_samples:
            raise StopIteration()

        batch_chunks = {}
        samples_to_remove = set()

        # Try to get chunks from all active samples
        for idx in self.active_samples:
            try:
                value = self.streamer.audio_queues[idx].get(block=False)
                if value is self.streamer.stop_signal:
                    samples_to_remove.add(idx)
                else:
                    batch_chunks[idx] = value
            except Empty:
                # The queue is empty for this sample; skip it this iteration
                pass

        # Remove finished samples
        self.active_samples -= samples_to_remove

        if batch_chunks:
            return batch_chunks
        elif self.active_samples:
            # If no chunks were ready but we still have active samples,
            # wait a bit and try again
            time.sleep(0.01)
            return self.__next__()
        else:
            raise StopIteration()

class AsyncAudioStreamer(AudioStreamer):
    """
    Async version of AudioStreamer for use in async contexts.
    """

    def __init__(
        self,
        batch_size: int,
        stop_signal: Optional[any] = None,
        timeout: Optional[float] = None,
    ):
        super().__init__(batch_size, stop_signal, timeout)
        # Replace regular queues with async queues
        self.audio_queues = [asyncio.Queue() for _ in range(batch_size)]
        self.loop = asyncio.get_running_loop()

    def put(self, audio_chunks: torch.Tensor, sample_indices: torch.Tensor):
        """Put audio chunks in the appropriate async queues."""
        for i, sample_idx in enumerate(sample_indices):
            idx = sample_idx.item()
            if idx < self.batch_size and not self.finished_flags[idx]:
                audio_chunk = audio_chunks[i].detach().cpu()
                self.loop.call_soon_threadsafe(
                    self.audio_queues[idx].put_nowait, audio_chunk
                )

    def end(self, sample_indices: Optional[torch.Tensor] = None):
        """Signal the end of generation for specified samples."""
        if sample_indices is None:
            indices_to_end = range(self.batch_size)
        else:
            indices_to_end = [s.item() if torch.is_tensor(s) else s for s in sample_indices]

        for idx in indices_to_end:
            if idx < self.batch_size and not self.finished_flags[idx]:
                self.loop.call_soon_threadsafe(
                    self.audio_queues[idx].put_nowait, self.stop_signal
                )
                self.finished_flags[idx] = True

    async def get_stream(self, sample_idx: int):
        """Get async iterator for a specific sample's audio stream."""
        if sample_idx >= self.batch_size:
            raise ValueError(f"Sample index {sample_idx} exceeds batch size {self.batch_size}")

        while True:
            value = await self.audio_queues[sample_idx].get()
            if value == self.stop_signal:
                break
            yield value

    def __aiter__(self):
        """Returns an async iterator over all audio streams."""
        return AsyncAudioBatchIterator(self)

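
# Async sketch (illustrative): generation still runs in a worker thread, but
# chunks cross into the event loop via `call_soon_threadsafe`, so a coroutine
# can forward them, e.g. over a websocket. The `audio_streamer` kwarg on
# `model.generate`, `generate_kwargs`, and `send_chunk` are assumptions.
async def _example_stream_over_async(model, generate_kwargs, send_chunk):
    import threading

    streamer = AsyncAudioStreamer(batch_size=1)  # requires a running event loop
    worker = threading.Thread(
        target=model.generate,
        kwargs={**generate_kwargs, "audio_streamer": streamer},
    )
    worker.start()

    async for chunk in streamer.get_stream(0):  # async generator of tensors
        await send_chunk(chunk)
    worker.join()  # the stream ends with the stop signal, so this is brief
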
class AsyncAudioBatchIterator:
    """Async iterator for batch audio streaming."""

    def __init__(self, streamer: AsyncAudioStreamer):
        self.streamer = streamer
        self.active_samples = set(range(streamer.batch_size))

    def __aiter__(self):
        return self

    async def __anext__(self):
        if not self.active_samples:
            raise StopAsyncIteration()

        batch_chunks = {}
        samples_to_remove = set()

        # Create tasks for all active samples
        tasks = {
            idx: asyncio.create_task(self._get_chunk(idx))
            for idx in self.active_samples
        }

        # Wait for at least one chunk to be ready
        done, pending = await asyncio.wait(
            tasks.values(),
            return_when=asyncio.FIRST_COMPLETED,
            timeout=self.streamer.timeout,
        )

        # Cancel pending tasks
        for task in pending:
            task.cancel()

        # Process completed tasks
        for idx, task in tasks.items():
            if task in done:
                try:
                    value = await task
                    if value == self.streamer.stop_signal:
                        samples_to_remove.add(idx)
                    else:
                        batch_chunks[idx] = value
                except asyncio.CancelledError:
                    pass

        self.active_samples -= samples_to_remove

        if batch_chunks:
            return batch_chunks
        elif self.active_samples:
            # Try again if we still have active samples
            return await self.__anext__()
        else:
            raise StopAsyncIteration()

    async def _get_chunk(self, idx):
        """Helper to get a chunk from a specific queue."""
        return await self.streamer.audio_queues[idx].get()

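
# Batch-level async sketch (illustrative): AsyncAudioBatchIterator lets one
# coroutine multiplex every sample's stream; `handle_chunk` is a hypothetical
# per-chunk consumer.
async def _example_consume_async_batch(streamer: AsyncAudioStreamer, handle_chunk):
    async for batch_chunks in streamer:  # dict: sample index -> newest chunk
        for idx, chunk in batch_chunks.items():
            await handle_chunk(idx, chunk)
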
src/vibevoice/processor/__init__.py ADDED
File without changes

src/vibevoice/processor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (162 Bytes)

src/vibevoice/processor/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (177 Bytes)

src/vibevoice/processor/__pycache__/vibevoice_processor.cpython-310.pyc ADDED
Binary file (19.7 kB)