# benchmarkTTS / comparisons / gen_vibevoice.py
# Uploaded via huggingface_hub by Rcarvalo (commit e89588f, verified)
#!/usr/bin/env python3
"""Generate 10 proof_unseen texts with VibeVoice baseline + finetuned."""
import torch
import copy
import soundfile as sf
import numpy as np
from pathlib import Path
from safetensors.torch import load_file
from vibevoice.modular.modeling_vibevoice_streaming_inference import (
VibeVoiceStreamingForConditionalGenerationInference,
)
from vibevoice.processor.vibevoice_streaming_processor import (
VibeVoiceStreamingProcessor,
)
# Unseen French evaluation sentences keyed by corpus stem.
# "book" stems are literary prose; "parl" stems are parliamentary speech.
TEXTS = {
    "neut_book_s06_0336": "Azora fit l'éloge du défunt ; mais elle avoua qu'il avait des défauts dont Cador était exempt.",
    "neut_book_s08_0163": "Le lendemain, dès le lever du jour, Cyrus Smith et Ayrton, montant le chariot attelé des deux onaggas, prenaient la route du corral et y couraient au grand trot.",
    "neut_book_s09_0091": "À une certaine époque, la terre n'était formée que d'une écorce élastique, soumise à des mouvements alternatifs de haut et de bas, en vertu des lois de l'attraction.",
    "neut_book_s09_0307": "L'île Tabor, sorte de côte basse, à peine émergée des flots, n'était pas éloignée de plus de quinze milles.",
    "neut_parl_s01_0429": "A défaut, je suggérerai à l'Assemblée de le rejeter.",
    "neut_parl_s01_0715": "Si peu de choses furent au rendez-vous !",
    "neut_parl_s02_0152": "Là, vous évoquez les difficultés de quelques branches spécifiques.",
    "neut_parl_s03_0695": "Nous refusons ce deux poids, deux mesures.",
    "neut_parl_s03_0704": "En première lecture, onze jours et onze nuits ont été consacrés à ce texte.",
    "neut_parl_s05_0355": "Cela ne changera rien à la compétitivité du pays ou au chômage.",
}
# Root output directory; generated wavs land in baseline/, finetuned/, original/.
OUTPUT_DIR = Path("/home/spice/projects/tts-model-exploration/speech-dev-remi/comparisons/vibevoice")
# Safetensors checkpoint holding the finetuned tts_language_model state dict.
FINETUNED_WEIGHTS = Path("/home/spice/projects/tts-model-exploration/finetuning_vibevoice/outputs/full_ft_vibevoice/tts_lm_best.safetensors")
# Directory of pre-computed voice-prompt .pt files for the streaming model.
VOICES_DIR = Path("/home/spice/speech/VibeVoice/demo/voices/streaming_model")
# Output sample rate in Hz used when writing wavs.
SAMPLE_RATE = 24000
# All model loading and generation happens on the first GPU.
device = torch.device("cuda:0")
def load_model():
    """Load the realtime VibeVoice model and processor onto `device`.

    Attempts flash-attention 2 first; if that raises for any reason
    (missing wheels, unsupported GPU), falls back to SDPA — a deliberate
    best-effort, so the broad except is intentional.

    Returns:
        (model, processor): model is in eval mode with 5 DDPM inference steps.
    """
    model_path = "microsoft/VibeVoice-Realtime-0.5B"
    processor = VibeVoiceStreamingProcessor.from_pretrained(model_path)
    shared_kwargs = {
        "torch_dtype": torch.bfloat16,
        "device_map": str(device),
    }
    try:
        model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
            model_path, attn_implementation="flash_attention_2", **shared_kwargs
        )
    except Exception:
        # Flash-attention kernels unavailable — SDPA works everywhere.
        model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
            model_path, attn_implementation="sdpa", **shared_kwargs
        )
    model.eval()
    model.set_ddpm_inference_steps(num_steps=5)
    return model, processor
def generate_sample(model, processor, text, voice_prompt):
    """Synthesize one utterance with a cached voice prompt.

    Args:
        model: loaded VibeVoice streaming inference model.
        processor: matching VibeVoiceStreamingProcessor.
        text: the sentence to synthesize.
        voice_prompt: pre-computed prompt cache (as loaded from a voices .pt file).

    Returns:
        1-D float numpy array of audio samples (peak-limited to 0.99 when it
        would clip), or None when generation produced no speech.
    """
    inputs = processor.process_input_with_cached_prompt(
        text=text, cached_prompt=voice_prompt,
        padding=True, return_tensors="pt", return_attention_mask=True,
    )
    # Move every tensor input onto the target device; leave non-tensors alone.
    inputs = {k: v.to(device) if torch.is_tensor(v) else v for k, v in inputs.items()}
    # Token budget heuristic: ~3 tokens per character plus headroom,
    # clamped to the range [200, 800].
    max_tokens = min(max(int(len(text) * 3.0) + 100, 200), 800)
    with torch.no_grad():
        outputs = model.generate(
            **inputs, max_new_tokens=max_tokens, cfg_scale=1.5,
            tokenizer=processor.tokenizer,
            generation_config={'do_sample': False}, verbose=False,
            # Deep-copy: generation mutates the prefilled prompt state, and the
            # caller reuses the same voice_prompt across samples.
            all_prefilled_outputs=copy.deepcopy(voice_prompt),
        )
    if not outputs.speech_outputs or outputs.speech_outputs[0] is None:
        return None
    audio = outputs.speech_outputs[0].cpu().float().numpy().flatten()
    # Fix: np.max on a zero-length array raises ValueError; treat an empty
    # waveform as a failed generation, like the None case above.
    if audio.size == 0:
        return None
    peak = np.max(np.abs(audio))
    if peak > 1.0:
        # Normalize only when the signal would clip.
        audio = audio / peak * 0.99
    return audio
def _synthesize_set(model, processor, voice_prompt, subdir):
    """Render every sentence in TEXTS to OUTPUT_DIR/<subdir>/<stem>.wav, printing status."""
    for stem, text in TEXTS.items():
        print(f" [{stem}] {text[:50]}...")
        audio = generate_sample(model, processor, text, voice_prompt)
        if audio is None:
            print(" FAILED")
            continue
        sf.write(str(OUTPUT_DIR / subdir / f"{stem}.wav"), audio, SAMPLE_RATE)
        print(f" OK ({len(audio)/SAMPLE_RATE:.1f}s)")


def main():
    """Generate TEXTS with baseline and finetuned VibeVoice, then copy reference audio."""
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    for subdir in ("baseline", "finetuned"):
        (OUTPUT_DIR / subdir).mkdir(exist_ok=True)

    # Single cached voice prompt shared by both runs (deep-copied per sample
    # inside generate_sample).
    voice_prompt = torch.load(
        VOICES_DIR / "fr-Spk0_man.pt", map_location=device, weights_only=False
    )

    banner = "=" * 60

    # Baseline pass with the stock checkpoint.
    print(banner)
    print("VIBEVOICE BASELINE")
    print(banner)
    model, processor = load_model()
    _synthesize_set(model, processor, voice_prompt, "baseline")

    # Finetuned pass: swap the tts_language_model weights in-place on the
    # already-loaded model rather than reloading from scratch.
    print("\n" + banner)
    print("VIBEVOICE FINETUNED")
    print(banner)
    print(f"Loading finetuned weights from {FINETUNED_WEIGHTS}...")
    ft_weights = load_file(str(FINETUNED_WEIGHTS))
    model.model.tts_language_model.load_state_dict(ft_weights)
    print("Finetuned weights loaded")
    _synthesize_set(model, processor, voice_prompt, "finetuned")

    # Copy the ground-truth SIWIS recordings for A/B listening.
    print("\nCopying original SIWIS audio...")
    import shutil
    (OUTPUT_DIR / "original").mkdir(exist_ok=True)
    siwis_wavs = Path("/home/spice/speech/app/liquid-audio/french_finetuning/data/raw/siwis/SiwisFrenchSpeechSynthesisDatabase/wavs")
    for stem in TEXTS:
        matches = list(siwis_wavs.rglob(f"{stem}.wav"))
        if not matches:
            continue
        shutil.copy2(matches[0], OUTPUT_DIR / "original" / f"{stem}.wav")
        print(f" {stem} copied")
    print("\nDone!")
if __name__ == "__main__":
main()