| |
| """Generate 10 proof_unseen texts with CeSAMe baseline + finetuned (LoRA).""" |
import shutil
import traceback
from pathlib import Path

import numpy as np
import soundfile as sf
import torch
from peft import PeftModel
from transformers import CsmForConditionalGeneration, AutoProcessor
|
|
| TEXTS = { |
| "neut_book_s06_0336": "Azora fit l'éloge du défunt ; mais elle avoua qu'il avait des défauts dont Cador était exempt.", |
| "neut_book_s08_0163": "Le lendemain, dès le lever du jour, Cyrus Smith et Ayrton, montant le chariot attelé des deux onaggas, prenaient la route du corral et y couraient au grand trot.", |
| "neut_book_s09_0091": "À une certaine époque, la terre n'était formée que d'une écorce élastique, soumise à des mouvements alternatifs de haut et de bas, en vertu des lois de l'attraction.", |
| "neut_book_s09_0307": "L'île Tabor, sorte de côte basse, à peine émergée des flots, n'était pas éloignée de plus de quinze milles.", |
| "neut_parl_s01_0429": "A défaut, je suggérerai à l'Assemblée de le rejeter.", |
| "neut_parl_s01_0715": "Si peu de choses furent au rendez-vous !", |
| "neut_parl_s02_0152": "Là, vous évoquez les difficultés de quelques branches spécifiques.", |
| "neut_parl_s03_0695": "Nous refusons ce deux poids, deux mesures.", |
| "neut_parl_s03_0704": "En première lecture, onze jours et onze nuits ont été consacrés à ce texte.", |
| "neut_parl_s05_0355": "Cela ne changera rien à la compétitivité du pays ou au chômage.", |
| } |
|
|
| OUTPUT_DIR = Path("/home/spice/projects/tts-model-exploration/speech-dev-remi/comparisons/cesame") |
| LORA_PATH = Path("/home/spice/projects/tts-model-exploration/finetuning/cesame/lora_cesame_v2") |
| SAMPLE_RATE = 24000 |
|
|
| device = torch.device("cuda:0") |
|
|
|
|
def _to_float_array(audio_output):
    """Normalize the possible shapes of ``model.generate(output_audio=True)`` output
    into a flat float32 numpy array.

    Handles, in order: a non-empty list/tuple of tensors or of objects exposing
    ``audio_values``; a bare object exposing ``audio_values``; a bare tensor.

    Raises:
        ValueError: if the output is none of the recognized shapes.
    """
    if isinstance(audio_output, (list, tuple)) and len(audio_output) > 0:
        first = audio_output[0]
        if isinstance(first, torch.Tensor):
            tensor = first
        elif hasattr(first, 'audio_values'):
            tensor = first.audio_values.squeeze()
        else:
            raise ValueError(f"Unexpected type: {type(first)}")
    elif hasattr(audio_output, 'audio_values'):
        tensor = audio_output.audio_values.squeeze()
    elif isinstance(audio_output, torch.Tensor):
        tensor = audio_output.squeeze()
    else:
        raise ValueError(f"Unexpected output: {type(audio_output)}")
    # float32 before numpy conversion (4-bit/bf16 tensors can't convert directly);
    # flatten() collapses any remaining batch/channel dims to mono samples.
    return tensor.to(torch.float32).cpu().numpy().astype("float32").flatten()


def generate_all(model, processor, output_subdir):
    """Synthesize every utterance in TEXTS with `model` and write one WAV per stem.

    Args:
        model: a CSM model (optionally LoRA-wrapped) supporting ``generate(output_audio=True)``.
        processor: the matching HF processor used to tokenize the tagged text.
        output_subdir: Path; created if missing, receives ``<stem>.wav`` files.

    Failures are per-utterance: an exception is printed (with traceback) and the
    loop continues with the next text, so one bad generation doesn't kill the batch.
    """
    output_subdir.mkdir(parents=True, exist_ok=True)
    for stem, text in TEXTS.items():
        print(f" [{stem}] {text[:50]}...")
        try:
            # CSM expects a leading speaker tag; speaker 0 throughout.
            text_with_speaker = f"[0]{text}"
            inputs = processor(text_with_speaker, add_special_tokens=True, return_tensors="pt").to(device)

            with torch.no_grad():
                audio_output = model.generate(**inputs, output_audio=True)

            audio = _to_float_array(audio_output)

            # Rescale only when the signal would clip at full scale.
            peak = np.max(np.abs(audio))
            if peak > 1.0:
                audio = audio / peak * 0.99

            sf.write(str(output_subdir / f"{stem}.wav"), audio, SAMPLE_RATE)
            print(f" OK ({len(audio)/SAMPLE_RATE:.1f}s)")
        except Exception as e:
            # Best-effort batch: report and move on (traceback imported at module top).
            print(f" ERROR: {e}")
            traceback.print_exc()
|
|
|
|
def main():
    """Run the three-way comparison: baseline synthesis, LoRA synthesis, reference copy."""
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    banner = "=" * 60

    # --- Stage 1: baseline CSM, quantized to 4 bits via unsloth ---
    print(banner)
    print("CESAME CSM BASELINE (4-bit)")
    print(banner)
    from unsloth import FastModel
    model, _tokenizer = FastModel.from_pretrained(
        model_name="unsloth/csm-1b",
        max_seq_length=2048,
        dtype=None,
        auto_model=CsmForConditionalGeneration,
        load_in_4bit=True,
    )
    processor = AutoProcessor.from_pretrained("unsloth/csm-1b")
    model.eval()
    generate_all(model, processor, OUTPUT_DIR / "baseline")

    # --- Stage 2: same base model with the CeSAMe LoRA adapter attached ---
    print("\n" + banner)
    print("CESAME CSM FINETUNED (4-bit + LoRA)")
    print(banner)
    print(f"Loading LoRA from {LORA_PATH}...")
    model = PeftModel.from_pretrained(model, str(LORA_PATH))
    model.eval()
    print("LoRA loaded")
    generate_all(model, processor, OUTPUT_DIR / "finetuned")

    # --- Stage 3: copy the ground-truth SIWIS recordings for side-by-side listening ---
    print("\nCopying original SIWIS audio...")
    original_dir = OUTPUT_DIR / "original"
    original_dir.mkdir(exist_ok=True)
    siwis_wavs = Path("/home/spice/speech/app/liquid-audio/french_finetuning/data/raw/siwis/SiwisFrenchSpeechSynthesisDatabase/wavs")
    for stem in TEXTS:
        matches = list(siwis_wavs.rglob(f"{stem}.wav"))
        if matches:
            shutil.copy2(matches[0], original_dir / f"{stem}.wav")

    print("\nDone!")
|
|
|
|
# Script entry point: run the full baseline/finetuned/original comparison.
if __name__ == "__main__":
    main()
|
|