import io
import os
import sys

# Make sibling modules importable when this file is loaded as remote code.
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)

import numpy as np
import soundfile as sf
import torch
from huggingface_hub import hf_hub_download
from pydub import AudioSegment, silence
from transformers import PreTrainedModel, PretrainedConfig

from f5_tts.infer.utils_infer import (
    infer_process,
    load_model,
    load_vocoder,
    preprocess_ref_audio_text,
)
from f5_tts.model import DiT


class INF5Config(PretrainedConfig):
    model_type = "inf5"

    def __init__(self, ckpt_repo_id: str = None, vocab_repo_id: str = None,
                 ckpt_filename: str = None, vocab_filename: str = "vocab.txt",
                 speed: float = 1.0, remove_sil: bool = True, **kwargs):
        super().__init__(**kwargs)
        self.ckpt_repo_id = ckpt_repo_id
        self.vocab_repo_id = vocab_repo_id
        self.ckpt_filename = ckpt_filename
        self.vocab_filename = vocab_filename
        self.speed = speed
        self.remove_sil = remove_sil
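
# Optional: to load this pair via AutoModel.from_pretrained(...,
# trust_remote_code=True), the config and model can be registered with the
# transformers auto classes. A minimal sketch using the standard
# registration API:
#
#   from transformers import AutoConfig, AutoModel
#   AutoConfig.register("inf5", INF5Config)
#   AutoModel.register(INF5Config, INF5Model)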


class INF5Model(PreTrainedModel):
    config_class = INF5Config

    def __init__(self, config):
        super().__init__(config)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # The Vocos vocoder turns generated mel spectrograms back into waveforms.
        self.vocoder = torch.compile(
            load_vocoder(vocoder_name="vocos", is_local=False, device=device)
        )

        # Resolve the vocab file: prefer an explicit repo, else the model repo itself.
        vocab_repo = config.vocab_repo_id or config.name_or_path
        vocab_path = hf_hub_download(repo_id=vocab_repo, filename=config.vocab_filename)

        # Resolve the checkpoint the same way, probing common filenames on the Hub
        # unless an explicit filename was configured.
        ckpt_repo = config.ckpt_repo_id or config.name_or_path
        ckpt_candidates = [
            "model_26000.pt",
            "checkpoints/model.safetensors",
            "model.safetensors",
            "checkpoints/pytorch_model.bin",
            "pytorch_model.bin",
            "checkpoints/model.pt",
            "model.pt",
            "checkpoints/checkpoint.pt",
            "checkpoint.pt",
        ]
        if config.ckpt_filename:
            ckpt_candidates = [config.ckpt_filename]

        ckpt_path = None
        for fname in ckpt_candidates:
            try:
                ckpt_path = hf_hub_download(repo_id=ckpt_repo, filename=fname)
                print(f"Found checkpoint on hub: {fname} -> {ckpt_path}")
                break
            except Exception:
                continue

        if ckpt_path is None:
            raise RuntimeError(
                "Could not find a checkpoint file on the Hub. "
                "Tried: " + ", ".join(ckpt_candidates) + ".\n"
                "If your checkpoint is stored under a different path or name, "
                "update ckpt_candidates or pass the path via config (e.g. config.ckpt_filename). "
                "If the file is >5GB, ensure Git LFS is enabled for the repo (hf lfs-enable-largefiles)."
            )

        # Build the F5-TTS DiT backbone and load the resolved checkpoint.
        self.ema_model = torch.compile(
            load_model(
                DiT,
                dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4),
                mel_spec_type="vocos",
                vocab_file=vocab_path,
                device=device,
                ckpt_path=ckpt_path,
            )
        )

    def forward(self, text: str, ref_audio_path: str, ref_text: str):
        """
        Generate speech for `text`, cloning the voice of a reference clip.

        Args:
            text (str): The text to synthesize.
            ref_audio_path (str): Path to the reference audio file.
            ref_text (str): Transcript of the reference audio.

        Returns:
            np.ndarray: The generated waveform samples.
        """
        if not os.path.exists(ref_audio_path):
            raise FileNotFoundError(f"Reference audio file {ref_audio_path} not found.")

        # Clip the reference audio to a usable length and normalize its transcript.
        ref_audio, ref_text = preprocess_ref_audio_text(ref_audio_path, ref_text)

        self.ema_model.to(self.device)
        self.vocoder.to(self.device)

        audio, final_sample_rate, _ = infer_process(
            ref_audio,
            ref_text,
            text,
            self.ema_model,
            self.vocoder,
            mel_spec_type="vocos",
            speed=self.config.speed,
            device=self.device,
        )

        # Round-trip through an in-memory WAV so pydub can post-process the waveform.
        buffer = io.BytesIO()
        sf.write(buffer, audio, samplerate=final_sample_rate, format="WAV")
        buffer.seek(0)
        audio_segment = AudioSegment.from_file(buffer, format="wav")

        if self.config.remove_sil:
            # Drop long pauses: split on stretches of silence (>= 1 s below -50 dBFS),
            # keep 500 ms of padding around each chunk, then re-join the voiced parts.
            non_silent_segs = silence.split_on_silence(
                audio_segment,
                min_silence_len=1000,
                silence_thresh=-50,
                keep_silence=500,
                seek_step=10,
            )
            audio_segment = sum(non_silent_segs, AudioSegment.silent(duration=0))

        # Normalize loudness to a fixed target level.
        target_dBFS = -20.0
        audio_segment = audio_segment.apply_gain(target_dBFS - audio_segment.dBFS)

        return np.array(audio_segment.get_array_of_samples())


if __name__ == '__main__':
    model = INF5Model(INF5Config())
    model.save_pretrained("INF5")
    model.config.save_pretrained("INF5")
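
# Example usage once the checkpoint and vocab are on the Hub (a minimal
# sketch: the repo id, audio path, and transcript below are placeholders,
# not real values; 24000 is the Vocos output sample rate):
#
#   cfg = INF5Config(ckpt_repo_id="your-org/your-inf5-repo",
#                    vocab_repo_id="your-org/your-inf5-repo")
#   tts = INF5Model(cfg)
#   wave = tts(text="Hello there!",
#              ref_audio_path="ref.wav",
#              ref_text="transcript of ref.wav")
#   sf.write("out.wav", wave, 24000)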