"""
Upload the fixed model.py to HuggingFace
Run this script to update your model on HuggingFace
"""
from huggingface_hub import HfApi
import os
# Fixed model.py content with lazy loading
MODEL_PY_CONTENT = '''import sys
import os
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)
from transformers import PreTrainedModel, PretrainedConfig, AutoConfig
import torch
import numpy as np
from f5_tts.infer.utils_infer import (
    infer_process,
    load_model,
    load_vocoder,
    preprocess_ref_audio_text,
)
from f5_tts.model import DiT
import soundfile as sf
import io
from pydub import AudioSegment, silence
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
class INF5Config(PretrainedConfig):
    model_type = "inf5"

    def __init__(self, ckpt_path: str = "checkpoints/model_best.pt", vocab_path: str = "checkpoints/vocab.txt",
                 speed: float = 1.0, remove_sil: bool = True, **kwargs):
        super().__init__(**kwargs)
        self.ckpt_path = ckpt_path
        self.vocab_path = vocab_path
        self.speed = speed
        self.remove_sil = remove_sil
class INF5Model(PreTrainedModel):
    config_class = INF5Config

    def __init__(self, config):
        super().__init__(config)
        # Store the target device privately: PreTrainedModel already exposes a
        # read-only `device` property, so assigning to self.device would raise.
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # CRITICAL FIX: Don't load vocoder/model in __init__.
        # Use lazy loading instead to avoid meta tensor issues.
        self._vocoder = None
        self._ema_model = None
        # Store vocab path for lazy loading
        try:
            self._vocab_path = hf_hub_download(config.name_or_path, filename="checkpoints/vocab.txt")
        except Exception:
            self._vocab_path = "checkpoints/vocab.txt"
    @property
    def vocoder(self):
        """Lazy load vocoder only when needed (avoids meta tensor issues)"""
        if self._vocoder is None:
            print("⚙️ Loading vocoder on-demand...")
            # Force regular device context (not meta)
            with torch.device('cpu'):
                self._vocoder = load_vocoder(vocoder_name="vocos", is_local=False, device='cpu')
            # Move to target device if not CPU
            if self._device.type != 'cpu':
                self._vocoder = self._vocoder.to(self._device)
            self._vocoder = self._vocoder.eval()
            print(f"✅ Vocoder loaded on {self._device}")
        return self._vocoder
    @property
    def ema_model(self):
        """Lazy load ema_model only when needed"""
        if self._ema_model is None:
            print("⚙️ Loading EMA model on-demand...")
            self._ema_model = load_model(
                DiT,
                dict(dim=1024, depth=22, heads=16, ff_mult=2, text_dim=512, conv_layers=4),
                mel_spec_type="vocos",
                vocab_file=self._vocab_path,
                device=self._device,
            )
            self._ema_model = self._ema_model.eval()
            print(f"✅ EMA model loaded on {self._device}")
        return self._ema_model
    def forward(self, text: str, ref_audio_path: str, ref_text: str, speed: float = None):
        """
        Generate speech given a reference audio & text input.

        Args:
            text (str): The text to be synthesized.
            ref_audio_path (str): Path to the reference audio file.
            ref_text (str): The reference text.
            speed (float): Override speed (optional).

        Returns:
            np.array: Generated waveform.
        """
        if not os.path.exists(ref_audio_path):
            raise FileNotFoundError(f"Reference audio file {ref_audio_path} not found.")
        # Use config speed if not provided
        if speed is None:
            speed = self.config.speed
        # Load reference audio & text
        ref_audio, ref_text = preprocess_ref_audio_text(ref_audio_path, ref_text)
        # Access properties to trigger lazy loading
        ema_model = self.ema_model
        vocoder = self.vocoder
        # Ensure on correct device
        ema_model.to(self._device)
        vocoder.to(self._device)
        # Perform inference
        audio, final_sample_rate, _ = infer_process(
            ref_audio,
            ref_text,
            text,
            ema_model,
            vocoder,
            mel_spec_type="vocos",
            speed=speed,
            device=self._device,
        )
        # Convert to pydub format and remove silence if needed
        buffer = io.BytesIO()
        sf.write(buffer, audio, samplerate=24000, format="WAV")
        buffer.seek(0)
        audio_segment = AudioSegment.from_file(buffer, format="wav")
        if self.config.remove_sil:
            non_silent_segs = silence.split_on_silence(
                audio_segment,
                min_silence_len=1000,
                silence_thresh=-50,
                keep_silence=500,
                seek_step=10,
            )
            non_silent_wave = sum(non_silent_segs, AudioSegment.silent(duration=0))
            audio_segment = non_silent_wave
        # Normalize loudness
        target_dBFS = -20.0
        change_in_dBFS = target_dBFS - audio_segment.dBFS
        audio_segment = audio_segment.apply_gain(change_in_dBFS)
        return np.array(audio_segment.get_array_of_samples())
'''
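# Consumer-side usage — a minimal, hypothetical sketch (not part of the upload
# flow), assuming the repo's config.json maps INF5Config/INF5Model via auto_map
# so trust_remote_code can resolve this model.py; the reference audio path and
# texts below are placeholders:
#
#   from transformers import AutoModel
#   model = AutoModel.from_pretrained("svp19/INF5", trust_remote_code=True)
#   waveform = model(
#       text="Text to synthesize.",
#       ref_audio_path="ref.wav",
#       ref_text="Transcript of the reference clip.",
#   )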
def upload_fixed_model():
    """Upload the fixed model.py to HuggingFace"""
    repo_id = "svp19/INF5"  # Your repo

    # Save the fixed model.py locally
    with open("model.py", "w", encoding="utf-8") as f:
        f.write(MODEL_PY_CONTENT)
    print("📝 Saved fixed model.py locally")

    # Upload to HuggingFace
    api = HfApi()
    try:
        api.upload_file(
            path_or_fileobj="model.py",
            path_in_repo="model.py",
            repo_id=repo_id,
            repo_type="model",
            commit_message="Fix: Use lazy loading for vocoder to avoid meta tensor issues",
        )
        print(f"✅ Successfully uploaded fixed model.py to {repo_id}")
        print(f"🔗 https://huggingface.co/{repo_id}/blob/main/model.py")
    except Exception as e:
        print(f"❌ Upload failed: {e}")
        raise

    # Clean up
    os.remove("model.py")
    print("🧹 Cleaned up local file")
if __name__ == "__main__":
    print("=" * 60)
    print("🚀 Uploading Fixed model.py to HuggingFace")
    print("=" * 60)
    upload_fixed_model()
    print("\n✨ Done! Now redeploy your Cerebrium app")
    print("   Run: cerebrium deploy --no-cache")