import os
import sys
import subprocess
import traceback
import gradio as gr
import numpy as np
import librosa
import spaces
import torch
from pathlib import Path
from huggingface_hub import snapshot_download

REPO_URL = "https://github.com/fishaudio/fish-speech.git"
REPO_DIR = "fish-speech"

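# Bootstrap: clone the fish-speech repo on first launch, then chdir into it and
# put it on sys.path so its modules are imported straight from the working tree
# (this Space uses the upstream source directly rather than a pip package).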
if not os.path.exists(REPO_DIR):
    subprocess.run(["git", "clone", REPO_URL, REPO_DIR], check=True)

os.chdir(REPO_DIR)
sys.path.insert(0, os.getcwd())

from fish_speech.models.text2semantic.inference import init_model, generate_long

device = "cuda" if torch.cuda.is_available() else "cpu"
precision = torch.bfloat16

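# Download the s2-pro checkpoint from the Hugging Face Hub; snapshot_download
# returns the local cache directory containing the weights.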
checkpoint_dir = snapshot_download(repo_id="fishaudio/s2-pro")

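# init_model loads the text2semantic (autoregressive) model and returns it
# together with the single-token decode function used during generation.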
llama_model, decode_one_token = init_model(
    checkpoint_path=checkpoint_dir,
    device=device,
    precision=precision,
    compile=False,
)

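# Pre-allocate KV caches for incremental decoding: batch size 1, up to the
# model's configured maximum sequence length, in the model's own dtype.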
with torch.device(device):
    llama_model.setup_caches(
        max_batch_size=1,
        max_seq_len=llama_model.config.max_seq_len,
        dtype=next(llama_model.parameters()).dtype,
    )


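# Build the DAC-VQ codec from its Hydra config and load the matching weights.
# The codec converts audio to discrete VQ indices and back.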
def load_codec(codec_checkpoint_path, target_device, target_precision):
    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    cfg = OmegaConf.load(Path("fish_speech/configs/modded_dac_vq.yaml"))
    codec = instantiate(cfg)

    state_dict = torch.load(codec_checkpoint_path, map_location="cpu")
    # Lightning-style checkpoints nest the weights under a "state_dict" key.
    if "state_dict" in state_dict:
        state_dict = state_dict["state_dict"]
    # GAN-trained codec checkpoints prefix generator weights; keep only those,
    # with the prefix stripped, so they match the codec module's parameter names.
    if any(k.startswith("generator.") for k in state_dict):
        state_dict = {
            k.removeprefix("generator."): v
            for k, v in state_dict.items()
            if k.startswith("generator.")
        }

    codec.load_state_dict(state_dict, strict=False)
    codec.eval()
    codec.to(device=target_device, dtype=target_precision)
    return codec


codec_model = load_codec(os.path.join(checkpoint_dir, "codec.pth"), device, precision)


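# Encode a reference clip into VQ token indices; these act as the voice prompt
# for zero-shot cloning. Trimmed to the valid feature length before returning.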
@torch.no_grad()
def encode_reference_audio(audio_path):
    wav_np, _ = librosa.load(audio_path, sr=codec_model.sample_rate, mono=True)
    wav = torch.from_numpy(wav_np).to(device)
    model_dtype = next(codec_model.parameters()).dtype
    audios = wav[None, None, :].to(dtype=model_dtype)
    audio_lengths = torch.tensor([wav.shape[0]], device=device, dtype=torch.long)
    indices, feature_lengths = codec_model.encode(audios, audio_lengths)
    return indices[0, :, : feature_lengths[0]]


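# Inverse of the step above: turn merged VQ indices back into a waveform.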
@torch.no_grad()
def decode_codes_to_audio(merged_codes):
    audio = codec_model.from_indices(merged_codes[None])
    return audio[0, 0]


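# faster-whisper is loaded lazily on first use, so startup stays fast and the
# model is only paid for when auto-transcription is actually requested.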
whisper_model = None


def get_whisper_model():
    global whisper_model
    if whisper_model is None:
        from faster_whisper import WhisperModel
        # int8 compute keeps memory modest; fall back to CPU if no GPU is attached.
        whisper_device = "cuda" if torch.cuda.is_available() else "cpu"
        whisper_model = WhisperModel("large-v3", device=whisper_device, compute_type="int8")
    return whisper_model


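# Runs on a ZeroGPU worker for up to 60 s; failures are surfaced as gr.Error
# so they show up in the UI instead of crashing the worker.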
@spaces.GPU(duration=60)
def transcribe_audio(audio_path):
    if audio_path is None:
        raise gr.Error("Please upload a reference audio file first.")
    try:
        gr.Info("Transcribing audio with Whisper large-v3...")
        model = get_whisper_model()
        segments, info = model.transcribe(audio_path, beam_size=5, vad_filter=True)
        text = " ".join(seg.text.strip() for seg in segments).strip()
        if not text:
            raise gr.Error("Whisper could not detect any speech in the audio.")
        gr.Info(f"Detected language: {info.language} ({info.language_probability:.0%} confidence)")
        return text
    except gr.Error:
        raise
    except Exception as e:
        traceback.print_exc()
        raise gr.Error(f"Transcription error: {str(e)}")


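# Rough speech-duration heuristic: ~0.4 s per word (about 150 wpm), floored
# at 5 s. Used only for the progress message, not for generation itself.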
def estimate_duration(text):
    words = len(text.split())
    seconds = max(5, int(words * 0.4))
    return seconds


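# Main TTS pipeline: text (plus optional voice prompt) -> semantic/VQ codes
# via generate_long, then codes -> waveform via the codec. duration=180
# requests up to ~180 s of ZeroGPU time per call.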
@spaces.GPU(duration=180)
def tts_inference(
    text,
    ref_audio,
    ref_text,
    max_new_tokens,
    chunk_length,
    top_p,
    repetition_penalty,
    temperature,
):
    try:
        if not text or not text.strip():
            raise gr.Error("Please enter some text to synthesize.")

        est = estimate_duration(text)
        gr.Info(f"Generating audio... expecting roughly {est}s of speech for this text.")

        prompt_tokens_list = None
        if ref_audio is not None and ref_text and ref_text.strip():
            prompt_tokens_list = [encode_reference_audio(ref_audio).cpu()]

        generator = generate_long(
            model=llama_model,
            device=device,
            decode_one_token=decode_one_token,
            text=text,
            num_samples=1,
            max_new_tokens=max_new_tokens,
            top_p=top_p,
            top_k=30,
            temperature=temperature,
            repetition_penalty=repetition_penalty,
            compile=False,
            iterative_prompt=True,
            chunk_length=chunk_length,
            prompt_text=[ref_text] if prompt_tokens_list is not None else None,
            prompt_tokens=prompt_tokens_list,
        )

        codes = []
        for response in generator:
            if response.action == "sample":
                codes.append(response.codes)
            elif response.action == "next":
                break

        if not codes:
            raise gr.Error("No audio was generated. Please check your input text.")

        merged_codes = codes[0] if len(codes) == 1 else torch.cat(codes, dim=1)
        merged_codes = merged_codes.to(device)

        audio_waveform = decode_codes_to_audio(merged_codes)
        audio_np = audio_waveform.cpu().float().numpy()
        audio_np = (audio_np * 32767).clip(-32768, 32767).astype(np.int16)

        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        return (codec_model.sample_rate, audio_np)

    except gr.Error:
        raise
    except Exception as e:
        traceback.print_exc()
        raise gr.Error(f"Inference error: {str(e)}")


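# A sample of commonly used inline emotion tags, rendered in the UI below.
# Free-form bracketed descriptions are also accepted by the model.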
TAGS = [
    "[pause]", "[emphasis]", "[laughing]", "[inhale]", "[chuckle]", "[tsk]",
    "[singing]", "[excited]", "[laughing tone]", "[interrupting]", "[chuckling]",
    "[excited tone]", "[volume up]", "[echo]", "[angry]", "[low volume]", "[sigh]",
    "[low voice]", "[whisper]", "[screaming]", "[shouting]", "[loud]", "[surprised]",
    "[short pause]", "[exhale]", "[delight]", "[panting]", "[audience laughter]",
    "[with strong accent]", "[volume down]", "[clearing throat]", "[sad]",
    "[moaning]", "[shocked]",
]

TAGS_HTML = " ".join(
    f'<code style="margin:2px;display:inline-block">{t}</code>' for t in TAGS
)

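# Gradio UI: input text and optional voice-cloning controls on the left,
# generated audio and the tag reference on the right.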
with gr.Blocks(title="Fish Audio S2 Pro") as app:

    gr.Markdown(
        f"""
        <div style="text-align:center;max-width:900px;margin:0 auto;padding:24px 0 8px">
            <h1 style="font-size:2.4rem;font-weight:800;color:#1E3A8A;margin-bottom:6px">
                🐟 Fish Audio S2 Pro
            </h1>
            <p style="font-size:1.05rem;color:#4B5563;margin-bottom:8px">
                State-of-the-Art Dual-Autoregressive Text-to-Speech &nbsp;·&nbsp;
                <a href="https://huggingface.co/fishaudio/s2-pro" target="_blank" style="color:#2563EB">Model Page ↗</a>
                &nbsp;·&nbsp;
                <a href="https://github.com/fishaudio/fish-speech" target="_blank" style="color:#2563EB">GitHub ↗</a>
            </p>
            <p style="font-size:0.95rem;color:#6B7280">
                80+ languages supported · Zero-shot voice cloning · 15,000+ inline emotion tags
            </p>
        </div>
        """
    )

    with gr.Row():
        with gr.Column(scale=5):
            gr.Markdown("### ✍️ Input Text")
            text_input = gr.Textbox(
                show_label=False,
                placeholder="Type the text you want to synthesize.\nLanguage is auto-detected – write in any language.\nAdd emotion tags like [laugh] or [whisper in small voice] anywhere in the text.",
                lines=7,
            )

            with gr.Accordion("🎙️ Voice Cloning – Optional", open=False):
                gr.Markdown(
                    "Upload a clean **5–10 second** audio clip and provide its exact transcription. "
                    "The model will clone that voice for synthesis. Language is inferred automatically."
                )
                ref_audio = gr.Audio(label="Reference Audio", type="filepath")
                transcribe_btn = gr.Button("🎤 Auto-transcribe with Whisper", variant="secondary", size="sm")
                ref_text = gr.Textbox(
                    label="Reference Audio Transcription",
                    placeholder="Exact transcription of the reference audio, or click Auto-transcribe above...",
                )

            with gr.Accordion("⚙️ Advanced Settings", open=False):
                with gr.Row():
                    max_new_tokens = gr.Slider(0, 2048, 1024, step=8, label="Max New Tokens (0 = auto)")
                    chunk_length = gr.Slider(100, 400, 200, step=8, label="Chunk Length")
                with gr.Row():
                    top_p = gr.Slider(0.1, 1.0, 0.7, step=0.01, label="Top-P")
                    repetition_penalty = gr.Slider(0.9, 2.0, 1.2, step=0.01, label="Repetition Penalty")
                    temperature = gr.Slider(0.1, 1.0, 0.7, step=0.01, label="Temperature")

            generate_btn = gr.Button("🚀 Generate Audio", variant="primary", size="lg")

        with gr.Column(scale=4):
            gr.Markdown("### 🎧 Result")
            audio_output = gr.Audio(
                label="Generated Audio",
                type="numpy",
                interactive=False,
                autoplay=True,
            )

            gr.Markdown(
                f"""
                <div style="background:#EFF6FF;padding:16px;border-radius:10px;margin-top:16px">
                    <h4 style="margin:0 0 8px;color:#1D4ED8">🏷️ Supported Emotion Tags</h4>
                    <p style="font-size:0.85rem;color:#374151;margin-bottom:8px">
                        15,000+ unique tags supported. Use free-form descriptions like
                        <code>[whisper in small voice]</code> or <code>[professional broadcast tone]</code>.
                        Common tags:
                    </p>
                    <div style="line-height:2">{TAGS_HTML}</div>
                </div>
                """
            )

    gr.Markdown(
        """
        <div style="background:#F0FDF4;padding:16px;border-radius:10px;margin-top:8px">
            <h4 style="margin:0 0 8px;color:#166534">🌍 Supported Languages</h4>
            <p style="font-size:0.9rem;color:#374151;margin:0">
                <strong>Tier 1:</strong> Japanese · English · Chinese &nbsp;|&nbsp;
                <strong>Tier 2:</strong> Korean · Spanish · Portuguese · Arabic · Russian · French · German<br>
                <strong>Also supported:</strong> sv, it, tr, no, nl, cy, eu, ca, da, gl, ta, hu, fi, pl, et, hi,
                la, ur, th, vi, jw, bn, yo, sl, cs, sw, nn, he, ms, uk, id, kk, bg, lv, my, tl, sk, ne, fa,
                af, el, bo, hr, ro, sn, mi, yi, am, be, km, is, az, sd, br, sq, ps, mn, ht, ml, sr, sa, te,
                ka, bs, pa, lt, kn, si, hy, mr, as, gu, fo, and more.
                Language is <strong>auto-detected</strong> from the input text – no configuration needed.
            </p>
        </div>
        """
    )

    gr.Markdown("### 🌟 Examples")
    gr.Examples(
        examples=[
            ["Hello world! This is a test of the Fish Audio S2 Pro model.", None, "", 1024, 200, 0.7, 1.2, 0.7],
            ["I can't believe it! [laugh] This is absolutely amazing!", None, "", 1024, 200, 0.7, 1.2, 0.7],
            ["[whisper in small voice] I have a secret to tell you... promise you won't tell anyone?", None, "", 1024, 200, 0.7, 1.2, 0.7],
            ["Olá! Este modelo suporta português nativamente, sem configuração extra.", None, "", 1024, 200, 0.7, 1.2, 0.7],
            ["[excited] 日本語も話せます！すごいでしょう？", None, "", 1024, 200, 0.7, 1.2, 0.7],
        ],
        inputs=[text_input, ref_audio, ref_text, max_new_tokens, chunk_length, top_p, repetition_penalty, temperature],
        outputs=[audio_output],
        fn=tts_inference,
        cache_examples=False,
    )

    transcribe_btn.click(
        fn=transcribe_audio,
        inputs=[ref_audio],
        outputs=[ref_text],
    )

    generate_btn.click(
        fn=tts_inference,
        inputs=[text_input, ref_audio, ref_text, max_new_tokens, chunk_length, top_p, repetition_penalty, temperature],
        outputs=[audio_output],
    )

if __name__ == "__main__":
    app.launch()