| """FastAPI server for Kani TTS with streaming support""" |
|
|
| import binascii |
| import io |
| import os |
| from fastapi import FastAPI, HTTPException |
| from fastapi.middleware.cors import CORSMiddleware |
| from fastapi.responses import StreamingResponse, Response |
| from pydantic import BaseModel, Field |
| from typing import Optional, Literal |
| import numpy as np |
| from scipy.io.wavfile import write as wav_write |
| import base64 |
| import json |
| import torch |
|
|
| from audio import LLMAudioPlayer, StreamingAudioWriter |
| from generation.vllm_generator import VLLMTTSGenerator |
| from config import ( |
| CHUNK_SIZE, |
| LOOKBACK_FRAMES, |
| TEMPERATURE, |
| TOP_P, |
| MAX_TOKENS, |
| LONG_FORM_THRESHOLD_SECONDS, |
| LONG_FORM_SILENCE_DURATION, |
| LONG_FORM_CHUNK_DURATION, |
| REF_AUDIO_SECONDS, |
| GPU_MEMORY_UTILIZATION, |
| MAX_MODEL_LEN, |
| MODEL_NAME, |
| ) |
|
|
| from nemo.utils.nemo_logging import Logger |
|
|
# Silence NeMo's default stream handlers so model logging does not flood
# the server's stdout alongside our own status prints.
nemo_logger = Logger()
nemo_logger.remove_stream_handlers()
|
|
|
|
app = FastAPI(title="Kani TTS API", version="1.0.0")

# Permissive CORS so browser clients from any origin can call the API.
# NOTE(review): `allow_origins=["*"]` is development-friendly; tighten for
# production deployments.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Populated once by the startup event; endpoints treat None as "not ready"
# (see the 503 guard in /v1/audio/speech and the /health report).
generator = None  # VLLMTTSGenerator instance
player = None  # LLMAudioPlayer instance
|
|
|
|
class TTSRequest(BaseModel):
    """Basic TTS request; optional fields default to values from config."""
    text: str
    temperature: Optional[float] = TEMPERATURE  # sampling temperature
    max_tokens: Optional[int] = MAX_TOKENS  # cap on generated tokens
    top_p: Optional[float] = TOP_P  # nucleus-sampling threshold
    chunk_size: Optional[int] = CHUNK_SIZE  # streaming decode chunk size
    lookback_frames: Optional[int] = LOOKBACK_FRAMES  # decode overlap frames
|
|
|
|
class OpenAISpeechRequest(BaseModel):
    """OpenAI-compatible speech request model.

    The voice is cloned from reference audio supplied either as a server-side
    file path or as base64-encoded WAV bytes; exactly one of the two must be
    provided (enforced in `_resolve_reference_tokens`).
    """
    input: str = Field(..., description="Text to convert to speech")
    model: Literal["tts-1", "tts-1-hd", "gpt-4o-mini-tts"] = Field(default="tts-1", description="TTS model to use")
    voice: Optional[str] = Field(default=None, description="Deprecated for this ref-audio server")
    reference_audio_path: Optional[str] = Field(default=None, description="Server-side path to a WAV file used as voice reference")
    reference_audio_base64: Optional[str] = Field(default=None, description="Base64-encoded WAV bytes used as voice reference")
    ref_seconds: Optional[float] = Field(default=REF_AUDIO_SECONDS, description="How many seconds to take from the reference audio")
    response_format: Literal["wav", "pcm"] = Field(default="wav", description="Audio format: wav or pcm")
    stream_format: Optional[Literal["sse", "audio"]] = Field(default=None, description="Use 'sse' for Server-Sent Events streaming")

    # Long-form controls. Defaults come from config (instead of hard-coded
    # 12.0 / 0.2) so the schema stays consistent with the endpoint's
    # `... or LONG_FORM_*` fallbacks.
    enable_long_form: Optional[bool] = Field(default=True, description="Auto-detect and use long-form generation for long texts")
    max_chunk_duration: Optional[float] = Field(default=LONG_FORM_CHUNK_DURATION, description="Max duration per chunk in long-form mode (seconds)")
    silence_duration: Optional[float] = Field(default=LONG_FORM_SILENCE_DURATION, description="Silence between chunks in long-form mode (seconds)")
|
|
|
|
@app.on_event("startup")
async def startup_event():
    """Initialize models on startup.

    Populates the module-level `generator` and `player` globals that every
    endpoint depends on; until this completes, /v1/audio/speech returns 503.
    """
    global generator, player
    print("🚀 Initializing VLLM TTS models...")

    generator = VLLMTTSGenerator(
        tensor_parallel_size=1,
        gpu_memory_utilization=GPU_MEMORY_UTILIZATION,
        max_model_len=MAX_MODEL_LEN,
    )

    # Engine setup is asynchronous and must finish before serving requests.
    await generator.initialize_engine()

    # The player reuses the generator's tokenizer to decode audio tokens.
    player = LLMAudioPlayer(generator.tokenizer)
    print("✅ VLLM TTS models initialized successfully!")
|
|
|
|
@app.get("/health")
async def health_check():
    """Report whether the TTS pipeline is loaded and ready to serve."""
    ready = generator is not None and player is not None
    return {
        "status": "healthy",
        "tts_initialized": ready,
        "model_path": MODEL_NAME,
    }
|
|
|
|
def _resolve_reference_tokens(request: OpenAISpeechRequest):
    """Turn the request's reference audio (path or base64) into audio tokens.

    Exactly one of `reference_audio_path` / `reference_audio_base64` must be
    set. Returns the result of `player.prepare_reference_audio_tokens`;
    raises HTTPException(400) on missing/ambiguous input, a bad path, invalid
    base64, or any preparation failure.
    """
    has_path = bool(request.reference_audio_path)
    has_b64 = bool(request.reference_audio_base64)

    if not has_path and not has_b64:
        raise HTTPException(status_code=400, detail="reference_audio_path or reference_audio_base64 is required")
    if has_path and has_b64:
        raise HTTPException(status_code=400, detail="Provide only one of reference_audio_path or reference_audio_base64")

    seconds = request.ref_seconds or REF_AUDIO_SECONDS
    try:
        if has_path:
            if not os.path.exists(request.reference_audio_path):
                raise HTTPException(status_code=400, detail=f"Reference audio not found: {request.reference_audio_path}")
            return player.prepare_reference_audio_tokens(
                reference_audio_path=request.reference_audio_path,
                ref_seconds=seconds,
            )

        try:
            audio_bytes = base64.b64decode(request.reference_audio_base64)
        except (binascii.Error, ValueError) as exc:
            raise HTTPException(status_code=400, detail=f"Invalid reference_audio_base64: {exc}") from exc

        return player.prepare_reference_audio_tokens(
            reference_audio_bytes=audio_bytes,
            ref_seconds=seconds,
        )
    except HTTPException:
        # Deliberate HTTP errors pass through untouched.
        raise
    except Exception as exc:
        raise HTTPException(status_code=400, detail=f"Failed to encode reference audio: {exc}") from exc
|
|
|
|
@app.post("/v1/audio/speech")
async def openai_speech(request: OpenAISpeechRequest):
    """OpenAI-compatible speech generation endpoint.

    Supports both streaming (SSE) and non-streaming modes:
    - Without stream_format: Returns complete audio file (WAV or PCM)
    - With stream_format="sse": Returns Server-Sent Events with audio chunks

    Inputs whose estimated duration exceeds LONG_FORM_THRESHOLD_SECONDS are
    (when `enable_long_form` is not disabled) split into sentence chunks,
    generated sequentially, and joined with short silences.
    """
    if not generator or not player:
        raise HTTPException(status_code=503, detail="TTS models not initialized")

    prompt_text = request.input
    reference_audio_tokens, reference_frames = _resolve_reference_tokens(request)

    if request.stream_format == "sse":
        async def sse_generator():
            """Generate Server-Sent Events with audio chunks."""
            import asyncio
            import queue as thread_queue
            from generation.chunking import estimate_duration, split_into_sentences

            # Generation runs as an asyncio task; decoded chunks are handed to
            # this consumer through a thread-safe queue so we can emit each SSE
            # event as soon as its audio is ready.
            chunk_queue = thread_queue.Queue()

            estimated_duration = estimate_duration(request.input)
            # FIX: honor the documented `enable_long_form` flag (it was
            # previously ignored). Default (True/None) keeps prior behavior.
            use_long_form = (
                request.enable_long_form is not False
                and estimated_duration > LONG_FORM_THRESHOLD_SECONDS
            )

            input_token_count = 0
            output_token_count = 0

            def make_streaming_writer():
                """Build a StreamingAudioWriter whose chunk list forwards every
                appended chunk straight into chunk_queue (dedupes the ChunkList
                hack previously copy-pasted in both branches)."""
                class ChunkList(list):
                    def append(self, chunk):
                        super().append(chunk)
                        chunk_queue.put(("chunk", chunk))

                writer = StreamingAudioWriter(
                    player,
                    output_file=None,
                    chunk_size=CHUNK_SIZE,
                    lookback_frames=LOOKBACK_FRAMES
                )
                writer.audio_chunks = ChunkList()
                return writer

            if use_long_form:
                print(f"[Server] Using long-form SSE streaming (estimated {estimated_duration:.1f}s)")

                async def generate_async_long_form():
                    nonlocal input_token_count, output_token_count
                    try:
                        chunks = split_into_sentences(request.input, max_duration_seconds=request.max_chunk_duration or LONG_FORM_CHUNK_DURATION)
                        total_chunks = len(chunks)

                        for i, text_chunk in enumerate(chunks):
                            audio_writer = make_streaming_writer()
                            audio_writer.start()

                            result = await generator._generate_async(
                                text_chunk,
                                audio_writer,
                                max_tokens=MAX_TOKENS,
                                reference_audio_tokens=reference_audio_tokens,
                            )
                            audio_writer.finalize()

                            # Accumulate usage for the final "done" event.
                            input_token_count += len(generator.prepare_input(text_chunk, reference_audio_tokens=reference_audio_tokens))
                            output_token_count += len(result.get('all_token_ids', []))

                            # Short silence between chunks (not after the last).
                            if i < total_chunks - 1:
                                silence_samples = int((request.silence_duration or LONG_FORM_SILENCE_DURATION) * 22050)
                                silence = np.zeros(silence_samples, dtype=np.float32)
                                chunk_queue.put(("chunk", silence))

                        chunk_queue.put(("done", {"input": input_token_count, "output": output_token_count}))
                    except Exception as e:
                        print(f"Generation error: {e}")
                        import traceback
                        traceback.print_exc()
                        chunk_queue.put(("error", str(e)))

                gen_task = asyncio.create_task(generate_async_long_form())
            else:
                print(f"[Server] Using standard SSE streaming (estimated {estimated_duration:.1f}s)")

                audio_writer = make_streaming_writer()

                async def generate_async():
                    nonlocal input_token_count, output_token_count
                    try:
                        audio_writer.start()
                        result = await generator._generate_async(
                            prompt_text,
                            audio_writer,
                            max_tokens=MAX_TOKENS,
                            reference_audio_tokens=reference_audio_tokens,
                        )
                        audio_writer.finalize()

                        input_token_count = len(generator.prepare_input(prompt_text, reference_audio_tokens=reference_audio_tokens))
                        output_token_count = len(result.get('all_token_ids', []))

                        chunk_queue.put(("done", {"input": input_token_count, "output": output_token_count}))
                    except Exception as e:
                        print(f"Generation error: {e}")
                        import traceback
                        traceback.print_exc()
                        chunk_queue.put(("error", str(e)))

                gen_task = asyncio.create_task(generate_async())

            try:
                while True:
                    # Block in a worker thread so the event loop stays free;
                    # the 30s timeout guards against a stalled producer.
                    msg_type, data = await asyncio.get_event_loop().run_in_executor(
                        None, lambda: chunk_queue.get(timeout=30)
                    )

                    if msg_type == "chunk":
                        # float32 in [-1, 1] -> 16-bit signed PCM.
                        pcm_data = (data * 32767).astype(np.int16)
                        audio_base64 = base64.b64encode(pcm_data.tobytes()).decode('utf-8')

                        event_data = {
                            "type": "speech.audio.delta",
                            "audio": audio_base64
                        }
                        yield f"data: {json.dumps(event_data)}\n\n"

                    elif msg_type == "done":
                        token_counts = data
                        event_data = {
                            "type": "speech.audio.done",
                            "usage": {
                                "input_tokens": token_counts["input"],
                                "output_tokens": token_counts["output"],
                                "total_tokens": token_counts["input"] + token_counts["output"]
                            }
                        }
                        yield f"data: {json.dumps(event_data)}\n\n"
                        break

                    elif msg_type == "error":
                        error_data = {
                            "type": "error",
                            "error": data
                        }
                        yield f"data: {json.dumps(error_data)}\n\n"
                        break

            finally:
                # Make sure the generation task completes (or surfaces its
                # error) even if the client disconnects mid-stream.
                await gen_task

        return StreamingResponse(
            sse_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "X-Accel-Buffering": "no"
            }
        )

    else:
        try:
            from generation.chunking import estimate_duration
            estimated_duration = estimate_duration(request.input)

            # FIX: use the shared config threshold (was a hard-coded 15.0)
            # so streaming and non-streaming paths agree, and honor the
            # previously ignored `enable_long_form` flag.
            use_long_form = (
                request.enable_long_form is not False
                and estimated_duration > LONG_FORM_THRESHOLD_SECONDS
            )

            if use_long_form:
                print(f"[Server] Using long-form generation (estimated {estimated_duration:.1f}s)")
                result = await generator.generate_long_form_async(
                    text=request.input,
                    reference_audio_tokens=reference_audio_tokens,
                    player=player,
                    max_chunk_duration=request.max_chunk_duration or LONG_FORM_CHUNK_DURATION,
                    silence_duration=request.silence_duration or LONG_FORM_SILENCE_DURATION,
                    max_tokens=MAX_TOKENS
                )
                full_audio = result['audio']
            else:
                print(f"[Server] Using standard generation (estimated {estimated_duration:.1f}s)")
                result = await generator._generate_async(
                    prompt_text,
                    audio_writer=None,
                    max_tokens=MAX_TOKENS,
                    reference_audio_tokens=reference_audio_tokens,
                    player=player,
                )

                if not result.get("all_token_ids"):
                    raise HTTPException(status_code=500, detail="No audio generated")

                full_audio, _ = player.get_waveform(torch.tensor(result["all_token_ids"], dtype=torch.long))

            if request.response_format == "pcm":
                # Raw 16-bit PCM; stream parameters travel in custom headers.
                pcm_data = (full_audio * 32767).astype(np.int16)
                return Response(
                    content=pcm_data.tobytes(),
                    media_type="application/octet-stream",
                    headers={
                        "Content-Type": "application/octet-stream",
                        "X-Sample-Rate": "22050",
                        "X-Channels": "1",
                        "X-Bit-Depth": "16"
                    }
                )
            else:
                # Package as a WAV container at 22.05 kHz.
                wav_buffer = io.BytesIO()
                wav_write(wav_buffer, 22050, full_audio)
                wav_buffer.seek(0)

                return Response(
                    content=wav_buffer.read(),
                    media_type="audio/wav"
                )

        except HTTPException:
            # FIX: let deliberate HTTP errors (e.g. "No audio generated")
            # propagate instead of being re-wrapped by the handler below
            # with a mangled detail string.
            raise
        except Exception as e:
            print(e)
            raise HTTPException(status_code=500, detail=str(e))
|
|
|
|
@app.get("/")
async def root():
    """Describe the API and list its available endpoints."""
    endpoints = {
        "/v1/audio/speech": "POST - ref-audio speech generation",
        "/health": "GET - Health check",
    }
    return {
        "name": "Kani TTS API",
        "version": "1.0.0",
        "endpoints": endpoints,
    }
|
|
|
|
if __name__ == "__main__":
    # Run the API directly under uvicorn when executed as a script
    # (deployments may instead point an ASGI server at `app`).
    import uvicorn
    print("🎤 Starting Kani TTS Server...")
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
|