# kani-tts-bg-refaudio-server-code / render_radio_play.py
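"""Render a multi-speaker radio play script through the local KaniTTS FastAPI server.

The script file is parsed line by line (see parse_script below), each line is split into
synthesis-sized chunks, each chunk is synthesized with the matching speaker's reference
audio, and the resulting PCM is concatenated with short pauses into a single WAV plus a
JSON timing report.
"""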
import argparse
import json
import re
import time
from dataclasses import dataclass
from pathlib import Path

import numpy as np
import requests
import soundfile as sf

SAMPLE_RATE = 22050
PUNCTUATION = ("!", "?", ".", ",")
LINE_PATTERN = re.compile(r"^\[(?P<speaker>[^\]]+)\]:\s*(?P<text>.*)$")
@dataclass
class ScriptLine:
    line_number: int
    speaker: str
    text: str


@dataclass
class Chunk:
    segment_id: int
    chunk_id: int
    line_number: int
    speaker: str
    text: str
    reference_audio_path: Path
def parse_script(script_path: Path) -> list[ScriptLine]:
    items: list[ScriptLine] = []
    for line_number, raw_line in enumerate(script_path.read_text(encoding="utf-8").splitlines(), start=1):
        stripped = raw_line.strip()
        if not stripped:
            continue
        match = LINE_PATTERN.match(stripped)
        if not match:
            raise ValueError(f"Invalid script line {line_number}: {raw_line}")
        text = re.sub(r"\s+", " ", match.group("text")).strip()
        if not text:
            continue
        items.append(ScriptLine(line_number=line_number, speaker=match.group("speaker"), text=text))
    return items
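# parse_script expects one spoken line per script row in the form "[Speaker]: text",
# matching LINE_PATTERN above. Blank lines are skipped; any other shape is an error.
# Illustrative example (speaker names are hypothetical):
#   [Narrator]: It was a dark and stormy night.
#   [Maria]: Who is there?
# Each speaker name must have a matching reference WAV (see build_chunks below).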
def split_text(text: str, max_chars: int) -> list[str]:
    remaining = text.strip()
    parts: list[str] = []
    while len(remaining) > max_chars:
        window = remaining[:max_chars]
        split_at = max(window.rfind(mark) for mark in PUNCTUATION)
        if split_at <= 0:
            split_at = window.rfind(" ")
            if split_at <= 0:
                split_at = max_chars
        else:
            split_at += 1
        part = remaining[:split_at].strip()
        if part:
            parts.append(part)
        remaining = remaining[split_at:].lstrip()
    if remaining:
        parts.append(remaining)
    return parts
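# Illustrative split: split_text("Hello there. How are you doing today?", max_chars=20)
# yields ["Hello there.", "How are you doing", "today?"] -- the split point prefers the
# last punctuation mark inside the window, then the last space, then a hard cut.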
def build_chunks(lines: list[ScriptLine], ref_dir: Path, max_chars: int) -> list[Chunk]:
    chunks: list[Chunk] = []
    for segment_id, line in enumerate(lines):
        ref_path = ref_dir / f"{line.speaker}.wav"
        if not ref_path.exists():
            raise FileNotFoundError(f"Missing reference audio for speaker '{line.speaker}': {ref_path}")
        for chunk_id, part in enumerate(split_text(line.text, max_chars), start=1):
            chunks.append(
                Chunk(
                    segment_id=segment_id,
                    chunk_id=chunk_id,
                    line_number=line.line_number,
                    speaker=line.speaker,
                    text=part,
                    reference_audio_path=ref_path,
                )
            )
    return chunks
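# segment_id tracks the index of the originating script line, while chunk_id numbers the
# parts a long line was split into; main() uses this to insert a shorter pause between
# chunks of the same line and a longer pause between different lines.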
def synthesize_chunk(session: requests.Session, server_url: str, chunk: Chunk) -> tuple[np.ndarray, dict]:
    payload = {
        "input": chunk.text,
        "model": "tts-1",
        "reference_audio_path": str(chunk.reference_audio_path),
        "response_format": "pcm",
        "enable_long_form": False,
    }
    started = time.perf_counter()
    response = session.post(server_url, json=payload, timeout=300)
    elapsed = time.perf_counter() - started
    response.raise_for_status()
    pcm = np.frombuffer(response.content, dtype=np.int16).astype(np.float32) / 32768.0
    duration = len(pcm) / SAMPLE_RATE
    return pcm, {
        "segment_id": chunk.segment_id,
        "chunk_id": chunk.chunk_id,
        "line_number": chunk.line_number,
        "speaker": chunk.speaker,
        "text": chunk.text,
        "reference_audio_path": str(chunk.reference_audio_path),
        "latency_seconds": elapsed,
        "audio_duration_seconds": duration,
        "rtf": elapsed / duration if duration else None,
        "pcm_bytes": len(response.content),
    }
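# The request mirrors the OpenAI /v1/audio/speech schema, extended with the
# reference_audio_path and enable_long_form fields understood by the companion KaniTTS
# server (an assumption about that server's contract). The response body is treated as
# headerless 16-bit mono PCM at SAMPLE_RATE (22050 Hz) and converted to float32 in [-1, 1).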
def main() -> None:
    parser = argparse.ArgumentParser(description="Render a radio play script through the local KaniTTS FastAPI server.")
    parser.add_argument("--script", type=Path, required=True)
    parser.add_argument("--ref-dir", type=Path, required=True)
    parser.add_argument("--output-dir", type=Path, required=True)
    parser.add_argument("--server-url", default="http://127.0.0.1:8010/v1/audio/speech")
    parser.add_argument("--max-chars", type=int, default=180)
    parser.add_argument("--same-line-pause-ms", type=float, default=60.0)
    parser.add_argument("--line-pause-ms", type=float, default=180.0)
    args = parser.parse_args()

    lines = parse_script(args.script)
    chunks = build_chunks(lines, args.ref_dir, args.max_chars)

    args.output_dir.mkdir(parents=True, exist_ok=True)
    output_wav = args.output_dir / f"{args.script.stem}_radio_play.wav"
    output_json = args.output_dir / f"{args.script.stem}_report.json"

    same_line_pause = np.zeros(int(SAMPLE_RATE * args.same_line_pause_ms / 1000.0), dtype=np.float32)
    line_pause = np.zeros(int(SAMPLE_RATE * args.line_pause_ms / 1000.0), dtype=np.float32)

    rendered_audio: list[np.ndarray] = []
    report_chunks: list[dict] = []
    total_started = time.perf_counter()

    with requests.Session() as session:
        for index, chunk in enumerate(chunks, start=1):
            audio, chunk_report = synthesize_chunk(session, args.server_url, chunk)
            rendered_audio.append(audio)
            report_chunks.append(chunk_report)
            next_chunk = chunks[index] if index < len(chunks) else None
            if next_chunk is not None:
                if next_chunk.segment_id == chunk.segment_id:
                    rendered_audio.append(same_line_pause)
                else:
                    rendered_audio.append(line_pause)
            print(
                f"[{index}/{len(chunks)}] {chunk.speaker} line {chunk.line_number} part {chunk.chunk_id}: "
                f"{chunk_report['audio_duration_seconds']:.2f}s audio in {chunk_report['latency_seconds']:.2f}s "
                f"RTF={chunk_report['rtf']:.3f}"
            )

    total_elapsed = time.perf_counter() - total_started
    full_audio = np.concatenate(rendered_audio) if rendered_audio else np.zeros(0, dtype=np.float32)
    sf.write(output_wav, full_audio, SAMPLE_RATE)
    total_audio_duration = len(full_audio) / SAMPLE_RATE

    report = {
        "script": str(args.script),
        "ref_dir": str(args.ref_dir),
        "server_url": args.server_url,
        "max_chars": args.max_chars,
        "source_lines": len(lines),
        "rendered_chunks": len(chunks),
        "total_latency_seconds": total_elapsed,
        "total_audio_duration_seconds": total_audio_duration,
        "overall_rtf": total_elapsed / total_audio_duration if total_audio_duration else None,
        "output_wav": str(output_wav),
        "chunks": report_chunks,
    }
    output_json.write_text(json.dumps(report, ensure_ascii=False, indent=2), encoding="utf-8")

    print("\nRender complete")
    print(f"Output WAV: {output_wav}")
    print(f"Report JSON: {output_json}")
    print(f"Source lines: {len(lines)}")
    print(f"Rendered chunks: {len(chunks)}")
    print(f"Total audio duration: {total_audio_duration:.2f}s")
    print(f"Total latency: {total_elapsed:.2f}s")
    print(f"Overall RTF: {report['overall_rtf']:.3f}")


if __name__ == "__main__":
    main()
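# Example invocation (paths are illustrative; adjust to your setup):
#   python render_radio_play.py \
#       --script scripts/episode1.txt \
#       --ref-dir reference_audio \
#       --output-dir renders
# This assumes the KaniTTS FastAPI server is already listening on
# http://127.0.0.1:8010/v1/audio/speech (the --server-url default above).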