"""Live transcribe → edit → publish → translate (EN→ZH) → TTS pipeline (MVP).

Transcribes audio with faster-whisper into editable chunks, lets an editor
publish lines, translates published lines via Tencent TMT, and synthesizes
speech via Tencent TTS. State is kept in memory (single-process MVP).
"""

import os
import time
import base64
import hashlib
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

import gradio as gr
from faster_whisper import WhisperModel

# Tencent Cloud SDK
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
# Tencent TMT (Translate)
from tencentcloud.tmt.v20180321 import tmt_client, models as tmt_models
# Tencent TTS (Text-to-Speech)
from tencentcloud.tts.v20190823 import tts_client, models as tts_models

# ======================
# Config
# ======================
MODEL_NAME = os.getenv("WHISPER_MODEL", "small")  # CPU: small; if slow -> base
DEVICE = "cpu"
COMPUTE_TYPE = "int8"

# Tencent region
TENCENT_REGION = os.getenv("TENCENT_REGION", "ap-shanghai").strip()

# Tencent TTS voice types
# Default voice types:
# - ZH default: 0 (often "云小宁" default timbre)
# - EN: 101001 is commonly used in docs as an example timbre ID; if it fails,
#   set your own in Secrets.
VOICE_EN = int(os.getenv("TENCENT_TTS_VOICE_EN", "101001"))
VOICE_ZH = int(os.getenv("TENCENT_TTS_VOICE_ZH", "0"))

# Generate TTS only for latest published line (to avoid load)
TTS_GENERATE_MODE = "latest_only"  # keep MVP stable


# ======================
# Helpers
# ======================
def _now_ms() -> int:
    """Current wall-clock time in integer milliseconds."""
    return int(time.time() * 1000)


def _session_id() -> str:
    """Millisecond timestamp string used as an in-memory session key."""
    return str(_now_ms())


def _hash(s: str) -> str:
    """Short (12 hex chars) SHA-256 digest of *s*, used for cache keys."""
    return hashlib.sha256(s.encode("utf-8")).hexdigest()[:12]


def _require_env(name: str) -> str:
    """Return the non-empty value of env var *name* or raise RuntimeError."""
    v = os.getenv(name, "").strip()
    if not v:
        raise RuntimeError(f"Missing env: {name}. Set it in HF Space Settings → Secrets.")
    return v


# ======================
# Tencent Clients
# ======================
# Lazily-built singletons so credentials are only required on first use.
_TMT_CLIENT: Optional[tmt_client.TmtClient] = None
_TTS_CLIENT: Optional[tts_client.TtsClient] = None


def _make_client(endpoint: str):
    """Build (credential, client_profile) for a Tencent Cloud endpoint.

    Raises RuntimeError (via _require_env) if the secret env vars are unset.
    """
    secret_id = _require_env("TENCENT_SECRET_ID")
    secret_key = _require_env("TENCENT_SECRET_KEY")
    cred = credential.Credential(secret_id, secret_key)
    httpProfile = HttpProfile()
    httpProfile.endpoint = endpoint
    clientProfile = ClientProfile()
    clientProfile.httpProfile = httpProfile
    return cred, clientProfile


def get_tmt_client() -> tmt_client.TmtClient:
    """Return the process-wide TMT (translation) client, creating it once."""
    global _TMT_CLIENT
    if _TMT_CLIENT is not None:
        return _TMT_CLIENT
    cred, clientProfile = _make_client("tmt.tencentcloudapi.com")
    _TMT_CLIENT = tmt_client.TmtClient(cred, TENCENT_REGION, clientProfile)
    return _TMT_CLIENT


def get_tts_client() -> tts_client.TtsClient:
    """Return the process-wide TTS client, creating it once."""
    global _TTS_CLIENT
    if _TTS_CLIENT is not None:
        return _TTS_CLIENT
    cred, clientProfile = _make_client("tts.tencentcloudapi.com")
    _TTS_CLIENT = tts_client.TtsClient(cred, TENCENT_REGION, clientProfile)
    return _TTS_CLIENT


# ======================
# Whisper Model
# ======================
whisper = WhisperModel(MODEL_NAME, device=DEVICE, compute_type=COMPUTE_TYPE)


# ======================
# In-memory State (MVP)
# ======================
@dataclass
class Chunk:
    chunk_id: int
    start_s: float
    end_s: float
    raw_text_en: str
    edited_text_en: str
    status: str = "raw"  # raw / published
    rev: int = 0
    zh_text: str = ""  # translation (on publish)
    tts_en_path: str = ""  # cached mp3 filepath
    tts_zh_path: str = ""  # cached mp3 filepath


STATE: Dict[str, List[Chunk]] = {}  # session_id -> chunks

# caches across sessions (MVP)
TRANS_CACHE: Dict[str, str] = {}  # key -> zh text
TTS_CACHE: Dict[str, str] = {}  # key -> mp3 path


# ======================
# Translation (EN -> ZH) with caching
# ======================
def translate_en_to_zh(text_en: str) -> str:
    """Translate English text to Chinese via Tencent TMT, with caching.

    Returns "" for empty input. Network/API errors propagate to the caller.
    """
    text_en = (text_en or "").strip()
    if not text_en:
        return ""
    key = f"tmt:en->zh:{_hash(text_en)}"
    if key in TRANS_CACHE:
        return TRANS_CACHE[key]
    client = get_tmt_client()
    req = tmt_models.TextTranslateRequest()
    req.SourceText = text_en
    req.Source = "en"
    req.Target = "zh"
    req.ProjectId = 0
    resp = client.TextTranslate(req)
    out = getattr(resp, "TargetText", "") or ""
    TRANS_CACHE[key] = out
    return out


# ======================
# TTS (Text -> mp3) with caching
# ======================
def tts_to_mp3(text: str, voice_type: int) -> str:
    """Synthesize *text* to an mp3 file via Tencent TTS; return its path.

    Returns "" for empty input or an empty API response. Results are cached
    per (voice_type, text-hash) and written under outputs/.
    """
    text = (text or "").strip()
    if not text:
        return ""
    key = f"tts:{voice_type}:{_hash(text)}"
    if key in TTS_CACHE:
        return TTS_CACHE[key]
    client = get_tts_client()
    req = tts_models.TextToVoiceRequest()
    req.Text = text
    req.SessionId = key
    req.ModelType = 1
    req.VoiceType = voice_type
    req.Volume = 5
    req.Speed = 0
    req.SampleRate = 16000
    req.Codec = "mp3"
    resp = client.TextToVoice(req)
    audio_b64 = getattr(resp, "Audio", "") or ""
    if not audio_b64:
        return ""
    audio_bytes = base64.b64decode(audio_b64)
    out_dir = "outputs"
    os.makedirs(out_dir, exist_ok=True)
    path = os.path.join(out_dir, f"{key}.mp3")
    with open(path, "wb") as f:
        f.write(audio_bytes)
    TTS_CACHE[key] = path
    return path


# ======================
# Core pipeline
# ======================
def transcribe_to_chunks(audio_path: str, session_id: str) -> str:
    """Transcribe *audio_path* into Chunk records stored under *session_id*.

    Returns a one-line status summary for the UI.
    """
    segments, info = whisper.transcribe(audio_path, vad_filter=True)
    detected = getattr(info, "language", None) or "auto"
    chunks: List[Chunk] = []
    cid = 0
    for seg in segments:
        txt = (seg.text or "").strip()
        if not txt:
            continue  # drop empty/silence-only segments
        chunks.append(Chunk(
            chunk_id=cid,
            start_s=float(seg.start),
            end_s=float(seg.end),
            raw_text_en=txt,
            edited_text_en=txt,
            status="raw",
            rev=0,
            zh_text="",
            tts_en_path="",
            tts_zh_path="",
        ))
        cid += 1
    STATE[session_id] = chunks
    return f"OK: {len(chunks)} chunks · detected_lang={detected} · model={MODEL_NAME}/{COMPUTE_TYPE}"


def editor_table(session_id: str):
    """Render the session's chunks as rows for the editor table widget."""
    rows = []
    for c in STATE.get(session_id, []):
        rows.append([
            c.chunk_id,
            f"{c.start_s:.2f}-{c.end_s:.2f}",
            c.status,
            c.raw_text_en,
            c.edited_text_en,
            c.zh_text,
            c.rev,
        ])
    return rows


def publish_one(session_id: str, chunk_id: int, edited_text_en: str):
    """Publish one chunk (optionally with edited text) and translate it.

    Returns (status message, refreshed editor table). chunk_id is used as a
    direct index into the session's chunk list.
    """
    chunks = STATE.get(session_id, [])
    if chunk_id < 0 or chunk_id >= len(chunks):
        return "Chunk ID out of range", editor_table(session_id)
    c = chunks[chunk_id]
    if edited_text_en and edited_text_en.strip():
        c.edited_text_en = edited_text_en.strip()
    c.status = "published"
    c.rev += 1
    # Translate after publish (cost control + higher quality)
    try:
        c.zh_text = translate_en_to_zh(c.edited_text_en)
        msg = f"Published #{chunk_id} rev={c.rev} · translated"
    except Exception as e:
        c.zh_text = ""
        msg = f"Published #{chunk_id} rev={c.rev} · translation failed: {str(e)}"
    # Reset TTS cache for this chunk if text changed
    c.tts_en_path = ""
    c.tts_zh_path = ""
    return msg, editor_table(session_id)


def publish_all(session_id: str):
    """Publish every not-yet-published chunk and translate it.

    Returns (status message with ok/fail counts, refreshed editor table).
    """
    chunks = STATE.get(session_id, [])
    ok, fail = 0, 0
    # NOTE(review): translation + TTS-path reset are applied only to chunks
    # being newly published (idempotent on repeat calls) — confirm intent.
    for c in chunks:
        if c.status != "published":
            c.status = "published"
            c.rev += 1
            if not c.zh_text and c.edited_text_en:
                try:
                    c.zh_text = translate_en_to_zh(c.edited_text_en)
                    ok += 1
                except Exception:
                    # best-effort: count the failure, keep publishing the rest
                    fail += 1
            c.tts_en_path = ""
            c.tts_zh_path = ""
    return f"Published ALL · translated_ok={ok} fail={fail}", editor_table(session_id)


# NOTE(review): the remainder of this file (render_audience_html, ...) is
# truncated in this view and is preserved verbatim below, unmodified.
# ====================== # Audience rendering + TTS generation (stable MVP) # ====================== def render_audience_html(chunks: List[Chunk], view_lang: str) -> str: # show last 50 published published = [c for c in chunks if c.status == "published"][-50:] def one(c: Chunk) -> str: en = (c.edited_text_en or c.raw_text_en).strip() zh = (c.zh_text or "").strip() text = zh if view_lang == "zh" else en return ( "