STLooo committed on
Commit
4436daf
·
verified ·
1 Parent(s): 3028fe4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +315 -121
app.py CHANGED
@@ -1,208 +1,402 @@
1
- import hashlib
2
  import time
3
- from dataclasses import dataclass, asdict
4
- from typing import Dict, List, Optional
 
 
5
 
6
  import gradio as gr
7
  from faster_whisper import WhisperModel
8
 
9
- # ====== Config ======
10
- CHUNK_SECONDS = 4.0 # MVP建议 3~6 秒;CPU 上先用 4 秒更稳
11
- MODEL_NAME = "small" # CPU先 small;不够快再降到 base
12
- COMPUTE_TYPE = "int8"
 
 
 
 
 
 
 
 
 
 
 
 
13
  DEVICE = "cpu"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
- # ====== Model ======
16
- model = WhisperModel(MODEL_NAME, device=DEVICE, compute_type=COMPUTE_TYPE)
17
 
18
- # ====== In-memory state (MVP) ======
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  @dataclass
20
  class Chunk:
21
  chunk_id: int
22
  start_s: float
23
  end_s: float
24
- raw_text: str
25
- edited_text: str = ""
26
- status: str = "raw" # raw / published
27
- lang: str = "auto"
28
  rev: int = 0
 
 
 
29
 
30
- STATE: Dict[str, List[Chunk]] = {} # key: session_id -> chunks
31
 
32
- def _session_id():
33
- # simple per-browser session id
34
- return str(int(time.time()*1000))
35
 
36
- def _hash_text(s: str) -> str:
37
- return hashlib.sha256(s.encode("utf-8")).hexdigest()[:10]
38
 
39
- def transcribe_audio_to_chunks(audio_path: str, session_id: str) -> str:
40
- """
41
- Split-like behavior by asking whisper for timestamps; we treat each segment as a "chunk".
42
- This is more robust than naive slicing when we don't control ffmpeg.
43
- """
44
- segments, info = model.transcribe(audio_path, vad_filter=True)
45
- lang = getattr(info, "language", None) or "auto"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  chunks: List[Chunk] = []
48
  cid = 0
49
  for seg in segments:
50
- text = (seg.text or "").strip()
51
- if not text:
52
  continue
53
  chunks.append(Chunk(
54
  chunk_id=cid,
55
  start_s=float(seg.start),
56
  end_s=float(seg.end),
57
- raw_text=text,
58
- edited_text=text,
59
  status="raw",
60
- lang=lang,
61
- rev=0
 
 
62
  ))
63
  cid += 1
64
 
65
  STATE[session_id] = chunks
66
- return f"OK: {len(chunks)} chunks · detected_lang={lang}"
67
 
68
- def get_editor_table(session_id: str):
69
- chunks = STATE.get(session_id, [])
70
  rows = []
71
- for c in chunks:
72
  rows.append([
73
  c.chunk_id,
74
  f"{c.start_s:.2f}-{c.end_s:.2f}",
75
- c.lang,
76
  c.status,
77
- c.raw_text,
78
- c.edited_text,
 
79
  c.rev
80
  ])
81
  return rows
82
 
83
- def publish_one(session_id: str, chunk_id: int, edited_text: str):
84
  chunks = STATE.get(session_id, [])
85
  if chunk_id < 0 or chunk_id >= len(chunks):
86
- return "Chunk ID out of range", get_editor_table(session_id)
87
 
88
  c = chunks[chunk_id]
89
- c.edited_text = edited_text.strip() if edited_text else c.edited_text
 
 
90
  c.status = "published"
91
  c.rev += 1
92
- return f"Published chunk #{chunk_id} rev={c.rev}", get_editor_table(session_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
  def publish_all(session_id: str):
95
  chunks = STATE.get(session_id, [])
 
96
  for c in chunks:
97
  if c.status != "published":
98
  c.status = "published"
99
  c.rev += 1
100
- return f"Published ALL ({len(chunks)} chunks)", get_editor_table(session_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
 
102
- def audience_view(session_id: str, lang_choice: str):
103
  """
104
- MVP: 观众端只展示 published edited_text。
105
- 这里先不做翻译(你要英文/中文,下一版加翻译模块)。
106
  """
107
  chunks = STATE.get(session_id, [])
108
  published = [c for c in chunks if c.status == "published"]
 
 
109
 
110
- # 展示成简单 HTML,带一个免费TTS按钮(浏览器 speechSynthesis)
111
- lines = []
112
- for c in published[-50:]:
113
- text = c.edited_text or c.raw_text
114
- # 观众端语言选择先只影响TTS朗读语言,不做翻译(下一版做)
115
- lines.append(f"<div style='padding:10px 12px;border:1px solid #ddd;border-radius:10px;margin:10px 0;'>"
116
- f"<div style='font-size:12px;color:#666'>#{c.chunk_id} · {c.start_s:.2f}-{c.end_s:.2f} · {c.lang}</div>"
117
- f"<div class='txt'>{text}</div>"
118
- f"</div>")
119
- html = f"""
120
- <div>
121
- <div style="display:flex;gap:8px;align-items:center;flex-wrap:wrap;">
122
- <b>Audience (Published)</b>
123
- <button onclick="toggleTTS()" style="padding:8px 10px;">TTS: <span id='ttsState'>Off</span></button>
124
- </div>
125
- <div id="wrap">{''.join(lines) if lines else '<i>No published captions yet.</i>'}</div>
126
- </div>
127
- <script>
128
- let ttsOn = false;
129
- function toggleTTS(){{
130
- ttsOn = !ttsOn;
131
- document.getElementById('ttsState').innerText = ttsOn ? 'On' : 'Off';
132
- if(!ttsOn) window.speechSynthesis.cancel();
133
- }}
134
- function speakAll(){{
135
- if(!ttsOn) return;
136
- if(!('speechSynthesis' in window)) return;
137
- window.speechSynthesis.cancel();
138
- const nodes = document.querySelectorAll('.txt');
139
- const lang = "{'zh-CN' if lang_choice=='zh' else 'en-US'}";
140
- nodes.forEach(n => {{
141
- const u = new SpeechSynthesisUtterance(n.innerText);
142
- u.lang = lang;
143
- window.speechSynthesis.speak(u);
144
- }});
145
- }}
146
- // 点击任何字幕块就朗读该句(更实用)
147
- document.querySelectorAll('.txt').forEach(n => {{
148
- n.style.cursor = 'pointer';
149
- n.onclick = () => {{
150
- if(!ttsOn) return;
151
- window.speechSynthesis.cancel();
152
- const u = new SpeechSynthesisUtterance(n.innerText);
153
- u.lang = "{'zh-CN' if lang_choice=='zh' else 'en-US'}";
154
- window.speechSynthesis.speak(u);
155
- }};
156
- }});
157
- </script>
158
- """
159
- return html
160
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
161
  with gr.Blocks(title="Live Caption MVP (HF)") as demo:
162
- gr.Markdown("# Live Caption MVP (HF)\n上傳音檔 → Whisper 轉寫 → 校對 → 發佈 → 觀眾端字幕(可選免費TTS)")
 
 
 
163
 
164
- session_id = gr.State(_session_id())
165
 
166
  with gr.Tab("1) Ingest"):
167
- audio = gr.Audio(type="filepath", label="Upload audio (iPhone m4a/wav/mp3)")
 
168
  btn_run = gr.Button("Transcribe & Build Chunks")
169
  ingest_status = gr.Textbox(label="Status", interactive=False)
170
 
171
  with gr.Tab("2) Editor"):
172
- gr.Markdown("校對台:選 chunk_id,修改 edited_text,Publish。")
173
  table = gr.Dataframe(
174
- headers=["chunk_id", "time", "lang", "status", "raw_text", "edited_text", "rev"],
175
- datatype=["number","str","str","str","str","str","number"],
176
- row_count=10,
177
- col_count=(7, "fixed"),
178
  interactive=False
179
  )
180
  chunk_id_in = gr.Number(label="chunk_id", value=0, precision=0)
181
- edited_in = gr.Textbox(label="edited_text (paste here)", lines=3)
182
- btn_pub = gr.Button("Publish One")
183
- btn_pub_all = gr.Button("Publish All")
184
  editor_status = gr.Textbox(label="Editor Status", interactive=False)
185
 
186
  with gr.Tab("3) Audience"):
187
- lang_choice = gr.Radio(choices=["zh","en"], value="zh", label="TTS language (MVP only affects reading voice)")
 
 
 
188
  btn_refresh = gr.Button("Refresh Audience View")
189
- audience_html = gr.HTML()
 
 
190
 
191
- def _do_ingest(audio_path, sid):
 
192
  if not audio_path:
193
  return "Please upload an audio file first.", []
194
- msg = transcribe_audio_to_chunks(audio_path, sid)
195
- return msg, get_editor_table(sid)
196
 
197
- btn_run.click(_do_ingest, inputs=[audio, session_id], outputs=[ingest_status, table])
198
 
199
- def _pub_one(sid, cid, text):
200
- msg, rows = publish_one(sid, int(cid), text)
201
- return msg, rows
202
 
203
- btn_pub.click(_pub_one, inputs=[session_id, chunk_id_in, edited_in], outputs=[editor_status, table])
204
- btn_pub_all.click(lambda sid: publish_all(sid), inputs=[session_id], outputs=[editor_status, table])
205
 
206
- btn_refresh.click(lambda sid, lc: audience_view(sid, lc), inputs=[session_id, lang_choice], outputs=[audience_html])
207
 
208
  demo.launch()
 
1
+ import os
2
  import time
3
+ import base64
4
+ import hashlib
5
+ from dataclasses import dataclass
6
+ from typing import Dict, List, Optional, Tuple
7
 
8
  import gradio as gr
9
  from faster_whisper import WhisperModel
10
 
11
+ # Tencent Cloud SDK
12
+ from tencentcloud.common import credential
13
+ from tencentcloud.common.profile.client_profile import ClientProfile
14
+ from tencentcloud.common.profile.http_profile import HttpProfile
15
+
16
+ # Tencent TMT (Translate)
17
+ from tencentcloud.tmt.v20180321 import tmt_client, models as tmt_models
18
+
19
+ # Tencent TTS (Text-to-Speech)
20
+ from tencentcloud.tts.v20190823 import tts_client, models as tts_models
21
+
22
+
23
# ======================
# Config
# ======================
# Whisper model size; env-overridable so Spaces can trade accuracy for speed.
MODEL_NAME = os.getenv("WHISPER_MODEL", "small")  # CPU: small; if slow -> base
DEVICE = "cpu"
COMPUTE_TYPE = "int8"  # int8 quantization keeps CPU latency/memory low

# Tencent region (must match the account's enabled region)
TENCENT_REGION = os.getenv("TENCENT_REGION", "ap-shanghai").strip()

# Tencent TTS voice types
# Default voice types:
# - ZH default: 0 (often "云小宁" default timbre)
# - EN: 101001 is commonly used in docs as an example timbre ID; if it fails, set your own in Secrets.
VOICE_EN = int(os.getenv("TENCENT_TTS_VOICE_EN", "101001"))
VOICE_ZH = int(os.getenv("TENCENT_TTS_VOICE_ZH", "0"))

# Generate TTS only for latest published line (to avoid load)
# NOTE(review): currently informational only — no code branches on this value.
TTS_GENERATE_MODE = "latest_only"  # keep MVP stable
42
+
43
+
44
# ======================
# Helpers
# ======================
def _now_ms() -> int:
    """Current wall-clock time in whole milliseconds (truncated)."""
    millis = time.time() * 1000
    return int(millis)

def _session_id() -> str:
    """Millisecond timestamp used as a throwaway per-browser session key."""
    return str(_now_ms())

def _hash(s: str) -> str:
    """Short (12 hex chars) SHA-256 digest of *s*, used for cache keys."""
    digest = hashlib.sha256(s.encode("utf-8")).hexdigest()
    return digest[:12]

def _require_env(name: str) -> str:
    """Read a mandatory environment variable, stripped; raise if unset or blank."""
    value = os.getenv(name, "").strip()
    if value:
        return value
    raise RuntimeError(f"Missing env: {name}. Set it in HF Space Settings → Secrets.")
61
 
 
 
62
 
63
# ======================
# Tencent Clients
# ======================
# Lazily-created process-wide singletons; one SDK client per service.
_TMT_CLIENT: Optional[tmt_client.TmtClient] = None
_TTS_CLIENT: Optional[tts_client.TtsClient] = None

def _make_client(endpoint: str):
    """Build the (credential, client profile) pair for one Tencent Cloud endpoint.

    Raises RuntimeError (via _require_env) when the secret id/key secrets
    are not configured.
    """
    secret_id = _require_env("TENCENT_SECRET_ID")
    secret_key = _require_env("TENCENT_SECRET_KEY")

    cred = credential.Credential(secret_id, secret_key)

    # Names follow the Tencent Cloud SDK sample style.
    httpProfile = HttpProfile()
    httpProfile.endpoint = endpoint

    clientProfile = ClientProfile()
    clientProfile.httpProfile = httpProfile

    return cred, clientProfile

def get_tmt_client() -> tmt_client.TmtClient:
    """Return the shared TMT (translation) client, creating it on first use."""
    global _TMT_CLIENT
    if _TMT_CLIENT is not None:
        return _TMT_CLIENT

    cred, clientProfile = _make_client("tmt.tencentcloudapi.com")
    _TMT_CLIENT = tmt_client.TmtClient(cred, TENCENT_REGION, clientProfile)
    return _TMT_CLIENT

def get_tts_client() -> tts_client.TtsClient:
    """Return the shared TTS client, creating it on first use."""
    global _TTS_CLIENT
    if _TTS_CLIENT is not None:
        return _TTS_CLIENT

    cred, clientProfile = _make_client("tts.tencentcloudapi.com")
    _TTS_CLIENT = tts_client.TtsClient(cred, TENCENT_REGION, clientProfile)
    return _TTS_CLIENT
100
+
101
+
102
# ======================
# Whisper Model
# ======================
# Loaded once at module import; first run downloads the model weights.
whisper = WhisperModel(MODEL_NAME, device=DEVICE, compute_type=COMPUTE_TYPE)
106
+
107
+
108
# ======================
# In-memory State (MVP)
# ======================
@dataclass
class Chunk:
    """One transcribed segment plus its editing / translation / TTS lifecycle."""
    chunk_id: int           # 0-based index within the session
    start_s: float          # segment start time, seconds
    end_s: float            # segment end time, seconds
    raw_text_en: str        # Whisper output; never mutated after ingest
    edited_text_en: str     # human-corrected English text
    status: str = "raw"     # raw / published
    rev: int = 0            # bumped on every publish
    zh_text: str = ""       # translation (filled on publish)
    tts_en_path: str = ""   # cached mp3 filepath
    tts_zh_path: str = ""   # cached mp3 filepath
 
124
STATE: Dict[str, List[Chunk]] = {}  # session_id -> chunks

# caches across sessions (MVP); unbounded — acceptable for a short-lived Space process
TRANS_CACHE: Dict[str, str] = {}  # key -> zh text
TTS_CACHE: Dict[str, str] = {}  # key -> mp3 path
 
 
 
130
 
131
# ======================
# Translation (EN -> ZH) with caching
# ======================
def translate_en_to_zh(text_en: str) -> str:
    """Translate English text to Chinese via Tencent TMT, memoized by text hash.

    Returns "" for empty/whitespace input without touching the API.
    """
    source = (text_en or "").strip()
    if not source:
        return ""

    cache_key = f"tmt:en->zh:{_hash(source)}"
    cached = TRANS_CACHE.get(cache_key)
    if cached is not None:
        return cached

    request = tmt_models.TextTranslateRequest()
    request.SourceText = source
    request.Source = "en"
    request.Target = "zh"
    request.ProjectId = 0

    response = get_tmt_client().TextTranslate(request)
    translated = getattr(response, "TargetText", "") or ""
    TRANS_CACHE[cache_key] = translated
    return translated
+
155
+
156
# ======================
# TTS (Text -> mp3) with caching
# ======================
def tts_to_mp3(text: str, voice_type: int) -> str:
    """Synthesize *text* to an mp3 via Tencent TTS and return the file path.

    Results are memoized in TTS_CACHE keyed by (voice_type, text hash).
    Returns "" for empty input or when the API returns no audio.
    """
    text = (text or "").strip()
    if not text:
        return ""

    key = f"tts:{voice_type}:{_hash(text)}"
    if key in TTS_CACHE:
        return TTS_CACHE[key]

    client = get_tts_client()
    req = tts_models.TextToVoiceRequest()
    req.Text = text
    req.SessionId = key  # any per-request identifier is acceptable for TextToVoice
    req.ModelType = 1
    req.VoiceType = voice_type
    req.Volume = 5
    req.Speed = 0
    req.SampleRate = 16000
    req.Codec = "mp3"

    resp = client.TextToVoice(req)
    audio_b64 = getattr(resp, "Audio", "") or ""
    if not audio_b64:
        return ""

    audio_bytes = base64.b64decode(audio_b64)

    out_dir = "outputs"
    os.makedirs(out_dir, exist_ok=True)
    # BUGFIX: the cache key contains ':' which is illegal in Windows file names
    # (and awkward elsewhere); sanitize before using it as a file name.
    path = os.path.join(out_dir, f"{key.replace(':', '_')}.mp3")
    with open(path, "wb") as f:
        f.write(audio_bytes)

    TTS_CACHE[key] = path
    return path
+
195
+
196
# ======================
# Core pipeline
# ======================
def transcribe_to_chunks(audio_path: str, session_id: str) -> str:
    """Transcribe an audio file into Chunk records stored under *session_id*.

    Whisper's own segments define the chunk boundaries; empty segments are
    dropped. Returns a one-line status summary for the UI.
    """
    segments, info = whisper.transcribe(audio_path, vad_filter=True)
    detected = getattr(info, "language", None) or "auto"

    # Collect (start, end, text) for every non-empty segment first so chunk
    # ids stay dense even when segments are skipped.
    kept = []
    for seg in segments:
        stripped = (seg.text or "").strip()
        if stripped:
            kept.append((float(seg.start), float(seg.end), stripped))

    chunks = [
        Chunk(
            chunk_id=i,
            start_s=start,
            end_s=end,
            raw_text_en=text,
            edited_text_en=text,
            status="raw",
            rev=0,
            zh_text="",
            tts_en_path="",
            tts_zh_path="",
        )
        for i, (start, end, text) in enumerate(kept)
    ]

    STATE[session_id] = chunks
    return f"OK: {len(chunks)} chunks · detected_lang={detected} · model={MODEL_NAME}/{COMPUTE_TYPE}"
225
 
226
def editor_table(session_id: str):
    """Rows for the editor Dataframe: one row per chunk in session order."""
    return [
        [
            c.chunk_id,
            f"{c.start_s:.2f}-{c.end_s:.2f}",
            c.status,
            c.raw_text_en,
            c.edited_text_en,
            c.zh_text,
            c.rev,
        ]
        for c in STATE.get(session_id, [])
    ]
239
 
240
def publish_one(session_id: str, chunk_id: int, edited_text_en: str):
    """Publish one chunk: apply the edited English text, bump its revision,
    translate it to Chinese, and invalidate its cached TTS audio.

    Returns (status message, refreshed editor rows).
    """
    chunks = STATE.get(session_id, [])
    if not (0 <= chunk_id < len(chunks)):
        return "Chunk ID out of range", editor_table(session_id)

    chunk = chunks[chunk_id]
    new_text = (edited_text_en or "").strip()
    if new_text:
        chunk.edited_text_en = new_text

    chunk.status = "published"
    chunk.rev += 1

    # Translate after publish (cost control + higher quality)
    try:
        chunk.zh_text = translate_en_to_zh(chunk.edited_text_en)
        msg = f"Published #{chunk_id} rev={chunk.rev} · translated"
    except Exception as e:
        chunk.zh_text = ""
        msg = f"Published #{chunk_id} rev={chunk.rev} · translation failed: {str(e)}"

    # The text may have changed, so any cached audio is stale.
    chunk.tts_en_path = ""
    chunk.tts_zh_path = ""

    return msg, editor_table(session_id)
265
 
266
def publish_all(session_id: str):
    """Publish every still-raw chunk, translating those without a zh_text.

    Returns (status message with translated ok/fail counts, editor rows).
    """
    chunks = STATE.get(session_id, [])
    ok, fail = 0, 0
    for c in chunks:
        if c.status != "published":
            c.status = "published"
            c.rev += 1
            if not c.zh_text and c.edited_text_en:
                try:
                    c.zh_text = translate_en_to_zh(c.edited_text_en)
                    ok += 1
                except Exception:
                    # BUGFIX: was a bare `except:`, which also swallowed
                    # SystemExit/KeyboardInterrupt. Count the failure and move on.
                    fail += 1
            # Freshly published text invalidates any cached audio.
            c.tts_en_path = ""
            c.tts_zh_path = ""
    return f"Published ALL · translated_ok={ok} fail={fail}", editor_table(session_id)
282
+
283
+
284
# ======================
# Audience rendering + TTS generation (stable MVP)
# ======================
def render_audience_html(chunks: List[Chunk], view_lang: str) -> str:
    """Render the last 50 published chunks as caption cards.

    view_lang: "zh" shows the translation; anything else shows English.
    """
    from html import escape  # local import keeps this fix self-contained

    published = [c for c in chunks if c.status == "published"][-50:]
    if not published:
        return "<i>No published captions yet.</i>"

    def one(c: Chunk) -> str:
        en = (c.edited_text_en or c.raw_text_en).strip()
        zh = (c.zh_text or "").strip()
        text = zh if view_lang == "zh" else en
        # BUGFIX: transcribed/translated text is untrusted and was interpolated
        # into HTML verbatim; escape it so < > & cannot break (or inject into)
        # the audience page.
        return (
            "<div style='padding:10px 12px;border:1px solid #ddd;border-radius:10px;margin:10px 0;'>"
            f"<div style='font-size:12px;color:#666'>#{c.chunk_id} · {c.start_s:.2f}-{c.end_s:.2f}</div>"
            f"<div style='font-size:16px;line-height:1.45'>{escape(text)}</div>"
            "</div>"
        )

    return "".join(one(c) for c in published)
306
 
307
def ensure_latest_tts(session_id: str, view_lang: str) -> Tuple[str, Optional[str]]:
    """
    Returns (status_msg, audio_filepath_or_None) for the latest published chunk in selected language.
    This avoids heavy load and avoids relying on browser speechSynthesis.
    """
    chunks = STATE.get(session_id, [])
    published = [c for c in chunks if c.status == "published"]
    if not published:
        return "No published captions yet.", None

    # Only the newest published line gets audio (TTS_GENERATE_MODE latest_only).
    latest = published[-1]

    # Ensure translation exists if user wants ZH
    if view_lang == "zh" and not latest.zh_text:
        try:
            latest.zh_text = translate_en_to_zh(latest.edited_text_en)
        except Exception as e:
            return f"ZH translation failed: {str(e)}", None

    try:
        if view_lang == "en":
            # Per-chunk path acts as a cache; publish_* clears it when text changes.
            if not latest.tts_en_path:
                latest.tts_en_path = tts_to_mp3(latest.edited_text_en, VOICE_EN)
            return f"TTS ready (EN) for chunk #{latest.chunk_id}", latest.tts_en_path or None
        else:
            # Any non-"en" value falls through to the ZH voice.
            if not latest.tts_zh_path:
                latest.tts_zh_path = tts_to_mp3(latest.zh_text, VOICE_ZH)
            return f"TTS ready (ZH) for chunk #{latest.chunk_id}", latest.tts_zh_path or None
    except Exception as e:
        # tts_to_mp3 may raise on SDK/network errors; surface as a status message.
        return f"TTS failed: {str(e)}", None
337
+
338
def refresh_audience(session_id: str, view_lang: str):
    """Audience 'Refresh' handler: captions HTML, TTS status, latest mp3 path."""
    captions = render_audience_html(STATE.get(session_id, []), view_lang)
    tts_msg, audio_path = ensure_latest_tts(session_id, view_lang)
    return captions, tts_msg, audio_path
343
+
344
+
345
+ # ======================
346
+ # Gradio UI
347
+ # ======================
348
  with gr.Blocks(title="Live Caption MVP (HF)") as demo:
349
+ gr.Markdown(
350
+ "# Live Caption MVP (HF)\n"
351
+ "全英文轉寫 → 校對(EN)→ 自動翻譯(ZH)→ 發佈 → 觀眾端 EN/ZH 字幕 + 後端 TTS 生成 mp3 播放(不依賴手機瀏覽器 TTS)"
352
+ )
353
 
354
+ sid = gr.State(_session_id())
355
 
356
  with gr.Tab("1) Ingest"):
357
+ gr.Markdown("上傳 iPhone 錄音檔(m4a/wav/mp3)→ 轉寫切段(Whisper segments)")
358
+ audio = gr.Audio(type="filepath", label="Upload audio")
359
  btn_run = gr.Button("Transcribe & Build Chunks")
360
  ingest_status = gr.Textbox(label="Status", interactive=False)
361
 
362
  with gr.Tab("2) Editor"):
363
+ gr.Markdown("校對台:修改英文後 Publish系統自動翻譯成中文(只對 Publish 後內容翻譯,省錢且更準)。")
364
  table = gr.Dataframe(
365
+ headers=["chunk_id", "time", "status", "raw_en", "edited_en", "zh", "rev"],
366
+ datatype=["number", "str", "str", "str", "str", "str", "number"],
 
 
367
  interactive=False
368
  )
369
  chunk_id_in = gr.Number(label="chunk_id", value=0, precision=0)
370
+ edited_in = gr.Textbox(label="edited_en (paste here)", lines=3)
371
+ btn_pub_one = gr.Button("Publish One (translate)")
372
+ btn_pub_all = gr.Button("Publish All (translate missing)")
373
  editor_status = gr.Textbox(label="Editor Status", interactive=False)
374
 
375
  with gr.Tab("3) Audience"):
376
+ gr.Markdown(
377
+ "觀眾端:顯示已發佈字幕。按 Refresh 會同時產生「最新一句」的音檔(EN 或 ZH 取決於選擇),用播放器播放。"
378
+ )
379
+ view_lang = gr.Radio(choices=["en", "zh"], value="zh", label="View language")
380
  btn_refresh = gr.Button("Refresh Audience View")
381
+ aud_html = gr.HTML(label="Captions")
382
+ tts_status = gr.Textbox(label="TTS Status", interactive=False)
383
+ aud_audio = gr.Audio(label="Play latest line", type="filepath")
384
 
385
+ # ---- Actions ----
386
+ def _do_ingest(audio_path, session_id):
387
  if not audio_path:
388
  return "Please upload an audio file first.", []
389
+ msg = transcribe_to_chunks(audio_path, session_id)
390
+ return msg, editor_table(session_id)
391
 
392
+ btn_run.click(_do_ingest, inputs=[audio, sid], outputs=[ingest_status, table])
393
 
394
+ def _pub_one(session_id, cid, txt):
395
+ return publish_one(session_id, int(cid), txt)
 
396
 
397
+ btn_pub_one.click(_pub_one, inputs=[sid, chunk_id_in, edited_in], outputs=[editor_status, table])
398
+ btn_pub_all.click(lambda session_id: publish_all(session_id), inputs=[sid], outputs=[editor_status, table])
399
 
400
+ btn_refresh.click(refresh_audience, inputs=[sid, view_lang], outputs=[aud_html, tts_status, aud_audio])
401
 
402
  demo.launch()