hmdliu commited on
Commit
c4e7ebc
·
verified ·
1 Parent(s): be6cb0e

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +224 -0
app.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ import os
3
+ import json
4
+ import http.client
5
+ from io import BytesIO
6
+
7
+ import gradio as gr
8
+ from dotenv import load_dotenv
9
+ from elevenlabs.client import ElevenLabs
10
+
11
# ----------------------------
# Config & clients
# ----------------------------
# Load environment variables from a local .env file when present; on
# Hugging Face Spaces the same keys are injected via the Secrets UI.
load_dotenv()

ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY", "")
API_KEY_302 = os.getenv("API_KEY_302", "")

# Construct the ElevenLabs client only when a key is configured; otherwise
# leave it as None so handlers can return a friendly error instead of crashing.
elevenlabs_client = ElevenLabs(api_key=ELEVENLABS_API_KEY) if ELEVENLABS_API_KEY else None
23
+
24
+
25
# ----------------------------
# Prompt templates (placeholders)
# ----------------------------
# Diagnosis prompt: compares the original script against the (imperfect) ASR
# transcript and asks the LLM to flag likely stutter triggers.
# Format keys required: {original_text}, {transcribed_text}.
PROMPT_TEMPLATE_1 = """\
You are a speech-language assistant. Given the ORIGINAL script and the TRANSCRIPT (imperfect ASR),
list words/phrases likely to trigger stuttering (e.g., consonant clusters, long multisyllabic words).
Output a short, structured summary and diagnosis for easy-to-stutter scenarios.

ORIGINAL:
{original_text}

TRANSCRIPT:
{transcribed_text}
"""

# Rewrite prompt used for "mode1".
# Format keys required: {notes}, {original_text} — str.format raises KeyError
# if either keyword is omitted.
PROMPT_TEMPLATE_2 = """\
You are a speech-language assistant. Rewrite the ORIGINAL script to reduce stuttering risk, while
preserving meaning and tone. Prefer simpler synonyms, shorter clauses, easier onsets. Keep it concise.

Diagnosis notes on easy-to-stutter scenarios:
{notes}

ORIGINAL:
{original_text}

Only return the revised script.
"""

# Rewrite prompt used for "mode2".
# NOTE(review): currently byte-identical to PROMPT_TEMPLATE_2, so the mode
# selector has no observable effect — presumably a placeholder to customize.
# Format keys required: {notes}, {original_text}.
PROMPT_TEMPLATE_3 = """\
You are a speech-language assistant. Rewrite the ORIGINAL script to reduce stuttering risk, while
preserving meaning and tone. Prefer simpler synonyms, shorter clauses, easier onsets. Keep it concise.

Diagnosis notes on easy-to-stutter scenarios:
{notes}

ORIGINAL:
{original_text}

Only return the revised script.
"""
65
+
66
+
67
+ # ----------------------------
68
+ # Helpers: STT & LLM calls
69
+ # ----------------------------
70
+ def transcribe_audio(upload_path: str | None, record_path: str | None) -> str:
71
+ """
72
+ Prioritize uploaded file if both provided.
73
+ Returns the transcribed text (or an error message).
74
+ """
75
+ # Choose input
76
+ audio_path = upload_path or record_path
77
+ if not audio_path:
78
+ return "No audio provided. Please upload or record audio."
79
+
80
+ if not ELEVENLABS_API_KEY:
81
+ return "ELEVENLABS_API_KEY not set. Please configure your environment."
82
+
83
+ # Read file as bytes -> BytesIO
84
+ try:
85
+ with open(audio_path, "rb") as f:
86
+ audio_data = BytesIO(f.read())
87
+ except Exception as e:
88
+ return f"Failed to read audio: {e}"
89
+
90
+ try:
91
+ transcription = elevenlabs_client.speech_to_text.convert(
92
+ file=audio_data,
93
+ model_id="scribe_v1",
94
+ tag_audio_events=True,
95
+ language_code="eng",
96
+ diarize=True,
97
+ )
98
+ # Minimal output: just return text
99
+ return transcription.text or ""
100
+ except Exception as e:
101
+ return f"Transcription error: {e}"
102
+
103
+
104
def call_llm_302(prompt: str) -> str:
    """Send a single-turn chat request to the 302.ai API.

    Minimal wrapper around POST /v1/chat/completions with model
    "gpt-4o-mini". Returns the assistant's reply text, or a human-readable
    error string (the UI displays whatever comes back).
    """
    if not API_KEY_302:
        return "API_KEY_302 not set. Please configure your environment."

    payload = json.dumps({
        "model": "gpt-4o-mini",
        "messages": [
            {"role": "user", "content": prompt}
        ],
    })
    headers = {
        "Accept": "application/json",
        "Authorization": f"Bearer {API_KEY_302}",
        "Content-Type": "application/json",
    }

    try:
        conn = http.client.HTTPSConnection("api.302.ai")
        try:
            conn.request("POST", "/v1/chat/completions", payload, headers)
            raw = conn.getresponse().read().decode("utf-8")
        finally:
            # BUG FIX: close() used to be skipped when request/read raised,
            # leaking the socket; always release the connection.
            conn.close()

        output = json.loads(raw)
        # Defensive parsing: tolerate missing keys / alternate response shapes.
        msg = output.get("choices", [{}])[0].get("message", {})
        text = msg.get("content") or msg.get("text") or str(msg)
        return text.strip()
    except Exception as e:
        return f"LLM API error: {e}"
137
+
138
+
139
+ # ----------------------------
140
+ # Button handlers
141
+ # ----------------------------
142
def on_click_transcribe(upload_path, record_path):
    """Button 1 handler: run ASR and push the result into the transcript box."""
    transcript = transcribe_audio(upload_path, record_path)
    return gr.update(value=transcript)
148
+
149
+
150
def on_click_analyze(original_text, transcribed_text):
    """Button 2 handler: diagnose easy-to-stutter spots via PROMPT_TEMPLATE_1."""
    # Empty-string fallbacks keep str.format happy when a textbox is blank.
    filled = PROMPT_TEMPLATE_1.format(
        original_text=original_text or "",
        transcribed_text=transcribed_text or "",
    )
    return gr.update(value=call_llm_302(filled))
160
+
161
+
162
def on_click_rewrite(mode, original_text, transcribed_text, summary):
    """Button 3 handler: rewrite the script to reduce stuttering risk.

    Args:
        mode: "mode1" selects PROMPT_TEMPLATE_2; any other value selects
            PROMPT_TEMPLATE_3.
        original_text: the user-entered script to revise.
        transcribed_text: unused by the current logic; kept so the Gradio
            wiring (inputs list) and signature stay stable.
        summary: the Button-2 analysis notes, injected as {notes}.
    """
    template = PROMPT_TEMPLATE_2 if mode == "mode1" else PROMPT_TEMPLATE_3
    # BUG FIX: both templates contain a {notes} placeholder, so the previous
    # format(original_text=...) call raised KeyError('notes') and the rewrite
    # button always failed. Supply the analysis summary (blank if missing).
    prompt = template.format(
        notes=summary or "",
        original_text=original_text or "",
    )
    revised = call_llm_302(prompt)
    return gr.update(value=revised)
176
+
177
+
178
+ # ----------------------------
179
+ # Gradio UI
180
+ # ----------------------------
181
# Three-row layout: (1) audio in -> transcribe, (2) ASR text + original
# script -> analyze, (3) analysis + revised script + mode -> rewrite.
with gr.Blocks(title="DeStammerer: Script Revise (Minimal)") as demo:
    gr.Markdown("### DeStammerer (Minimal Demo)\n"
                "A minimal 3-row UI for transcription, analysis, and revision.")

    # Row 1: [audio upload, audio record, button1]
    with gr.Row():
        audio_upload = gr.Audio(label="Upload Audio", sources=["upload"], type="filepath")
        audio_record = gr.Audio(label="Record Audio", sources=["microphone"], type="filepath")
        btn_transcribe = gr.Button("1) Transcribe")

    # Row 2: [textbox1 (ASR, readonly), textbox2 (original input), button2]
    with gr.Row():
        txt_transcribed = gr.Textbox(label="Transcribed Text (ASR)", interactive=False, lines=6, placeholder="ASR output appears here.")
        txt_original = gr.Textbox(label="Original Script (input)", lines=6, placeholder="Paste your original script here.")
        btn_analyze = gr.Button("2) Analyze (Find Easy-to-Stutter Words)")

    # Row 3: [textbox3 (LLM summary), textbox4 (revised script), selector, button3]
    with gr.Row():
        txt_summary = gr.Textbox(label="LLM Summary: Easy-to-Stutter Words", lines=8, placeholder="Analysis will appear here.")
        txt_revised = gr.Textbox(label="Revised Script", lines=8, placeholder="Rewritten script will appear here.")
        mode_selector = gr.Dropdown(choices=["mode1", "mode2"], value="mode1", label="Rewrite Mode")
        btn_rewrite = gr.Button("3) Revise Script")

    # Wiring: each button fills exactly one output textbox.
    btn_transcribe.click(
        fn=on_click_transcribe,
        inputs=[audio_upload, audio_record],
        outputs=[txt_transcribed],
    )

    btn_analyze.click(
        fn=on_click_analyze,
        inputs=[txt_original, txt_transcribed],
        outputs=[txt_summary],
    )

    # NOTE(review): txt_transcribed is wired in but unused by the handler;
    # kept for signature stability.
    btn_rewrite.click(
        fn=on_click_rewrite,
        inputs=[mode_selector, txt_original, txt_transcribed, txt_summary],
        outputs=[txt_revised],
    )

if __name__ == "__main__":
    demo.launch()