Spaces:
Running on CPU Upgrade
Running on CPU Upgrade
mariesig committed on
Commit ·
0a77e8e
1
Parent(s): 6274414
remove redundant code from tab switch
Browse files
- app.py +7 -12
- stream_pipeline.py +0 -9
app.py
CHANGED
|
@@ -298,27 +298,22 @@ with gr.Blocks() as demo:
|
|
| 298 |
)
|
| 299 |
|
| 300 |
upload_tab.select(
|
| 301 |
-
lambda:
|
| 302 |
-
|
| 303 |
-
outputs=[audio_stream],
|
| 304 |
-
).then(
|
| 305 |
-
lambda: (
|
| 306 |
gr.update(visible=True),
|
| 307 |
*on_stop_recording(),
|
| 308 |
),
|
| 309 |
-
|
|
|
|
| 310 |
)
|
| 311 |
-
|
| 312 |
dataset_tab.select(
|
| 313 |
-
lambda: gr.update(streaming=False, interactive=False),
|
| 314 |
-
inputs=None,
|
| 315 |
-
outputs=[audio_stream],
|
| 316 |
-
).then(
|
| 317 |
lambda: (
|
|
|
|
| 318 |
gr.update(visible=True),
|
| 319 |
*on_stop_recording(),
|
| 320 |
),
|
| 321 |
-
outputs=[enhance_btn, vad_led, streaming_sr],
|
| 322 |
).then(
|
| 323 |
load_dataset_sample_on_tab_visit,
|
| 324 |
inputs=[dataset_dropdown],
|
|
|
|
| 298 |
)
|
| 299 |
|
| 300 |
upload_tab.select(
|
| 301 |
+
lambda: (
|
| 302 |
+
gr.update(streaming=False, interactive=False),
|
|
|
|
|
|
|
|
|
|
| 303 |
gr.update(visible=True),
|
| 304 |
*on_stop_recording(),
|
| 305 |
),
|
| 306 |
+
inputs=None,
|
| 307 |
+
outputs=[audio_stream,enhance_btn, vad_led, streaming_sr],
|
| 308 |
)
|
| 309 |
+
|
| 310 |
dataset_tab.select(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 311 |
lambda: (
|
| 312 |
+
gr.update(streaming=False, interactive=False),
|
| 313 |
gr.update(visible=True),
|
| 314 |
*on_stop_recording(),
|
| 315 |
),
|
| 316 |
+
outputs=[audio_stream,enhance_btn, vad_led, streaming_sr],
|
| 317 |
).then(
|
| 318 |
load_dataset_sample_on_tab_visit,
|
| 319 |
inputs=[dataset_dropdown],
|
stream_pipeline.py
CHANGED
|
@@ -63,7 +63,6 @@ def _to_float32_mono(y: np.ndarray) -> np.ndarray:
|
|
| 63 |
|
| 64 |
|
| 65 |
def transcribe_stream(current_sr: int | None, new_chunk, enhancement_level, input_gain_db: float = 0.0, stt_streamer: str = "deepgram") -> tuple[int | None, str, str, Any]:
|
| 66 |
-
print("Transcribing")
|
| 67 |
if new_chunk is None or new_chunk[1] is None:
|
| 68 |
return None, _ENHANCED_TRANSCRIPT, _RAW_TRANSCRIPT, VAD_OFF_HTML
|
| 69 |
sr, y = new_chunk
|
|
@@ -122,14 +121,6 @@ def clear_ui():
|
|
| 122 |
_RAW_TRANSCRIPT = ""
|
| 123 |
return _ENHANCED_TRANSCRIPT, _RAW_TRANSCRIPT
|
| 124 |
|
| 125 |
-
def stop_online_backend():
|
| 126 |
-
"""Stop streamers and clear transcripts. Do not update the Audio component:
|
| 127 |
-
toggling streaming=False then back to True can make the frontend lose the
|
| 128 |
-
microphone (getUserMedia not re-called), so we leave it unchanged."""
|
| 129 |
-
shutdown_streamers()
|
| 130 |
-
enhanced_transcript, raw_transcript = clear_ui()
|
| 131 |
-
return None, enhanced_transcript, raw_transcript, gr.update(streaming=False, interactive=False)
|
| 132 |
-
|
| 133 |
|
| 134 |
def set_stt_streamer(model_name, fs_hz):
|
| 135 |
StreamerCls = STREAMER_CLASSES.get(model_name, DeepgramStreamer)
|
|
|
|
| 63 |
|
| 64 |
|
| 65 |
def transcribe_stream(current_sr: int | None, new_chunk, enhancement_level, input_gain_db: float = 0.0, stt_streamer: str = "deepgram") -> tuple[int | None, str, str, Any]:
|
|
|
|
| 66 |
if new_chunk is None or new_chunk[1] is None:
|
| 67 |
return None, _ENHANCED_TRANSCRIPT, _RAW_TRANSCRIPT, VAD_OFF_HTML
|
| 68 |
sr, y = new_chunk
|
|
|
|
| 121 |
_RAW_TRANSCRIPT = ""
|
| 122 |
return _ENHANCED_TRANSCRIPT, _RAW_TRANSCRIPT
|
| 123 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 124 |
|
| 125 |
def set_stt_streamer(model_name, fs_hz):
|
| 126 |
StreamerCls = STREAMER_CLASSES.get(model_name, DeepgramStreamer)
|