EYEDOL committed
Commit 53c76c9 · verified · 1 Parent(s): 24ed6f4

Update app.py

Files changed (1)
  1. app.py +201 -35
app.py CHANGED
@@ -1,53 +1,219 @@
  import os
  import torch
  from transformers import pipeline
  import gradio as gr

  MODEL_ID = "EYEDOL/Yoruba-ASRNEW"

  device = 0 if torch.cuda.is_available() else -1
  asr = pipeline("automatic-speech-recognition", model=MODEL_ID, device=device)

- def transcribe_from_file(audio):
      """
-     audio: Tuple (sample_rate, numpy array) or filepath depending on mode
      """
-     if audio is None:
          return "No audio provided."

-     # If audio is a tuple (sample_rate, data), save temporarily
-     if isinstance(audio, tuple):
-         import tempfile
-         import soundfile as sf
-         temp_wav = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
-         sf.write(temp_wav.name, audio[1], audio[0])
-         audio_path = temp_wav.name
      else:
-         audio_path = audio  # already a path
-
-     result = asr(audio_path)
-     return result.get("text", "")
-
- with gr.Blocks(title="Yoruba ASR Demo") as demo:
-     gr.Markdown("## Yoruba ASR — try microphone or upload an audio file 🎙️")
-
-     with gr.Tabs():
-         with gr.TabItem("🎤 Microphone"):
-             mic_input = gr.Audio(label="Record from mic", type="numpy")  # microphone input as numpy array
-             mic_button = gr.Button("Transcribe")
-             mic_output = gr.Textbox(label="Transcription")
-             mic_button.click(fn=transcribe_from_file, inputs=mic_input, outputs=mic_output)
-
-         with gr.TabItem("📁 Upload File"):
-             file_input = gr.Audio(label="Upload audio file", type="filepath")  # uploaded file path
-             file_button = gr.Button("Transcribe")
-             file_output = gr.Textbox(label="Transcription")
-             file_button.click(fn=transcribe_from_file, inputs=file_input, outputs=file_output)
-
-     gr.Markdown(
-         "✅ **Tips:** Use clear Yoruba speech. For private models, add an `HF_TOKEN` secret in Space settings. "
-         "GPU is recommended for faster inference."
-     )

  if __name__ == "__main__":
      demo.launch()
 
  import os
+ import tempfile
+ import math
  import torch
+ import soundfile as sf
  from transformers import pipeline
  import gradio as gr

+ # Optional: pydub helps with splitting arbitrary audio formats (mp3, m4a, etc.)
+ from pydub import AudioSegment
+
  MODEL_ID = "EYEDOL/Yoruba-ASRNEW"

+ # device for the transformers pipeline
  device = 0 if torch.cuda.is_available() else -1
+
+ # Create the pipeline (automatic-speech-recognition)
  asr = pipeline("automatic-speech-recognition", model=MODEL_ID, device=device)

+ # Utility: write a numpy (rate, data) tuple to a temporary wav file
+ def save_numpy_to_wav(np_tuple):
+     samplerate, data = np_tuple
+     tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
+     sf.write(tmp.name, data, samplerate)
+     return tmp.name
+
+ # Utility: return audio duration in seconds (works for file paths)
+ def get_duration_seconds(path):
+     try:
+         info = sf.info(path)
+         return info.duration
+     except Exception:
+         # fall back to pydub for formats soundfile cannot read
+         seg = AudioSegment.from_file(path)
+         return len(seg) / 1000.0
+
+ # Split an audio file into chunks (ms). Returns a list of (chunk_path, start_ms, end_ms)
+ def split_audio_file(path, chunk_length_ms=25000, overlap_ms=500):
+     audio = AudioSegment.from_file(path)
+     duration_ms = len(audio)
+     chunks = []
+     start = 0
+     while start < duration_ms:
+         end = start + chunk_length_ms
+         if end > duration_ms:
+             end = duration_ms
+         chunk = audio[start:end]
+         tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
+         chunk.export(tmp.name, format="wav")
+         chunks.append((tmp.name, start, end))
+         # advance start by chunk_length - overlap so consecutive chunks overlap slightly
+         start += chunk_length_ms - overlap_ms
+     return chunks
+
+ # Transcribe a single file path (wraps the pipeline call). Optionally passes return_timestamps.
+ def transcribe_file(path, return_timestamps=False):
+     if return_timestamps:
+         # some pipelines accept return_timestamps=True and return timestamped chunks;
+         # the exact format varies by library version, so callers handle the output flexibly.
+         out = asr(path, return_timestamps=True)
+     else:
+         out = asr(path)
+     return out
+
+ # Main entry point: handle any input (numpy tuple or path)
+ def transcribe(audio_input, allow_longform_with_timestamps=False, chunk_length_seconds=25, overlap_seconds=0.5):
      """
+     audio_input: either a (sample_rate, numpy_array) tuple from the Gradio mic, or a filepath string from an upload
+     returns: a dict with 'full_text' and a 'segments' list of {start_s, end_s, text}
      """
+     # Normalize the input to a filepath
+     if audio_input is None:
          return "No audio provided."

+     if isinstance(audio_input, tuple):
+         # The Gradio microphone with type="numpy" sends (sample_rate, numpy_array)
+         audio_path = save_numpy_to_wav(audio_input)
      else:
+         audio_path = audio_input  # uploaded filepath
+
+     # determine the duration
+     duration_s = get_duration_seconds(audio_path)
+
+     # If it is short enough, just transcribe directly
+     if duration_s <= 30:
+         out = transcribe_file(audio_path, return_timestamps=False)
+         text = out.get("text", "")
+         segments = [{"start_s": 0.0, "end_s": duration_s, "text": text}]
+         full_text = text
+         # clean up if we created a temp file for microphone input
+         if isinstance(audio_input, tuple):
+             try:
+                 os.unlink(audio_path)
+             except Exception:
+                 pass
+         return {"full_text": full_text, "segments": segments}
+
+     # duration > 30 s -> handle long audio
+     if allow_longform_with_timestamps:
+         # try calling the pipeline with return_timestamps=True
+         try:
+             out = transcribe_file(audio_path, return_timestamps=True)
+             # out may contain 'text' plus 'chunks' or 'words' with timestamps,
+             # depending on the transformers version, so parse it flexibly.
+             full_text = out.get("text") if isinstance(out, dict) else None
+             segments = []
+
+             # If the pipeline returned timestamps in 'chunks' or word-level entries:
+             if isinstance(out, dict):
+                 if "chunks" in out and isinstance(out["chunks"], list):
+                     for c in out["chunks"]:
+                         # a chunk may carry 'text' plus a 'timestamp' pair or 'start'/'end' keys
+                         ts = c.get("timestamp", (None, None))
+                         if isinstance(ts, (list, tuple)) and len(ts) == 2:
+                             start_s, end_s = ts[0], ts[1]
+                         else:
+                             start_s = c.get("start", None)
+                             end_s = c.get("end", None)
+                         segments.append({
+                             "start_s": start_s,
+                             "end_s": end_s,
+                             "text": c.get("text", "")
+                         })
+                 elif "words" in out and isinstance(out["words"], list):
+                     # word-level output: turn each word item into a tiny segment
+                     for w in out["words"]:
+                         segments.append({
+                             "start_s": w.get("start", None),
+                             "end_s": w.get("end", None),
+                             "text": w.get("word", "")
+                         })
+                 else:
+                     # fallback: no structured chunks — return the whole text as a single segment
+                     if full_text is None:
+                         full_text = str(out)
+                     segments = [{"start_s": 0.0, "end_s": duration_s, "text": full_text}]
+             else:
+                 # the pipeline returned a plain string or some other type
+                 full_text = str(out)
+                 segments = [{"start_s": 0.0, "end_s": duration_s, "text": full_text}]
+
+             if isinstance(audio_input, tuple):
+                 try:
+                     os.unlink(audio_path)
+                 except Exception:
+                     pass
+             return {"full_text": full_text, "segments": segments}
+         except Exception as e:
+             # Fall back to chunking if long-form timestamps fail
+             print("Long-form timestamps failed, falling back to chunking:", e)
+
+     # Default: chunking approach
+     chunk_length_ms = int(chunk_length_seconds * 1000)
+     overlap_ms = int(overlap_seconds * 1000)
+
+     chunks = split_audio_file(audio_path, chunk_length_ms=chunk_length_ms, overlap_ms=overlap_ms)
+     stitched_texts = []
+     segments = []
+     for chunk_path, start_ms, end_ms in chunks:
+         try:
+             out = transcribe_file(chunk_path, return_timestamps=False)
+             text = out.get("text", "")
+         except Exception as e:
+             text = f"[ERROR transcribing chunk: {e}]"
+
+         start_s = start_ms / 1000.0
+         end_s = end_ms / 1000.0
+         segments.append({"start_s": start_s, "end_s": end_s, "text": text})
+         stitched_texts.append(text)
+
+         # clean up the chunk file
+         try:
+             os.unlink(chunk_path)
+         except Exception:
+             pass
+
+     # clean up the original temp file if the input came from the microphone
+     if isinstance(audio_input, tuple):
+         try:
+             os.unlink(audio_path)
+         except Exception:
+             pass
+
+     full_text = " ".join([s for s in stitched_texts if s])
+     return {"full_text": full_text, "segments": segments}
+
+ # Gradio UI
+ with gr.Blocks(title="Yoruba ASR — long audio ready") as demo:
+     gr.Markdown("## Yoruba ASR — upload a file or use the microphone. Supports long audio via chunking or long-form timestamps 🎧")
+
+     with gr.Row():
+         with gr.Column():
+             mic = gr.Audio(label="Record from mic (use 'Record' then 'Stop')", type="numpy")
+             upload = gr.Audio(label="Or upload audio file", type="filepath")
+             mode = gr.Radio(choices=["Use microphone input", "Use uploaded file"], value="Use microphone input", label="Input source")
+             longform_checkbox = gr.Checkbox(label="Try the model's long-form timestamps (supported by some Whisper forks)", value=False)
+             chunk_len = gr.Slider(minimum=10, maximum=60, value=25, step=5, label="Chunk length (seconds) — used when chunking")
+             overlap = gr.Slider(minimum=0, maximum=5, value=0.5, step=0.5, label="Chunk overlap (seconds)")
+             transcribe_btn = gr.Button("Transcribe")
+         with gr.Column():
+             full_text_out = gr.Textbox(label="Full transcription", lines=8)
+             segments_out = gr.JSON(label="Segments (start_s, end_s, text)")
+
+     def handle_transcription(mic_input, upload_input, mode_choice, use_longform, chunk_len_s, overlap_s):
+         audio_src = mic_input if mode_choice == "Use microphone input" else upload_input
+         res = transcribe(audio_src, allow_longform_with_timestamps=use_longform, chunk_length_seconds=chunk_len_s, overlap_seconds=overlap_s)
+         if isinstance(res, str):
+             return res, []
+         return res["full_text"], res["segments"]
+
+     transcribe_btn.click(fn=handle_transcription, inputs=[mic, upload, mode, longform_checkbox, chunk_len, overlap], outputs=[full_text_out, segments_out])
+
+     gr.Markdown("**Notes:**\n\n"
+                 "- Chunking is robust and recommended if you experience errors. The default chunk length is 25 s with 0.5 s overlap.\n"
+                 "- If you enable long-form timestamps, the pipeline will attempt `return_timestamps=True` and return timestamps if the model supports it.\n"
+                 "- Make sure your Space has enough compute (a GPU is recommended) for faster transcription.")

  if __name__ == "__main__":
      demo.launch()
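
The split_audio_file helper in the new version tiles the recording into fixed-length windows that share a small overlap, so speech cut at a window boundary lands in both neighbouring chunks. A minimal standalone sketch of the same boundary arithmetic (chunk_bounds is an illustrative helper, not part of the commit):

# Hypothetical helper mirroring split_audio_file's window arithmetic:
# each window is chunk_length_ms long, and the next window starts
# (chunk_length_ms - overlap_ms) later, so neighbours share overlap_ms.
def chunk_bounds(duration_ms, chunk_length_ms=25000, overlap_ms=500):
    bounds = []
    start = 0
    while start < duration_ms:
        end = min(start + chunk_length_ms, duration_ms)
        bounds.append((start, end))
        start += chunk_length_ms - overlap_ms
    return bounds

# A 60 s file gives three windows: 0-25 s, 24.5-49.5 s, and 49-60 s.
print(chunk_bounds(60_000))  # [(0, 25000), (24500, 49500), (49000, 60000)]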
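When return_timestamps=True is accepted, recent transformers releases of the ASR pipeline typically return the full text plus per-segment timestamp pairs, which is the shape the flexible parser in transcribe() probes for first. A sketch of that expected shape (illustrative values, not output from a real run; exact fields can differ across versions):

# Assumed output shape from asr(path, return_timestamps=True);
# "timestamp" is a (start_seconds, end_seconds) pair per chunk.
out = {
    "text": "<full transcript>",
    "chunks": [
        {"timestamp": (0.0, 4.2), "text": "<first segment>"},
        {"timestamp": (4.2, 9.7), "text": "<second segment>"},
    ],
}
for c in out["chunks"]:
    start_s, end_s = c["timestamp"]
    print(f"{start_s:.1f}-{end_s:.1f} s: {c['text']}")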
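Because transcribe() returns plain Python data rather than UI state, it can also be driven without the Gradio front end, for example from a script or notebook alongside the Space. A hypothetical call ("long_talk.wav" is a placeholder path, not a file in this repo):

# Sketch: headless use of the transcribe() helper defined in app.py above.
result = transcribe("long_talk.wav", chunk_length_seconds=25, overlap_seconds=0.5)
print(result["full_text"])
for seg in result["segments"]:
    print(f'{seg["start_s"]:.1f}-{seg["end_s"]:.1f} s: {seg["text"]}')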