Chia Woon Yap
committed on
Update app.py
Browse files
app.py
CHANGED
|
# Function to handle speech-to-text conversion

def transcribe_audio(audio):
    """Transcribe a Gradio-style audio input to text with Whisper.

    Parameters:
        audio: tuple of (sampling_rate, waveform) — an int sample rate
            and a NumPy array of samples, as a Gradio ``Audio`` component
            produces.  Multi-channel input is averaged down to mono.

    Returns:
        str: the recognized transcription text.
    """
    sr, y = audio
    if y.ndim > 1:
        y = y.mean(axis=1)  # downmix stereo/multi-channel to mono
    y = y.astype(np.float32)

    # Peak-normalize; guard against all-zero (silent) input so the
    # division cannot produce NaN/inf.
    max_val = np.max(np.abs(y))
    if max_val > 0:
        y /= max_val

    # BUGFIX: the original rebuilt the ASR pipeline on *every* call,
    # reloading the whole Whisper model each time.  Cache it lazily on
    # the function object so the model loads exactly once.
    if not hasattr(transcribe_audio, "_asr"):
        transcribe_audio._asr = pipeline(
            "automatic-speech-recognition",
            model="openai/whisper-small.en",  # More accurate
            chunk_length_s=30,
        )

    return transcribe_audio._asr({"sampling_rate": sr, "raw": y})["text"]

# the remaining is the same
|
|
|
# Function to handle speech-to-text conversion

# BUGFIX: the committed version left this `def` line commented out
# (`#def transcribe_audio(audio):`) while the indented body remained
# live, which is a SyntaxError at import time.  The definition is
# restored here.
def transcribe_audio(audio):
    """Transcribe a Gradio-style audio input to text.

    Uses the module-level ``transcriber`` ASR pipeline — presumably
    defined earlier in this file; verify against the full app.py.

    Parameters:
        audio: tuple of (sampling_rate, waveform) — an int sample rate
            and a NumPy array of samples.  Multi-channel input is
            averaged down to mono.

    Returns:
        str: the recognized transcription text.
    """
    sr, y = audio
    if y.ndim > 1:
        y = y.mean(axis=1)  # downmix to mono
    y = y.astype(np.float32)

    # BUGFIX: the committed `y /= np.max(np.abs(y))` divides by zero on
    # silent (all-zero) input, yielding NaNs; normalize only when there
    # is a non-zero peak.
    peak = np.max(np.abs(y))
    if peak > 0:
        y /= peak

    return transcriber({"sampling_rate": sr, "raw": y})["text"]

# NOTE(review): a dead triple-quoted block ("Quick Fixes You Can Try
# First": a per-call "openai/whisper-small.en" pipeline variant) was
# removed here; it remains available in version-control history.

# the remaining is the same