Temporarily remove Whisper functionality
Browse files
app.py
CHANGED
|
@@ -9,8 +9,8 @@ import gradio as gr
|
|
| 9 |
import requests
|
| 10 |
|
| 11 |
# UNCOMMENT TO USE WHISPER
|
| 12 |
-
import warnings
|
| 13 |
-
import whisper
|
| 14 |
|
| 15 |
from langchain import ConversationChain, LLMChain
|
| 16 |
|
|
@@ -57,29 +57,29 @@ POLLY_VOICE_DATA = PollyVoiceData()
|
|
| 57 |
|
| 58 |
|
| 59 |
# UNCOMMENT TO USE WHISPER
|
| 60 |
-
warnings.filterwarnings("ignore")
|
| 61 |
-
WHISPER_MODEL = whisper.load_model("tiny")
|
| 62 |
-
print("WHISPER_MODEL", WHISPER_MODEL)
|
| 63 |
|
| 64 |
|
| 65 |
# UNCOMMENT TO USE WHISPER
|
| 66 |
-
def transcribe(aud_inp):
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
|
| 84 |
|
| 85 |
# Pertains to Express-inator functionality
|
|
@@ -228,14 +228,13 @@ def run_chain(chain, inp, capture_hidden_text):
|
|
| 228 |
except AuthenticationError as ae:
|
| 229 |
error_msg = AUTH_ERR_MSG
|
| 230 |
except RateLimitError as rle:
|
| 231 |
-
error_msg = "\n\nRateLimitError: " + str(
|
| 232 |
-
rle) + "\n\nPlease see https://help.openai.com/en/articles/6891831-error-code-429-you-exceeded-your-current-quota-please-check-your-plan-and-billing-details for more information."
|
| 233 |
except ValueError as ve:
|
| 234 |
-
error_msg = "\n\nValueError: " + str(ve)
|
| 235 |
except InvalidRequestError as ire:
|
| 236 |
-
error_msg = "\n\nInvalidRequestError: " + str(ire)
|
| 237 |
except Exception as e:
|
| 238 |
-
error_msg = "\n"
|
| 239 |
|
| 240 |
sys.stdout = tmp
|
| 241 |
hidden_text = hidden_text_io.getvalue()
|
|
@@ -452,10 +451,10 @@ with gr.Blocks(css=".gradio-container {background-color: lightgray}") as block:
|
|
| 452 |
submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
|
| 453 |
|
| 454 |
# UNCOMMENT TO USE WHISPER
|
| 455 |
-
with gr.Row():
|
| 456 |
-
|
| 457 |
-
|
| 458 |
-
|
| 459 |
|
| 460 |
gr.Examples(
|
| 461 |
examples=["How many people live in Canada?",
|
|
|
|
| 9 |
import requests
|
| 10 |
|
| 11 |
# UNCOMMENT TO USE WHISPER
|
| 12 |
+
# import warnings
|
| 13 |
+
# import whisper
|
| 14 |
|
| 15 |
from langchain import ConversationChain, LLMChain
|
| 16 |
|
|
|
|
| 57 |
|
| 58 |
|
| 59 |
# UNCOMMENT TO USE WHISPER
|
| 60 |
+
# warnings.filterwarnings("ignore")
|
| 61 |
+
# WHISPER_MODEL = whisper.load_model("tiny")
|
| 62 |
+
# print("WHISPER_MODEL", WHISPER_MODEL)
|
| 63 |
|
| 64 |
|
| 65 |
# UNCOMMENT TO USE WHISPER
|
| 66 |
+
# def transcribe(aud_inp):
|
| 67 |
+
# if aud_inp is None:
|
| 68 |
+
# return ""
|
| 69 |
+
# aud = whisper.load_audio(aud_inp)
|
| 70 |
+
# aud = whisper.pad_or_trim(aud)
|
| 71 |
+
# mel = whisper.log_mel_spectrogram(aud).to(WHISPER_MODEL.device)
|
| 72 |
+
# _, probs = WHISPER_MODEL.detect_language(mel)
|
| 73 |
+
#
|
| 74 |
+
# options = whisper.DecodingOptions()
|
| 75 |
+
# # options = whisper.DecodingOptions(language="ja")
|
| 76 |
+
#
|
| 77 |
+
# result = whisper.decode(WHISPER_MODEL, mel, options)
|
| 78 |
+
# print("result.text", result.text)
|
| 79 |
+
# result_text = ""
|
| 80 |
+
# if result and result.text:
|
| 81 |
+
# result_text = result.text
|
| 82 |
+
# return result_text
|
| 83 |
|
| 84 |
|
| 85 |
# Pertains to Express-inator functionality
|
|
|
|
| 228 |
except AuthenticationError as ae:
|
| 229 |
error_msg = AUTH_ERR_MSG
|
| 230 |
except RateLimitError as rle:
|
| 231 |
+
error_msg = "\n\nRateLimitError."
|
|
|
|
| 232 |
except ValueError as ve:
|
| 233 |
+
error_msg = "\n\nValueError."
|
| 234 |
except InvalidRequestError as ire:
|
| 235 |
+
error_msg = "\n\nInvalidRequestError."
|
| 236 |
except Exception as e:
|
| 237 |
+
error_msg = "\n\nException."
|
| 238 |
|
| 239 |
sys.stdout = tmp
|
| 240 |
hidden_text = hidden_text_io.getvalue()
|
|
|
|
| 451 |
submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
|
| 452 |
|
| 453 |
# UNCOMMENT TO USE WHISPER
|
| 454 |
+
# with gr.Row():
|
| 455 |
+
# audio_comp = gr.Microphone(source="microphone", type="filepath", label="Just say it!",
|
| 456 |
+
# interactive=True, streaming=False)
|
| 457 |
+
# audio_comp.change(transcribe, inputs=[audio_comp], outputs=[message])
|
| 458 |
|
| 459 |
gr.Examples(
|
| 460 |
examples=["How many people live in Canada?",
|