JLW committed on
Commit
2f21e31
·
1 Parent(s): 70c5171

Temporarily comment out OpenAI Whisper (STT) functionality

Browse files
Files changed (1) hide show
  1. app.py +24 -26
app.py CHANGED
@@ -9,8 +9,8 @@ import gradio as gr
9
  import requests
10
 
11
  # UNCOMMENT TO USE WHISPER
12
- import warnings
13
- import whisper
14
 
15
  from langchain import ConversationChain, LLMChain
16
 
@@ -59,29 +59,27 @@ POLLY_VOICE_DATA = PollyVoiceData()
59
 
60
 
61
  # UNCOMMENT TO USE WHISPER
62
- warnings.filterwarnings("ignore")
63
- WHISPER_MODEL = whisper.load_model("tiny")
64
- print("WHISPER_MODEL", WHISPER_MODEL)
65
 
66
 
67
  # UNCOMMENT TO USE WHISPER
68
- def transcribe(aud_inp):
69
- if aud_inp is None:
70
- return ""
71
- aud = whisper.load_audio(aud_inp)
72
- aud = whisper.pad_or_trim(aud)
73
- mel = whisper.log_mel_spectrogram(aud).to(WHISPER_MODEL.device)
74
- _, probs = WHISPER_MODEL.detect_language(mel)
75
-
76
- options = whisper.DecodingOptions()
77
- # options = whisper.DecodingOptions(language="ja")
78
-
79
- result = whisper.decode(WHISPER_MODEL, mel, options)
80
- print("result.text", result.text)
81
- result_text = ""
82
- if result and result.text:
83
- result_text = result.text
84
- return result_text
85
 
86
 
87
  # Pertains to Express-inator functionality
@@ -470,10 +468,10 @@ with gr.Blocks(css=".gradio-container {background-color: lightgray}") as block:
470
  submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
471
 
472
  # UNCOMMENT TO USE WHISPER
473
- with gr.Row():
474
- audio_comp = gr.Microphone(source="microphone", type="filepath", label="Just say it!",
475
- interactive=True, streaming=False)
476
- audio_comp.change(transcribe, inputs=[audio_comp], outputs=[message])
477
 
478
  gr.Examples(
479
  examples=["How many people live in Canada?",
 
9
  import requests
10
 
11
  # UNCOMMENT TO USE WHISPER
12
+ # import warnings
13
+ # import whisper
14
 
15
  from langchain import ConversationChain, LLMChain
16
 
 
59
 
60
 
61
  # UNCOMMENT TO USE WHISPER
62
+ # warnings.filterwarnings("ignore")
63
+ # WHISPER_MODEL = whisper.load_model("tiny")
64
+ # print("WHISPER_MODEL", WHISPER_MODEL)
65
 
66
 
67
  # UNCOMMENT TO USE WHISPER
68
+ # def transcribe(aud_inp):
69
+ # if aud_inp is None:
70
+ # return ""
71
+ # aud = whisper.load_audio(aud_inp)
72
+ # aud = whisper.pad_or_trim(aud)
73
+ # mel = whisper.log_mel_spectrogram(aud).to(WHISPER_MODEL.device)
74
+ # _, probs = WHISPER_MODEL.detect_language(mel)
75
+ # options = whisper.DecodingOptions()
76
+ # # options = whisper.DecodingOptions(language="ja")
77
+ # result = whisper.decode(WHISPER_MODEL, mel, options)
78
+ # print("result.text", result.text)
79
+ # result_text = ""
80
+ # if result and result.text:
81
+ # result_text = result.text
82
+ # return result_text
 
 
83
 
84
 
85
  # Pertains to Express-inator functionality
 
468
  submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
469
 
470
  # UNCOMMENT TO USE WHISPER
471
+ # with gr.Row():
472
+ # audio_comp = gr.Microphone(source="microphone", type="filepath", label="Just say it!",
473
+ # interactive=True, streaming=False)
474
+ # audio_comp.change(transcribe, inputs=[audio_comp], outputs=[message])
475
 
476
  gr.Examples(
477
  examples=["How many people live in Canada?",