Drbrain0620 committed on
Commit
7570ba2
·
verified ·
1 Parent(s): 7dcd037

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +5 -1
main.py CHANGED
@@ -8,11 +8,15 @@ whisper = pipeline("automatic-speech-recognition", model="openai/whisper-base")
8
 
9
  # ์Œ์„ฑ์„ ํ…์ŠคํŠธ๋กœ ๋ณ€ํ™˜ํ•˜๋Š” ํ•จ์ˆ˜
10
  def transcribe_audio(audio):
 
 
 
11
  result = whisper(audio)
12
  return result["text"], base64.b64encode(result["text"].encode()).decode() #type: ignore
13
 
14
  # Gradio ์ธํ„ฐํŽ˜์ด์Šค
15
- demo = gr.Interface(fn=transcribe_audio,
 
16
  inputs=gr.Audio(label = '์˜ค๋””์˜ค', sources="microphone", type='numpy'),
17
  outputs=[gr.Textbox(label='๊ฒฐ๊ณผ'), gr.Textbox(label='์•”ํ˜ธํ™”๋œ ๊ฒฐ๊ณผ')],
18
  title='์ด์šฐ์ง„์˜ Speech to Text',
 
8
 
9
  # ์Œ์„ฑ์„ ํ…์ŠคํŠธ๋กœ ๋ณ€ํ™˜ํ•˜๋Š” ํ•จ์ˆ˜
10
  def transcribe_audio(audio):
11
+ if audio is None:
12
+ return "No audio detected. Please try recording again.", ""
13
+
14
  result = whisper(audio)
15
  return result["text"], base64.b64encode(result["text"].encode()).decode() #type: ignore
16
 
17
  # Gradio ์ธํ„ฐํŽ˜์ด์Šค
18
+ demo = gr.Interface(
19
+ fn=transcribe_audio,
20
  inputs=gr.Audio(label = '์˜ค๋””์˜ค', sources="microphone", type='numpy'),
21
  outputs=[gr.Textbox(label='๊ฒฐ๊ณผ'), gr.Textbox(label='์•”ํ˜ธํ™”๋œ ๊ฒฐ๊ณผ')],
22
  title='์ด์šฐ์ง„์˜ Speech to Text',