# Hugging Face Space — speech-to-text demo (status at capture time: Runtime error)
import gradio as gr
import base64
import torch
from transformers import pipeline

# Load the Whisper ASR model once at startup.
# torch was imported but previously unused: without an explicit device the
# pipeline defaults to CPU even on a GPU machine, so pick the GPU when present.
whisper = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-small",
    device=0 if torch.cuda.is_available() else -1,
)
| # ์์ฑ์ ํ ์คํธ๋ก ๋ณํํ๋ ํจ์ | |
| def transcribe_audio(audio): | |
| if audio is None: | |
| return "์๋ฌ: ์ค๋์ค ์์", "" | |
| result = whisper(audio) | |
| return result["text"], base64.b64encode(result["text"].encode()).decode() | |
# Assemble the Gradio UI: one microphone input, two text outputs.
mic_input = gr.Audio(label='์ค๋์ค', sources="microphone", type='filepath')
transcript_box = gr.Textbox(label='๊ฒฐ๊ณผ')
encoded_box = gr.Textbox(label='์ํธํ๋ ๊ฒฐ๊ณผ')

demo = gr.Interface(
    fn=transcribe_audio,
    inputs=mic_input,
    outputs=[transcript_box, encoded_box],
    title='์ด์ฐ์ง์ Speech to Text (โป ๋ น์ ํ ๋ฐ๋ก ์คํ ๋๋ฅด๋ฉด ์๋ฌ๋จ)',
    description='๊ธฐ์ฌ๋: AI 60% ๋ 40%',
    submit_btn='์คํ',
    clear_btn='์ง์ฐ๊ธฐ',
)

# Start the web server for the app.
demo.launch()