Spaces:
Runtime error
Runtime error
File size: 1,296 Bytes
d72f122 9116f0e d72f122 cf00923 6006931 8c781ac d72f122 787d286 d72f122 8c781ac fbf86c1 1a4c859 b6c4628 d72f122 47f33f2 26c90c0 7a12269 7bc7d21 26c90c0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
import os
import io
import streamlit as st
from audiorecorder import audiorecorder
from openai import OpenAI
import tempfile
import base64
# Expose the OpenAI API key to the OpenAI client via the environment.
# NOTE: the original unconditionally re-assigned the variable to itself,
# which raised TypeError at import time whenever OPENAI_API_KEY was not
# set (os.environ values must be str, and .get() returns None for a
# missing key). Only assign when the key is actually present.
_api_key = os.environ.get('OPENAI_API_KEY')
if _api_key is not None:
    os.environ['OPENAI_API_KEY'] = _api_key
def autoplay_audio(exported_audio):
    """Embed an autoplaying HTML5 audio player in the Streamlit page.

    ``exported_audio`` is an open binary file-like object containing MP3
    data; it is used as a context manager, so it is closed after reading.
    The bytes are inlined into the page as a base64 data URI.
    """
    with exported_audio as audio_fh:
        raw_bytes = audio_fh.read()
    encoded = base64.b64encode(raw_bytes).decode()
    md = f"""
    <audio controls autoplay="true">
    <source src="data:audio/mp3;base64,{encoded}" type="audio/mp3">
    </audio>
    """
    # Raw HTML is required for the <audio> tag, hence unsafe_allow_html.
    st.markdown(md, unsafe_allow_html=True)
def transcribe_audio(filepath):
    """Transcribe the audio file at *filepath* with OpenAI's Whisper API.

    Parameters:
        filepath: path to an audio file readable by the whisper-1 model.

    Returns:
        The Transcription response object from the OpenAI API (the text
        itself is available on its ``.text`` attribute).
    """
    client = OpenAI()
    # Context manager guarantees the file handle is closed even when the
    # API call raises; the original open()/close() pair leaked the handle
    # on any exception from transcriptions.create().
    with open(filepath, "rb") as audio_file:
        transcript = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
        )
    return transcript
st.title("Whisper App")

# Record from the browser microphone; `audio` is a pydub AudioSegment
# (empty, i.e. len 0, until the user finishes a recording).
audio = audiorecorder("Click to record", "Click to stop recording")
if len(audio) > 0:
    # Persist the recording to disk for the API upload. TemporaryDirectory
    # cleans itself up; the original mkdtemp() directory was never removed.
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_file_path = os.path.join(temp_dir, 'temp_audio.mp3')
        audio.export(temp_file_path, format="mp3")
        transcript = transcribe_audio(temp_file_path)
    # Show the transcribed text, not the raw Transcription response object.
    st.write(transcript.text)
|