# Chan55's picture
# Update app.py
# 5569985 verified
import openai
import time
# from elevenlabs import set_api_key
import requests
import whisper
import os
# from elevenlabs import generate, play , stream , save
import gradio as gr
from typing import Optional
from deta import Deta
# Deta project key: prefer an environment variable so the secret is not
# committed to source control.
# NOTE(review): the original hard-coded this key inline — once committed it
# must be considered leaked and should be rotated. The inline fallback is
# kept only for backward compatibility until DETA_PROJECT_KEY is provisioned.
deta = Deta(os.getenv("DETA_PROJECT_KEY", "d0uj3nfpeok_kuCKpWGv4jeHCqKZVANdW8z37ksTJzUZ"))
# Deta Base used to persist the latest assistant reply (see run_text_prompt).
db1 = deta.Base("elevenlab")
openai.api_key = os.getenv("openaiapikey")
# NOTE(review): `model` is only referenced from commented-out code below —
# loading Whisper "base" here is expensive and likely removable; confirm
# nothing else depends on it before deleting.
model = whisper.load_model("base")
# mpv on PATH is needed by the (currently commented-out) elevenlabs
# streaming playback; this placeholder path should be set to a real install.
os.environ['PATH'] += os.pathsep + '/path/to/mpv/'
class Chat:
    """Stateful wrapper around the OpenAI chat-completion API.

    The full message history is retained on the instance so every new
    prompt is answered with the complete conversational context.
    """

    def __init__(self, system: Optional[str] = None):
        self.system = system
        self.messages = []
        if system is not None:
            self.messages.append({"role": "system", "content": system})

    def prompt(self, content: str) -> str:
        """Send *content* as a user turn and return the assistant's reply.

        Both the user message and the assistant reply are appended to the
        history before returning.
        """
        self.messages.append({"role": "user", "content": content})
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=self.messages,
        )
        reply = completion["choices"][0]["message"]["content"]
        self.messages.append({"role": "assistant", "content": reply})
        return reply
def read_and_print_file(file_path):
    """Return the full text contents of *file_path*.

    NOTE(review): despite the historical name, nothing is printed; the
    name is kept unchanged because the call site below depends on it.
    """
    # Explicit encoding avoids platform-dependent defaults (e.g. cp1252 on
    # Windows) if the questions file contains non-ASCII characters.
    with open(file_path, 'r', encoding='utf-8') as file:
        return file.read()
# Example usage:
# Load the interview questions shipped alongside the app.
file_path = 'the_interview_questions'
# NOTE(review): `contents` is never referenced again in this file — confirm
# whether it was meant to be folded into the system prompt below.
contents = read_and_print_file(file_path)
# Shared conversation state used by every Gradio callback below.
# NOTE(review): "helpfull" is a typo, but it lives in a runtime prompt
# string, so it is left untouched here.
chat = Chat(system="""You are a helpfull assistant
""")
# Remote Whisper JAX inference endpoint (hosted Gradio Space).
# NOTE(review): mid-file import — conventionally this belongs at the top of
# the file with the other imports.
from gradio_client import Client
API_URL = "https://sanchit-gandhi-whisper-jax.hf.space/"
# set up the Gradio client
client = Client(API_URL)
def transcribe_audio(audio_path, task="transcribe", return_timestamps=False):
    """Transcribe (or translate) an audio file via the Whisper JAX endpoint.

    Raises ValueError for any task other than 'transcribe' or 'translate'.
    Only the transcription text is returned; the endpoint's reported
    runtime is discarded.
    """
    valid_tasks = ("transcribe", "translate")
    if task not in valid_tasks:
        raise ValueError("task should be one of 'transcribe' or 'translate'.")
    transcription, _runtime = client.predict(
        audio_path,
        task,
        return_timestamps,
        api_name="/predict_1",
    )
    return transcription
def convert_tuples_to_list(tuples_list):
    """Flatten (question, answer) pairs into one newline-separated transcript.

    NOTE(review): despite the name this returns a single string, not a
    list; the name is kept unchanged for call-site compatibility.

    Each element — question then answer, in order — is followed by " \n",
    exactly matching the original concatenation loop's output.
    """
    # str.join builds the transcript in O(n); the previous += loop was
    # quadratic in the total transcript length.
    flattened = (part for pair in tuples_list for part in pair[:2])
    return "".join(part + " \n" for part in flattened)
def run_text_prompt(message, chat_history):
    """Send *message* through the chat model and extend the UI history.

    The assistant's reply is also persisted to the Deta base under a fixed
    key. Returns ("", updated_history) so the Gradio textbox is cleared.
    """
    reply = chat.prompt(content=message)
    # Persist only the most recent assistant reply (single fixed record).
    db1.update({"value": reply}, "my-key")
    chat_history.append((message, reply))
    return "", chat_history
def run_audio_prompt(audio, chat_history):
    """Transcribe a recorded clip and feed it through the text pipeline.

    Returns (None, history) so the audio widget is reset after each turn.
    """
    if audio is None:
        # Nothing recorded yet — leave the conversation untouched.
        return None, chat_history
    transcript = transcribe_audio(audio)
    _, chat_history = run_text_prompt(transcript, chat_history)
    return None, chat_history
def process_text(conversation, text):
    """Ask gpt-3.5-turbo about *text* with *conversation* prepended as context.

    Prints the stripped reply and returns it.
    """
    messages = [{"role": "user", "content": conversation + text}]
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    # Bind once instead of stripping the same reply twice.
    answer = completion.choices[0].message.content.strip()
    print(answer)
    return answer
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks(title="hi") as app2:
    chatbot = gr.Chatbot(label="Agent")
    msg = gr.Textbox(label="Write")
    # Pressing Enter sends the typed message and clears the textbox.
    msg.submit(run_text_prompt, [msg, chatbot], [msg, chatbot])
    with gr.Row():
        audio = gr.Audio(source="microphone", type="filepath", label="Speak")
    # Removed dead `fn = run_audio_prompt,` / `inputs = ...,` assignments:
    # nothing read them, and the trailing commas silently made them tuples.
    # A finished recording triggers change -> transcription -> chat turn;
    # the callback returns None for the audio output to reset the widget.
    audio.change(run_audio_prompt, [audio, chatbot], [audio, chatbot])
# Single-tab wrapper so the interview chat gets its own labelled tab.
demo = gr.TabbedInterface([app2], [ "Interview"])
# share=False: no public Gradio link; debug=False: quiet server logs.
demo.launch(share=False,
            debug=False,
            )