|
|
import gradio as gr |
|
|
import os |
|
|
from openai import OpenAI |
|
|
from datetime import datetime |
|
|
import threading |
|
|
import time |
|
|
import os.path |
|
|
import requests |
|
|
|
|
|
|
|
|
# ElevenLabs credential comes from the environment — never hard-coded.
elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")

# Seconds between polls of the inactivity watchdog thread.
CHECK_INTERVAL = 10

# Idle time (seconds) after which a session is considered over: 4 minutes.
USER_TIMEOUT = 4 * 60

# Mutable session state shared between the Gradio handler and the
# watchdog thread (no lock — single writer per field in practice,
# NOTE(review): confirm this assumption if handlers ever run concurrently).
last_interaction_time = time.time()  # wall-clock time of the last user request
current_user_id = None               # normalized name of the active user, or None
last_file_mod_time = None            # mtime of the active user's history file
|
|
|
|
|
|
|
|
def get_system_prompt(user_id):
    """Build the single system message that seeds a chat session.

    Combines the PROMPT* environment variables with the user's name and,
    when a prior-session summary file exists for this user, that summary.

    Args:
        user_id: the user's display name; lower-cased and de-spaced only
            for the summary filename, used verbatim in the prompt text.

    Returns:
        A one-element list holding the system-role message dict.
    """
    summary_path = f"jcTSS-{user_id.lower().replace(' ', '')}-summ.txt"
    if os.path.exists(summary_path):
        # Returning user: fold the previous session's summary into the prompt.
        with open(summary_path, "r", encoding="UTF-8") as handle:
            previous_summary = handle.read().strip()
        prompt = os.getenv("PROMPT1") + user_id + os.getenv("PROMPT2") + previous_summary
    else:
        # First visit: use the new-user variant of the prompt.
        prompt = os.getenv("PROMPT1") + user_id + os.getenv("PROMPT3")
    return [{"role": "system", "content": prompt}]
|
|
|
|
|
|
|
|
def get_user_hist_file_mod_time(user_id):
    """Return the mtime of this user's history file, or None when absent.

    The inactivity watchdog compares successive values to decide whether a
    session produced new conversation material worth summarizing.
    """
    history_path = f"jcTSS-{user_id.lower().replace(' ', '')}.txt"
    if not os.path.exists(history_path):
        return None
    return os.path.getmtime(history_path)
|
|
|
|
|
|
|
|
|
|
|
def on_timeout(user_id):
    # Invoked by the inactivity watchdog once a session has gone idle.
    # If the user's history file changed since the session started, ask an
    # OpenAI Assistant to summarize the history, persist the summary to the
    # per-user "-summ" file, then delete the session transcript.
    global last_file_mod_time
    current_mod_time = get_user_hist_file_mod_time(user_id)
    if current_mod_time is not None and (last_file_mod_time is None or current_mod_time > last_file_mod_time):
        # Record the new mtime so a second timeout doesn't re-summarize.
        last_file_mod_time = current_mod_time

        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

        # NOTE(review): these paths use the module-global current_user_id
        # rather than the user_id parameter — confirm the two are always
        # identical when this fires (the caller passes current_user_id).
        user_summ_file = "jcTSS-" + current_user_id + "-summ.txt"
        user_hist_file = "jcTSS-" + current_user_id + ".txt"

        if os.path.exists(user_hist_file):
            with open(user_hist_file, "r", encoding="UTF-8") as file:
                user_hist = file.read().strip()

            # One-shot assistant whose instructions embed the full history.
            assistant = client.beta.assistants.create(
                name="GP-Summarizer",
                instructions=os.getenv("ASST_PROMPT") + user_hist,
                model="gpt-3.5-turbo-1106"
            )

            assistant_id=assistant.id

            # NOTE(review): the user message is empty — presumably the
            # summarization input lives entirely in the assistant
            # instructions above; verify the API accepts "" content.
            thread = client.beta.threads.create(
                messages=[
                    {
                        "role": "user",
                        "content": "",
                    }
                ]
            )

            run = client.beta.threads.runs.create(
                thread_id=thread.id,
                assistant_id=assistant.id
            )

            def wait_on_run(run, thread):
                # Poll every 0.5s until the run leaves its transient states.
                while run.status == "queued" or run.status == "in_progress":
                    run = client.beta.threads.runs.retrieve(
                        thread_id=thread.id,
                        run_id=run.id,
                    )
                    time.sleep(0.5)
                return run

            run = wait_on_run(run, thread)

            messages = client.beta.threads.messages.list(
                thread_id=thread.id
            )

            # Echo the summary to stdout, oldest message first.
            for message in reversed(messages.data):
                print(message.content[0].text.value)

            # Overwrite the summary file: timestamp header + new summary.
            with open(user_summ_file, "w", encoding="UTF-8") as file:
                file.write("Date/Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n\n")
                for message in reversed(messages.data):
                    file.write(message.content[0].text.value)

            new_user="False"  # NOTE(review): assigned but never read — likely vestigial

            # The assistant is disposable; remove it once the summary is saved.
            client.beta.assistants.delete(assistant_id)

            # Drop the session transcript now that it has been summarized.
            if os.path.exists(f"jcTSS-{current_user_id}-transcript.txt"):
                os.remove(f"jcTSS-{current_user_id}-transcript.txt")

        else:
            new_user="True"  # NOTE(review): assigned but never read — likely vestigial
|
|
|
|
|
|
|
|
|
|
|
def check_for_inactivity():
    """Background watchdog loop: summarize a session once it goes idle.

    Wakes every CHECK_INTERVAL seconds; when a user is active and their
    last interaction is older than USER_TIMEOUT, fires on_timeout() for
    them.  Runs forever — intended for a daemon thread.
    """
    global last_interaction_time, current_user_id, USER_TIMEOUT
    while True:
        time.sleep(CHECK_INTERVAL)
        now = time.time()
        idle_for = now - last_interaction_time
        if current_user_id is not None and idle_for > USER_TIMEOUT:
            on_timeout(current_user_id)
            # Reset the idle clock so the timeout fires at most once
            # per idle stretch.
            last_interaction_time = now
|
|
|
|
|
|
|
|
# Start the inactivity watchdog as a daemon thread so it never blocks
# interpreter shutdown.
inactivity_thread = threading.Thread(target=check_for_inactivity)
inactivity_thread.daemon = True
inactivity_thread.start()

# Startup timestamp.  NOTE(review): dt/dt_string are never referenced in
# this file — possibly leftovers; verify before removing.
dt = datetime.now()
dt_string = str(dt)

# Shared OpenAI client used by generate_speech().
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
|
|
|
|
|
|
|
|
|
|
|
def generate_speech(name, input_text):
    """Gradio handler: answer `input_text` in character as "Johnny" and voice it.

    Sends the system prompt plus the new user message to the OpenAI chat API,
    converts the reply to speech through the ElevenLabs TTS HTTP endpoint, and
    maintains two per-user files: an append-only history log and a session
    transcript (a summary file is produced later by the idle watchdog).

    Args:
        name: the visitor's first name; required, and must not change
            mid-session.
        input_text: the visitor's question or comment.

    Returns:
        (audio_bytes | None, transcript_or_error_text) — the two Gradio
        outputs.  Audio is None when validation or the TTS call fails.
    """
    global last_interaction_time, current_user_id, last_file_mod_time

    # A name is mandatory.
    if not name.strip():
        return None, "Please enter your FIRST NAME to continue."

    # Any request counts as activity for the idle watchdog.
    last_interaction_time = time.time()

    # Normalize the name into a filesystem-safe user id.
    user_id = name.lower().replace(" ", "")
    if current_user_id is not None and user_id != current_user_id:
        # Fix: removed the dead `user_id = current_user_id` assignment that
        # preceded this early return.
        return None, "Do not change your name DURING a session."
    current_user_id = user_id

    user_hist_file = f"jcTSS-{user_id}.txt"

    # Snapshot the history file's mtime so the watchdog can tell whether this
    # session produced new material worth summarizing.
    last_file_mod_time = get_user_hist_file_mod_time(user_id)

    # First visit: seed the history file.  (Fix: the old code also read the
    # entire history into an unused local on every call — removed.)
    if not os.path.exists(user_hist_file):
        with open(user_hist_file, "w", encoding="UTF-8") as file:
            file.write("User ID: " + user_id)

    # Chat payload: persona/system prompt plus the new user turn.
    history_openai_format = get_system_prompt(current_user_id)
    history_openai_format.append({"role": "user", "content": input_text})

    completion = client.chat.completions.create(
        model="gpt-3.5-turbo-1106",
        messages=history_openai_format
    )

    message_content = completion.choices[0].message.content.strip()

    # Strip a leading "Johnny"/"JOHNNY" speaker tag the model sometimes adds.
    if message_content.lower().startswith("johnny"):
        message_content = message_content[6:].strip()

    history_openai_format.append({"role": "assistant", "content": message_content})

    # Text-to-speech via the ElevenLabs HTTP API.
    url = os.getenv("URL1")
    headers = {
        "Accept": "audio/mpeg",
        "Content-Type": "application/json",
        "xi-api-key": elevenlabs_api_key
    }
    data = {
        "text": message_content,
        "model_id": "eleven_multilingual_v2",
        "voice_settings": {
            "stability": 1.0,
            "similarity_boost": 1.0,
            "excitement": 0.9,
            "speed": 1.1,
            "volume": 80,
            "pitch": 2.0,
            "breathiness": 0.8,
            "voice_id": os.getenv("VOICE_ID")
        }
    }
    response = requests.post(url, json=data, headers=headers)

    # Load (or start) this session's transcript and append the new exchange.
    transcript_file_path = f"jcTSS-{current_user_id}-transcript.txt"
    if os.path.exists(transcript_file_path):
        with open(transcript_file_path, "r", encoding="UTF-8") as file:
            transcript = file.read()
    else:
        transcript = "Date/Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n\n"

    transcript += f"GUEST: {input_text}\n"
    transcript += f"JOHNNY: {message_content}\n\n"

    with open(transcript_file_path, "w", encoding="UTF-8") as file:
        file.write(transcript)

    # Append the exchange (last two messages) to the permanent history log.
    with open(user_hist_file, "a+", encoding="UTF-8") as file:
        file.write("\n\nDate/Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        for message in history_openai_format[-2:]:
            file.write(f"\n{message['role'].title()}: {message['content']}")

    # Fix: the raw response body used to be returned unconditionally, so on an
    # ElevenLabs error Gradio received JSON error bytes labelled as audio
    # (proper error handling had been commented out as "causing problems",
    # because it raised).  Returning no audio degrades gracefully: the text
    # reply still reaches the user via the transcript.
    if response.status_code != 200:
        print("Error with ElevenLabs API:", response.status_code, response.text)
        return None, transcript

    return response.content, transcript
|
|
|
|
|
|
|
|
|
|
|
# Gradio UI: name + message in, spoken reply + running transcript out.
iface = gr.Interface(
    fn=generate_speech,
    inputs=[
        gr.Textbox(label="Your Name (REQUIRED):", placeholder="Enter your FIRST NAME"),
        gr.Textbox(label="Your question or comment for Johnny:")
    ],
    # Fix: autoscroll/show_copy_button expect booleans; the strings "True"
    # only behaved correctly by accident of truthiness.
    outputs=[
        gr.Audio(autoplay=True, label="Johnny's response:"),
        gr.Textbox(label="Transcript", max_lines=12, autoscroll=True, show_copy_button=True)
    ],
    live=False,
    allow_flagging="never"
)

# Hide the auto-generated API docs page; this app is UI-only.
iface.launch(show_api=False)