"""Gradio email-assistant app.

Applies a writing task (shorten, lengthen, translate, draft answer, ...)
to an email thread and draft reply via the OpenAI chat-completion API.
"""

import os

import gradio as gr
import openai
from dotenv import load_dotenv, find_dotenv

# Model identifiers used for per-task routing.
GPT3 = "gpt-3.5-turbo"
GPT4 = "gpt-4"

# Load environment variables
load_dotenv(find_dotenv())

# Fetch OpenAI key (used as a fallback when none is typed into the UI)
openai_key_env = os.getenv('OPENAI_API_KEY')


def get_model_for_task(task):
    """Return the cheapest adequate model for *task*; GPT-4 by default.

    Matching is a case-insensitive substring check, so free-form task
    text like "please translate to French" still routes correctly.
    """
    task = task.lower()
    # Lightweight mechanical edits go to the cheaper/faster model.
    if "translate" in task or "shorten" in task or "fix spelling and grammar" in task:
        return GPT3
    # Generative tasks (lengthen, draft answer, improve writing) and any
    # unrecognized task fall through to the stronger model.
    return GPT4


def get_task_prompt(task, email_history, compose_text):
    """Build the task-specific prompt from the email thread and draft text.

    Known tasks get tailored prompts; any other task string is passed
    through verbatim as the instruction in a generic prompt.
    """
    task = task.lower()
    if task == "shorten":
        return f"""Given this email conversation delimited by triple quotes '''{email_history}''' Shorten the following reply in triple backticks to less than one third its original size ```{compose_text}```"""
    if task == "lengthen":
        return f"""Given this email conversation delimited by triple quotes '''{email_history}''' Lengthen the following reply in triple backticks ```{compose_text}```"""
    if task == "improve writing":
        return f"""Improve the writing of the text delimited by triple backticks ```{compose_text}```"""
    if task == "fix spelling and grammar":
        return f"""Fix the spelling and grammar of the text delimited by triple backticks ```{compose_text}```"""
    if task == "draft answer":
        # Fixed typo in the prompt text: "emai" -> "email".
        return f"""Given this email conversation delimited by triple quotes '''{email_history}''' Write a reply email.
Do not include the subject."""
    # Fallback: treat the task text itself as the instruction.
    return f"""Given this email conversation delimited by triple quotes '''{email_history}''' and the following reply in triple backticks ```{compose_text}```, {task}."""


def get_completion(prompt, model="gpt-3.5-turbo", max_tokens=1000):
    """Send *prompt* to the chat-completion API and return the reply text."""
    messages = [{"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0.3,  # low temperature for consistent rewriting
        max_tokens=max_tokens)
    return response.choices[0].message["content"]


def respond(email_history, compose_text, task, openai_key):
    """Gradio callback: run *task* over the inputs and return the model output.

    Falls back to the OPENAI_API_KEY environment variable when no key is
    typed into the UI. Returns an error string instead of raising so the
    message is shown in the output textbox.
    """
    openai_key = openai_key if openai_key else openai_key_env
    if not openai_key:
        return "OpenAI API key not provided."
    openai.api_key = openai_key
    model = get_model_for_task(task)
    prompt = get_task_prompt(task, email_history, compose_text)
    # Fixed typo in the appended instruction: "replay" -> "reply".
    prompt = f"""{prompt}. Just provide the answer text without surrounding it with any quoting. Unless translating, always reply in the same language as the prompt."""
    print(f"Prompt: {prompt}")
    try:
        parsed_response = get_completion(prompt, model)
    except Exception as e:
        # Surface API errors (bad key, rate limit, network) in the UI.
        return f"Error: {str(e)}"
    return parsed_response


with gr.Blocks() as demo:
    gr.Markdown("## Chat")
    with gr.Row():
        openai_key_input = gr.Textbox(label="OpenAI API Key", placeholder="Enter OpenAI API Key", show_label=False)
    with gr.Row():
        task_selector = gr.Textbox(label="Task", placeholder="Summarize, Lengthen, Translate")  # New input
    with gr.Row():
        with gr.Column():
            email_history = gr.Textbox(label="Email Content", placeholder="Email history.", lines=6, show_label=False)
            compose_text = gr.Textbox(label="Compose Box", placeholder="Compose box text.", lines=6, show_label=False)
        with gr.Column():
            output = gr.Textbox(label="Output", placeholder="Output", lines=10, show_label=False)
    with gr.Row():
        submit = gr.Button("Submit")
    submit.click(fn=respond,
                 inputs=[email_history, compose_text, task_selector, openai_key_input],
                 outputs=[output],
                 api_name="rai")

if __name__ == "__main__":
    # Guarded so importing this module does not start the web server.
    demo.launch()