"""Website chat agent for Shekar's personal site.

Role-plays Shekar using a background summary, captures visitor contact
details through a mail tool, and passes every draft reply through an
LLM evaluator (with one rerun on rejection) before returning it.
"""

from openai import OpenAI
import json
import os
import requests
import gradio as gr
from pydantic import BaseModel

from tools import make_a_mail_json, make_a_mail

# Groq's OpenAI-compatible endpoint.
# NOTE(review): the env var is named GROK_API but holds the Groq key —
# consider renaming; kept as-is to avoid breaking deployments.
client = OpenAI(
    base_url='https://api.groq.com/openai/v1',
    api_key=os.getenv("GROK_API"),
)

# Single model used for chat, evaluation, and rerun.
MODEL = 'meta-llama/llama-4-scout-17b-16e-instruct'

# Background summary the agent answers from.
with open('summary.txt', 'r') as f:
    summary = f.read()

name = 'Shekar'

system_prompt = f"You are acting as {name}. You are answering questions on {name}'s website, \
particularly questions related to {name}'s career, background, skills, and experience. \
Your responsibility is to represent {name} for interactions on the website as faithfully as possible. \
You are given a summary of {name}'s background which you can use to answer questions. \
Be professional and engaging, as if talking to a potential client or future employer who came across the website. \
Always respond concisely, limiting answers to 3–4 lines by default. \
After answering, ask the user: “Do you want a more detailed explanation?” \
Provide detailed or extended answers only if the user explicitly says yes. \
If the user shows interest in connecting further, politely ask for their name and email address so you can follow up. \
(When the user provides an email and name, capture it using the available tools, without mentioning the tool itself.). And remember not to share phone number. \
If the mailing tool has already been used, don’t use it again unless the user explicitly requests it. \
Note: You shouldn't answer any other questions not directly related to {name}. If faced with such questions politely decline to answer"
system_prompt += f"\n\n## Summary:\n{summary}\n\n"
system_prompt += f"With this context, please chat with the user, always staying in character as {name}."


class Evaluation(BaseModel):
    """Structured verdict returned by the evaluator model."""

    is_acceptable: bool  # True when the draft reply passes quality control
    feedback: str        # reason for rejection / notes, fed back into rerun()


evaluator_system_prompt = f"You are an evaluator that decides whether a response to a question is acceptable. \
You are provided with a conversation between a User and an Agent. \
Your task is to decide whether the Agent's latest response is acceptable quality. \
The Agent is playing the role of {name} and is representing {name} on their website.\
The Agent has been instructed to be professional and engaging, as if talking to a potential client or future employer who came across the website. \
The Agent has been provided with context on {name} in the form of their summary. Here's the information:"
evaluator_system_prompt += f"\n\n## Summary:\n{summary}\n\n"
evaluator_system_prompt += f"With this context, please evaluate the latest response, replying with whether the response is acceptable and your feedback."


def evaluator_user_prompt(reply, message, history):
    """Build the user-turn prompt shown to the evaluator model.

    Args:
        reply: the agent's draft answer being judged.
        message: the latest user message that prompted the reply.
        history: prior conversation (list of role/content dicts).

    Returns:
        A single prompt string embedding conversation, message, and reply.
    """
    user_prompt = f"Here's the conversation between the User and the Agent: \n\n{history}\n\n"
    user_prompt += f"Here's the latest message from the User: \n\n{message}\n\n"
    user_prompt += f"Here's the latest response from the Agent: \n\n{reply}\n\n"
    user_prompt += "Please evaluate the response, replying with whether it is acceptable and your feedback."
    return user_prompt


def evaluate(reply, message, history) -> Evaluation:
    """Ask the evaluator model to judge a draft reply.

    Uses structured-output parsing so the response arrives as an
    ``Evaluation`` instance rather than free text.
    """
    messages = [
        {"role": "system", "content": evaluator_system_prompt},
        {"role": "user", "content": evaluator_user_prompt(reply, message, history)},
    ]
    response = client.chat.completions.parse(
        model=MODEL,
        messages=messages,
        response_format=Evaluation,
    )
    return response.choices[0].message.parsed


def rerun(reply, message, history, feedback):
    """Regenerate an answer after the evaluator rejected the first draft.

    The rejected draft and the evaluator's feedback are appended to the
    system prompt so the model can correct itself.
    """
    updated_system_prompt = system_prompt + "\n\n## Previous answer rejected\nYou just tried to reply, but the quality control rejected your reply\n"
    updated_system_prompt += f"## Your attempted answer:\n{reply}\n\n"
    updated_system_prompt += f"## Reason for rejection:\n{feedback}\n\n"
    messages = [{"role": "system", "content": updated_system_prompt}] + history + [{"role": "user", "content": message}]
    response = client.chat.completions.create(model=MODEL, messages=messages)
    return response.choices[0].message.content


def convert_history(history):
    """Normalize Gradio chat history into OpenAI-style message dicts.

    Accepts either legacy tuple pairs ``(user, assistant)`` or dicts in
    the ``{"role": ..., "content": ...}`` messages format; anything else
    is dropped. Extra dict keys (e.g. Gradio metadata) are stripped.
    """
    messages = []
    for entry in history:
        if isinstance(entry, tuple):
            # Legacy Gradio format: (user_msg, assistant_msg) pairs.
            user_msg, assistant_msg = entry
            if user_msg:
                messages.append({"role": "user", "content": str(user_msg)})
            if assistant_msg:
                messages.append({"role": "assistant", "content": str(assistant_msg)})
        elif isinstance(entry, dict):
            # Preserve only role + content; drop metadata keys.
            if "role" in entry and "content" in entry:
                messages.append({"role": entry["role"], "content": str(entry["content"])})
    return messages


def chat(message, history):
    """Main Gradio chat handler.

    Runs a tool-call loop until the model produces a final text reply,
    then gates that reply through the evaluator; on rejection, one
    corrected answer is generated via ``rerun`` and returned.
    """
    messages = [{"role": "system", "content": system_prompt}]
    messages += convert_history(history)
    messages.append({"role": "user", "content": message})

    tool_executed = set()  # each tool runs at most once per chat turn
    while True:
        response = client.chat.completions.create(
            model=MODEL,
            messages=messages,
            tools=[make_a_mail_json],
        )
        choice = response.choices[0]
        reply = choice.message.content

        if choice.finish_reason == 'tool_calls':
            msg_obj = choice.message
            # Append the assistant tool-call message exactly once —
            # appending it per tool call corrupts the conversation.
            messages.append(msg_obj)
            for tool_call in msg_obj.tool_calls:
                tool_name = tool_call.function.name
                tool = globals().get(tool_name)
                if tool is not None and tool_name not in tool_executed:
                    arguments = json.loads(tool_call.function.arguments)
                    result = tool(**arguments)
                    tool_executed.add(tool_name)
                else:
                    # Every tool_call_id must receive a tool message or
                    # the API rejects the next request; answer skipped or
                    # unknown tools with a stub result.
                    result = {"skipped": True, "tool": tool_name}
                messages.append({
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": json.dumps(result),
                })
            continue  # let the model incorporate the tool results

        # Quality gate: accept the draft, or retry once with feedback.
        evaluated_ans = evaluate(reply, message, convert_history(history))
        if evaluated_ans.is_acceptable:
            return reply
        # Return the corrected answer directly — looping back here would
        # regenerate from scratch and discard the rerun result.
        return rerun(
            reply,
            message,
            convert_history(history),
            evaluated_ans.feedback,
        )