# Hugging Face Spaces page header (scrape residue): "Spaces: Sleeping"
import gradio as gr
import os
from openai import OpenAI
from pydantic import BaseModel
from PyPDF2 import PdfReader

# --- Configuration and API Key Setup ---
# Both keys must be supplied as Space Secrets; fail fast at startup if either
# is missing so the Space shows a clear build error instead of failing later.
openai_api_key = os.environ.get('OPENAI_API_KEY')
google_api_key = os.environ.get('GOOGLE_API_KEY')

for _key_name, _key_value in (("OPENAI_API_KEY", openai_api_key),
                              ("GOOGLE_API_KEY", google_api_key)):
    if not _key_value:
        raise ValueError(f"{_key_name} not found in environment variables. Please set it as a Space Secret.")

# Two OpenAI-compatible clients: one talking to OpenAI proper, one pointed at
# Gemini's OpenAI-compatibility endpoint.
openai_client_deployed = OpenAI(api_key=openai_api_key)
gemini_client_deployed = OpenAI(
    api_key=google_api_key,
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
)
# --- Data Loading (for deployment) ---
name = "Ed Donner"

# Pull the LinkedIn profile text out of the bundled PDF; fall back to a
# placeholder string (rather than crashing the Space) if the file is absent
# or unreadable.
try:
    linkedin = ""
    for pdf_page in PdfReader("linkedin.pdf").pages:
        extracted = pdf_page.extract_text()
        if extracted:
            linkedin += extracted
except Exception as e:
    print(f"Could not load linkedin.pdf in deployed app: {e}. Using placeholder.")
    linkedin = "LinkedIn profile content could not be loaded. Please ensure linkedin.pdf is in your Space."

# Same best-effort approach for the plain-text summary.
try:
    with open("summary.txt", "r", encoding="utf-8") as f:
        summary = f.read()
except Exception as e:
    print(f"Could not load summary.txt in deployed app: {e}. Using placeholder.")
    summary = "Summary content could not be loaded. Please ensure summary.txt is in your Space."
# --- System Prompts and Pydantic Model ---
# Persona prompt: the chat model answers *as* `name`, grounded in the summary
# and LinkedIn text loaded above.
system_prompt = (
    f"You are acting as {name}. You are answering questions on {name}'s website, "
    f"particularly questions related to {name}'s career, background, skills and experience. "
    f"Your responsibility is to represent {name} for interactions on the website as faithfully as possible. "
    f"You are given a summary of {name}'s background and LinkedIn profile which you can use to answer questions. "
    "Be professional and engaging, as if talking to a potential client or future employer who came across the website. "
    "If you don't know the answer, say so."
    f"\n\n## Summary:\n{summary}\n\n## LinkedIn Profile:\n{linkedin}\n\n"
    f"With this context, please chat with the user, always staying in character as {name}."
)
class Evaluation(BaseModel):
    """Structured verdict produced by the evaluator model."""

    # Whether the Agent's latest reply meets the quality bar.
    is_acceptable: bool
    # Free-text explanation supporting the verdict (shown in logs / rerun prompt).
    feedback: str
# Evaluator prompt: a second model judges each reply against the same context
# the Agent was given.
evaluator_system_prompt = (
    "You are an evaluator that decides whether a response to a question is acceptable. "
    "You are provided with a conversation between a User and an Agent. Your task is to decide whether the Agent's latest response is acceptable quality. "
    f"The Agent is playing the role of {name} and is representing {name} on their website. "
    "The Agent has been instructed to be professional and engaging, as if talking to a potential client or future employer who came across the website. "
    f"The Agent has been provided with context on {name} in the form of their summary and LinkedIn details. Here's the information:"
    f"\n\n## Summary:\n{summary}\n\n## LinkedIn Profile:\n{linkedin}\n\n"
    "With this context, please evaluate the latest response, replying with whether the response is acceptable and your feedback."
)
def evaluator_user_prompt(reply, message, history):
    """Build the evaluator's user message from the conversation pieces.

    Args:
        reply: the Agent's latest response (string).
        message: the User's latest message (string).
        history: prior conversation; interpolated with str() formatting.

    Returns:
        A single prompt string asking the evaluator to judge `reply`.
    """
    sections = (
        f"Here's the conversation between the User and the Agent: \n\n{history}\n\n",
        f"Here's the latest message from the User: \n\n{message}\n\n",
        f"Here's the latest response from the Agent: \n\n{reply}\n\n",
        "Please evaluate the response, replying with whether it is acceptable and your feedback.",
    )
    return "".join(sections)
def evaluate(reply, message, history) -> Evaluation:
    """Ask the Gemini evaluator to judge `reply` and return its structured verdict.

    Uses the OpenAI-compatible structured-output endpoint so the response is
    parsed directly into an `Evaluation` instance.
    """
    messages = [
        {"role": "system", "content": evaluator_system_prompt},
        {"role": "user", "content": evaluator_user_prompt(reply, message, history)},
    ]
    response = gemini_client_deployed.beta.chat.completions.parse(
        model="gemini-2.0-flash",
        messages=messages,
        response_format=Evaluation,
    )
    return response.choices[0].message.parsed
def rerun(reply, message, history, feedback):
    """Regenerate the Agent's answer after the evaluator rejected it.

    The rejected attempt and the evaluator's feedback are appended to the
    system prompt so the model can produce an improved reply.
    """
    retry_system_prompt = (
        system_prompt
        + "\n\n## Previous answer rejected\nYou just tried to reply, but the quality control rejected your reply\n"
        + f"## Your attempted answer:\n{reply}\n\n"
        + f"## Reason for rejection:\n{feedback}\n\n"
    )
    messages = (
        [{"role": "system", "content": retry_system_prompt}]
        + history
        + [{"role": "user", "content": message}]
    )
    response = openai_client_deployed.chat.completions.create(model="gpt-4o-mini", messages=messages)
    return response.choices[0].message.content
# --- Main Chat Function for Gradio ---
def chat(message, history):
    """Gradio chat handler: answer as `name`, then self-evaluate the reply.

    Generates a reply with gpt-4o-mini, runs it past the Gemini evaluator,
    and if rejected regenerates once via `rerun` with the feedback attached.
    """
    # NOTE(review): deliberate quality-control tripwire — any message containing
    # "patent" forces a pig-latin reply, which the evaluator is expected to
    # reject, exercising the rerun path. Presumably kept for demonstration;
    # confirm before removing.
    system = system_prompt
    if "patent" in message:
        system = system_prompt + (
            "\n\nEverything in your reply needs to be in pig latin - "
            "it is mandatory that you respond only and entirely in pig latin"
        )

    messages = (
        [{"role": "system", "content": system}]
        + history
        + [{"role": "user", "content": message}]
    )
    response = openai_client_deployed.chat.completions.create(model="gpt-4o-mini", messages=messages)
    reply = response.choices[0].message.content

    # Evaluation logic: accept the reply as-is, or retry once with feedback.
    evaluation = evaluate(reply, message, history)
    if evaluation.is_acceptable:
        print("Passed evaluation - returning reply")
    else:
        print("Failed evaluation - retrying")
        print(evaluation.feedback)
        reply = rerun(reply, message, history, evaluation.feedback)
    return reply
# --- Gradio Interface (for Hugging Face Spaces) ---
# Hugging Face Spaces discovers and serves the module-level `demo` object, so
# no .launch() call is needed in deployment; the __main__ guard below exists
# only for running the app locally (e.g. Colab testing).
demo = gr.ChatInterface(chat, type="messages")

if __name__ == "__main__":
    demo.launch()