# NOTE(review): removed Hugging Face Spaces status residue ("Runtime error")
# that was accidentally pasted above the module — it was not valid Python.
| from dotenv import load_dotenv | |
| from openai import OpenAI | |
| from pypdf import PdfReader | |
| import gradio as gr | |
| import datetime | |
| from collections import defaultdict | |
| import os | |
# Pull environment variables in from .env (OPENAI_API_KEY among them),
# letting .env values win over anything already set in the environment.
load_dotenv(override=True)


def _fresh_quota_record():
    """Factory for a brand-new per-user daily-quota record."""
    return {"date": None, "count": 0}


# In-memory daily question tracker: user_id -> {"date": date, "count": int}.
# Resets implicitly when the stored date no longer matches today.
user_question_counter = defaultdict(_fresh_quota_record)
class Me:
    """Chat persona that answers in character as Narendra, a Python
    technical interviewer.

    On construction, loads background material (a LinkedIn PDF and a plain
    text summary) from the ``me/`` directory. ``chat`` enforces an
    in-memory limit of 3 questions per user per calendar day via the
    module-level ``user_question_counter``.
    """

    def __init__(self):
        self.openai = OpenAI()  # reads OPENAI_API_KEY from the environment
        self.name = "Narendra"

        # Extract LinkedIn profile text from the bundled PDF. Pages with no
        # extractable text are skipped; join once instead of quadratic +=.
        reader = PdfReader("me/linkedin.pdf")
        parts = []
        for page in reader.pages:
            text = page.extract_text()
            if text:
                parts.append(text)
        self.linkedin = "".join(parts)

        # Short personal summary used to ground the system prompt.
        with open("me/summary.txt", "r", encoding="utf-8") as f:
            self.summary = f.read()

    def system_prompt(self):
        """Build the system prompt that puts the model in character.

        Returns:
            str: Prompt embedding the persona name, summary, and LinkedIn
            profile text.
        """
        return (
            f"You are acting as {self.name}, an experienced Python technical interviewer. "
            f"You are helping users by asking or answering Python-related technical questions. "
            f"Always stay professional, helpful, and concise. Do not generate responses over 100 tokens. "
            f"The user can only ask 3 questions per day—enforce this limit politely. "
            f"\n\n## About {self.name} (your interviewer):\n"
            f"{self.summary}\n\n"
            f"## LinkedIn Profile:\n{self.linkedin}\n\n"
            f"Use this background to answer in character as {self.name}."
        )

    def chat(self, message, history):
        """Handle one chat turn, enforcing the daily question limit.

        Args:
            message: The user's latest message text.
            history: Prior turns in Gradio "messages" format
                (list of dicts with at least "role" and "content").

        Returns:
            str: The assistant's reply, or a limit-reached notice.
        """
        user_id = "user"  # TODO: replace with session-based ID for real tracking
        today = datetime.date.today()
        record = user_question_counter[user_id]

        # Reset the question count when the calendar day changes.
        if record["date"] != today:
            record["date"] = today
            record["count"] = 0

        # Refuse before spending an API call once the limit is reached.
        if record["count"] >= 3:
            return "🚫 You've reached your daily limit of 3 questions. Please try again tomorrow."

        # Gradio "messages" history entries may carry extra keys (e.g.
        # "metadata") that the OpenAI API rejects — keep only role/content.
        past_turns = [
            {"role": turn["role"], "content": turn["content"]} for turn in history
        ]
        messages = (
            [{"role": "system", "content": self.system_prompt()}]
            + past_turns
            + [{"role": "user", "content": message}]
        )

        response = self.openai.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            max_tokens=100,  # mirrors the 100-token cap stated in the prompt
        )

        # Count the question only after a successful completion, so a
        # failed API call does not consume quota.
        record["count"] += 1
        return f"👋 Narendra is your Python interviewer. Let's begin!\n\n{response.choices[0].message.content}"
if __name__ == "__main__":
    # Stand up the persona and serve it through a Gradio chat UI;
    # share=True exposes a temporary public link.
    assistant = Me()
    demo = gr.ChatInterface(assistant.chat, type="messages")
    demo.launch(share=True)