# Hugging Face Spaces page residue from extraction (status: "Sleeping") — not code.
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import pandas as pd
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain
from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
import os
from dotenv import load_dotenv

# Load variables from .env before GEMINI_KEY is read below.
load_dotenv()

app = FastAPI()

# Enable CORS for all origins (for development)
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is very
# permissive — restrict origins before any production deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load Excel file of practice questions once at import time.
# Columns used elsewhere in this file: problem_level, problem_statement, problem_link.
question_df = pd.read_excel("coding_questions.xlsx")

# Load Gemini API Key; fail fast at startup rather than on first request.
google_api_key = os.getenv("GEMINI_KEY")
if not google_api_key:
    raise ValueError("Please set the GEMINI_KEY environment variable in your .env file.")

# Initialize Gemini model (shared by every session's ConversationChain).
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0.7, google_api_key=google_api_key)

# Store conversation memories for each session:
# maps session_id -> ConversationChain (each chain owns its ConversationBufferMemory).
session_memories = {}
def get_conversation_for_session(session_id: str):
    """Return the ConversationChain for *session_id*, creating it on first use."""
    chain = session_memories.get(session_id)
    if chain is None:
        # First message from this session: give it a fresh, isolated memory.
        chain = ConversationChain(llm=llm, memory=ConversationBufferMemory())
        session_memories[session_id] = chain
    return chain
# ---------- MODELS ----------
class Message(BaseModel):
    """Request body for the chat endpoints."""
    # Raw user text, or a control token ("__start_hr__<name>" / "__start_mock__<stack>")
    # dispatched by interview_flow.
    message: str
    # Keys into session_memories; callers omitting it share the "default" session.
    session_id: str = "default"
class ResetSession(BaseModel):
    """Request body for clearing one session's conversation memory."""
    # Required here (no default): resetting must name an explicit session.
    session_id: str
# ---------- ROUTES ----------
def get_daily_questions(msg: Message):
    """Pick one random question per difficulty and format them for the chat UI.

    Reads the module-level ``question_df`` (columns: problem_level,
    problem_statement, problem_link), samples one row each for easy/medium/hard,
    and renders them as an HTML snippet with styled practice links.

    Returns:
        {"reply": <html string>, "session_id": msg.session_id}

    Raises:
        ValueError: if the spreadsheet has no rows for some difficulty level.

    NOTE(review): no @app.* decorator is visible on this function — confirm the
    route is registered elsewhere (decorators may have been lost in extraction).
    """
    # Hoist the case-folding: compute it once instead of once per difficulty.
    levels_lower = question_df["problem_level"].str.lower()

    questions = []
    for level in ("easy", "medium", "hard"):
        pool = question_df[levels_lower == level]
        if pool.empty:
            # Fail with a clearer message than pandas' generic sample() error.
            raise ValueError(f"No '{level}' questions found in coding_questions.xlsx")
        pick = pool.sample(1).iloc[0]
        questions.append({
            "level": pick["problem_level"],
            "statement": pick["problem_statement"],
            "link": pick["problem_link"],
        })

    # Format for chat frontend as HTML with <a> tags and blue color.
    formatted = "<br><br>".join(
        f"<b>{q['level']}</b>: {q['statement']}<br>"
        f"<a href='{q['link']}' target='_blank' rel='noopener noreferrer' style='color:#2563eb;'>Practice the question</a>"
        for q in questions
    )
    return {"reply": formatted, "session_id": msg.session_id}
async def interview_flow(msg: Message):
    """Dispatch one chat message for a session.

    Control tokens at the start of the message select a mode:
      * ``__start_hr__<name>``   — seed memory with the candidate's name and
        start a scripted HR interview.
      * ``__start_mock__<tech>`` — seed memory with the tech stack and start a
        10-question mock technical interview.
    Any other message is forwarded verbatim to the session's ConversationChain.

    Returns:
        {"reply": <model text>, "session_id": msg.session_id}

    NOTE(review): no @app.* decorator is visible on this function — confirm the
    route is registered elsewhere (decorators may have been lost in extraction).
    """
    # Get conversation for this session (always retrieve it first).
    conversation = get_conversation_for_session(msg.session_id)

    if msg.message.startswith("__start_hr__"):
        # removeprefix (not replace) so the token is only stripped from the
        # front — replace() would also mangle a name containing the token.
        name = msg.message.removeprefix("__start_hr__").strip()
        conversation.memory.save_context({"input": "name"}, {"output": name})
        hr_intro_prompt = f"""
You are a professional HR (Maha Laxmi), for 10 years in ProMVP (It provides innovative solutions for businesses and startups). You conduct interviews for candidates applying for various positions.
Today you are interviewing {name}.
1. Ask the candidate about their background, skills, and experiences.
2. Ask about their career goals and why they are interested in this position.
3. Ask about their strengths and weaknesses.
4. Ask about their problem-solving skills and how they handle challenges.
5. Ask about their teamwork and communication skills.
6. Ask about their availability and salary expectations.
7. Conclude the interview by thanking the candidate for their time and explaining the next steps in the hiring process.
8. You will ask questions one by one, and the candidate will respond to each question.
9. If the candidate hints to exit the interview, you will conclude the interview politely.
Note: You will keep the questions short and concise
"""
        question = conversation.predict(input=hr_intro_prompt)
        return {"reply": question, "session_id": msg.session_id}

    elif msg.message.startswith("__start_mock__"):
        tech_stack = msg.message.removeprefix("__start_mock__").strip()
        conversation.memory.save_context({"input": "Tech stack"}, {"output": tech_stack})
        prompt = f"""
You are a technical interviewer with expertise in {tech_stack} for 10 years.
1. You will conduct a mock technical interview for a beginner candidate with skills in {tech_stack}.
2. You will ask 10 questions in total, covering the fundamentals of {tech_stack}.
3. You will ask questions one by one, and the candidate will respond to each question.
Note: You will keep the questions short and concise
If the candidate hints to exit the interview, you will conclude the interview politely.
"""
        question = conversation.predict(input=prompt)
        return {"reply": question, "session_id": msg.session_id}

    # For all other messages, use the session-specific conversation.
    response = conversation.predict(input=msg.message)
    return {"reply": response, "session_id": msg.session_id}
def reset_session(reset_data: ResetSession):
    """Drop any stored conversation memory for the given session.

    Resetting an unknown session is a no-op and still reports success.
    """
    sid = reset_data.session_id
    # pop with a default covers both the present and absent cases in one call.
    session_memories.pop(sid, None)
    return {"message": f"Session {sid} has been reset", "session_id": sid}
def root():
    """Landing endpoint: static welcome payload for the API."""
    welcome = "Welcome to the AI Interview Agent (Gemini Edition)"
    return {"message": welcome}