satheeshbhukya committed on
Commit
56a9143
·
1 Parent(s): 9495107

Per-user API key support

Browse files
Files changed (3) hide show
  1. main.py +70 -34
  2. src/App.jsx +14 -4
  3. src/index.css +8 -1
main.py CHANGED
@@ -33,10 +33,15 @@ from langgraph.prebuilt import ToolNode
33
  from pydantic import BaseModel, Field
34
 
35
  load_dotenv()
36
- GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")
37
- if not GOOGLE_API_KEY:
38
- print("WARNING: GOOGLE_API_KEY is not set. Set it in HuggingFace Space secrets.")
39
- os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
 
 
 
 
 
40
 
41
  app = FastAPI(
42
  title="AI Mock Interviewer API",
@@ -62,13 +67,24 @@ except Exception as e:
62
  print(f"ERROR loading data.json: {e}")
63
  df = pd.DataFrame()
64
 
65
- client = genai.Client(api_key=GOOGLE_API_KEY)
66
-
67
- is_retriable = lambda e: isinstance(e, genai.errors.APIError) and e.code in {429, 503}
68
- if not hasattr(genai.models.Models.generate_content, "__wrapped__"):
69
- genai.models.Models.generate_content = retry.Retry(predicate=is_retriable)(
70
- genai.models.Models.generate_content
71
- )
 
 
 
 
 
 
 
 
 
 
 
72
 
73
  INTERVIEWER_SYSTEM_PROMPT = """
74
  COMPANY NAME: "Mock Technologie Inc."
@@ -265,6 +281,7 @@ class InterviewState(TypedDict):
265
  code: str
266
  report: str
267
  finished: bool
 
268
 
269
  DIFFICULTY = tuple(df.difficulty.unique().tolist()) if not df.empty else ("Easy", "Medium", "Hard")
270
  TOPICS = tuple(df.topic.unique().tolist()) if not df.empty else ("Array Manipulation",)
@@ -323,18 +340,14 @@ def end_interview() -> bool:
323
  Use this ONLY when the candidate confirms they want to end the interview.
324
  """
325
 
326
- _llm = None
327
- _llm_with_tools = None
328
 
329
- def get_llm():
330
- global _llm, _llm_with_tools
331
- if _llm is None:
332
- api_key = os.environ.get("GOOGLE_API_KEY", "")
333
- if not api_key:
334
- raise ValueError("GOOGLE_API_KEY is not set. Add it in HuggingFace Space secrets.")
335
- _llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", google_api_key=api_key)
336
- _llm_with_tools = _llm.bind_tools(auto_tools + interview_tools)
337
- return _llm, _llm_with_tools
338
 
339
  auto_tools: List[BaseTool] = [get_difficulty_levels, get_topic_categories, get_random_problem, list_questions]
340
  tool_node = ToolNode(auto_tools)
@@ -348,7 +361,7 @@ def get_interview_transcript(messages: List[BaseMessage]) -> str:
348
  transcript = ""
349
  for message in messages:
350
  if isinstance(message, AIMessage) and message.content:
351
- content = message.content if isinstance(message.content, str) else message.content[0]
352
  transcript += f"Interviewer: {content}\n\n"
353
 
354
  elif isinstance(message, HumanMessage):
@@ -357,8 +370,8 @@ def get_interview_transcript(messages: List[BaseMessage]) -> str:
357
  text += part.get("text", "") + "\n"
358
  if image_data := part.get("image_url"):
359
  try:
360
- response = client.models.generate_content(
361
- model="gemini-2.5-flash",
362
  contents=[DESCRIBE_IMAGE_PROMPT.format(transcript=transcript), image_data.get("url")],
363
  )
364
  text += f"[Whiteboard description: {response.text}]\n"
@@ -387,8 +400,8 @@ def get_learning_resources(question: str, analytics: str, topics: str, language:
387
  rc = None
388
  for attempt in range(5):
389
  try:
390
- response = client.models.generate_content(
391
- model="gemini-2.5-flash",
392
  contents=RESOURCES_SEARCH_PROMPT.format(
393
  question=question, analytics=analytics, topics=topics, language=language
394
  ),
@@ -437,7 +450,7 @@ def chatbot_with_tools(state: InterviewState) -> InterviewState:
437
  if not messages:
438
  ai_message = AIMessage(content=WELCOME_MSG)
439
  else:
440
- _, llm_with_tools = get_llm()
441
  ai_message = llm_with_tools.invoke(system_and_messages)
442
 
443
  return state | {"messages": [ai_message]}
@@ -500,8 +513,8 @@ def create_report_node(state: InterviewState) -> InterviewState:
500
  code = state.get("code", "")
501
 
502
  try:
503
- eval_response = client.models.generate_content(
504
- model="gemini-2.5-flash",
505
  contents=CANDIDATE_EVALUATION_PROMPT.format(
506
  question=question, transcript=transcript, code=code
507
  ),
@@ -576,7 +589,8 @@ class SendMessageRequest(BaseModel):
576
  message: str = ""
577
  code: str = ""
578
  code_changed: bool = False
579
- image_base64: Optional[str] = None # base64-encoded PNG from whiteboard
 
580
 
581
  class SendMessageResponse(BaseModel):
582
  message: str
@@ -601,8 +615,11 @@ def root():
601
  "questions_loaded": len(df),
602
  }
603
 
 
 
 
604
  @app.post("/api/session/start", response_model=StartSessionResponse, tags=["Session"])
605
- def start_session():
606
  """
607
  Start a new interview session.
608
  Returns a session_id and the AI's welcome message.
@@ -614,6 +631,7 @@ def start_session():
614
  "code": "# Your solution here\n",
615
  "report": "",
616
  "finished": False,
 
617
  }
618
 
619
  try:
@@ -624,9 +642,10 @@ def start_session():
624
  welcome = WELCOME_MSG
625
  for msg in reversed(new_state.get("messages", [])):
626
  if isinstance(msg, AIMessage):
627
- welcome = msg.content if isinstance(msg.content, str) else msg.content[0]
628
  break
629
 
 
630
  sessions[session_id] = new_state
631
  return StartSessionResponse(session_id=session_id, message=welcome)
632
 
@@ -676,17 +695,34 @@ def chat(req: SendMessageRequest):
676
  current_messages = list(state.get("messages", []))
677
  current_messages.append(HumanMessage(content=content))
678
 
 
679
  graph_input: Dict[str, Any] = {
680
  "messages": current_messages,
681
  "question": state.get("question", ""),
682
  "code": req.code if req.code_changed else state.get("code", ""),
683
  "report": state.get("report", ""),
684
  "finished": False,
 
685
  }
686
 
687
  try:
688
  new_state = interviewer_graph.invoke(graph_input)
689
  except Exception as e:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
690
  raise HTTPException(status_code=500, detail=f"Interview graph error: {e}")
691
 
692
  sessions[req.session_id] = new_state
@@ -694,7 +730,7 @@ def chat(req: SendMessageRequest):
694
  ai_response = "Processing..."
695
  for msg in reversed(new_state.get("messages", [])):
696
  if isinstance(msg, AIMessage):
697
- ai_response = msg.content if isinstance(msg.content, str) else msg.content[0]
698
  break
699
  elif isinstance(msg, ToolMessage) and msg.name == "end_interview":
700
  ai_response = "Thank you for your time! The interview has ended. Your evaluation report is being prepared..."
 
33
  from pydantic import BaseModel, Field
34
 
35
  load_dotenv()
36
+
37
def extract_text(content) -> str:
    """Flatten a LangChain message ``content`` payload into plain text.

    ``content`` may be a plain string, a list of content parts (dicts such
    as ``{"type": "text", "text": ...}`` or bare strings), or any other
    object. Non-text parts (e.g. ``image_url`` blocks) are skipped.

    Returns the joined text, or ``""`` when the list holds no text parts.
    """
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        parts = []
        for part in content:
            if isinstance(part, str):
                # Bare strings are valid content-list entries in LangChain.
                parts.append(part)
            elif isinstance(part, dict) and part.get("type") == "text":
                # .get() guards against malformed parts missing "text";
                # the original p["text"] would raise KeyError here.
                parts.append(part.get("text", ""))
        return " ".join(parts) if parts else ""
    # Fall back to str() for anything unexpected (numbers, None, ...).
    return str(content)
44
+
45
 
46
  app = FastAPI(
47
  title="AI Mock Interviewer API",
 
67
  print(f"ERROR loading data.json: {e}")
68
  df = pd.DataFrame()
69
 
70
# Cache of genai.Client instances, keyed by the API key that created them.
_client_cache: Dict[str, Any] = {}


def get_api_key(user_key: str = "") -> str:
    """Resolve the Gemini API key to use for a request.

    Prefers the caller-supplied *user_key*; falls back to the server-wide
    ``GOOGLE_API_KEY`` environment variable.

    Raises:
        ValueError: when neither source yields a non-empty key.
    """
    resolved = user_key if user_key else os.environ.get("GOOGLE_API_KEY", "")
    if not resolved:
        raise ValueError("No Gemini API key provided. Please enter your API key.")
    return resolved
77
+
78
def get_client(user_key: str = ""):
    """Return a ``genai.Client`` for the resolved API key, cached per key.

    On first use of a key, also wraps the SDK's ``generate_content`` in a
    retry decorator for transient errors (HTTP 429 / 503).

    NOTE(review): the retry patch mutates ``genai.models.Models`` globally,
    not per client — the ``__wrapped__`` guard keeps it from double-wrapping.
    """
    key = get_api_key(user_key)
    client = _client_cache.get(key)
    if client is None:
        client = genai.Client(api_key=key)
        _client_cache[key] = client

        def _is_retriable(err):
            # Only rate-limit (429) and unavailable (503) API errors retry.
            return isinstance(err, genai.errors.APIError) and err.code in {429, 503}

        if not hasattr(genai.models.Models.generate_content, "__wrapped__"):
            genai.models.Models.generate_content = retry.Retry(predicate=_is_retriable)(
                genai.models.Models.generate_content
            )
    return client
88
 
89
  INTERVIEWER_SYSTEM_PROMPT = """
90
  COMPANY NAME: "Mock Technologie Inc."
 
281
  code: str
282
  report: str
283
  finished: bool
284
+ api_key: str
285
 
286
  DIFFICULTY = tuple(df.difficulty.unique().tolist()) if not df.empty else ("Easy", "Medium", "Hard")
287
  TOPICS = tuple(df.topic.unique().tolist()) if not df.empty else ("Array Manipulation",)
 
340
  Use this ONLY when the candidate confirms they want to end the interview.
341
  """
342
 
343
# Cache of (llm, llm_with_tools) tuples, keyed by API key.
# NOTE(review): unbounded — each distinct user key adds an entry; consider
# an LRU bound if many users supply their own keys.
_llm_cache: Dict[str, Any] = {}


def get_llm(user_key: str = ""):
    """Return ``(llm, llm_with_tools)`` for the resolved key, cached per key."""
    key = get_api_key(user_key)
    cached = _llm_cache.get(key)
    if cached is None:
        base = ChatGoogleGenerativeAI(model="gemini-1.5-flash", google_api_key=key)
        cached = (base, base.bind_tools(auto_tools + interview_tools))
        _llm_cache[key] = cached
    return cached
 
 
 
351
 
352
  auto_tools: List[BaseTool] = [get_difficulty_levels, get_topic_categories, get_random_problem, list_questions]
353
  tool_node = ToolNode(auto_tools)
 
361
  transcript = ""
362
  for message in messages:
363
  if isinstance(message, AIMessage) and message.content:
364
+ content = extract_text(message.content)
365
  transcript += f"Interviewer: {content}\n\n"
366
 
367
  elif isinstance(message, HumanMessage):
 
370
  text += part.get("text", "") + "\n"
371
  if image_data := part.get("image_url"):
372
  try:
373
+ response = get_client(state.get("api_key", "") if isinstance(state, dict) else "").models.generate_content(
374
+ model="gemini-1.5-flash",
375
  contents=[DESCRIBE_IMAGE_PROMPT.format(transcript=transcript), image_data.get("url")],
376
  )
377
  text += f"[Whiteboard description: {response.text}]\n"
 
400
  rc = None
401
  for attempt in range(5):
402
  try:
403
+ response = get_client(state.get("api_key", "") if isinstance(state, dict) else "").models.generate_content(
404
+ model="gemini-1.5-flash",
405
  contents=RESOURCES_SEARCH_PROMPT.format(
406
  question=question, analytics=analytics, topics=topics, language=language
407
  ),
 
450
  if not messages:
451
  ai_message = AIMessage(content=WELCOME_MSG)
452
  else:
453
+ _, llm_with_tools = get_llm(state.get("api_key", ""))
454
  ai_message = llm_with_tools.invoke(system_and_messages)
455
 
456
  return state | {"messages": [ai_message]}
 
513
  code = state.get("code", "")
514
 
515
  try:
516
+ eval_response = get_client(state.get("api_key", "") if isinstance(state, dict) else "").models.generate_content(
517
+ model="gemini-1.5-flash",
518
  contents=CANDIDATE_EVALUATION_PROMPT.format(
519
  question=question, transcript=transcript, code=code
520
  ),
 
589
  message: str = ""
590
  code: str = ""
591
  code_changed: bool = False
592
+ image_base64: Optional[str] = None # base64-encoded PNG from whiteboard
593
+ api_key: Optional[str] = None # optional per-user Gemini API key
594
 
595
  class SendMessageResponse(BaseModel):
596
  message: str
 
615
  "questions_loaded": len(df),
616
  }
617
 
618
class StartSessionRequest(BaseModel):
    # Optional user-supplied Gemini API key; when absent, the server falls
    # back to the GOOGLE_API_KEY environment variable.
    api_key: Optional[str] = None
620
+
621
  @app.post("/api/session/start", response_model=StartSessionResponse, tags=["Session"])
622
+ def start_session(req: StartSessionRequest = StartSessionRequest()):
623
  """
624
  Start a new interview session.
625
  Returns a session_id and the AI's welcome message.
 
631
  "code": "# Your solution here\n",
632
  "report": "",
633
  "finished": False,
634
+ "api_key": req.api_key or "",
635
  }
636
 
637
  try:
 
642
  welcome = WELCOME_MSG
643
  for msg in reversed(new_state.get("messages", [])):
644
  if isinstance(msg, AIMessage):
645
+ welcome = extract_text(msg.content)
646
  break
647
 
648
+ new_state["api_key"] = req.api_key or ""
649
  sessions[session_id] = new_state
650
  return StartSessionResponse(session_id=session_id, message=welcome)
651
 
 
695
  current_messages = list(state.get("messages", []))
696
  current_messages.append(HumanMessage(content=content))
697
 
698
+ user_key = req.api_key or state.get("api_key", "")
699
  graph_input: Dict[str, Any] = {
700
  "messages": current_messages,
701
  "question": state.get("question", ""),
702
  "code": req.code if req.code_changed else state.get("code", ""),
703
  "report": state.get("report", ""),
704
  "finished": False,
705
+ "api_key": user_key,
706
  }
707
 
708
  try:
709
  new_state = interviewer_graph.invoke(graph_input)
710
  except Exception as e:
711
+ err = str(e)
712
+ if "429" in err or "RESOURCE_EXHAUSTED" in err:
713
+ return SendMessageResponse(
714
+ message="The AI is receiving too many requests right now. Please wait a few seconds and try again.",
715
+ problem=state.get("question", ""),
716
+ code=state.get("code", ""),
717
+ finished=False,
718
+ )
719
+ if "quota" in err.lower():
720
+ return SendMessageResponse(
721
+ message="API quota exceeded. Please wait a minute before sending another message.",
722
+ problem=state.get("question", ""),
723
+ code=state.get("code", ""),
724
+ finished=False,
725
+ )
726
  raise HTTPException(status_code=500, detail=f"Interview graph error: {e}")
727
 
728
  sessions[req.session_id] = new_state
 
730
  ai_response = "Processing..."
731
  for msg in reversed(new_state.get("messages", [])):
732
  if isinstance(msg, AIMessage):
733
+ ai_response = extract_text(msg.content)
734
  break
735
  elif isinstance(msg, ToolMessage) and msg.name == "end_interview":
736
  ai_response = "Thank you for your time! The interview has ended. Your evaluation report is being prepared..."
src/App.jsx CHANGED
@@ -100,6 +100,7 @@ function CodeEditor({ value, onChange }) {
100
 
101
  export default function App() {
102
  const [screen, setScreen] = useState("home");
 
103
  const [sessionId, setSessionId] = useState(null);
104
  const [messages, setMessages] = useState([]);
105
  const [problem, setProblem] = useState("");
@@ -123,9 +124,10 @@ export default function App() {
123
  });
124
 
125
  const startSession = async () => {
 
126
  setError(""); setStarting(true);
127
  try {
128
- const res = await apiFetch("/api/session/start", { method: "POST" });
129
  if (!res.ok) throw new Error(await res.text());
130
  const data = await res.json();
131
  setSessionId(data.session_id);
@@ -144,7 +146,7 @@ export default function App() {
144
  setMessages(m => [...m, { role: "user", text: userMsg }]);
145
  setLoading(true);
146
  try {
147
- const body = { session_id: sessionId, message: userText, code, code_changed: codeChanged, image_base64: extraImageBase64 || null };
148
  setCodeChanged(false);
149
  const res = await apiFetch("/api/chat", { method: "POST", body: JSON.stringify(body) });
150
  if (!res.ok) throw new Error(await res.text());
@@ -154,7 +156,7 @@ export default function App() {
154
  if (data.code && data.code !== "# Your code here") setCode(data.code);
155
  if (data.finished) { setFinished(true); if (data.report) setReport(data.report); }
156
  } catch (e) {
157
- setMessages(m => [...m, { role: "ai", text: `Something went wrong. Please try again.` }]);
158
  } finally { setLoading(false); }
159
  };
160
 
@@ -180,9 +182,17 @@ export default function App() {
180
  <div className="feat"><span>📊</span><p>Report</p></div>
181
  </div>
182
 
 
 
 
 
 
 
 
 
183
  {error && <p className="err-msg">{error}</p>}
184
 
185
- <button className="start-btn" onClick={startSession} disabled={starting}>
186
  {starting ? <span className="spinner"/> : "Start Interview →"}
187
  </button>
188
  </div>
 
100
 
101
  export default function App() {
102
  const [screen, setScreen] = useState("home");
103
+ const [apiKey, setApiKey] = useState("");
104
  const [sessionId, setSessionId] = useState(null);
105
  const [messages, setMessages] = useState([]);
106
  const [problem, setProblem] = useState("");
 
124
  });
125
 
126
  const startSession = async () => {
127
+ if (!apiKey.trim()) { setError("Please enter your Google Gemini API key."); return; }
128
  setError(""); setStarting(true);
129
  try {
130
+ const res = await apiFetch("/api/session/start", { method: "POST", body: JSON.stringify({ api_key: apiKey.trim() }) });
131
  if (!res.ok) throw new Error(await res.text());
132
  const data = await res.json();
133
  setSessionId(data.session_id);
 
146
  setMessages(m => [...m, { role: "user", text: userMsg }]);
147
  setLoading(true);
148
  try {
149
+ const body = { session_id: sessionId, message: userText, code, code_changed: codeChanged, image_base64: extraImageBase64 || null, api_key: apiKey.trim() };
150
  setCodeChanged(false);
151
  const res = await apiFetch("/api/chat", { method: "POST", body: JSON.stringify(body) });
152
  if (!res.ok) throw new Error(await res.text());
 
156
  if (data.code && data.code !== "# Your code here") setCode(data.code);
157
  if (data.finished) { setFinished(true); if (data.report) setReport(data.report); }
158
  } catch (e) {
159
+ setMessages(m => [...m, { role: "ai", text: `Something went wrong. Please try again in a moment.` }]);
160
  } finally { setLoading(false); }
161
  };
162
 
 
182
  <div className="feat"><span>📊</span><p>Report</p></div>
183
  </div>
184
 
185
+ <div className="config-section">
186
+ <label className="input-label">Google Gemini API Key</label>
187
+ <input type="password" className="config-input" placeholder="AIza..."
188
+ value={apiKey} onChange={e => setApiKey(e.target.value)}
189
+ onKeyDown={e => e.key === "Enter" && startSession()} />
190
+ <p className="key-hint">Free key at <a href="https://aistudio.google.com/apikey" target="_blank" rel="noreferrer">aistudio.google.com/apikey</a> — your quota, your usage</p>
191
+ </div>
192
+
193
  {error && <p className="err-msg">{error}</p>}
194
 
195
+ <button className="start-btn" onClick={startSession} disabled={starting || !apiKey.trim()}>
196
  {starting ? <span className="spinner"/> : "Start Interview →"}
197
  </button>
198
  </div>
src/index.css CHANGED
@@ -203,4 +203,11 @@ body { font-family: var(--font); background: var(--bg); color: var(--text); font
203
  .md-body pre code { background: none; padding: 0; font-size: 12.5px; color: #e8edf5; }
204
  .md-body li { margin: 3px 0 3px 18px; list-style: none; position: relative; }
205
  .md-body li::before { content: "›"; position: absolute; left: -14px; color: var(--accent); }
206
- .md-body hr { border: none; border-top: 1px solid var(--border); margin: 16px 0; }
 
 
 
 
 
 
 
 
203
  .md-body pre code { background: none; padding: 0; font-size: 12.5px; color: #e8edf5; }
204
  .md-body li { margin: 3px 0 3px 18px; list-style: none; position: relative; }
205
  .md-body li::before { content: "›"; position: absolute; left: -14px; color: var(--accent); }
206
+ .md-body hr { border: none; border-top: 1px solid var(--border); margin: 16px 0; }
207
+
208
+ .config-section { margin-bottom: 20px; }
209
+ .input-label { display: block; font-size: 11px; color: var(--text-muted); margin-bottom: 6px; font-weight: 600; letter-spacing: 0.06em; text-transform: uppercase; }
210
+ .config-input { width: 100%; background: var(--bg3); border: 1px solid var(--border); border-radius: 10px; padding: 11px 14px; color: var(--text); font-size: 13.5px; font-family: var(--mono); outline: none; transition: border-color 0.2s; }
211
+ .config-input:focus { border-color: var(--accent); }
212
+ .key-hint { font-size: 11px; color: var(--text-muted); margin-top: 6px; }
213
+ .key-hint a { color: var(--accent); text-decoration: none; }