Basu03 committed on
Commit
e56ce8f
·
1 Parent(s): 391c788

vertex ai minor bugs 8

Browse files
Files changed (2) hide show
  1. .DS_Store +0 -0
  2. app.py +20 -37
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
app.py CHANGED
@@ -2,70 +2,56 @@ import gradio as gr
2
  from src.graph import build_graph
3
  from src.interview_logic import EXCEL_QUESTIONS
4
  from src.local_llm_handler import load_llm_pipeline
5
- from src.perplexity_detector import load_detector_model # Import questions for state building
6
 
7
- # Initialize the graph, which is stateless and operates on the state dict we provide.
8
  graph = build_graph()
9
 
10
  def run_graph_logic(message: str, history: list[list[str]]):
11
- """
12
- This helper function contains the core logic for running the LangGraph chain.
13
- It's separated from the UI code for clarity.
14
- """
15
- # 1. Convert Gradio's history (list of lists) into our graph's internal format (list of tuples)
16
  internal_history = []
17
  for user_msg, assistant_msg in history:
18
  if user_msg:
19
  internal_history.append(("user", user_msg))
20
  if assistant_msg:
21
- # Split combined messages back into individual parts
22
  parts = assistant_msg.split("\n\n")
23
  for part in parts:
24
  internal_history.append(("ai", part))
25
 
26
- # NOTE: The user's new message is already in the history passed to this function.
27
- # We find the length of the history *before* the bot adds its response.
28
  len_before = len(internal_history)
29
 
30
- # 2. Build the state dictionary to pass to the graph on every turn.
 
 
31
  question_count = sum(1 for role, content in internal_history if content in EXCEL_QUESTIONS)
32
 
 
 
 
 
 
33
  current_state = {
34
- "interview_status": 0 if len(history) <= 1 else 1, # Status is 0 only for the very first message
35
  "interview_history": internal_history,
36
  "questions": EXCEL_QUESTIONS,
37
- "question_index": question_count,
38
  "evaluations": [],
39
  }
40
 
41
- # 3. Invoke the graph with the current state.
42
- print("Invoking graph with current state...")
43
  new_state = graph.invoke(current_state)
44
 
45
- # 4. Extract ONLY the new messages generated by the bot in this turn.
46
  new_messages = new_state["interview_history"][len_before:]
47
  bot_responses = [content for role, content in new_messages if role == "ai"]
48
 
49
- # 5. Return the bot's reply as a single string.
50
  return "\n\n".join(bot_responses)
51
 
52
 
53
  def user_sends_message(user_message: str, history: list[list[str]]):
54
- """
55
- This function is called when the user submits their message.
56
- It provides instant UI feedback by adding the user's message to the chat,
57
- then streams the bot's response.
58
- """
59
- # Append the user's message to the history for immediate display
60
  history.append([user_message, None])
61
-
62
- # Get the bot's response from our backend logic
63
  bot_response = run_graph_logic(user_message, history)
64
-
65
- # Update the last message in the history with the bot's full response
66
  history[-1][1] = bot_response
67
-
68
- # Return the updated history for the chatbot and an empty string to clear the textbox
69
  return history, ""
70
 
71
 
@@ -84,24 +70,24 @@ with gr.Blocks(theme="soft", css=".gradio-container {max-width: 1200px; margin:
84
  """
85
  )
86
 
 
87
  chatbot = gr.Chatbot(
88
  label="Interview Conversation",
 
89
  height=600,
90
  show_copy_button=True,
91
  placeholder="The interview will begin after you send your first message.",
92
  avatar_images=(None, "https://upload.wikimedia.org/wikipedia/commons/1/1d/Microsoft_Excel_2013-2019_logo.svg")
93
  )
94
 
95
- # Create a row for the textbox and submit button
96
  with gr.Row():
97
  user_input = gr.Textbox(
98
  show_label=False,
99
  placeholder="Type your answer here and press Enter...",
100
- scale=5 # Make the textbox larger
101
  )
102
  submit_btn = gr.Button("Submit", variant="primary", scale=1)
103
 
104
- # Create a row for the examples and clear button
105
  with gr.Row():
106
  clear_btn = gr.Button("Clear and Restart Interview", variant="stop")
107
  gr.Examples(
@@ -109,24 +95,21 @@ with gr.Blocks(theme="soft", css=".gradio-container {max-width: 1200px; margin:
109
  inputs=user_input
110
  )
111
 
112
- # --- EVENT LISTENERS ---
113
- # Define what happens when the user clicks the submit button or presses Enter
114
  submit_btn.click(
115
  fn=user_sends_message,
116
  inputs=[user_input, chatbot],
117
- outputs=[chatbot, user_input] # Update chatbot and clear user_input
118
  )
119
  user_input.submit(
120
  fn=user_sends_message,
121
  inputs=[user_input, chatbot],
122
  outputs=[chatbot, user_input]
123
  )
124
- # Define what happens when the user clicks the clear button
125
  clear_btn.click(
126
  fn=clear_chat,
127
  inputs=None,
128
  outputs=[chatbot, user_input],
129
- queue=False # Clearing should be instant
130
  )
131
 
132
  if __name__ == "__main__":
 
2
  from src.graph import build_graph
3
  from src.interview_logic import EXCEL_QUESTIONS
4
  from src.local_llm_handler import load_llm_pipeline
5
+ from src.perplexity_detector import load_detector_model
6
 
7
+ # Initialize the graph
8
  graph = build_graph()
9
 
10
def run_graph_logic(message: str, history: list[list[str]]) -> str:
    """Run one turn of the LangGraph interview chain and return the bot's reply.

    Args:
        message: The user's newest message. It is already present in
            ``history`` by the time this is called, so it is not appended here.
        history: The chat history. Accepts either the legacy Gradio pair
            format (``[[user, assistant], ...]``) or the ``type="messages"``
            format (``[{"role": ..., "content": ...}, ...]``) that the
            Chatbot in this app is configured with.

    Returns:
        All AI messages generated this turn, joined with blank lines.
    """
    # Normalize the Gradio history into the graph's internal
    # (role, content) tuple format, handling both history layouts.
    internal_history = []
    for entry in history:
        if isinstance(entry, dict):
            # type="messages" format: one dict per message.
            content = entry.get("content") or ""
            if not content:
                continue
            if entry.get("role") == "user":
                internal_history.append(("user", content))
            else:
                # Bot turns were joined with blank lines; split them back out.
                for part in content.split("\n\n"):
                    internal_history.append(("ai", part))
        else:
            # Legacy pair format: [user_msg, assistant_msg].
            user_msg, assistant_msg = entry
            if user_msg:
                internal_history.append(("user", user_msg))
            if assistant_msg:
                for part in assistant_msg.split("\n\n"):
                    internal_history.append(("ai", part))

    # Everything after this index in the returned state was produced by
    # the graph during this turn.
    len_before = len(internal_history)

    # --- Question-index bookkeeping (the critical fix from this commit) ---
    # 1. Count how many interview questions have already been asked.
    question_count = sum(
        1 for _role, content in internal_history if content in EXCEL_QUESTIONS
    )
    # 2. The question that was JUST answered is the previous one: if one
    #    question is in the history its index is 0 (clamped before the first).
    current_question_index = max(question_count - 1, 0)

    # 3. Build the per-turn state dictionary with the corrected index.
    current_state = {
        # Status is 0 only for the very first message of the interview.
        "interview_status": 0 if len(history) <= 1 else 1,
        "interview_history": internal_history,
        "questions": EXCEL_QUESTIONS,
        "question_index": current_question_index,
        "evaluations": [],
    }

    print(f"Invoking graph. Question count in history: {question_count}. Using index for evaluation: {current_question_index}.")
    new_state = graph.invoke(current_state)

    # Return only the AI messages generated in this turn.
    new_messages = new_state["interview_history"][len_before:]
    bot_responses = [content for role, content in new_messages if role == "ai"]

    return "\n\n".join(bot_responses)
48
 
49
 
50
def user_sends_message(user_message: str, history: list[list[str]]):
    """Handle a user submission: append the message, then the bot's reply.

    The Chatbot in this app is configured with ``type="messages"``, so
    Gradio passes ``history`` as a list of ``{"role", "content"}`` dicts,
    while ``run_graph_logic`` historically consumed ``[user, assistant]``
    pairs. Convert on the way in and out; the legacy pair format still
    works unchanged.

    Args:
        user_message: The text the user just submitted.
        history: Current chat history in either supported format.

    Returns:
        A ``(history, "")`` tuple: the updated history for the chatbot
        and an empty string to clear the input textbox.
    """
    if history and isinstance(history[0], dict):
        # messages format -> rebuild the pair format run_graph_logic expects.
        pairs = []
        for msg in history:
            if msg.get("role") == "user":
                pairs.append([msg.get("content"), None])
            elif pairs and pairs[-1][1] is None:
                pairs[-1][1] = msg.get("content")
            else:
                pairs.append([None, msg.get("content")])
        pairs.append([user_message, None])

        bot_response = run_graph_logic(user_message, pairs)

        # Emit the new turn in messages format for the type="messages" chatbot.
        history = history + [
            {"role": "user", "content": user_message},
            {"role": "assistant", "content": bot_response},
        ]
        return history, ""

    # Legacy pair-format path: append the user's message for immediate
    # display, then fill in the bot's response.
    history.append([user_message, None])
    bot_response = run_graph_logic(user_message, history)
    history[-1][1] = bot_response
    return history, ""
56
 
57
 
 
70
  """
71
  )
72
 
73
+ # Added type="messages" to fix the UserWarning from the logs
74
  chatbot = gr.Chatbot(
75
  label="Interview Conversation",
76
+ type="messages",
77
  height=600,
78
  show_copy_button=True,
79
  placeholder="The interview will begin after you send your first message.",
80
  avatar_images=(None, "https://upload.wikimedia.org/wikipedia/commons/1/1d/Microsoft_Excel_2013-2019_logo.svg")
81
  )
82
 
 
83
  with gr.Row():
84
  user_input = gr.Textbox(
85
  show_label=False,
86
  placeholder="Type your answer here and press Enter...",
87
+ scale=5
88
  )
89
  submit_btn = gr.Button("Submit", variant="primary", scale=1)
90
 
 
91
  with gr.Row():
92
  clear_btn = gr.Button("Clear and Restart Interview", variant="stop")
93
  gr.Examples(
 
95
  inputs=user_input
96
  )
97
 
 
 
98
  submit_btn.click(
99
  fn=user_sends_message,
100
  inputs=[user_input, chatbot],
101
+ outputs=[chatbot, user_input]
102
  )
103
  user_input.submit(
104
  fn=user_sends_message,
105
  inputs=[user_input, chatbot],
106
  outputs=[chatbot, user_input]
107
  )
 
108
  clear_btn.click(
109
  fn=clear_chat,
110
  inputs=None,
111
  outputs=[chatbot, user_input],
112
+ queue=False
113
  )
114
 
115
  if __name__ == "__main__":