Basu03 committed on
Commit
9e821c7
·
1 Parent(s): e56ce8f

vertex ai minor bugs 9

Browse files
Files changed (2) hide show
  1. .DS_Store +0 -0
  2. app.py +38 -32
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
app.py CHANGED
@@ -7,57 +7,60 @@ from src.perplexity_detector import load_detector_model
7
  # Initialize the graph
8
  graph = build_graph()
9
 
10
- def run_graph_logic(message: str, history: list[list[str]]):
11
- """Contains the core logic for running the LangGraph chain."""
 
 
 
12
  internal_history = []
13
- for user_msg, assistant_msg in history:
14
- if user_msg:
15
- internal_history.append(("user", user_msg))
16
- if assistant_msg:
17
- parts = assistant_msg.split("\n\n")
18
- for part in parts:
19
- internal_history.append(("ai", part))
20
 
21
  len_before = len(internal_history)
22
 
23
- # --- THIS BLOCK CONTAINS THE CRITICAL FIX ---
24
-
25
- # 1. Count how many questions have already been asked.
26
  question_count = sum(1 for role, content in internal_history if content in EXCEL_QUESTIONS)
27
-
28
- # 2. Determine the index of the question that was JUST answered.
29
- # If 1 question is in the history, its index is 0. This is the correct index for evaluation.
30
  current_question_index = question_count - 1 if question_count > 0 else 0
31
 
32
- # 3. Build the state dictionary with the CORRECT index.
33
  current_state = {
34
  "interview_status": 0 if len(history) <= 1 else 1,
35
  "interview_history": internal_history,
36
  "questions": EXCEL_QUESTIONS,
37
- "question_index": current_question_index, # Use the corrected index
38
  "evaluations": [],
39
  }
40
 
41
  print(f"Invoking graph. Question count in history: {question_count}. Using index for evaluation: {current_question_index}.")
42
  new_state = graph.invoke(current_state)
43
 
 
44
  new_messages = new_state["interview_history"][len_before:]
45
- bot_responses = [content for role, content in new_messages if role == "ai"]
46
 
47
  return "\n\n".join(bot_responses)
48
 
49
 
50
- def user_sends_message(user_message: str, history: list[list[str]]):
51
- """Called when the user submits their message."""
52
- history.append([user_message, None])
53
- bot_response = run_graph_logic(user_message, history)
54
- history[-1][1] = bot_response
55
- return history, ""
 
 
 
 
 
 
 
 
 
56
 
57
 
58
  def clear_chat():
59
- """Called when the 'Clear' button is pressed."""
60
- return [], ""
61
 
62
 
63
  # --- MANUAL UI LAYOUT WITH GR.BLOCKS ---
@@ -70,7 +73,7 @@ with gr.Blocks(theme="soft", css=".gradio-container {max-width: 1200px; margin:
70
  """
71
  )
72
 
73
- # Added type="messages" to fix the UserWarning from the logs
74
  chatbot = gr.Chatbot(
75
  label="Interview Conversation",
76
  type="messages",
@@ -95,20 +98,23 @@ with gr.Blocks(theme="soft", css=".gradio-container {max-width: 1200px; margin:
95
  inputs=user_input
96
  )
97
 
 
 
 
98
  submit_btn.click(
99
  fn=user_sends_message,
100
- inputs=[user_input, chatbot],
101
- outputs=[chatbot, user_input]
102
  )
103
  user_input.submit(
104
  fn=user_sends_message,
105
- inputs=[user_input, chatbot],
106
- outputs=[chatbot, user_input]
107
  )
108
  clear_btn.click(
109
  fn=clear_chat,
110
  inputs=None,
111
- outputs=[chatbot, user_input],
112
  queue=False
113
  )
114
 
 
7
  # Initialize the graph
8
  graph = build_graph()
9
 
10
+ def run_graph_logic(history: list[dict[str, str]]):
11
+ """
12
+ This helper function now works directly with Gradio's "messages" format.
13
+ """
14
+ # 1. Convert Gradio's history (list of dicts) to our graph's internal format (list of tuples)
15
  internal_history = []
16
+ for turn in history:
17
+ # The 'role' and 'content' keys are guaranteed by Gradio's new format
18
+ internal_history.append((turn["role"], turn["content"]))
 
 
 
 
19
 
20
  len_before = len(internal_history)
21
 
22
+ # 2. Build the state dictionary with the correct index for the current operation
 
 
23
  question_count = sum(1 for role, content in internal_history if content in EXCEL_QUESTIONS)
 
 
 
24
  current_question_index = question_count - 1 if question_count > 0 else 0
25
 
 
26
  current_state = {
27
  "interview_status": 0 if len(history) <= 1 else 1,
28
  "interview_history": internal_history,
29
  "questions": EXCEL_QUESTIONS,
30
+ "question_index": current_question_index,
31
  "evaluations": [],
32
  }
33
 
34
  print(f"Invoking graph. Question count in history: {question_count}. Using index for evaluation: {current_question_index}.")
35
  new_state = graph.invoke(current_state)
36
 
37
+ # 3. Extract ONLY the new bot messages to append to the history
38
  new_messages = new_state["interview_history"][len_before:]
39
+ bot_responses = [content for role, content in new_messages if role in ["ai", "assistant"]]
40
 
41
  return "\n\n".join(bot_responses)
42
 
43
 
44
+ def user_sends_message(history: list[dict[str, str]]):
45
+ """
46
+ This function is now simpler and works with the correct data format.
47
+ Gradio automatically appends the user's message to the history.
48
+ """
49
+ # The user's message is the last item in the history list
50
+ user_message = history[-1]["content"]
51
+
52
+ # Get the bot's response from our backend logic
53
+ bot_response = run_graph_logic(history)
54
+
55
+ # Append the bot's response to the history
56
+ history.append({"role": "assistant", "content": bot_response})
57
+
58
+ return history
59
 
60
 
61
  def clear_chat():
62
+ """Called when the 'Clear' button is pressed. Returns an empty list."""
63
+ return []
64
 
65
 
66
  # --- MANUAL UI LAYOUT WITH GR.BLOCKS ---
 
73
  """
74
  )
75
 
76
+ # This chatbot component now correctly receives the data format it expects.
77
  chatbot = gr.Chatbot(
78
  label="Interview Conversation",
79
  type="messages",
 
98
  inputs=user_input
99
  )
100
 
101
+ # --- EVENT LISTENERS (Updated for simplicity) ---
102
+ # The submit function now only takes the chatbot history as input
103
+ # and returns the updated history. Gradio handles the user input automatically.
104
  submit_btn.click(
105
  fn=user_sends_message,
106
+ inputs=[chatbot],
107
+ outputs=[chatbot]
108
  )
109
  user_input.submit(
110
  fn=user_sends_message,
111
+ inputs=[chatbot],
112
+ outputs=[chatbot]
113
  )
114
  clear_btn.click(
115
  fn=clear_chat,
116
  inputs=None,
117
+ outputs=[chatbot],
118
  queue=False
119
  )
120