Basu03 committed on
Commit
f90498e
·
1 Parent(s): e7d20ff

vertex ai minor bugs 2

Browse files
Files changed (2) hide show
  1. .DS_Store +0 -0
  2. app.py +39 -47
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
app.py CHANGED
@@ -1,70 +1,61 @@
1
  import gradio as gr
2
  from src.graph import build_graph
 
3
 
4
  # Initialize the graph, which is stateless and operates on the state dict we provide.
5
  graph = build_graph()
6
 
7
# The question bank that drives the interview. Defined before chat_fn so the
# dependency is explicit; it is also handed to the graph as part of the state.
EXCEL_QUESTIONS = [
    "What is the difference between the VLOOKUP and HLOOKUP functions in Excel?",
    "Explain how to use the INDEX and MATCH functions together, and why you might prefer them over VLOOKUP.",
    "Describe what a Pivot Table is and give an example of a scenario where it would be useful.",
    "What is Conditional Formatting in Excel? Can you provide an example?",
]


def chat_fn(message: str, history: list[dict[str, str]]):
    """Main chat function for Gradio.

    Stateless by design: the provided ``history`` is the single source of
    truth, and the full graph state is rebuilt from it on every turn.

    Args:
        message: The new message from the user.
        history: The entire conversation history in Gradio's "messages"
            format (list of ``{"role": ..., "content": ...}`` dicts).

    Returns:
        The complete updated history in Gradio's messages format.
    """
    # 1. Convert Gradio's history (list of dicts) to the graph's expected
    # internal format (list of (role, content) tuples).
    internal_history = [(turn["role"], turn["content"]) for turn in history]

    # 2. Count how many questions the assistant has already asked.
    # BUGFIX: only assistant turns count — the original counted any turn
    # whose content matched a question, so a user quoting a question back
    # would inflate the question index.
    question_count = sum(
        1
        for role, content in internal_history
        if role in ("assistant", "ai") and content in EXCEL_QUESTIONS
    )

    # Is the interview over? 2 = terminated/summarised, 1 = in progress,
    # 0 = not yet started (empty history, applied below).
    finished = any(
        "interview will now be terminated" in content
        or "performance summary" in content.lower()
        for _role, content in internal_history
    )
    interview_status = 2 if finished else 1

    # 3. Build the state dictionary passed to the graph on every turn; this
    # keeps the function stateless and avoids session-related bugs.
    current_state = {
        "interview_status": 0 if not internal_history else interview_status,
        "interview_history": internal_history,
        "questions": EXCEL_QUESTIONS,
        "question_index": question_count,
        # NOTE(review): evaluations are not persisted across turns; a more
        # robust solution would parse them back out of the history.
        "evaluations": [],
        "final_feedback": "",
        "warnings": [],
    }

    # Add the user's new message to the history for the graph to process.
    current_state["interview_history"].append(("user", message))

    # 4. Invoke the graph with the current state.
    print(f"Invoking graph with question index: {current_state['question_index']}")
    new_state = graph.invoke(current_state)

    # 5. Convert the graph's *complete* new history back to Gradio's format,
    # normalising the graph's "ai" role to Gradio's "assistant".
    gradio_history = []
    for role, content in new_state["interview_history"]:
        if role in ("user", "assistant", "ai"):
            gradio_history.append(
                {"role": "assistant" if role == "ai" else role, "content": content}
            )

    return gradio_history
65
 
66
- # Create the ChatInterface. No separate clear function is needed if we don't have a session.
67
- # The 'Clear' button is built-in and will correctly clear the UI.
68
  demo = gr.ChatInterface(
69
  fn=chat_fn,
70
  title="🤖 AI-Powered Excel Interviewer (Phi-3 Mini)",
@@ -72,7 +63,8 @@ demo = gr.ChatInterface(
72
  chatbot=gr.Chatbot(
73
  show_copy_button=True,
74
  height=600,
75
- placeholder="The interview will begin after you send your first message."
 
76
  ),
77
  textbox=gr.Textbox(
78
  placeholder="Type your answer here and press Enter...",
@@ -81,7 +73,7 @@ demo = gr.ChatInterface(
81
  ),
82
  theme="soft",
83
  submit_btn="Submit Answer",
84
- examples=[ # These examples now work as intended.
85
  "I'm ready to start the interview",
86
  "Let's begin",
87
  "Start the assessment"
 
1
  import gradio as gr
2
  from src.graph import build_graph
3
+ from src.interview_logic import EXCEL_QUESTIONS # Import questions for state building
4
 
5
  # Initialize the graph, which is stateless and operates on the state dict we provide.
6
  graph = build_graph()
7
 
8
def chat_fn(message: str, history: list[list[str]]):
    """Main chat function for Gradio.

    Returns a single string for the bot's reply; Gradio's ChatInterface then
    handles the history update itself, which is what keeps the submit button
    from disappearing.

    Args:
        message: The new message from the user.
        history: The conversation so far as ``[user_msg, assistant_msg]``
            pairs (Gradio's "tuples" chatbot format).

    Returns:
        The bot's reply for this turn, joined into one string.
    """
    # 1. Rebuild the graph's internal history (list of (role, content)
    # tuples) from Gradio's list-of-pairs history.
    internal_history = []
    for user_msg, assistant_msg in history:
        if user_msg:
            internal_history.append(("user", user_msg))
        if assistant_msg:
            # One assistant turn may bundle several graph messages joined
            # with "\n\n" (see the return below); split them back apart so
            # the reconstructed state matches what the graph produced.
            for part in assistant_msg.split("\n\n"):
                internal_history.append(("ai", part))

    # Remember where the history ends *before* this turn so the bot's new
    # messages can be isolated after the graph runs.
    len_before = len(internal_history)

    # Add the user's new message for the graph to process.
    internal_history.append(("user", message))

    # 2. Build the per-turn state. BUGFIX: only assistant ("ai") turns count
    # toward the question index — the original counted any matching content,
    # so a user quoting a question back would skip a question.
    question_count = sum(
        1
        for role, content in internal_history
        if role == "ai" and content in EXCEL_QUESTIONS
    )

    current_state = {
        "interview_status": 0 if not history else 1,  # 0 only on the very first turn
        "interview_history": internal_history,
        "questions": EXCEL_QUESTIONS,
        "question_index": question_count,
        "evaluations": [],  # stateless for simplicity; not persisted across turns
        # NOTE(review): the previous version also passed "final_feedback" and
        # "warnings" in this state dict — confirm the graph no longer reads them.
    }

    # 3. Invoke the graph with the current state.
    # (Plain string: the original used an f-string with no placeholders.)
    print("Invoking graph with current state...")
    new_state = graph.invoke(current_state)

    # 4. Extract ONLY the messages the bot generated during this turn.
    new_messages = new_state["interview_history"][len_before:]
    bot_responses = [content for role, content in new_messages if role == "ai"]

    # 5. Return the reply as one string; the parts are re-split on "\n\n"
    # when the history is reconstructed at the top of this function.
    return "\n\n".join(bot_responses)
 
 
56
 
 
 
 
 
 
 
 
 
57
 
58
+ # Create the ChatInterface.
 
59
  demo = gr.ChatInterface(
60
  fn=chat_fn,
61
  title="🤖 AI-Powered Excel Interviewer (Phi-3 Mini)",
 
63
  chatbot=gr.Chatbot(
64
  show_copy_button=True,
65
  height=600,
66
+ placeholder="The interview will begin after you send your first message.",
67
+ avatar_images=(None, "https://upload.wikimedia.org/wikipedia/commons/1/1d/Microsoft_Excel_2013-2019_logo.svg")
68
  ),
69
  textbox=gr.Textbox(
70
  placeholder="Type your answer here and press Enter...",
 
73
  ),
74
  theme="soft",
75
  submit_btn="Submit Answer",
76
+ examples=[
77
  "I'm ready to start the interview",
78
  "Let's begin",
79
  "Start the assessment"