Basu03 committed on
Commit
5c084c2
·
1 Parent(s): 970c493

final working demo fix

Browse files
Files changed (3) hide show
  1. .DS_Store +0 -0
  2. app.py +89 -93
  3. src/interview_logic.py +54 -22
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
app.py CHANGED
@@ -1,109 +1,105 @@
1
  import gradio as gr
2
  from src.graph import build_graph
3
 
4
- # Initialize the graph
5
  graph = build_graph()
6
 
7
- class InterviewSession:
8
- def __init__(self):
9
- """Initializes a new, clean interview state."""
10
- self.state = {
11
- "interview_status": 0, # 0: Not started, 1: In progress, 2: Finished/Terminated
12
- "interview_history": [], # Flat list of (role, content) tuples
13
- "questions": [],
14
- "question_index": 0,
15
- "evaluations": [],
16
- "final_feedback": "",
17
- "warnings": []
18
- }
19
- print("--- New Interview Session Initialized ---")
20
-
21
- def run(self, user_input):
22
- """
23
- Main chat function that handles the interview flow.
24
- It now returns only the new bot messages for the current turn as a single string.
25
- """
26
- history_len_before = len(self.state["interview_history"])
27
-
28
- # If this is the very first interaction, start the interview
29
- if self.state["interview_status"] == 0:
30
- print("Starting interview...")
31
- self.state = graph.invoke(self.state)
32
-
33
- # If there is user input, process it
34
- elif user_input and user_input.strip():
35
- print(f"Processing user input...")
36
- # Add user input to state history
37
- self.state["interview_history"].append(("user", user_input))
38
- # Process the user response through the graph
39
- self.state = graph.invoke(self.state)
40
-
41
- # Get all new messages added by the graph in this turn
42
- new_messages = self.state["interview_history"][history_len_before:]
43
-
44
- # Extract the content of the new assistant messages
45
- response_texts = [content for role, content in new_messages if role != "user"]
46
-
47
- # Combine new messages into a single response string for Gradio
48
- return "\n\n".join(response_texts)
49
-
50
- def reset_interview(self):
51
- """Resets the interview session to start over."""
52
- print("--- Resetting interview session... ---")
53
- self.__init__()
54
- # Return an empty list to clear the chatbot display in Gradio
55
- return []
56
-
57
- # --- Gradio UI Setup ---
58
-
59
- # Create a single global session instance
60
- session = InterviewSession()
61
-
62
- def chat_fn(message, history):
63
  """
64
- Chat function for Gradio ChatInterface.
65
- 'history' is managed by Gradio. We use our own session state.
 
 
 
 
66
  """
67
- return session.run(message)
 
 
 
 
68
 
69
- def clear_fn():
70
- """Clear function to reset the interview session and the UI."""
71
- return session.reset_interview()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
- # Use gr.Blocks for more control over the layout and components
74
- with gr.Blocks(theme="soft", css=".gradio-container {max-width: 1200px; margin: 0 auto;}") as demo:
75
- gr.Markdown(
76
- """
77
- # 🤖 AI-Powered Excel Interviewer (Phi-3 Mini)
78
- An AI-powered interview system that asks Excel-related questions and provides feedback.
79
- The system includes AI-generated response detection to ensure authentic answers.
80
- """
81
- )
82
 
83
- chatbot = gr.Chatbot(
84
- elem_id="chatbot",
85
- label="Interview Conversation",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  show_copy_button=True,
87
  height=600,
88
- avatar_images=(None, "https://upload.wikimedia.org/wikipedia/commons/1/1d/Microsoft_Excel_2013-2019_logo.svg"),
89
- bubble_full_width=False,
90
- placeholder="Click the 'Start Interview' button below to begin."
91
- )
92
-
93
- # Use gr.ChatInterface as a component within gr.Blocks
94
- # We will hide its internal textbox and use our own for better control.
95
- interface = gr.ChatInterface(
96
- fn=chat_fn,
97
- chatbot=chatbot,
98
- submit_btn="Submit Answer",
99
- retry_btn=None,
100
- undo_btn=None,
101
- )
102
-
103
- # Custom clear button that correctly resets the session state
104
- clear_button = gr.Button("Clear and Restart Interview", variant="stop")
105
- clear_button.click(fn=clear_fn, inputs=[], outputs=[chatbot], queue=False)
106
 
 
 
 
 
 
 
 
 
 
 
 
107
 
108
  if __name__ == "__main__":
109
  demo.launch(
 
1
  import gradio as gr
2
  from src.graph import build_graph
3
 
4
+ # Initialize the graph, which is stateless and operates on the state dict we provide.
5
  graph = build_graph()
6
 
7
+ def chat_fn(message: str, history: list[dict[str, str]]):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  """
9
+ Main chat function for Gradio. It is now self-contained and uses the provided
10
+ history as the source of truth for the conversation state.
11
+
12
+ Args:
13
+ message: The new message from the user.
14
+ history: The entire conversation history in Gradio's format.
15
  """
16
+ # 1. Convert Gradio's history (list of dicts) to the graph's expected
17
+ # internal format (list of tuples).
18
+ internal_history = []
19
+ for turn in history:
20
+ internal_history.append((turn["role"], turn["content"]))
21
 
22
+ # 2. Determine the current state based on the history.
23
+ # - How many questions have been asked by the assistant?
24
+ question_count = sum(1 for role, content in internal_history if content in EXCEL_QUESTIONS)
25
+
26
+ # - Is the interview over?
27
+ interview_status = 2 if any("interview will now be terminated" in msg[1] for msg in internal_history) or any("performance summary" in msg[1].lower() for msg in internal_history) else 1
28
+
29
+ # 3. Build the state dictionary to pass to the graph on every turn.
30
+ # This makes our function stateless, avoiding session-related bugs.
31
+ current_state = {
32
+ "interview_status": 0 if not internal_history else interview_status,
33
+ "interview_history": internal_history,
34
+ "questions": EXCEL_QUESTIONS, # Assuming EXCEL_QUESTIONS is accessible
35
+ "question_index": question_count,
36
+ "evaluations": [], # Note: This simple state doesn't persist evaluations across turns. A more robust solution might need to parse them from history.
37
+ "final_feedback": "",
38
+ "warnings": []
39
+ }
40
 
41
+ # Add the user's new message to the history for the graph to process
42
+ current_state["interview_history"].append(("user", message))
 
 
 
 
 
 
 
43
 
44
+ # 4. Invoke the graph with the current state.
45
+ print(f"Invoking graph with question index: {current_state['question_index']}")
46
+ new_state = graph.invoke(current_state)
47
+
48
+ # 5. Convert the graph's output history back to Gradio's format.
49
+ # The graph returns the *complete* new history.
50
+ gradio_history = []
51
+ for role, content in new_state["interview_history"]:
52
+ if role in ["user", "assistant", "ai"]: # Handle 'ai' as 'assistant'
53
+ gradio_history.append({"role": "assistant" if role == "ai" else role, "content": content})
54
+
55
+ return gradio_history
56
+
57
+ # This list must be accessible to the chat function.
58
+ # It's better to define it here or import it from a shared config.
59
+ EXCEL_QUESTIONS = [
60
+ "What is the difference between the VLOOKUP and HLOOKUP functions in Excel?",
61
+ "Explain how to use the INDEX and MATCH functions together, and why you might prefer them over VLOOKUP.",
62
+ "Describe what a Pivot Table is and give an example of a scenario where it would be useful.",
63
+ "What is Conditional Formatting in Excel? Can you provide an example?",
64
+ ]
65
+
66
+ # Create the ChatInterface. No separate clear function is needed if we don't have a session.
67
+ # The 'Clear' button is built-in and will correctly clear the UI.
68
+ demo = gr.ChatInterface(
69
+ fn=chat_fn,
70
+ title="🤖 AI-Powered Excel Interviewer (Phi-3 Mini)",
71
+ description="An AI-powered interview system that asks Excel-related questions and provides feedback. Click one of the examples or type a message like 'start' to begin.",
72
+ chatbot=gr.Chatbot(
73
  show_copy_button=True,
74
  height=600,
75
+ placeholder="The interview will begin after you send your first message."
76
+ ),
77
+ textbox=gr.Textbox(
78
+ placeholder="Type your answer here and press Enter...",
79
+ label="Your Response",
80
+ lines=3
81
+ ),
82
+ theme="soft",
83
+ submit_btn="Submit Answer",
84
+ examples=[ # These examples now work as intended.
85
+ "I'm ready to start the interview",
86
+ "Let's begin",
87
+ "Start the assessment"
88
+ ],
89
+ cache_examples=False
90
+ )
 
 
91
 
92
+ # Add custom CSS for better styling
93
+ demo.css = """
94
+ .chat-message {
95
+ font-size: 16px;
96
+ line-height: 1.5;
97
+ }
98
+ .gradio-container {
99
+ max-width: 1200px;
100
+ margin: 0 auto;
101
+ }
102
+ """
103
 
104
  if __name__ == "__main__":
105
  demo.launch(
src/interview_logic.py CHANGED
@@ -1,6 +1,5 @@
1
  # src/interview_logic.py
2
 
3
- # No more external LLM libraries needed here!
4
  from .perplexity_detector import is_ai_generated
5
  from .local_llm_handler import get_llm_response
6
 
@@ -11,19 +10,28 @@ EXCEL_QUESTIONS = [
11
  "What is Conditional Formatting in Excel? Can you provide an example?",
12
  ]
13
 
 
14
  def start_interview(state: dict) -> dict:
15
- intro_message = "Welcome to the automated Excel skills assessment..."
16
- current_question = EXCEL_QUESTIONS[0]
 
 
 
 
 
 
 
 
 
 
17
 
 
18
  return {
19
  **state,
20
  "interview_status": 1,
21
- "interview_history": [
22
- ("ai", intro_message),
23
- ("ai", current_question)
24
- ],
25
  "questions": EXCEL_QUESTIONS,
26
- "question_index": 0,
27
  "evaluations": [],
28
  "warnings": [],
29
  "final_feedback": "",
@@ -31,28 +39,31 @@ def start_interview(state: dict) -> dict:
31
 
32
 
33
  def ask_question(state: dict) -> dict:
 
 
 
 
34
  question_index = state["question_index"]
35
  current_question = state["questions"][question_index]
36
 
37
  history = state.get("interview_history", [])
38
- history.append(("ai", current_question)) # append as a separate entry
39
 
40
  return { **state, "interview_history": history }
41
 
42
 
43
- # --- MODIFIED FUNCTION ---
44
  def process_user_response(state: dict) -> dict:
 
45
  history = state.get("interview_history", [])
46
  user_response = history[-1][1]
47
 
48
  if is_ai_generated(user_response, threshold=35.0):
49
- termination_message = "This interview will now be terminated..."
50
  history.append(("ai", termination_message))
51
  return {**state, "interview_history": history, "interview_status": 2}
52
 
53
  current_question = state["questions"][state["question_index"]]
54
 
55
- # Create the prompt manually for our local LLM
56
  evaluation_prompt = (
57
  "You are an expert evaluator. Concisely evaluate the following answer to an Excel interview question "
58
  "based on its technical accuracy and clarity. Start the evaluation directly without conversational filler.\n\n"
@@ -60,38 +71,59 @@ def process_user_response(state: dict) -> dict:
60
  f"Answer: {user_response}"
61
  )
62
 
63
- # Get the evaluation from our new local LLM handler
64
  evaluation = get_llm_response(evaluation_prompt)
65
 
 
 
 
66
  evaluations = state.get("evaluations", [])
67
  evaluations.append(evaluation)
68
 
69
- return {**state, "evaluations": evaluations, "question_index": state["question_index"] + 1}
 
 
 
 
 
 
70
 
71
  def generate_final_report(state: dict) -> dict:
 
72
  interview_transcript = "\n".join([f"{speaker.capitalize()}: {text}" for speaker, text in state['interview_history']])
73
  evaluations_summary = "\n\n".join(f"Evaluation for Q{i+1}:\n{e}" for i, e in enumerate(state['evaluations']))
74
 
75
- # Create the report prompt manually
76
  report_prompt = (
77
  "You are a career coach. Based on the interview transcript and evaluations below, write a brief, constructive performance summary. "
78
  "Use Markdown for a 'Strengths' section and an 'Areas for Improvement' section.\n\n"
79
  f"---TRANSCRIPT---\n{interview_transcript}\n\n---EVALUATIONS---\n{evaluations_summary}"
80
  )
81
 
82
- # Get the report from our local LLM handler
83
  final_feedback = get_llm_response(report_prompt)
84
 
85
  history = state.get("interview_history", [])
86
- history.append(("ai", final_feedback))
87
 
88
  return {**state, "final_feedback": final_feedback, "interview_history": history, "interview_status": 2}
89
 
90
- # Routing logic does not need to change
 
 
 
 
91
  def route_after_evaluation(state: dict):
92
- if state.get("interview_status") == 2: return "terminate"
93
- elif state["question_index"] >= len(state["questions"]): return "generate_final_report"
94
- else: return "ask_question"
 
 
 
 
95
 
96
  def route_start_of_interview(state: dict):
97
- return "start_interview" if state.get("interview_status", 0) == 0 else "process_user_response"
 
 
 
 
 
 
 
1
  # src/interview_logic.py
2
 
 
3
  from .perplexity_detector import is_ai_generated
4
  from .local_llm_handler import get_llm_response
5
 
 
10
  "What is Conditional Formatting in Excel? Can you provide an example?",
11
  ]
12
 
13
+ # --- CORRECTED FUNCTION ---
14
  def start_interview(state: dict) -> dict:
15
+ """
16
+ This function now ONLY adds a welcome message. It APPENDS to the history,
17
+ preserving the user's first message. The graph will then transition to 'ask_question'
18
+ to ask the first question.
19
+ """
20
+ intro_message = "Welcome to the automated Excel skills assessment! I will ask you a series of questions to gauge your knowledge. Let's start with the first one."
21
+
22
+ # Get the existing history from the state
23
+ history = state.get("interview_history", [])
24
+
25
+ # Append the welcome message
26
+ history.append(("ai", intro_message))
27
 
28
+ # Return the updated state. We do NOT ask a question here.
29
  return {
30
  **state,
31
  "interview_status": 1,
32
+ "interview_history": history, # Use the modified history
 
 
 
33
  "questions": EXCEL_QUESTIONS,
34
+ "question_index": 0, # Ensure index starts at 0 for the first question
35
  "evaluations": [],
36
  "warnings": [],
37
  "final_feedback": "",
 
39
 
40
 
41
  def ask_question(state: dict) -> dict:
42
+ """
43
+ This function is now responsible for asking ALL questions, including the first one.
44
+ It remains unchanged but its role is now clearer.
45
+ """
46
  question_index = state["question_index"]
47
  current_question = state["questions"][question_index]
48
 
49
  history = state.get("interview_history", [])
50
+ history.append(("ai", current_question))
51
 
52
  return { **state, "interview_history": history }
53
 
54
 
 
55
  def process_user_response(state: dict) -> dict:
56
+ """This function remains unchanged."""
57
  history = state.get("interview_history", [])
58
  user_response = history[-1][1]
59
 
60
  if is_ai_generated(user_response, threshold=35.0):
61
+ termination_message = "This interview will now be terminated due to the detection of AI-generated content."
62
  history.append(("ai", termination_message))
63
  return {**state, "interview_history": history, "interview_status": 2}
64
 
65
  current_question = state["questions"][state["question_index"]]
66
 
 
67
  evaluation_prompt = (
68
  "You are an expert evaluator. Concisely evaluate the following answer to an Excel interview question "
69
  "based on its technical accuracy and clarity. Start the evaluation directly without conversational filler.\n\n"
 
71
  f"Answer: {user_response}"
72
  )
73
 
 
74
  evaluation = get_llm_response(evaluation_prompt)
75
 
76
+ # Prepend the evaluation with a clear marker for the user
77
+ history.append(("ai", f"**Evaluation:**\n{evaluation}"))
78
+
79
  evaluations = state.get("evaluations", [])
80
  evaluations.append(evaluation)
81
 
82
+ # Increment the question index for the next turn
83
+ return {
84
+ **state,
85
+ "interview_history": history,
86
+ "evaluations": evaluations,
87
+ "question_index": state["question_index"] + 1
88
+ }
89
 
90
  def generate_final_report(state: dict) -> dict:
91
+ """This function remains unchanged."""
92
  interview_transcript = "\n".join([f"{speaker.capitalize()}: {text}" for speaker, text in state['interview_history']])
93
  evaluations_summary = "\n\n".join(f"Evaluation for Q{i+1}:\n{e}" for i, e in enumerate(state['evaluations']))
94
 
 
95
  report_prompt = (
96
  "You are a career coach. Based on the interview transcript and evaluations below, write a brief, constructive performance summary. "
97
  "Use Markdown for a 'Strengths' section and an 'Areas for Improvement' section.\n\n"
98
  f"---TRANSCRIPT---\n{interview_transcript}\n\n---EVALUATIONS---\n{evaluations_summary}"
99
  )
100
 
 
101
  final_feedback = get_llm_response(report_prompt)
102
 
103
  history = state.get("interview_history", [])
104
+ history.append(("ai", f"**Final Performance Summary:**\n{final_feedback}"))
105
 
106
  return {**state, "final_feedback": final_feedback, "interview_history": history, "interview_status": 2}
107
 
108
+
109
+ # --- CORRECTED ROUTING ---
110
+ # The routing logic itself is fine, but its behavior is now correct because
111
+ # the nodes it routes to have been fixed.
112
+
113
  def route_after_evaluation(state: dict):
114
+ if state.get("interview_status") == 2:
115
+ return "terminate"
116
+ # The evaluation node now just adds feedback, so we check the index to see if we should ask another question or end.
117
+ elif state["question_index"] >= len(state["questions"]):
118
+ return "generate_final_report"
119
+ else:
120
+ return "ask_question"
121
 
122
  def route_start_of_interview(state: dict):
123
+ # This logic is now correct. If history is empty, the status is 0.
124
+ # The app.py will add the first user message, making the history non-empty,
125
+ # so the graph will correctly call start_interview.
126
+ if not state.get("interview_history"):
127
+ return "start_interview"
128
+ else:
129
+ return "process_user_response"