jdesiree committed on
Commit
ba13384
·
verified ·
1 Parent(s): 7868388

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -12
app.py CHANGED
@@ -663,19 +663,92 @@ class Educational_Agent:
663
  tools = [Create_Graph_Tool]
664
  tool_node = ToolNode(tools)
665
 
666
- def should_continue(state: EducationalAgentState) -> str:
667
- """Determine next step in the workflow"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
668
  messages = state["messages"]
669
- last_message = messages[-1]
670
 
671
- # Check if we have tool calls to execute
672
- if hasattr(last_message, 'tool_calls') and last_message.tool_calls:
673
- logger.info("Executing tools based on model decision")
674
- return "tools"
 
 
 
 
 
675
 
676
- # Check if the message content contains JSON for tool calling
677
- if isinstance(last_message, AIMessage) and last_message.content:
678
- content = last_message.content
679
 
680
  # Look for JSON blocks in the message
681
  json_pattern = r'```json\s*\n?(.*?)\n?```'
@@ -721,7 +794,7 @@ class Educational_Agent:
721
  except Exception as e:
722
  logger.error(f"Error processing JSON tools: {e}")
723
  return {"messages": []}
724
-
725
  def make_tool_decision(state: EducationalAgentState) -> dict:
726
  """Decide whether tools are needed and update state"""
727
  start_tool_decision_time = time.perf_counter()
@@ -747,7 +820,7 @@ class Educational_Agent:
747
  log_metric(f"Tool decision workflow time: {tool_decision_time:0.4f} seconds. Decision: {needs_visualization}. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")
748
 
749
  return {"needs_tools": needs_visualization}
750
-
751
  # Create the workflow graph
752
  workflow = StateGraph(EducationalAgentState)
753
 
 
663
  tools = [Create_Graph_Tool]
664
  tool_node = ToolNode(tools)
665
 
666
def call_model(state: EducationalAgentState) -> dict:
    """Call the LLM to generate a response for the latest user message.

    Closure over the enclosing agent: reads ``self.llm`` and uses the
    module-level ``log_metric`` / ``logger`` helpers from the outer scope.

    Args:
        state: Graph state; reads ``state["messages"]`` (LangChain message
            list) and the ``needs_tools`` flag set earlier by the
            tool-decision node (defaults to False when absent).

    Returns:
        dict: ``{"messages": [AIMessage]}`` — the model's reply, an
        "I didn't receive a question" fallback, or an error message.
        Never raises: all exceptions are caught and converted to an
        AIMessage so the graph keeps running.
    """
    # Wall-clock timing for the metrics log; captured before any work so
    # the except branch can also report elapsed time.
    start_call_model_time = time.perf_counter()
    current_time = datetime.now()

    messages = state["messages"]

    # Get the latest human message — scan from the end so intervening
    # AI/tool messages are skipped.
    user_query = ""
    for msg in reversed(messages):
        if isinstance(msg, HumanMessage):
            user_query = msg.content
            break

    # No human message found (or it was empty): short-circuit with a
    # canned reply. NOTE: this early return skips the timing metric.
    if not user_query:
        return {"messages": [AIMessage(content="I didn't receive a question. Please ask me something!")]}

    try:
        # Check if tools are needed based on state (set by make_tool_decision).
        needs_tools = state.get("needs_tools", False)

        if needs_tools:
            # Create tool prompt for visualization: instructs the model to
            # answer AND emit a ```json``` block that the downstream
            # JSON-tool node parses (see the json_pattern regex there).
            # Doubled braces {{ }} are f-string escapes for literal JSON braces.
            # NOTE(review): reconstructed from a diff — the original
            # indentation inside this triple-quoted string is not
            # recoverable and reaches the model verbatim; confirm against
            # the deployed file.
            tool_prompt = f"""
You are an educational AI assistant. The user has asked: "{user_query}"

This query would benefit from a visualization. Please provide a helpful educational response AND include a JSON configuration for creating a graph or chart.

Format your response with explanatory text followed by a JSON block like this:

```json
{{
"data": {{"Category 1": value1, "Category 2": value2}},
"plot_type": "bar|line|pie",
"title": "Descriptive Title",
"x_label": "X Axis Label",
"y_label": "Y Axis Label",
"educational_context": "Explanation of why this visualization helps learning"
}}
```

Provide your educational response followed by the JSON configuration.
"""
            prompt = tool_prompt
        else:
            # Plain Q&A: forward the user's question unchanged.
            prompt = user_query

        # Generate response using the LLM (self.llm is accessible here).
        response = self.llm.invoke(prompt)

        # Create AI message.
        # NOTE(review): wrapping assumes invoke() returns a plain string;
        # LangChain *chat* models return a Message object, which would be
        # nested inside content here — confirm self.llm's type.
        ai_message = AIMessage(content=response)

        end_call_model_time = time.perf_counter()
        call_model_time = end_call_model_time - start_call_model_time
        log_metric(f"Call model time: {call_model_time:0.4f} seconds. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")

        return {"messages": [ai_message]}

    except Exception as e:
        # Broad catch is deliberate: any LLM failure becomes a user-visible
        # AIMessage instead of crashing the graph run.
        logger.error(f"Error in call_model: {e}")
        end_call_model_time = time.perf_counter()
        call_model_time = end_call_model_time - start_call_model_time
        log_metric(f"Call model time (error): {call_model_time:0.4f} seconds. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")

        error_message = AIMessage(content=f"I encountered an error generating a response: {str(e)}")
        return {"messages": [error_message]}
734
+ def process_json_tools(state: EducationalAgentState) -> dict:
735
+ """Process JSON tool configurations from the model response"""
736
+ start_process_tools_time = time.perf_counter()
737
+ current_time = datetime.now()
738
+
739
  messages = state["messages"]
 
740
 
741
+ # Get the last AI message
742
+ last_ai_message = None
743
+ for msg in reversed(messages):
744
+ if isinstance(msg, AIMessage):
745
+ last_ai_message = msg
746
+ break
747
+
748
+ if not last_ai_message or not last_ai_message.content:
749
+ return {"messages": []}
750
 
751
+ content = last_ai_message.content
 
 
752
 
753
  # Look for JSON blocks in the message
754
  json_pattern = r'```json\s*\n?(.*?)\n?```'
 
794
  except Exception as e:
795
  logger.error(f"Error processing JSON tools: {e}")
796
  return {"messages": []}
797
+
798
  def make_tool_decision(state: EducationalAgentState) -> dict:
799
  """Decide whether tools are needed and update state"""
800
  start_tool_decision_time = time.perf_counter()
 
820
  log_metric(f"Tool decision workflow time: {tool_decision_time:0.4f} seconds. Decision: {needs_visualization}. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")
821
 
822
  return {"needs_tools": needs_visualization}
823
+
824
  # Create the workflow graph
825
  workflow = StateGraph(EducationalAgentState)
826