jdesiree committed on
Commit
6d062bf
·
verified ·
1 Parent(s): 79d5341

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -4
app.py CHANGED
@@ -368,15 +368,20 @@ class Qwen25SmallLLM(Runnable):
368
  class Educational_Agent:
369
  """Modern LangGraph-based educational agent"""
370
 
371
- start_init_and_langgraph_time = time.perf_counter()
372
-
373
  def __init__(self):
 
 
 
374
  self.llm = Qwen25SmallLLM(model_path="Qwen/Qwen2.5-1.5B-Instruct", use_4bit=True)
375
  self.tool_decision_engine = Tool_Decision_Engine(self.llm)
376
 
377
  # Create LangGraph workflow
378
  self.app = self._create_langgraph_workflow()
379
 
 
 
 
 
380
  def _create_langgraph_workflow(self):
381
  """Create the LangGraph workflow"""
382
  # Define tools
@@ -401,6 +406,9 @@ start_init_and_langgraph_time = time.perf_counter()
401
 
402
  def call_model(state: EducationalAgentState) -> dict:
403
  """Call the model with tool decision logic"""
 
 
 
404
  messages = state["messages"]
405
 
406
  # Get the user's query from the last human message
@@ -468,19 +476,36 @@ Otherwise, provide a regular educational response.
468
  "id": "tool_call_1"
469
  }]
470
  )
 
 
 
 
 
471
  return {"messages": [tool_call_message]}
472
 
473
  # Regular response without tools
474
  response = model_with_tools.invoke(enhanced_messages)
 
 
 
 
 
475
  return {"messages": [AIMessage(content=response)]}
476
 
477
  except Exception as e:
478
  logger.error(f"Error in model call: {e}")
 
 
 
 
479
  error_response = AIMessage(content=f"I encountered an error: {str(e)}")
480
  return {"messages": [error_response]}
481
 
482
  def handle_tools(state: EducationalAgentState) -> dict:
483
  """Handle tool execution"""
 
 
 
484
  try:
485
  messages = state["messages"]
486
  last_message = messages[-1]
@@ -497,13 +522,26 @@ Otherwise, provide a regular educational response.
497
  content=result,
498
  tool_call_id=tool_call["id"]
499
  )
 
 
 
 
 
500
  return {"messages": [tool_message]}
501
 
502
  # If no valid tool call, return empty
 
 
 
 
503
  return {"messages": []}
504
 
505
  except Exception as e:
506
  logger.error(f"Error in tool execution: {e}")
 
 
 
 
507
  error_msg = ToolMessage(
508
  content=f"Tool execution failed: {str(e)}",
509
  tool_call_id="error"
@@ -537,6 +575,9 @@ Otherwise, provide a regular educational response.
537
 
538
  def chat(self, message: str, thread_id: str = "default") -> str:
539
  """Main chat interface"""
 
 
 
540
  try:
541
  config = {"configurable": {"thread_id": thread_id}}
542
 
@@ -562,12 +603,21 @@ Otherwise, provide a regular educational response.
562
  response_parts.append(msg.content)
563
 
564
  if response_parts:
565
- return "\n\n".join(response_parts)
566
  else:
567
- return "I apologize, but I couldn't generate a proper response."
 
 
 
 
 
 
568
 
569
  except Exception as e:
570
  logger.error(f"Error in LangGraph chat: {e}")
 
 
 
571
  return f"I apologize, but I encountered an error: {str(e)}"
572
 
573
  # --- Global Agent Instance ---
 
368
  class Educational_Agent:
369
  """Modern LangGraph-based educational agent"""
370
 
 
 
371
  def __init__(self):
372
+ start_init_and_langgraph_time = time.perf_counter()
373
+ current_time = datetime.now()
374
+
375
  self.llm = Qwen25SmallLLM(model_path="Qwen/Qwen2.5-1.5B-Instruct", use_4bit=True)
376
  self.tool_decision_engine = Tool_Decision_Engine(self.llm)
377
 
378
  # Create LangGraph workflow
379
  self.app = self._create_langgraph_workflow()
380
 
381
+ end_init_and_langgraph_time = time.perf_counter()
382
+ init_and_langgraph_time = end_init_and_langgraph_time - start_init_and_langgraph_time
383
+ log_metric(f"Init and LangGraph workflow setup time: {init_and_langgraph_time:0.4f} seconds. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")
384
+
385
  def _create_langgraph_workflow(self):
386
  """Create the LangGraph workflow"""
387
  # Define tools
 
406
 
407
  def call_model(state: EducationalAgentState) -> dict:
408
  """Call the model with tool decision logic"""
409
+ start_call_model_time = time.perf_counter()
410
+ current_time = datetime.now()
411
+
412
  messages = state["messages"]
413
 
414
  # Get the user's query from the last human message
 
476
  "id": "tool_call_1"
477
  }]
478
  )
479
+
480
+ end_call_model_time = time.perf_counter()
481
+ call_model_time = end_call_model_time - start_call_model_time
482
+ log_metric(f"Call model time (with tools): {call_model_time:0.4f} seconds. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")
483
+
484
  return {"messages": [tool_call_message]}
485
 
486
  # Regular response without tools
487
  response = model_with_tools.invoke(enhanced_messages)
488
+
489
+ end_call_model_time = time.perf_counter()
490
+ call_model_time = end_call_model_time - start_call_model_time
491
+ log_metric(f"Call model time (no tools): {call_model_time:0.4f} seconds. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")
492
+
493
  return {"messages": [AIMessage(content=response)]}
494
 
495
  except Exception as e:
496
  logger.error(f"Error in model call: {e}")
497
+ end_call_model_time = time.perf_counter()
498
+ call_model_time = end_call_model_time - start_call_model_time
499
+ log_metric(f"Call model time (error): {call_model_time:0.4f} seconds. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")
500
+
501
  error_response = AIMessage(content=f"I encountered an error: {str(e)}")
502
  return {"messages": [error_response]}
503
 
504
  def handle_tools(state: EducationalAgentState) -> dict:
505
  """Handle tool execution"""
506
+ start_handle_tools_time = time.perf_counter()
507
+ current_time = datetime.now()
508
+
509
  try:
510
  messages = state["messages"]
511
  last_message = messages[-1]
 
522
  content=result,
523
  tool_call_id=tool_call["id"]
524
  )
525
+
526
+ end_handle_tools_time = time.perf_counter()
527
+ handle_tools_time = end_handle_tools_time - start_handle_tools_time
528
+ log_metric(f"Handle tools time: {handle_tools_time:0.4f} seconds. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")
529
+
530
  return {"messages": [tool_message]}
531
 
532
  # If no valid tool call, return empty
533
+ end_handle_tools_time = time.perf_counter()
534
+ handle_tools_time = end_handle_tools_time - start_handle_tools_time
535
+ log_metric(f"Handle tools time (no valid call): {handle_tools_time:0.4f} seconds. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")
536
+
537
  return {"messages": []}
538
 
539
  except Exception as e:
540
  logger.error(f"Error in tool execution: {e}")
541
+ end_handle_tools_time = time.perf_counter()
542
+ handle_tools_time = end_handle_tools_time - start_handle_tools_time
543
+ log_metric(f"Handle tools time (error): {handle_tools_time:0.4f} seconds. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")
544
+
545
  error_msg = ToolMessage(
546
  content=f"Tool execution failed: {str(e)}",
547
  tool_call_id="error"
 
575
 
576
  def chat(self, message: str, thread_id: str = "default") -> str:
577
  """Main chat interface"""
578
+ start_chat_time = time.perf_counter()
579
+ current_time = datetime.now()
580
+
581
  try:
582
  config = {"configurable": {"thread_id": thread_id}}
583
 
 
603
  response_parts.append(msg.content)
604
 
605
  if response_parts:
606
+ final_response = "\n\n".join(response_parts)
607
  else:
608
+ final_response = "I apologize, but I couldn't generate a proper response."
609
+
610
+ end_chat_time = time.perf_counter()
611
+ chat_time = end_chat_time - start_chat_time
612
+ log_metric(f"Complete chat time: {chat_time:0.4f} seconds. Response length: {len(final_response)} chars. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")
613
+
614
+ return final_response
615
 
616
  except Exception as e:
617
  logger.error(f"Error in LangGraph chat: {e}")
618
+ end_chat_time = time.perf_counter()
619
+ chat_time = end_chat_time - start_chat_time
620
+ log_metric(f"Complete chat time (error): {chat_time:0.4f} seconds. Timestamp: {current_time:%Y-%m-%d %H:%M:%S}")
621
  return f"I apologize, but I encountered an error: {str(e)}"
622
 
623
  # --- Global Agent Instance ---