| | from typing import Dict, Any |
| | from langchain_core.messages import HumanMessage |
| | from ComputeAgent.models.model_manager import ModelManager |
| | from constant import Constants |
| | import logging |
| |
|
# Module-level logger for this ReAct node.
# NOTE(review): a human-readable name is used instead of the conventional
# __name__; renaming would change log-routing keys, so it is left as-is.
logger = logging.getLogger("ReAct Tool Rejection Exit")

# Shared model manager used to lazily load the default LLM in this node.
model_manager = ModelManager()
| |
|
| |
|
async def tool_rejection_exit_node(state: Dict[str, Any]) -> Dict[str, Any]:
    """
    Node that handles the case when human declines all tool executions.
    Provides a helpful, personalized response using memory context and LLM.

    Falls back to a canned (optionally memory-aware) message if the LLM
    call fails for any reason.

    Args:
        state: Current ReAct state with memory fields

    Returns:
        Updated state with helpful exit message and final response
    """
    logger.info("🚪 User declined all tool executions - generating helpful response")

    query = state.get("query", "")
    user_id = state.get("user_id", "")
    session_id = state.get("session_id", "")

    # Best-effort memory lookup: a failure here only loses personalization,
    # it must never abort the node.
    memory_context = ""
    if user_id and session_id:
        try:
            from helpers.memory import get_memory_manager
            memory_manager = get_memory_manager()
            memory_context = await memory_manager.build_context_for_node(user_id, session_id, "general")
            if memory_context:
                logger.info("🧠 Using memory context for tool rejection response")
        except Exception as e:
            logger.warning(f"⚠️ Could not load memory context for tool rejection: {e}")

    try:
        llm = await model_manager.load_llm_model(Constants.DEFAULT_LLM_NAME)

        # Names of the tools the user just declined, for a specific prompt.
        pending_tool_calls = state.get("pending_tool_calls", [])
        tool_names = [tool.get('name', 'unknown tool') for tool in pending_tool_calls] if pending_tool_calls else ["tools"]

        # Pre-compute the optional context line so the template below stays a
        # single flat f-string (no nested conditional expression inside it).
        context_line = f"Conversation Context: {memory_context}" if memory_context else ""

        # BUG FIX: this template was previously declared with r""" (a raw
        # string, not an f-string), so {query}, {tool_names} and the context
        # placeholder were never interpolated and the LLM received literal
        # "{...}" template text. It must be an f-string.
        system_prompt = Constants.GENERAL_SYSTEM_PROMPT + f"""
You are ComputeAgent, a helpful AI assistant. The user has chosen **not to use** the recommended {', '.join(tool_names)} for their query.

Your task is to respond in a **positive, supportive, and helpful way** that:
1. Respectfully acknowledges their choice.
2. Suggests alternative ways to assist them.
3. Offers ideas on how they might **rephrase or clarify** their query for better results.
4. Personalizes the response using any available conversation context.

User's Query: {query}

{context_line}

Provide a **helpful, encouraging, and concise response** (2-3 sentences) that guides the user toward next steps without pressuring them to use the tool.
"""

        response = await llm.ainvoke([HumanMessage(content=system_prompt)])
        exit_message = response.content.strip()

        logger.info(f"🤖 Generated personalized tool rejection response for user {user_id}, session {session_id}")

    except Exception as e:
        logger.warning(f"⚠️ Could not generate LLM response for tool rejection: {e}")

        # Static fallbacks so the user always gets a response even when the
        # LLM is unavailable. (Removed a useless f-prefix: the memory-aware
        # message contains no placeholders.)
        if memory_context:
            exit_message = "I understand you'd prefer not to use the suggested tools. Based on our conversation, I can try to help you in other ways. Could you please rephrase your question or let me know what specific information you're looking for? I'm here to assist you however I can."
        else:
            exit_message = "I understand you'd prefer not to use the suggested tools. I'm happy to help you in other ways! Could you please rephrase your question or provide more details about what you're looking for? I'm here to assist you with direct answers whenever possible."

    # Shallow-copy the state so the caller's dict is not mutated in place.
    updated_state = state.copy()
    updated_state["response"] = exit_message
    updated_state["current_step"] = "tool_rejection_exit"

    # Clear any pending tool plan — the user has declined all executions.
    if "tools" in updated_state:
        del updated_state["tools"]

    logger.info("✅ Tool rejection exit complete with helpful response")

    return updated_state