import pytest
from agent.graph import app
from agent.state import AgentState
from agent.nodes import IntentResponse, LeadExtractionResponse
from langchain_core.runnables import RunnableLambda
def simulate_conversation(messages, mock_llm_setup_func):
    """Drive the agent graph through a multi-turn conversation.

    For each message in *messages*: configure the per-turn mocks via
    ``mock_llm_setup_func(turn_index)``, invoke the compiled graph with
    the message as the current input, and record both sides of the
    exchange in the conversation history. Returns the state after the
    final turn.
    """
    # Fresh, empty state — every field the graph reads starts blank.
    state = AgentState(
        conversation_history=[],
        current_message="",
        detected_intent=None,
        retrieved_documents=[],
        user_name=None,
        user_email=None,
        creator_platform=None,
        lead_ready=False,
        response="",
    )
    for turn, user_message in enumerate(messages):
        mock_llm_setup_func(turn)
        state["current_message"] = user_message
        state = app.invoke(state)
        # Record the exchange only after the graph has produced a response,
        # mirroring how the original harness builds the transcript.
        history = state["conversation_history"]
        history.append({"role": "user", "content": state["current_message"]})
        history.append({"role": "assistant", "content": state["response"]})
    return state
def test_agent_e2e(mocker):
    """End-to-end conversation test: greeting -> pricing -> lead capture.

    Walks the agent through five turns, progressively supplying the
    platform, name, and email, then asserts the final state holds the
    complete lead and that the capture tool fired exactly once with it.
    """
    mock_llm = mocker.MagicMock()
    mocker.patch('agent.nodes.get_llm', return_value=mock_llm)
    mocker.patch(
        'agent.nodes.retrieve_documents',
        return_value=["We have Basic and Pro plans for $29 and $79."],
    )
    mock_tool = mocker.patch('agent.nodes.mock_lead_capture')

    messages = [
        "Hi",
        "Tell me about pricing",
        "I want the Pro plan for my YouTube channel",
        "My name is Alex",
        "alex@email.com",
    ]

    def intent_chain(intent):
        # Fake structured-output runnable that classifies the turn's intent.
        return RunnableLambda(
            lambda _x: IntentResponse(intent=intent, confidence=0.99)
        )

    def lead_dispatcher(user_name=None, user_email=None, creator_platform=None):
        # Build a with_structured_output(schema) side_effect that routes by
        # schema: intent classification vs. lead-field extraction. This
        # replaces three near-identical copy-pasted dispatchers.
        def dispatch(schema):
            if schema.__name__ == "IntentResponse":
                return intent_chain("HIGH_INTENT_LEAD")
            return RunnableLambda(
                lambda _x: LeadExtractionResponse(
                    user_name=user_name,
                    user_email=user_email,
                    creator_platform=creator_platform,
                )
            )
        return dispatch

    def setup_mocks_for_turn(idx):
        # Reset side_effect every turn: MagicMock's side_effect, once set,
        # shadows return_value, so without this reset the turn-0/1 setups
        # would be silently ignored if turn order ever changed.
        mock_llm.with_structured_output.side_effect = None
        if idx == 0:
            mock_llm.with_structured_output.return_value = intent_chain("GREETING")
        elif idx == 1:
            mock_llm.with_structured_output.return_value = intent_chain("PRICING_QUERY")
            fake_response = mocker.MagicMock()
            fake_response.content = "We have Basic and Pro plans."
            mock_llm.invoke.return_value = fake_response
        elif idx == 2:
            mock_llm.with_structured_output.side_effect = lead_dispatcher(
                creator_platform="YouTube"
            )
        elif idx == 3:
            mock_llm.with_structured_output.side_effect = lead_dispatcher(
                user_name="Alex"
            )
        elif idx == 4:
            mock_llm.with_structured_output.side_effect = lead_dispatcher(
                user_email="alex@email.com"
            )

    final_state = simulate_conversation(messages, setup_mocks_for_turn)

    # The three lead fields must accumulate across turns 2-4.
    assert final_state.get("user_name") == "Alex"
    assert final_state.get("user_email") == "alex@email.com"
    assert final_state.get("creator_platform") == "YouTube"
    assert final_state.get("lead_ready") is True
    mock_tool.assert_called_once_with("Alex", "alex@email.com", "YouTube")
|