# Source: google-labs-jules[bot]
# Commit: feat: implement AutoStream conversational AI sales agent with LangGraph
import pytest

from agent.nodes import process_lead, LeadExtractionResponse
from agent.state import AgentState
from langchain_core.runnables import RunnableLambda


def test_lead_workflow_step_by_step(mocker):
    """Walk a high-intent lead through the three-turn capture flow.

    Turn 1 extracts only the platform (agent must ask for a name),
    turn 2 supplies the name (agent must ask for an email), and
    turn 3 supplies the email, at which point the lead is complete.
    The LLM is fully mocked: each turn swaps in a fake structured-output
    chain that returns a canned ``LeadExtractionResponse``.
    """
    state = AgentState(
        conversation_history=[],
        current_message="I want the Pro plan for my YouTube channel",
        detected_intent="HIGH_INTENT_LEAD",
        retrieved_documents=[],
        user_name=None,
        user_email=None,
        creator_platform=None,
        lead_ready=False,
        response="",
    )

    mock_llm = mocker.MagicMock()
    mocker.patch("agent.nodes.get_llm", return_value=mock_llm)

    def stub_extraction(**fields):
        # Replace the structured-output chain so the next process_lead call
        # "extracts" exactly these fields (unset fields default to None).
        extraction = LeadExtractionResponse(
            user_name=fields.get("user_name"),
            user_email=fields.get("user_email"),
            creator_platform=fields.get("creator_platform"),
        )
        mock_llm.with_structured_output.return_value = RunnableLambda(
            lambda _inputs: extraction
        )

    def advance(result, next_message):
        # Fold the node's output back into state, record the completed turn
        # in the history, and queue the user's next message.
        state.update(result)
        state["conversation_history"].append(
            {"role": "user", "content": state["current_message"]}
        )
        state["conversation_history"].append(
            {"role": "assistant", "content": state["response"]}
        )
        state["current_message"] = next_message

    # Turn 1: only the platform is extractable; the agent should ask for a name.
    stub_extraction(creator_platform="YouTube")
    result = process_lead(state)
    assert result.get("user_name") is None
    assert result.get("creator_platform") == "YouTube"
    assert "name" in result["response"].lower()

    # Turn 2: the user gives their name; the agent should ask for an email.
    advance(result, "My name is Alex")
    stub_extraction(user_name="Alex")
    result = process_lead(state)
    assert result.get("user_name") == "Alex"
    assert "email" in result["response"].lower()

    # Turn 3: the email arrives and the lead is ready for handoff.
    advance(result, "alex@email.com")
    stub_extraction(user_email="alex@email.com")
    result = process_lead(state)
    assert result.get("user_email") == "alex@email.com"
    assert result.get("lead_ready") is True