# trygithubactions / my_agent / utils / initial_interaction.py
# NOTE: HuggingFace file-viewer metadata converted to comments so the module
# parses (originally: uploader subashpoudel, commit 2c2c90a, 3.03 kB).
import os
from langchain_groq import ChatGroq
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.messages import SystemMessage
from pydantic import BaseModel, ConfigDict, Field
from typing import Optional, List
from .models_loader import llm
from .prompts import introduction_prompt , details_extract_prompt
# Pydantic model for extracted business info.
# Passed to `llm.bind_tools([DetailsFormatter])` in extract_details(), so the
# field names and descriptions below are sent to the LLM as the tool schema —
# treat them as runtime data, not documentation.
class DetailsFormatter(BaseModel):
    business_type: str = Field(description="The type of the business")
    platform: str = Field(description="The platform used for the business")
    target_audience: str = Field(description="The target audience of the business")
    business_goals: str = Field(description="The business goals of the business")
    offerings: str = Field(description="The offerings of the business")
    # NOTE(review): capitalized name is inconsistent with the other fields; it
    # also becomes the key in the extracted-args dict, so renaming it would
    # change the extract_details() output — confirm with consumers before fixing.
    Challenges_faced: str = Field(description="The challenges faced by the business")
# State model
class State(BaseModel):
    """Running conversation state: the log of user/agent exchanges."""

    # default_factory gives every State instance its own fresh list instead of
    # a shared `[]` literal default; `chat()` appends
    # {'user': ..., 'agent_response': ...} dicts to it.
    interactions: Optional[list] = Field(default_factory=list)

    model_config = ConfigDict(arbitrary_types_allowed=True)
# Global business state (shared).
# Module-level singleton: IntroductionChatbot.chat() appends every
# user/agent exchange here, and extract_details() reads the full history —
# shared across all chatbot instances in this process.
business_state = State()
class IntroductionChatbot:
    """Intro-interview chatbot built on a single-node LangGraph workflow.

    Drives a conversation with a business owner using `introduction_prompt`,
    records every exchange in the shared `business_state`, and extracts a
    structured summary via the `DetailsFormatter` tool schema.
    """

    def __init__(self, thread_id: str = "1"):
        """Compile the chat workflow.

        Args:
            thread_id: LangGraph checkpoint thread id. Defaults to "1" for
                backward compatibility; pass distinct ids to keep concurrent
                conversations separate in the checkpointer.
        """
        # In-memory checkpointer: conversation history is keyed by thread_id.
        self.memory = MemorySaver()
        self.llm = llm
        self.thread_id = thread_id
        self.workflow = self._initialize_workflow()
        self.interact_agent = self.workflow.compile(checkpointer=self.memory)
        # Local transcript as {"role": ..., "content": ...} dicts (e.g. for a UI).
        self.messages = []

    def _initialize_workflow(self):
        """Build the one-node graph: START -> chatbot -> END."""
        workflow = StateGraph(MessagesState)
        workflow.add_node("chatbot", self._call_model)
        workflow.add_edge(START, "chatbot")
        workflow.add_edge("chatbot", END)
        return workflow

    def _call_model(self, state):
        """Graph node: prepend the system prompt and invoke the LLM.

        `state["messages"]` is the checkpointed MessagesState history; the
        returned message is appended to it by LangGraph.
        """
        messages = [SystemMessage(content=introduction_prompt)] + state["messages"]
        response = self.llm.invoke(messages)
        return {"messages": [response]}

    def chat(self, user_input: str):
        """Send one user turn and return the assistant's reply as text.

        Side effects: appends both turns to `self.messages` and records the
        exchange in the shared `business_state.interactions`.
        """
        self.messages.append({"role": "user", "content": user_input})
        config = {"configurable": {"thread_id": self.thread_id}}
        result = self.interact_agent.invoke({"messages": [user_input]}, config)
        response = result["messages"][-1].content
        self.messages.append({"role": "assistant", "content": response})
        business_state.interactions.append({'user': user_input, 'agent_response': response})
        return response

    def is_complete(self, latest_response: str) -> bool:
        """True once the agent emits its completion sentinel phrase.

        NOTE(review): substring match is brittle — it relies on the prompt
        instructing the model to say this exact sentence; confirm against
        `introduction_prompt`.
        """
        return "Thanks for providing all your required business details" in latest_response

    def extract_details(self):
        """Extract structured business details from the recorded interactions.

        Returns the tool-call args dict (keys per DetailsFormatter) when the
        LLM used the tool, the raw text content as a fallback, or the string
        "No response" if neither is present.
        """
        template = details_extract_prompt(business_state.interactions)
        messages = [SystemMessage(content=template)]
        response = self.llm.bind_tools([DetailsFormatter]).invoke(messages)
        if hasattr(response, 'tool_calls') and response.tool_calls:
            return response.tool_calls[0]['args']
        if hasattr(response, 'content'):
            return response.content
        return "No response"