# NOTE(review): the lines below were HuggingFace file-page chrome captured
# into the source during extraction; preserved here as a comment so the
# module stays importable.
# subashpoudel — commit 9f72bcf ("next commit", raw/history/blame, 2.06 kB)
import logging
from langchain_core.messages import SystemMessage , HumanMessage
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.checkpoint.memory import MemorySaver
from .utils.state import State, CompletionFormatter
from .utils.nodes import IntroductionNode
from .utils.utils import DetailsExtractor
from src.genai.utils.models_loader import llm_gpt
from .utils.prompts import completion_check_prompt
# Module-level conversation state shared by every IntroductionChatbot
# instance; extract_details() reads its .interactions attribute.
# NOTE(review): presumably IntroductionNode (or code elsewhere) populates
# .interactions — not visible here; confirm. Also confirm that sharing one
# State() across all chatbot instances is intended rather than per-instance.
business_state = State()
class IntroductionChatbot:
    """Streaming introduction chatbot backed by a one-node LangGraph workflow.

    Wraps a single "chatbot" graph node (IntroductionNode) with an in-memory
    checkpointer, streams assistant responses chunk-by-chunk, and can ask the
    LLM (via structured output) whether the introduction is complete.
    """

    def __init__(self):
        # In-memory checkpointer so conversation state persists across turns.
        self.memory = MemorySaver()
        self.llm = llm_gpt
        self.workflow = self._initialize_workflow()
        self.interact_agent = self.workflow.compile(checkpointer=self.memory)
        # Local transcript of the conversation (user and assistant turns).
        self.messages = []

    def _initialize_workflow(self):
        """Build the minimal graph: START -> "chatbot" -> END."""
        workflow = StateGraph(MessagesState)
        # The node delegates to IntroductionNode, passing the instance's LLM.
        workflow.add_node("chatbot", lambda state: IntroductionNode().run(state, self.llm))
        workflow.add_edge(START, "chatbot")
        workflow.add_edge("chatbot", END)
        return workflow

    def chat(self, user_input: str, thread_id: str = "1"):
        """Stream the assistant's reply to *user_input*, chunk by chunk.

        Args:
            user_input: The user's latest message.
            thread_id: Checkpointer thread id. Defaults to "1", the value
                that was previously hard-coded, so existing callers are
                unaffected; pass a distinct id to keep conversations separate.

        Yields:
            str: Successive content chunks of the assistant's response.
        """
        self.messages.append({"role": "user", "content": user_input})
        config = {"configurable": {"thread_id": thread_id}}
        assistant_chunks = []
        for message_chunk, metadata in self.interact_agent.stream(
            {"messages": [user_input]},
            config=config,
            stream_mode="messages"
        ):
            assistant_chunks.append(message_chunk.content)
            yield message_chunk.content
        # Bug fix: the transcript previously recorded only user turns,
        # leaving it one-sided; record the full assistant reply as well.
        self.messages.append(
            {"role": "assistant", "content": "".join(assistant_chunks)}
        )

    def is_complete(self, latest_response: str) -> bool:
        """Ask the LLM whether *latest_response* signals the intro is done.

        Returns:
            bool: The structured ``completion`` flag parsed by the model.
        """
        messages = [
            SystemMessage(content=completion_check_prompt()),
            HumanMessage(content=f'''The response of assistant is: {latest_response}'''),
        ]
        # Consistency fix: use the instance's LLM (self.llm) rather than the
        # module-level llm_gpt, so a swapped-in model is honored everywhere.
        response = self.llm.with_structured_output(CompletionFormatter).invoke(messages)
        # Use the already-imported logging module instead of a stray print().
        logging.getLogger(__name__).info("Completion response: %s", response.completion)
        return response.completion

    def extract_details(self):
        """Extract structured business details from the shared interaction log.

        NOTE(review): reads the module-level ``business_state.interactions``;
        presumably populated elsewhere (e.g. by IntroductionNode) — confirm.
        """
        return DetailsExtractor(business_state.interactions).run()

    def reset(self):
        """Discard all conversation state and start fresh."""
        self.memory = MemorySaver()
        self.interact_agent = self.workflow.compile(checkpointer=self.memory)
        # Fix: also clear the local transcript, otherwise stale turns from
        # the previous conversation survive the reset.
        self.messages = []