File size: 2,058 Bytes
5c271a3
9f72bcf
b55b8d4
 
9f72bcf
ef9fa4b
 
 
9f72bcf
b55b8d4
5c271a3
b55b8d4
 
2c2c90a
b55b8d4
 
ef9fa4b
b55b8d4
 
 
 
5c271a3
b55b8d4
 
ef9fa4b
b55b8d4
 
 
 
 
 
 
8039e4b
 
 
 
 
 
 
b55b8d4
 
9f72bcf
 
 
 
b55b8d4
 
5c271a3
fbc17f4
 
 
 
 
5c271a3
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import logging
from langchain_core.messages import SystemMessage , HumanMessage
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.checkpoint.memory import MemorySaver
from .utils.state import State, CompletionFormatter
from .utils.nodes import IntroductionNode
from .utils.utils import DetailsExtractor
from src.genai.utils.models_loader import llm_gpt
from .utils.prompts import completion_check_prompt


# Module-level shared state; extract_details() reads its accumulated
# interactions. NOTE(review): this is shared across ALL IntroductionChatbot
# instances — confirm single-session usage is intended.
business_state = State()

class IntroductionChatbot:
    """Streaming introduction chatbot built on a one-node LangGraph workflow.

    Wraps an LLM behind a single ``chatbot`` graph node, persists
    conversation state in a :class:`MemorySaver` checkpointer, and exposes
    helpers to check whether the conversation is complete and to extract
    collected details.
    """

    def __init__(self):
        # Checkpointer that persists graph state per thread_id.
        self.memory = MemorySaver()
        self.llm = llm_gpt
        self.workflow = self._initialize_workflow()
        self.interact_agent = self.workflow.compile(checkpointer=self.memory)
        # Local record of user inputs only; assistant replies live in the
        # checkpointer, not in this list.
        self.messages = []

    def _initialize_workflow(self):
        """Build the linear graph: START -> chatbot -> END."""
        workflow = StateGraph(MessagesState)
        # The node delegates to IntroductionNode with this instance's LLM.
        workflow.add_node("chatbot", lambda state: IntroductionNode().run(state, self.llm))
        workflow.add_edge(START, "chatbot")
        workflow.add_edge("chatbot", END)
        return workflow

    def chat(self, user_input: str, thread_id: str = "1"):
        """Stream the assistant's reply to ``user_input``.

        Args:
            user_input: The user's latest message.
            thread_id: Checkpointer thread identifier. Defaults to "1" for
                backward compatibility with existing single-session callers.

        Yields:
            str: Content chunks of the assistant response as they arrive.
        """
        self.messages.append({"role": "user", "content": user_input})
        config = {"configurable": {"thread_id": thread_id}}
        # stream_mode="messages" yields (message_chunk, metadata) pairs;
        # the metadata is not needed here.
        for message_chunk, _metadata in self.interact_agent.stream(
            {"messages": [user_input]},
            config=config,
            stream_mode="messages",
        ):
            yield message_chunk.content

    def is_complete(self, latest_response: str) -> bool:
        """Return True when the LLM judges the conversation to be complete.

        Structured output (``CompletionFormatter``) makes the verdict a typed
        boolean instead of free text that would need parsing.
        """
        messages = [
            SystemMessage(content=completion_check_prompt()),
            HumanMessage(content=f'''The response of assistant is: {latest_response}'''),
        ]
        # Fixed: use the instance's LLM (was the module-level llm_gpt global,
        # inconsistent with the self.llm set in __init__).
        response = self.llm.with_structured_output(CompletionFormatter).invoke(messages)
        # Fixed: use logging (already imported) instead of a stray print().
        logging.getLogger(__name__).info("Completion response: %s", response.completion)
        return response.completion

    def extract_details(self):
        """Run DetailsExtractor over the shared business_state interactions."""
        return DetailsExtractor(business_state.interactions).run()

    def reset(self):
        """Drop all conversation memory by recompiling with a fresh checkpointer."""
        self.memory = MemorySaver()
        self.interact_agent = self.workflow.compile(checkpointer=self.memory)