BishalRD committed on
Commit
6369fd4
·
1 Parent(s): 0c0e15d

single thread conversational in-memory chatbot

Browse files
Files changed (1) hide show
  1. conversational_bot.py +75 -0
conversational_bot.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from langchain_ollama import ChatOllama
3
+ from langgraph.graph import StateGraph, START, END
4
+ from langgraph.graph.message import MessagesState
5
+ from langgraph.checkpoint.memory import MemorySaver
6
+ from langchain_core.messages import (
7
+ convert_to_openai_messages,
8
+ SystemMessage,
9
+ HumanMessage,
10
+ )
11
+
12
def create_conversation_graph(
    model: str = "gemma3:4b",
    temperature: float = 0,
    system_prompt: str = "You are a helpful assistant tasked with performing arithmetic on a set of inputs.",
):
    """
    Build a single-node LangGraph conversation graph with an in-memory
    checkpointer, so chat history persists across invocations that share
    a thread_id.

    Args:
        model: Ollama model name (default preserves the original
            hard-coded "gemma3:4b").
        temperature: Sampling temperature for the LLM.
        system_prompt: System message prepended to every turn. The
            default keeps the original text.
            NOTE(review): this arithmetic-only prompt contradicts the
            general-purpose UI ("Ask anything you want", weather
            example) — confirm intent and consider a general prompt.

    Returns:
        A compiled StateGraph over MessagesState, with checkpointing
        enabled.
    """
    # MemorySaver keeps checkpoints in process memory only — history is
    # lost when the process exits.
    memory = MemorySaver()

    llm = ChatOllama(model=model, temperature=temperature)

    sys_msg = SystemMessage(content=system_prompt)

    def assistant(state: MessagesState) -> MessagesState:
        # Prepend the system message on every turn; prior human/AI
        # messages are restored into state["messages"] by the
        # checkpointer.
        return {"messages": [llm.invoke([sys_msg] + state["messages"])]}

    builder = StateGraph(MessagesState)
    builder.add_node("assistant", assistant)
    builder.add_edge(START, "assistant")
    builder.add_edge("assistant", END)

    # Compiling with the checkpointer enables per-thread persistence.
    return builder.compile(checkpointer=memory)
33
+
34
+
35
def create_chat_interface(thread_id: str = "123"):
    """
    Wire the conversation graph into a Gradio ChatInterface.

    Args:
        thread_id: Checkpointer thread identifier. All chats served by
            this interface share one conversation history under this id
            (single-thread, in-memory chatbot by design). Default "123"
            preserves the original hard-coded value.

    Returns:
        A configured gr.ChatInterface, ready to .launch().
    """
    graph = create_conversation_graph()

    config = {"configurable": {"thread_id": thread_id}}

    def chat_with_assistant(message, history):
        """
        Send one user message through the graph and return the reply text.

        Gradio supplies `history`, but it is intentionally unused: the
        graph's checkpointer is the source of truth for prior turns.
        """
        # Invoke the graph with the new human message; the checkpointer
        # merges it with the stored conversation for this thread_id.
        response = graph.invoke(
            MessagesState(messages=[HumanMessage(content=message)]),
            config,
        )

        # BUG FIX: the original returned convert_to_openai_messages(...)
        # (a role/content dict) even though its comment promised "just
        # the content of the AI message". Return the reply text, which
        # is what a type="messages" ChatInterface fn expects.
        return response["messages"][-1].content

    demo = gr.ChatInterface(
        fn=chat_with_assistant,
        type="messages",
        title="Conversational Bot",
        description="Ask anything you want",
        examples=["Hello", "What is your name?", "What is the weather in Tokyo?"],
    )
    return demo
70
+
71
if __name__ == "__main__":
    # Build the Gradio app and start the local web server.
    create_chat_interface().launch()
74
+
75
+