Prince-1 committed
Commit f293c24 · verified · 1 Parent(s): df067f2

Update lang_graph.py

Files changed (1):
  1. lang_graph.py (+83 -83)
lang_graph.py CHANGED
@@ -1,84 +1,84 @@
-from langchain_core.messages import HumanMessage, SystemMessage,AIMessageChunk
-from langchain_core.runnables.config import RunnableConfig
-from langchain_google_genai import ChatGoogleGenerativeAI
-from langchain_google_genai import GoogleGenerativeAIEmbeddings
-from langchain_core.prompts import ChatPromptTemplate
-from langgraph.checkpoint.memory import MemorySaver
-from langgraph.graph import START, MessagesState, StateGraph
-from langsmith import traceable
-import chainlit as cl
-
-from dotenv import load_dotenv
-
-load_dotenv()
-
-workflow = StateGraph(state_schema=MessagesState)
-
-
-#print(os.environ.get("GOOGLE_API_KEY"))
-model = ChatGoogleGenerativeAI(model="gemini-2.5-pro", temperature=0.5)
-with open("sys_prompt.txt", "r",encoding="utf-8") as f:
-    sys_prompt=f.read()
-ChatPromptTemplate.from_messages([SystemMessage(content=sys_prompt) ])
-
-#model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
-
-
-def call_model(state: MessagesState):
-    response = model.invoke(state["messages"])
-    return {"messages": response}
-
-
-workflow.add_edge(START, "model")
-workflow.add_node("model", call_model)
-
-memory = MemorySaver()
-
-app = workflow.compile(checkpointer=memory)
-
-
-@cl.password_auth_callback
-def auth_callback(username: str, password: str):
-    # Fetch the user matching username from your database
-    # and compare the hashed password with the value stored in the database
-    if (username, password) == ("admin", "admin"):
-        return cl.User(
-            identifier="admin", metadata={"role": "admin", "provider": "credentials"}
-        )
-    else:
-        return None
-
-
-# @cl.on_chat_resume
-# async def on_chat_resume(thread):
-#     pass
-
-
-@cl.on_message
-async def main(message: cl.Message):
-
-    if message.elements:
-        for file in message.elements:
-            if file.mime not in ["image/png", "image/jpeg" , "document/pgf"]:
-                await cl.ErrorMessage(content="Unsupported file type").send()
-
-    answer = cl.Message(content="")
-    await answer.send()
-
-    config: RunnableConfig = {
-        "configurable": {"thread_id": cl.context.session.thread_id}
-    }
-
-
-    for msg, _ in app.stream(
-        {"messages": [HumanMessage(content=message.content)]},
-        config,
-        stream_mode="messages",
-    ):
-        if isinstance(msg, AIMessageChunk):
-            answer.content += msg.content # type: ignore
-            await answer.update()
-
-@cl.on_audio_chunk
-async def on_audio_chunk(chunk: cl.InputAudioChunk):
+from langchain_core.messages import HumanMessage, SystemMessage,AIMessageChunk
+from langchain_core.runnables.config import RunnableConfig
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_google_genai import GoogleGenerativeAIEmbeddings
+from langchain_core.prompts import ChatPromptTemplate
+from langgraph.checkpoint.memory import MemorySaver
+from langgraph.graph import START, MessagesState, StateGraph
+from langsmith import traceable
+import chainlit as cl
+
+from dotenv import load_dotenv
+
+load_dotenv()
+
+workflow = StateGraph(state_schema=MessagesState)
+
+
+#print(os.environ.get("GOOGLE_API_KEY"))
+model = ChatGoogleGenerativeAI(model="gemini-2.5-pro", temperature=0.5)
+with open("sys_prompt.txt", "r",encoding="utf-8") as f:
+    sys_prompt=f.read()
+ChatPromptTemplate.from_messages([SystemMessage(content=sys_prompt) ])
+
+#model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+
+
+def call_model(state: MessagesState):
+    response = model.invoke(state["messages"])
+    return {"messages": response}
+
+
+workflow.add_edge(START, "model")
+workflow.add_node("model", call_model)
+
+memory = MemorySaver()
+
+app = workflow.compile(checkpointer=memory)
+
+
+# @cl.password_auth_callback
+# def auth_callback(username: str, password: str):
+#     # Fetch the user matching username from your database
+#     # and compare the hashed password with the value stored in the database
+#     if (username, password) == ("admin", "admin"):
+#         return cl.User(
+#             identifier="admin", metadata={"role": "admin", "provider": "credentials"}
+#         )
+#     else:
+#         return None
+
+
+# @cl.on_chat_resume
+# async def on_chat_resume(thread):
+#     pass
+
+
+@cl.on_message
+async def main(message: cl.Message):
+
+    if message.elements:
+        for file in message.elements:
+            if file.mime not in ["image/png", "image/jpeg" , "document/pgf"]:
+                await cl.ErrorMessage(content="Unsupported file type").send()
+
+    answer = cl.Message(content="")
+    await answer.send()
+
+    config: RunnableConfig = {
+        "configurable": {"thread_id": cl.context.session.thread_id}
+    }
+
+
+    for msg, _ in app.stream(
+        {"messages": [HumanMessage(content=message.content)]},
+        config,
+        stream_mode="messages",
+    ):
+        if isinstance(msg, AIMessageChunk):
+            answer.content += msg.content # type: ignore
+            await answer.update()
+
+@cl.on_audio_chunk
+async def on_audio_chunk(chunk: cl.InputAudioChunk):
     return {"audio": chunk}