"""Chainlit chat app backed by a single-node LangGraph workflow over Gemini.

Loads a system prompt from ``sys_prompt.txt``, streams model output token-by-token
into the Chainlit UI, and persists per-thread history with a ``MemorySaver``
checkpointer keyed by the Chainlit session thread id.
"""

from langchain_core.messages import AIMessageChunk, HumanMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables.config import RunnableConfig
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
from langsmith import traceable

import chainlit as cl
from dotenv import load_dotenv

# Pull GOOGLE_API_KEY (and friends) from .env before any client is constructed.
load_dotenv()

model = ChatGoogleGenerativeAI(model="gemini-2.5-pro", temperature=0.5)

with open("sys_prompt.txt", "r", encoding="utf-8") as f:
    sys_prompt = f.read()

# System message prepended to every model call.
SYSTEM_MESSAGE = SystemMessage(content=sys_prompt)


def call_model(state: MessagesState):
    """Invoke the model on the conversation history, system prompt first.

    BUG FIX: the original built a ChatPromptTemplate from the system prompt but
    discarded the result, so the prompt was never actually sent to the model.
    """
    response = model.invoke([SYSTEM_MESSAGE, *state["messages"]])
    return {"messages": response}


# Single-node graph: START -> "model". MemorySaver keeps per-thread history
# in process memory, keyed by the thread_id passed in the RunnableConfig.
workflow = StateGraph(state_schema=MessagesState)
workflow.add_node("model", call_model)
workflow.add_edge(START, "model")

memory = MemorySaver()
app = workflow.compile(checkpointer=memory)


@cl.password_auth_callback
def auth_callback(username: str, password: str):
    """Authenticate a Chainlit user.

    Placeholder credential check (admin/admin). In production, fetch the user
    record from a database and compare a *hashed* password instead.
    Returns a ``cl.User`` on success, ``None`` to reject the login.
    """
    if (username, password) == ("admin", "admin"):
        return cl.User(
            identifier="admin",
            metadata={"role": "admin", "provider": "credentials"},
        )
    return None


# @cl.on_chat_resume
# async def on_chat_resume(thread):
#     pass


@cl.on_message
async def main(message: cl.Message):
    """Handle an incoming chat message and stream the model reply to the UI."""
    if message.elements:
        for file in message.elements:
            # NOTE(review): original allowed "document/pgf", which is not a
            # registered MIME type — PDF's type is "application/pdf". Confirm
            # the intended attachment types with the product owner.
            if file.mime not in ("image/png", "image/jpeg", "application/pdf"):
                await cl.ErrorMessage(content="Unsupported file type").send()
                # BUG FIX: original fell through and processed the message
                # even after reporting the unsupported attachment.
                return

    answer = cl.Message(content="")
    await answer.send()

    # Route history to this chat thread's checkpoint.
    config: RunnableConfig = {
        "configurable": {"thread_id": cl.context.session.thread_id}
    }

    # stream_mode="messages" yields (message_chunk, metadata) pairs; append
    # only the AI chunks to the UI message as they arrive.
    for msg, _ in app.stream(
        {"messages": [HumanMessage(content=message.content)]},
        config,
        stream_mode="messages",
    ):
        if isinstance(msg, AIMessageChunk):
            answer.content += msg.content  # type: ignore
            await answer.update()


@cl.on_audio_chunk
async def on_audio_chunk(chunk: cl.InputAudioChunk):
    """Pass a raw input-audio chunk back to the caller unchanged."""
    return {"audio": chunk}