"""Chainlit chat app that answers questions via RAG over a local FAISS index."""

from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.runnable import Runnable
from langchain.schema.runnable.config import RunnableConfig
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_core.runnables.passthrough import RunnablePassthrough
import logging
import chainlit as cl
import os

# Fail fast at import time if the key is missing (KeyError), rather than
# erroring later mid-conversation.
openai_api_key = os.environ["OPENAI_API_KEY"]


@cl.on_chat_start
async def on_chat_start():
    """Build the retrieval chain once per chat session and store it in the session.

    Pipeline: retriever (FAISS) -> prompt -> streaming ChatOpenAI -> str.
    """
    prompt_template = """ You're a helpful AI assistent tasked to answer the user's questions. You can only make conversations based on the provided context. If a response cannot be formed strictly using the context, politely say you don’t have knowledge about that topic. CONTEXT: {context} QUESTION: {question} YOUR ANSWER:"""

    embeddings = OpenAIEmbeddings(
        openai_api_key=openai_api_key, model="text-embedding-3-large"
    )
    # NOTE(review): allow_dangerous_deserialization loads pickled data — only
    # safe because ./faiss_index is a locally produced, trusted artifact.
    vector_store = FAISS.load_local(
        "./faiss_index", embeddings, allow_dangerous_deserialization=True
    )
    # streaming=True so tokens can be forwarded to the UI as they arrive.
    model = ChatOpenAI(api_key=openai_api_key, streaming=True)
    prompt = ChatPromptTemplate.from_messages([("system", prompt_template)])
    retriever = vector_store.as_retriever()

    runnable = (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | model
        | StrOutputParser()
    )
    cl.user_session.set("runnable", runnable)


@cl.on_message
async def on_message(message: cl.Message):
    """Answer an incoming user message by streaming the chain's output to the UI."""
    logging.info(
        "\n"
        "=================================================================================\n"
        "ON MESSAGE: %s\n"
        "=================================================================================\n",
        message.content,
    )
    runnable = cl.user_session.get("runnable")

    msg = cl.Message(content="")
    # Use the async streaming API instead of the synchronous invoke():
    # invoke() would block the event loop, and the model was configured with
    # streaming=True precisely so tokens can be relayed incrementally.
    async for chunk in runnable.astream(message.content):
        await msg.stream_token(chunk)
    await msg.send()

    logging.info("Sending message with res <%s>", msg.content)
    logging.info("Done with <%s>", message.content)