"""Simple Streamlit chat app answering questions over a persisted Chroma store.

Flow: reopen the Chroma collection from ./chroma_db, build a
ConversationalRetrievalChain (gpt-3.5-turbo + conversation memory), and drive
a Streamlit chat UI whose transcript survives reruns via st.session_state.
Requires the OpenAI key in the `openai_token` environment variable.
"""

import os
import time

import streamlit as st
from langchain.chains import ConversationalRetrievalChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DataFrameLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import Chroma

# --- OpenAI / vector-store setup -------------------------------------------
openai_api_key = os.getenv("openai_token")
embedding = OpenAIEmbeddings(openai_api_key=openai_api_key)

# Reopen the Chroma collection persisted by an offline ingestion step.
persist_directory = './chroma_db'
vectordb = Chroma(
    persist_directory=persist_directory,
    embedding_function=embedding,
)

llm_name = "gpt-3.5-turbo"
llm = ChatOpenAI(model_name=llm_name, temperature=0,
                 openai_api_key=openai_api_key)

# Stateless single-shot QA chain. Kept for parity with the original module's
# public names; the chat UI below uses the memory-backed chain instead.
qa_chain = RetrievalQA.from_chain_type(
    llm,
    retriever=vectordb.as_retriever(),
)

# Conversation memory so follow-up questions can reference earlier turns.
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True,
)

qa_memory = ConversationalRetrievalChain.from_llm(
    llm,
    retriever=vectordb.as_retriever(),
    memory=memory,
)


def response_generator(prompt):
    """Yield the chain's answer word by word to emulate token streaming.

    NOTE: ConversationalRetrievalChain puts its text under the 'answer' key
    ('result' is the RetrievalQA key) — the original read the wrong key,
    which raises KeyError at runtime.
    """
    response = qa_memory({"question": prompt})["answer"]
    for word in response.split():
        yield word + " "
        time.sleep(0.05)


st.title("Simple chat")

# Initialize chat history once; st.session_state persists across reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input.
if prompt := st.chat_input("What is up?"):
    # Record and display the user's turn.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Stream the assistant's turn; st.write_stream consumes the generator
    # and returns the accumulated text for the history entry.
    with st.chat_message("assistant"):
        response = st.write_stream(response_generator(prompt))
    st.session_state.messages.append(
        {"role": "assistant", "content": response}
    )