"""Minimal Streamlit chat UI with a Chroma vector store set up at module level.

The embeddings and vector database are initialized on import; the chat loop
itself currently streams a canned greeting (see ``response_generator``) —
the retrieval pieces (``RetrievalQA``, ``ChatOpenAI``, ``vectordb``) are
imported/created but not yet wired into the conversation.
"""

import os
import random
import time

import streamlit as st
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DataFrameLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

# OpenAI credentials.
# NOTE(review): the env var is named "openapi_token" — confirm this is the
# intended variable name and not a typo for an OpenAI API key (the value may
# be None if unset, which OpenAIEmbeddings will reject at call time).
openai_api_key = os.getenv("openapi_token")
embedding = OpenAIEmbeddings(openai_api_key=openai_api_key)

# Persistent Chroma vector database (created here but not yet queried by the
# chat loop below).
persist_directory = './chroma_db'
vectordb = Chroma(persist_directory=persist_directory,
                  embedding_function=embedding)


def response_generator(responses=None, delay=0.05):
    """Yield a randomly chosen canned response word by word.

    Emulates a streamed LLM reply for ``st.write_stream``.

    Args:
        responses: Optional list of candidate reply strings. Defaults to a
            small built-in set of greetings (preserving the original
            behavior when called with no arguments).
        delay: Seconds to sleep between words, simulating token latency.

    Yields:
        str: Each word of the chosen response followed by a trailing space.
    """
    if responses is None:
        responses = [
            "Hello there! How can I assist you today?",
            "Hi, human! Is there anything I can help you with?",
            "Do you need help?",
        ]
    response = random.choice(responses)
    for word in response.split():
        yield word + " "
        time.sleep(delay)


st.title("Simple chat")

# Initialize chat history once per browser session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Streamlit reruns the whole script on each interaction, so replay the
# stored conversation to keep it visible.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input and respond.
if prompt := st.chat_input("What is up?"):
    # Record and display the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Stream the (canned) assistant reply; st.write_stream returns the
    # fully concatenated text, which we store in the history.
    with st.chat_message("assistant"):
        response = st.write_stream(response_generator())
    st.session_state.messages.append(
        {"role": "assistant", "content": response})