import os
import logging
import gradio as gr
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_openai import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Environment setup
openai_api_key = os.getenv('OPENAI_API_KEY')
if not openai_api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set")
# Constants
DB_NAME = 'vector_db'
MODEL = "gpt-3.5-turbo"
# Load vector DB
if not os.path.exists(DB_NAME):
    logger.error(f"Vector database '{DB_NAME}' not found")
    raise FileNotFoundError(f"Vector database '{DB_NAME}' not found in the current directory")
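# Loading a FAISS index deserializes pickled data, so allow_dangerous_deserialization
# should only be enabled for indexes you built yourself from a trusted source.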
db = FAISS.load_local(
    DB_NAME,
    HuggingFaceEmbeddings(model_name="intfloat/e5-base"),
    allow_dangerous_deserialization=True,
)
# Load LLM
llm = ChatOpenAI(
    model_name=MODEL,
    temperature=0,
    api_key=openai_api_key
)
# Memory for chat history
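# output_key='answer' tells the memory which chain output to store, since
# return_source_documents=True makes the chain return more than one key.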
memory = ConversationBufferMemory(
    memory_key='chat_history',
    return_messages=True,
    output_key='answer'
)
# Retriever and chain: fetch the top 3 most similar chunks for each question
retriever = db.as_retriever(search_kwargs={"k": 3})
chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,
    return_source_documents=True
)
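# The chain can also be exercised directly, outside Gradio (the question string
# here is just an illustrative placeholder), e.g.:
#   result = chain.invoke({"question": "What does this project do?"})
#   print(result["answer"], [d.metadata for d in result["source_documents"]])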
# Gradio interface function. Gradio passes the running chat history, but the
# chain tracks its own state via ConversationBufferMemory, so history is unused.
def chat(question, history):
    result = chain.invoke({"question": question})
    return result["answer"]
# Gradio UI
view = gr.ChatInterface(fn=chat, type='messages', theme=gr.themes.Soft())
view.launch()
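# --- Sketch: building the 'vector_db' index (an assumption; the original
# ingestion script is not part of this file). A minimal one-off example that
# pairs with the same e5-base embeddings used above, reading from a
# hypothetical source file 'knowledge.txt':
#
# from langchain_community.document_loaders import TextLoader
# from langchain.text_splitter import RecursiveCharacterTextSplitter
#
# docs = TextLoader("knowledge.txt").load()
# chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_documents(docs)
# FAISS.from_documents(chunks, HuggingFaceEmbeddings(model_name="intfloat/e5-base")).save_local(DB_NAME)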