import os
import gradio as gr
import pinecone
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
# Load your keys from environment variables (set in HF Space secrets)
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY")
PINECONE_ENV = os.environ.get("PINECONE_ENV") # e.g., 'us-east1-gcp'
# Initialize Pinecone
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENV)
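# NOTE: pinecone.init() is the pinecone-client v2 API; it was removed in
# pinecone-client v3+, so pin the older client if you reuse this setup.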
index_name = "workflow-helper-index"
embedding = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
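# OpenAIEmbeddings defaults to text-embedding-ada-002, whose vectors are
# 1536-dimensional, which is why the index below is created with dimension=1536.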
# Connect to Pinecone index or create if it doesn't exist
if index_name not in pinecone.list_indexes():
    pinecone.create_index(index_name, dimension=1536)
vectorstore = Pinecone.from_existing_index(index_name, embedding)
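# The PyPDFLoader / RecursiveCharacterTextSplitter imports above are only
# needed when populating the index. A minimal one-time ingestion sketch,
# assuming a hypothetical local file "workflow_guide.pdf" (run once, then
# comment out):
#
#     docs = PyPDFLoader("workflow_guide.pdf").load()
#     splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
#     Pinecone.from_documents(splitter.split_documents(docs), embedding, index_name=index_name)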
# Setup LLM and memory
llm = ChatOpenAI(model_name="gpt-4o", temperature=0, openai_api_key=OPENAI_API_KEY)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
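# ConversationBufferMemory keeps the full turn history under the "chat_history"
# key, which ConversationalRetrievalChain uses to rewrite follow-up questions
# into standalone queries before retrieval.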
# Create Conversational Retrieval Chain
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    # as_retriever() takes per-search options via search_kwargs, not a bare k=4
    retriever=vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 4}),
    memory=memory,
    return_source_documents=False,
)
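# Gradio callback: answer one question and mirror the updated history into
# both the Chatbot display and the session State.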
def chat(message, chat_history):
    response = qa_chain.run(message)
    chat_history.append((message, response))
    return chat_history, chat_history
with gr.Blocks() as demo:
    gr.Markdown("# 💬 Workflow Approvals Assistant")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Ask about a PO or approval process...")
    clear = gr.Button("Clear Chat")
    state = gr.State([])

    def clear_chat():
        # Reset the chain's ConversationBufferMemory too, not just the UI,
        # so old turns don't leak into the next conversation.
        memory.clear()
        return [], []

    msg.submit(chat, [msg, state], [chatbot, state])
    clear.click(clear_chat, None, [chatbot, state])
if __name__ == "__main__":
    demo.launch()
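# To run locally: export OPENAI_API_KEY, PINECONE_API_KEY, and PINECONE_ENV,
# then `python app.py`; on a HF Space, set the same names as repository secrets.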