Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,91 +1,63 @@
|
|
| 1 |
-
from dotenv import load_dotenv
|
| 2 |
-
import os
|
| 3 |
-
|
| 4 |
-
load_dotenv()
|
| 5 |
-
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
|
| 6 |
-
|
| 7 |
-
import gradio as gr
|
| 8 |
-
from langchain_community.document_loaders import PyPDFLoader
|
| 9 |
-
from langchain_text_splitters import CharacterTextSplitter
|
| 10 |
-
from langchain_openai import OpenAIEmbeddings
|
| 11 |
-
from langchain_community.vectorstores import Chroma
|
| 12 |
-
from langchain_openai import ChatOpenAI
|
| 13 |
-
from langchain.prompts import PromptTemplate
|
| 14 |
-
from langchain.chains.question_answering import load_qa_chain
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
)
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
output = stuff_chain(
|
| 66 |
-
{"input_documents": docs, "human_input": query}, return_only_outputs=False
|
| 67 |
-
)
|
| 68 |
-
|
| 69 |
-
final_answer = output["output_text"]
|
| 70 |
-
print(f"Final Answer ---> {final_answer}")
|
| 71 |
-
|
| 72 |
-
return final_answer
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
def chat(query, chat_history):
|
| 76 |
-
response = rag_bot(query, chat_history)
|
| 77 |
-
return response
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
if __name__ == "__main__":
|
| 81 |
-
# create_vectordb()
|
| 82 |
-
|
| 83 |
-
chatbot = gr.Chatbot(avatar_images=["user.jpg", "bot.png"], height=600)
|
| 84 |
-
clear_but = gr.Button(value="Clear Chat")
|
| 85 |
-
demo = gr.ChatInterface(
|
| 86 |
-
fn=chat,
|
| 87 |
-
title="RAG Chatbot Prototype",
|
| 88 |
-
multimodal=False,
|
| 89 |
-
chatbot=chatbot,
|
| 90 |
-
)
|
| 91 |
-
demo.launch(debug=True, share=True)
|
|
|
|
| 1 |
+
# Load environment variables from .env BEFORE the heavier imports so that
# OPENAI_API_KEY is populated as early as possible.
from dotenv import load_dotenv
import os

load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # None if the variable is unset

import gradio as gr
# NOTE(review): PyPDFLoader and CharacterTextSplitter are not referenced in
# this revision — presumably left over from the removed create_vectordb()
# indexing step; confirm before deleting.
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import CharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain

# Shared clients reused by rag_bot() for every request.
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
llm = ChatOpenAI(model="gpt-4-1106-preview", api_key=OPENAI_API_KEY)

# Directory holding the persisted Chroma vector index.
vectordb_path = "./vector_db"
| 21 |
+
def rag_bot(query, chat_history=None):
    """Answer ``query`` with retrieval-augmented generation over the Chroma index.

    Args:
        query: The user's message, as forwarded by the Gradio chat UI.
        chat_history: Conversation history supplied by ``gr.ChatInterface``.
            Currently unused, but the parameter must exist because
            ``ChatInterface`` always invokes ``fn(message, history)`` with two
            positional arguments; without it every chat turn raises
            ``TypeError``. Defaults to ``None`` so direct single-argument
            calls keep working.

    Returns:
        str: The LLM's answer text (``output_text`` of the QA chain).
    """
    print(f"Received query: {query}")

    # Prompt for the "stuff" QA chain; {context} is filled with the retrieved
    # documents and {human_input} with the user's message.
    template = """Please answer to human's input based on context. If possible, you should provide reference link with answer. The answer should be very politely, clear and short since it will be the response for client's query. The answer should be finished like this:
Best regards,
Support Team
Use the American English. If the input is not mentioned in context, output something like 'I don't know'.
Context: {context}
Human: {human_input}
Your Response as Chatbot:"""

    prompt_s = PromptTemplate(
        input_variables=["human_input", "context"], template=template
    )

    # Open the persisted Chroma index. The original wrapped the path in a
    # single-argument os.path.join(), which is a no-op, so pass it directly.
    vectorstore = Chroma(
        persist_directory=vectordb_path, embedding_function=embeddings
    )

    docs = vectorstore.similarity_search(query)

    stuff_chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt_s)

    output = stuff_chain(
        {"input_documents": docs, "human_input": query}, return_only_outputs=False
    )

    final_answer = output["output_text"]
    print(f"Final Answer ---> {final_answer}")

    return final_answer
|
| 52 |
+
|
| 53 |
+
if __name__ == "__main__":
    # Assemble the chat UI and serve it with a public share link.
    chat_window = gr.Chatbot(avatar_images=["user.jpg", "bot.png"], height=600)
    clear_button = gr.Button(value="Clear Chat")

    # NOTE(review): gr.ChatInterface dropped the clear_btn parameter in
    # Gradio 5 — confirm the pinned gradio version still accepts it.
    demo = gr.ChatInterface(
        fn=rag_bot,
        title="TraderFyles AI Assistant",
        multimodal=False,
        chatbot=chat_window,
        clear_btn=clear_button,
    )
    demo.launch(debug=True, share=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|