hi-tech committed on
Commit
79ed267
·
verified ·
1 Parent(s): 4cc5b4d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -91
app.py CHANGED
@@ -1,91 +1,63 @@
1
- from dotenv import load_dotenv
2
- import os
3
-
4
- load_dotenv()
5
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
6
-
7
- import gradio as gr
8
- from langchain_community.document_loaders import PyPDFLoader
9
- from langchain_text_splitters import CharacterTextSplitter
10
- from langchain_openai import OpenAIEmbeddings
11
- from langchain_community.vectorstores import Chroma
12
- from langchain_openai import ChatOpenAI
13
- from langchain.prompts import PromptTemplate
14
- from langchain.chains.question_answering import load_qa_chain
15
-
16
- text_splitter = CharacterTextSplitter(
17
- separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len
18
- )
19
- embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
20
- llm = ChatOpenAI(model="gpt-4-1106-preview", api_key=OPENAI_API_KEY)
21
-
22
- vectordb_path = "./vector_db"
23
- uploaded_files = ["./pdf/knowledgebase.pdf"]
24
- vectorstore = None
25
-
26
-
27
- def create_vectordb():
28
- global vectorstore
29
-
30
- for file in uploaded_files:
31
- loader = PyPDFLoader(file)
32
- data = loader.load()
33
- texts = text_splitter.split_documents(data)
34
-
35
- if vectorstore is None:
36
- vectorstore = Chroma.from_documents(
37
- documents=texts,
38
- embedding=embeddings,
39
- persist_directory=os.path.join(vectordb_path),
40
- )
41
- else:
42
- vectorstore.add_documents(texts)
43
-
44
-
45
- def rag_bot(query, chat_history):
46
- print(f"Received query: {query}")
47
-
48
- template = """Please answer to human's input based on context. If the input is not mentioned in context, output something like 'I don't know'.
49
- Context: {context}
50
- Human: {human_input}
51
- Your Response as Chatbot:"""
52
-
53
- prompt_s = PromptTemplate(
54
- input_variables=["human_input", "context"], template=template
55
- )
56
-
57
- vectorstore = Chroma(
58
- persist_directory=os.path.join(vectordb_path), embedding_function=embeddings
59
- )
60
-
61
- docs = vectorstore.similarity_search(query)
62
-
63
- stuff_chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt_s)
64
-
65
- output = stuff_chain(
66
- {"input_documents": docs, "human_input": query}, return_only_outputs=False
67
- )
68
-
69
- final_answer = output["output_text"]
70
- print(f"Final Answer ---> {final_answer}")
71
-
72
- return final_answer
73
-
74
-
75
- def chat(query, chat_history):
76
- response = rag_bot(query, chat_history)
77
- return response
78
-
79
-
80
- if __name__ == "__main__":
81
- # create_vectordb()
82
-
83
- chatbot = gr.Chatbot(avatar_images=["user.jpg", "bot.png"], height=600)
84
- clear_but = gr.Button(value="Clear Chat")
85
- demo = gr.ChatInterface(
86
- fn=chat,
87
- title="RAG Chatbot Prototype",
88
- multimodal=False,
89
- chatbot=chatbot,
90
- )
91
- demo.launch(debug=True, share=True)
 
1
+ from dotenv import load_dotenv
2
+ import os
3
+
4
+ load_dotenv()
5
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
6
+
7
+ import gradio as gr
8
+ from langchain_community.document_loaders import PyPDFLoader
9
+ from langchain_text_splitters import CharacterTextSplitter
10
+ from langchain_openai import OpenAIEmbeddings
11
+ from langchain_community.vectorstores import Chroma
12
+ from langchain_openai import ChatOpenAI
13
+ from langchain.prompts import PromptTemplate
14
+ from langchain.chains.question_answering import load_qa_chain
15
+
16
+ embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
17
+ llm = ChatOpenAI(model="gpt-4-1106-preview", api_key=OPENAI_API_KEY)
18
+
19
+ vectordb_path = "./vector_db"
20
+
21
def rag_bot(query, chat_history=None):
    """Answer a user query with retrieval-augmented generation over the persisted Chroma store.

    Args:
        query: The user's question (the incoming chat message).
        chat_history: Conversation history supplied by ``gr.ChatInterface``.
            Accepted (and currently unused) so this function matches Gradio's
            ``fn(message, history)`` calling convention — without this second
            parameter every chat message raises ``TypeError``.

    Returns:
        The LLM's answer text (``output_text`` from the stuff QA chain).
    """
    print(f"Received query: {query}")

    # Prompt kept verbatim: it drives the assistant's tone and sign-off.
    template = """Please answer to human's input based on context. If possible, you should provide reference link with answer. The answer should be very politely, clear and short since it will be the response for client's query. The answer should be finished like this:
Best regards,
Support Team
Use the American English. If the input is not mentioned in context, output something like 'I don't know'.
Context: {context}
Human: {human_input}
Your Response as Chatbot:"""

    prompt_s = PromptTemplate(
        input_variables=["human_input", "context"], template=template
    )

    # Re-open the persisted vector store on each call; assumes ./vector_db was
    # built by a prior indexing step (the diff removed create_vectordb()).
    vectorstore = Chroma(
        persist_directory=os.path.join(vectordb_path), embedding_function=embeddings
    )

    # Retrieve the most similar chunks for the query (default top-k).
    docs = vectorstore.similarity_search(query)

    # "stuff" chain: concatenates all retrieved docs into the single prompt.
    stuff_chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt_s)

    output = stuff_chain(
        {"input_documents": docs, "human_input": query}, return_only_outputs=False
    )

    final_answer = output["output_text"]
    print(f"Final Answer ---> {final_answer}")

    return final_answer
52
+
53
if __name__ == "__main__":
    # Wire the RAG answer function into a Gradio chat UI and expose it publicly.
    chat_window = gr.Chatbot(avatar_images=["user.jpg", "bot.png"], height=600)
    reset_button = gr.Button(value="Clear Chat")
    gr.ChatInterface(
        fn=rag_bot,
        title="TraderFyles AI Assistant",
        multimodal=False,
        chatbot=chat_window,
        clear_btn=reset_button,
    ).launch(debug=True, share=True)