SaranRaj-12 committed
Commit 6100c13 · verified · 1 parent: 8a871f5

Upload 6 files

Files changed (6):
  1. .huggingface/spaces.yaml +3 -0
  2. app.py +107 -0
  3. indexing.py +83 -0
  4. prompt_template.json +5 -0
  5. requirements.txt +12 -0
  6. retrieval.py +114 -0
.huggingface/spaces.yaml ADDED
@@ -0,0 +1,3 @@
+ sdk: gradio
+ sdk_version: 4.26.0
+ app_file: app.py
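
These three keys tell Spaces to launch `app.py` with the Gradio SDK pinned to 4.26.0, matching the `gradio==4.26.0` pin in requirements.txt below.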
app.py ADDED
@@ -0,0 +1,107 @@
+ import os
+ import gradio as gr
+ from dotenv import load_dotenv
+
+ import indexing
+ import retrieval
+
+ list_llm = [
+     "mistralai/Mistral-7B-Instruct-v0.3",
+     "mistralai/Mixtral-8x7B-Instruct-v0.1",
+     "HuggingFaceH4/zephyr-7b-beta"
+ ]
+ list_llm_simple = [os.path.basename(llm) for llm in list_llm]
+
+ def retrieve_api():
+     load_dotenv()
+     global huggingfacehub_api_token
+     huggingfacehub_api_token = os.environ.get("HUGGINGFACE_API_KEY")
+
+ def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
+     list_file_path = [x.name for x in list_file_obj if x is not None]
+     progress(0.1, desc="Creating collection name...")
+     collection_name = indexing.create_collection_name(list_file_path[0])
+     progress(0.25, desc="Loading document...")
+     doc_splits = indexing.load_doc(list_file_path, chunk_size, chunk_overlap)
+     progress(0.5, desc="Generating vector database...")
+     vector_db = indexing.create_db(doc_splits, collection_name)
+     return vector_db, collection_name, "Complete!"
+
+ def initialize_llm(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
+     llm_name = list_llm[llm_option]
+     qa_chain = retrieval.initialize_llmchain(
+         llm_name, huggingfacehub_api_token, llm_temperature, max_tokens, top_k, vector_db, progress
+     )
+     return qa_chain, "Complete!"
+
+ def conversation(qa_chain, message, history):
+     qa_chain, new_history, response_sources = retrieval.invoke_qa_chain(qa_chain, message, history)
+     response_source1 = response_sources[0].page_content.strip()
+     response_source2 = response_sources[1].page_content.strip()
+     response_source3 = response_sources[2].page_content.strip()
+     response_source1_page = response_sources[0].metadata["page"] + 1  # PyPDF pages are 0-indexed
+     response_source2_page = response_sources[1].metadata["page"] + 1
+     response_source3_page = response_sources[2].metadata["page"] + 1
+     return (
+         qa_chain,
+         gr.update(value=""),
+         new_history,
+         response_source1,
+         response_source1_page,
+         response_source2,
+         response_source2_page,
+         response_source3,
+         response_source3_page,
+     )
+
+ with gr.Blocks(theme="base") as demo:
+     vector_db = gr.State()
+     qa_chain = gr.State()
+     collection_name = gr.State()
+     gr.Markdown("""# PDF Chatbot with RAG""")
+     with gr.Row():
+         document = gr.File(file_types=[".pdf"], file_count="multiple", label="Upload PDF(s)")
+     with gr.Accordion("Document Settings", open=False):
+         slider_chunk_size = gr.Slider(100, 1000, value=600, step=20, label="Chunk size")
+         slider_chunk_overlap = gr.Slider(10, 200, value=40, step=10, label="Chunk overlap")
+     db_progress = gr.Textbox(label="DB Init Status", value="None")
+     db_btn = gr.Button("Generate vector database")
+
+     db_btn.click(
+         initialize_database,
+         inputs=[document, slider_chunk_size, slider_chunk_overlap],
+         outputs=[vector_db, collection_name, db_progress],
+     )
+
+     with gr.Row():
+         llm_btn = gr.Radio(list_llm_simple, label="Choose LLM", type="index")
+     with gr.Accordion("LLM Parameters", open=False):
+         slider_temperature = gr.Slider(0.01, 1.0, value=0.7, step=0.1, label="Temperature")
+         slider_maxtokens = gr.Slider(224, 4096, value=1024, step=32, label="Max Tokens")
+         slider_topk = gr.Slider(1, 10, value=3, step=1, label="Top-k")
+     llm_progress = gr.Textbox(label="LLM Status", value="None")
+     qachain_btn = gr.Button("Initialize QA Chain")
+
+     qachain_btn.click(
+         initialize_llm,
+         inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
+         outputs=[qa_chain, llm_progress],
+     )
+
+     chatbot = gr.Chatbot(height=300)
+     msg = gr.Textbox(placeholder="Ask something...", label="Message")
+     submit_btn = gr.Button("Submit")
+     clear_btn = gr.ClearButton([msg, chatbot])
+
+     doc_source1 = gr.Textbox(label="Reference 1")
+     source1_page = gr.Number(label="Page 1")
+     doc_source2 = gr.Textbox(label="Reference 2")
+     source2_page = gr.Number(label="Page 2")
+     doc_source3 = gr.Textbox(label="Reference 3")
+     source3_page = gr.Number(label="Page 3")
+
+     msg.submit(conversation, [qa_chain, msg, chatbot], [qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page])
+     submit_btn.click(conversation, [qa_chain, msg, chatbot], [qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page])
+
+ retrieve_api()  # load HUGGINGFACE_API_KEY before initialize_llm needs it
+ demo.queue().launch()
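
The app reads the API token from a `HUGGINGFACE_API_KEY` environment variable via `load_dotenv()`. A minimal pre-flight sketch for running locally (the assertion is illustrative, not part of the commit):

```python
# Illustrative pre-flight check: initialize_llm reads the module-level global
# that retrieve_api() sets, so the token must be available before the UI is used.
import os
from dotenv import load_dotenv

load_dotenv()  # picks up a .env file in the working directory
assert os.environ.get("HUGGINGFACE_API_KEY"), \
    "Set HUGGINGFACE_API_KEY in .env (or in the Space secrets) before starting app.py"
```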
indexing.py ADDED
@@ -0,0 +1,83 @@
+ """
+ Indexing with vector database
+ """
+
+ from pathlib import Path
+ import re
+
+ import chromadb
+
+ from unidecode import unidecode
+
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_chroma import Chroma
+ from langchain_huggingface import HuggingFaceEmbeddings
+
+
+
+ # Load PDF documents and create doc splits
+ def load_doc(list_file_path, chunk_size, chunk_overlap):
+     """Load PDF documents and split them into chunks"""
+
+     loaders = [PyPDFLoader(x) for x in list_file_path]
+     pages = []
+     for loader in loaders:
+         pages.extend(loader.load())
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=chunk_size, chunk_overlap=chunk_overlap
+     )
+     doc_splits = text_splitter.split_documents(pages)
+     return doc_splits
+
+
+ # Generate a Chroma-safe collection name for the vector database
+ # - Uses the file path as input
+ # - Transliterates non-ASCII text (e.g. Arabic, Chinese) to ASCII
+ def create_collection_name(filepath):
+     """Create collection name for vector database"""
+
+     # Extract filename without extension
+     collection_name = Path(filepath).stem
+     # Fix potential issues from the naming convention:
+     ## Remove spaces
+     collection_name = collection_name.replace(" ", "-")
+     ## ASCII transliteration of Unicode text
+     collection_name = unidecode(collection_name)
+     ## Remove special characters
+     collection_name = re.sub("[^A-Za-z0-9]+", "-", collection_name)
+     ## Limit length to 50 characters
+     collection_name = collection_name[:50]
+     ## Enforce minimum length of 3 characters
+     if len(collection_name) < 3:
+         collection_name = collection_name + "xyz"
+     ## Enforce alphanumeric first and last characters
+     if not collection_name[0].isalnum():
+         collection_name = "A" + collection_name[1:]
+     if not collection_name[-1].isalnum():
+         collection_name = collection_name[:-1] + "Z"
+     print("\n\nFilepath: ", filepath)
+     print("Collection name: ", collection_name)
+     return collection_name
+
+
+ # Create vector database
+ def create_db(splits, collection_name):
+     """Create embeddings and vector database"""
+
+     embedding = HuggingFaceEmbeddings(
+         model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2",
+         # model_name="sentence-transformers/all-MiniLM-L6-v2",
+         # model_kwargs={"device": "cpu"},
+         # encode_kwargs={'normalize_embeddings': False}
+     )
+     chromadb.api.client.SharedSystemClient.clear_system_cache()
+     new_client = chromadb.EphemeralClient()
+     vectordb = Chroma.from_documents(
+         documents=splits,
+         embedding=embedding,
+         client=new_client,
+         collection_name=collection_name,
+         # persist_directory=default_persist_directory
+     )
+     return vectordb
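
For reference, the three functions above compose like this outside the UI; a minimal sketch where `sample.pdf` is an illustrative file name and the chunk parameters mirror the app's slider defaults:

```python
# Illustrative standalone use of indexing.py
import indexing

paths = ["sample.pdf"]  # hypothetical input file
collection = indexing.create_collection_name(paths[0])
splits = indexing.load_doc(paths, chunk_size=600, chunk_overlap=40)
vector_db = indexing.create_db(splits, collection)
print(f"Indexed {len(splits)} chunks into collection '{collection}'")
```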
prompt_template.json ADDED
@@ -0,0 +1,5 @@
+ {
+     "title": "System prompt",
+     "prompt": "You are an assistant for question-answering tasks. Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer concise. Question: {question} \n Context: {context} \n Helpful Answer:"
+ }
+
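retrieval.py consumes this file by loading the `prompt` string and wrapping it in a LangChain `PromptTemplate`, so the `{context}` and `{question}` placeholders are filled at query time:

```python
# How prompt_template.json is consumed (mirrors initialize_llmchain in retrieval.py)
import json
from langchain_core.prompts import PromptTemplate

with open("prompt_template.json") as f:
    system_prompt = json.load(f)

rag_prompt = PromptTemplate(
    template=system_prompt["prompt"], input_variables=["context", "question"]
)
print(rag_prompt.format(context="<retrieved chunks>", question="<user question>"))
```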
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ gradio==4.26.0
+ langchain
+ chromadb
+ pypdf
+ python-dotenv
+ huggingface_hub
+ unidecode
+ sentence-transformers
+ langchain-community
+ langchain-core
+ langchain-chroma
+ langchain-huggingface
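
Note that `langchain-huggingface` is required: both indexing.py and retrieval.py import from it. Only `gradio` is pinned (4.26.0, matching `sdk_version` in `.huggingface/spaces.yaml`); the remaining packages resolve to their latest compatible releases at build time.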
retrieval.py ADDED
@@ -0,0 +1,114 @@
+ """
+ LLM chain retrieval
+ """
+
+ import json
+ import gradio as gr
+
+ from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
+ from langchain.memory import ConversationBufferMemory
+ from langchain_huggingface import HuggingFaceEndpoint
+ from langchain_core.prompts import PromptTemplate
+
+
+ # Fallback system template for the RAG application (the chain below loads prompt_template.json instead)
+ PROMPT_TEMPLATE = """
+ You are an assistant for question-answering tasks. Use the following pieces of context to answer the question at the end.
+ If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer concise.
+ Question: {question}
+ Context: {context}
+ Helpful Answer:
+ """
+
+
+ # Initialize langchain LLM chain
+ def initialize_llmchain(
+     llm_model,
+     huggingfacehub_api_token,
+     temperature,
+     max_tokens,
+     top_k,
+     vector_db,
+     progress=gr.Progress(),
+ ):
+     """Initialize Langchain LLM chain"""
+
+     progress(0.1, desc="Initializing LLM endpoint...")
+     # HuggingFaceEndpoint uses HF-hosted inference endpoints
+     progress(0.5, desc="Initializing HF Hub...")
+     # Note: trust_remote_code cannot be passed via model_kwargs
+     # because of a known langchain issue:
+     # https://github.com/langchain-ai/langchain/issues/6080
+
+     llm = HuggingFaceEndpoint(
+         repo_id=llm_model,
+         task="text-generation",
+         temperature=temperature,
+         max_new_tokens=max_tokens,
+         top_k=top_k,
+         huggingfacehub_api_token=huggingfacehub_api_token,
+     )
+
+     progress(0.75, desc="Defining buffer memory...")
+     memory = ConversationBufferMemory(
+         memory_key="chat_history", output_key="answer", return_messages=True
+     )
+     # retriever = vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
+     retriever = vector_db.as_retriever()
+
+     progress(0.8, desc="Defining retrieval chain...")
+     with open("prompt_template.json", "r") as file:
+         system_prompt = json.load(file)
+     prompt_template = system_prompt["prompt"]
+     rag_prompt = PromptTemplate(
+         template=prompt_template, input_variables=["context", "question"]
+     )
+     qa_chain = ConversationalRetrievalChain.from_llm(
+         llm,
+         retriever=retriever,
+         chain_type="stuff",
+         memory=memory,
+         combine_docs_chain_kwargs={"prompt": rag_prompt},
+         return_source_documents=True,
+         # return_generated_question=False,
+         verbose=False,
+     )
+     progress(0.9, desc="Done!")
+
+     return qa_chain
+
+
+ def format_chat_history(message, chat_history):  # message is unused; kept for call-site compatibility
+     """Format chat history for llm chain"""
+
+     formatted_chat_history = []
+     for user_message, bot_message in chat_history:
+         formatted_chat_history.append(f"User: {user_message}")
+         formatted_chat_history.append(f"Assistant: {bot_message}")
+     return formatted_chat_history
+
+
+ def invoke_qa_chain(qa_chain, message, history):
+     """Invoke question-answering chain"""
+
+     formatted_chat_history = format_chat_history(message, history)
+     # print("formatted_chat_history", formatted_chat_history)
+
+     # Generate response using QA chain
+     response = qa_chain.invoke(
+         {"question": message, "chat_history": formatted_chat_history}
+     )
+
+     response_sources = response["source_documents"]
+
+     response_answer = response["answer"]
+     if response_answer.find("Helpful Answer:") != -1:  # model echoed the prompt
+         response_answer = response_answer.split("Helpful Answer:")[-1]
+
+     # Append user message and response to chat history
+     new_history = history + [(message, response_answer)]
+
+     # print('chat response: ', response_answer)
+     # print('DB source', response_sources)
+
+     return qa_chain, new_history, response_sources
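
Putting retrieval.py together with indexing.py, a minimal end-to-end run without the Gradio UI might look like the sketch below. The model id matches the app's first choice; `sample.pdf` and the no-op progress callback are illustrative assumptions, the latter standing in for `gr.Progress()` outside a Gradio event:

```python
# Illustrative end-to-end run: index a PDF, build the chain, ask one question
import os
from dotenv import load_dotenv

import indexing
import retrieval

load_dotenv()
token = os.environ.get("HUGGINGFACE_API_KEY")

splits = indexing.load_doc(["sample.pdf"], chunk_size=600, chunk_overlap=40)
vector_db = indexing.create_db(splits, indexing.create_collection_name("sample.pdf"))

qa_chain = retrieval.initialize_llmchain(
    "mistralai/Mistral-7B-Instruct-v0.3",
    token,
    temperature=0.7,
    max_tokens=1024,
    top_k=3,
    vector_db=vector_db,
    progress=lambda *args, **kwargs: None,  # no-op stand-in for gr.Progress
)

qa_chain, history, sources = retrieval.invoke_qa_chain(
    qa_chain, "What is this document about?", []
)
print(history[-1][1])        # the model's answer
print(sources[0].metadata)   # metadata of the top retrieved chunk
```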