eik-1 committed on
Commit 2df6a19 · verified · 1 Parent(s): 4869be3

Update app.py

Files changed (1)
  1. app.py +118 -56
app.py CHANGED
@@ -1,64 +1,126 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
- 
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
- 
- 
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
- 
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
- 
-     messages.append({"role": "user", "content": message})
- 
-     response = ""
- 
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
- 
-         response += token
-         yield response
- 
- 
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
- 
- 
- if __name__ == "__main__":
-     demo.launch()
+ import os
  import gradio as gr
+ import torch  # needed below to pick a GPU device if one is available
+ from pypdf import PdfReader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.vectorstores import FAISS
+ from langchain.prompts import PromptTemplate  # used to build the QA prompt below
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+ from langchain.llms import HuggingFacePipeline
+ from langchain.chains import RetrievalQA
+ from langchain.memory import ConversationBufferWindowMemory
+ 
+ # Extract all text from a PDF file
+ def extract_text_from_pdf(pdf_file):
+     try:
+         # Gradio may pass a tempfile wrapper or a plain path string
+         reader = PdfReader(getattr(pdf_file, "name", pdf_file))
+         text = ""
+         for page in reader.pages:
+             extracted = page.extract_text()
+             if extracted:
+                 text += extracted + "\n"
+         return text
+     except Exception as e:
+         return f"Error reading PDF: {e}"
+ 
+ # Split the PDFs into chunks and build a FAISS vector store over them
+ def process_pdfs(pdf_files):
+     documents = []
+     for pdf_file in pdf_files:
+         text = extract_text_from_pdf(pdf_file)
+         if text and not text.startswith("Error"):
+             documents.append(text)
+ 
+     # Chunk documents
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=1000,
+         chunk_overlap=150,
+         length_function=len
+     )
+     chunks = []
+     for doc in documents:
+         splits = text_splitter.split_text(doc)
+         chunks.extend(splits)
+ 
+     # FAISS.from_texts fails on an empty list, so bail out early
+     if not chunks:
+         raise gr.Error("No text could be extracted from the uploaded PDFs.")
+ 
+     # Create embeddings and vector store
+     embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
+     vector_store = FAISS.from_texts(chunks, embeddings)
+     return vector_store
+ 
+ # Initialize the local flan-t5 model wrapped as a LangChain LLM
+ def initialize_llm():
+     model_name = "google/flan-t5-base"
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
+     pipe = pipeline(
+         "text2text-generation",
+         model=model,
+         tokenizer=tokenizer,
+         max_length=512,
+         do_sample=True,  # temperature has no effect under greedy decoding
+         temperature=0.7,
+         device=0 if torch.cuda.is_available() else -1
+     )
+     llm = HuggingFacePipeline(pipeline=pipe)
+     return llm
+ 
+ # Create RAG chain
+ def create_rag_chain(vector_store, llm):
+     prompt_template = """Use the following pieces of context to answer the question. If you don't know the answer, say so. Do not make up information.
+ {context}
+ Question: {question}
+ Answer: """
+     prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+     # Window memory keeps only the last 4 exchanges; it records the
+     # conversation so the interface can display it as chat history
+     memory = ConversationBufferWindowMemory(
+         k=4,
+         memory_key="chat_history",
+         input_key="question",
+         output_key="output_text"  # the stuff chain's output key
+     )
+     chain = RetrievalQA.from_chain_type(
+         llm=llm,
+         chain_type="stuff",
+         retriever=vector_store.as_retriever(search_kwargs={"k": 5}),
+         return_source_documents=True,
+         chain_type_kwargs={"prompt": prompt, "memory": memory}
+     )
+     return chain
+ 
+ # Load the model once at startup rather than on every question
+ llm = initialize_llm()
+ 
+ # Cache one RAG chain per uploaded file set so embeddings are not
+ # recomputed and the chat memory survives across questions
+ chain_cache = {}
+ 
+ # Gradio interface function
+ def rag_interface(pdf_files, question):
+     if not pdf_files:
+         return "Please upload at least one PDF file.", ""
+ 
+     # Reuse the vector store and chain if these files were already processed
+     key = tuple(getattr(f, "name", str(f)) for f in pdf_files)
+     if key not in chain_cache:
+         vector_store = process_pdfs(pdf_files)
+         chain_cache[key] = create_rag_chain(vector_store, llm)
+     rag_chain = chain_cache[key]
+ 
+     # Get answer
+     result = rag_chain({"query": question})
+     answer = result["result"]
+     chat_history = rag_chain.combine_documents_chain.memory.chat_memory.messages
+ 
+     # Format chat history as Q/A pairs
+     history_text = ""
+     for i in range(0, len(chat_history), 2):
+         if i + 1 < len(chat_history):
+             history_text += f"Q: {chat_history[i].content}\nA: {chat_history[i+1].content}\n\n"
+ 
+     return answer, history_text
+ 
+ # Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("# RAG Question Answering System")
+     pdf_input = gr.File(label="Upload PDFs", file_count="multiple", file_types=[".pdf"])
+     question_input = gr.Textbox(label="Ask a question")
+     answer_output = gr.Textbox(label="Answer")
+     history_output = gr.Textbox(label="Chat History")
+     submit_button = gr.Button("Submit")
+     submit_button.click(
+         fn=rag_interface,
+         inputs=[pdf_input, question_input],
+         outputs=[answer_output, history_output]
+     )
+ 
+ demo.launch(share=True)
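
For reference, the imports above imply roughly the following dependencies. The commit pins no versions; `sentence-transformers` and `faiss-cpu` are pulled in indirectly by HuggingFaceEmbeddings and FAISS respectively:

    gradio
    pypdf
    langchain
    transformers
    torch
    sentence-transformers
    faiss-cpu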