Nav772 commited on
Commit
91a07db
·
verified ·
1 Parent(s): ffe772a

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +15 -26
app.py CHANGED
@@ -5,15 +5,12 @@ from langchain_text_splitters import RecursiveCharacterTextSplitter
5
  from langchain_huggingface import HuggingFaceEmbeddings
6
  from langchain_community.vectorstores import FAISS
7
  from huggingface_hub import InferenceClient
8
- import os
9
 
10
- # Initialize embedding model
11
  embedding_model = HuggingFaceEmbeddings(
12
  model_name="sentence-transformers/all-MiniLM-L6-v2",
13
  model_kwargs={'device': 'cpu'}
14
  )
15
 
16
- # Initialize Inference Client
17
  client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
18
 
19
  vectorstore = None
@@ -39,10 +36,10 @@ def process_pdf(pdf_file):
39
  embedding=embedding_model
40
  )
41
 
42
- return f"✅ Processed {len(documents)} pages into {len(chunks)} chunks. Ready for questions!"
43
 
44
  except Exception as e:
45
- return f"❌ Error: {str(e)}"
46
 
47
  def answer_question(question):
48
  global vectorstore
@@ -58,7 +55,7 @@ def answer_question(question):
58
  context = "\n\n".join([doc.page_content for doc in docs])
59
 
60
  prompt = f"""<|system|>
61
- You are a helpful assistant that answers questions based on the provided context. Only use information from the context. If the answer is not in the context, say "I cannot find this information in the document."
62
  </s>
63
  <|user|>
64
  Context:
@@ -85,25 +82,17 @@ Question: {question}
85
  return response, "\n".join(sources)
86
 
87
  except Exception as e:
88
- return f"❌ Error: {str(e)}", ""
89
 
90
- with gr.Blocks() as demo:
91
- gr.Markdown("# 📚 RAG Document Q&A System")
92
- gr.Markdown("Upload a PDF and ask questions about its content.")
93
-
94
- with gr.Row():
95
- with gr.Column():
96
- pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"])
97
- process_btn = gr.Button("📄 Process PDF")
98
- status = gr.Textbox(label="Status")
99
-
100
- with gr.Column():
101
- question = gr.Textbox(label="Question", placeholder="Ask about the document...")
102
- ask_btn = gr.Button("🔍 Ask")
103
- answer = gr.Textbox(label="Answer", lines=5)
104
- sources = gr.Textbox(label="Sources", lines=3)
105
-
106
- process_btn.click(process_pdf, inputs=[pdf_input], outputs=[status])
107
- ask_btn.click(answer_question, inputs=[question], outputs=[answer, sources])
108
 
109
- demo.launch(share=True)
 
5
  from langchain_huggingface import HuggingFaceEmbeddings
6
  from langchain_community.vectorstores import FAISS
7
  from huggingface_hub import InferenceClient
 
8
 
 
9
  embedding_model = HuggingFaceEmbeddings(
10
  model_name="sentence-transformers/all-MiniLM-L6-v2",
11
  model_kwargs={'device': 'cpu'}
12
  )
13
 
 
14
  client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
15
 
16
  vectorstore = None
 
36
  embedding=embedding_model
37
  )
38
 
39
+ return f"Processed {len(documents)} pages into {len(chunks)} chunks. Ready!"
40
 
41
  except Exception as e:
42
+ return f"Error: {str(e)}"
43
 
44
  def answer_question(question):
45
  global vectorstore
 
55
  context = "\n\n".join([doc.page_content for doc in docs])
56
 
57
  prompt = f"""<|system|>
58
+ You are a helpful assistant. Answer based on the context only.
59
  </s>
60
  <|user|>
61
  Context:
 
82
  return response, "\n".join(sources)
83
 
84
  except Exception as e:
85
+ return f"Error: {str(e)}", ""
86
 
87
+ demo = gr.Interface(
88
+ fn=answer_question,
89
+ inputs=gr.Textbox(label="Question"),
90
+ outputs=[
91
+ gr.Textbox(label="Answer"),
92
+ gr.Textbox(label="Sources")
93
+ ],
94
+ title="RAG Document Q&A",
95
+ description="Ask questions about uploaded documents."
96
+ )
 
 
 
 
 
 
 
 
97
 
98
+ demo.launch()