Upload app.py with huggingface_hub
app.py CHANGED
@@ -7,23 +7,18 @@ from langchain_community.vectorstores import FAISS
 from huggingface_hub import InferenceClient
 import os
 
-#
-os.environ["GRADIO_SERVER_NAME"] = "0.0.0.0"
-
-# Initialize embedding model (runs on CPU, small enough for free tier)
+# Initialize embedding model
 embedding_model = HuggingFaceEmbeddings(
     model_name="sentence-transformers/all-MiniLM-L6-v2",
     model_kwargs={'device': 'cpu'}
 )
 
-# Initialize Inference Client
+# Initialize Inference Client
 client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
 
-# Global variable to store vectorstore
 vectorstore = None
 
 def process_pdf(pdf_file):
-    """Process uploaded PDF and create vector store."""
     global vectorstore
 
     if pdf_file is None:
@@ -50,7 +45,6 @@ def process_pdf(pdf_file):
         return f"❌ Error: {str(e)}"
 
 def answer_question(question):
-    """Answer question using RAG."""
     global vectorstore
 
     if vectorstore is None:
@@ -93,7 +87,6 @@ Question: {question}
     except Exception as e:
         return f"❌ Error: {str(e)}", ""
 
-# Create Gradio interface using simpler Interface API
 with gr.Blocks() as demo:
     gr.Markdown("# 📄 RAG Document Q&A System")
     gr.Markdown("Upload a PDF and ask questions about its content.")
@@ -113,5 +106,4 @@ with gr.Blocks() as demo:
     process_btn.click(process_pdf, inputs=[pdf_input], outputs=[status])
     ask_btn.click(answer_question, inputs=[question], outputs=[answer, sources])
 
-
-demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
+demo.launch(share=True)
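Note on the last hunk: the commit drops the manual os.environ["GRADIO_SERVER_NAME"] override and the explicit demo.launch(server_name="0.0.0.0", server_port=7860, share=False) call in favor of a bare demo.launch(share=True). A minimal sketch of why this is safe, assuming a standard Spaces runtime (which exports these variables itself; nothing below is set by the commit):

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("RAG demo placeholder")

# Before this commit (explicit binding; redundant on Spaces, where the
# runtime already exports GRADIO_SERVER_NAME=0.0.0.0 and
# GRADIO_SERVER_PORT=7860):
# demo.launch(server_name="0.0.0.0", server_port=7860, share=False)

# After this commit: rely on those env vars. share=True only has an effect
# when running locally, where it requests a temporary public *.gradio.live
# URL; inside a Space, Gradio warns and ignores it.
demo.launch(share=True)
```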
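The diff hides most of process_pdf (old lines 30-49) and answer_question (old lines 57-92). The sketch below is an assumption about what such bodies typically look like given the visible pieces (FAISS vector store, MiniLM embeddings, zephyr-7b-beta via InferenceClient, and the "Question: {question}" prompt fragment in the hunk header), not the commit's actual code; the loader, chunk sizes, k, and prompt wording are illustrative guesses, and it assumes pypdf and faiss-cpu are installed.

```python
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS

def build_vectorstore(pdf_path, embedding_model):
    """Hypothetical stand-in for process_pdf: load, chunk, embed, index."""
    docs = PyPDFLoader(pdf_path).load()
    chunks = RecursiveCharacterTextSplitter(
        chunk_size=1000, chunk_overlap=100  # illustrative sizes
    ).split_documents(docs)
    return FAISS.from_documents(chunks, embedding_model)

def answer(question, vectorstore, client):
    """Hypothetical stand-in for answer_question: retrieve, prompt, generate."""
    hits = vectorstore.similarity_search(question, k=3)
    context = "\n\n".join(doc.page_content for doc in hits)
    prompt = f"Context:\n{context}\n\nQuestion: {question}\nAnswer:"
    answer = client.text_generation(prompt, max_new_tokens=512)
    sources = "\n".join(f"page {doc.metadata.get('page', '?')}" for doc in hits)
    return answer, sources
```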