Update app.py
app.py CHANGED
@@ -1,50 +1,53 @@
 import os
 import gradio as gr
+
+# ✅ New LangChain Hugging Face imports
+from langchain_huggingface import HuggingFaceEndpoint
 from langchain.text_splitter import CharacterTextSplitter
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain_community.vectorstores import FAISS
 from langchain.chains import ConversationalRetrievalChain
-from langchain_community.llms import HuggingFaceHub
 from langchain_community.document_loaders import PyPDFLoader
 
-# Load PDF
+# --- 1️⃣ Load your PDF ---
 loader = PyPDFLoader("chimera.pdf")
 documents = loader.load()
 
-# Split
+# --- 2️⃣ Split into chunks ---
 text_splitter = CharacterTextSplitter(chunk_size=800, chunk_overlap=100)
 texts = text_splitter.split_documents(documents)
 
-#
+# --- 3️⃣ Create embeddings + FAISS vector store ---
 embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
 db = FAISS.from_documents(texts, embeddings)
-
 retriever = db.as_retriever(search_kwargs={"k": 3})
 
-# Hugging Face
+# --- 4️⃣ Hugging Face Endpoint setup ---
 hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
 if hf_token is None:
-    raise ValueError(
+    raise ValueError(
+        "HUGGINGFACEHUB_API_TOKEN not found! Add it in Space Settings → Secrets."
+    )
 
-llm =
-
-
-
+llm = HuggingFaceEndpoint(
+    endpoint_url="https://api-inference.huggingface.co/models/google/flan-t5-base",
+    headers={"Authorization": f"Bearer {hf_token}"},
+    model_kwargs={"temperature": 0}
 )
 
-
-
-    retriever=retriever
-)
+# --- 5️⃣ Build conversational chain ---
+qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever)
 
 chat_history = []
 
+# --- 6️⃣ Respond function ---
 def respond(message, history):
-    history = history[-6:]
+    history = history[-6:]  # keep last 3 exchanges
     result = qa({"question": message, "chat_history": history})
     history.append((message, result["answer"]))
     return history, history
 
+# --- 7️⃣ Gradio UI ---
 with gr.Blocks() as demo:
     with gr.Column():
         warning_text = gr.HTML(
@@ -58,12 +61,12 @@ with gr.Blocks() as demo:
 
     def enter_case():
        return (
-            gr.update(visible=True),
-            gr.update(visible=True),
-            gr.update(visible=True),
-            gr.update(value=""),
-            gr.update(visible=False),
-            gr.update(visible=False)
+            gr.update(visible=True),   # chatbot
+            gr.update(visible=True),   # user_input
+            gr.update(visible=True),   # submit_btn
+            gr.update(value=""),       # hide warning
+            gr.update(visible=False),  # hide enter_btn
+            gr.update(visible=False)   # hide exit_btn
        )
 
     def exit_case():
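The core of this change is swapping the older `HuggingFaceHub` LLM wrapper for `HuggingFaceEndpoint` from the `langchain_huggingface` package. Below is a minimal standalone sketch of that class in isolation, assuming `langchain-huggingface` is installed; the `repo_id`, the token handling via `huggingfacehub_api_token`, and the generation parameters are illustrative choices for a quick smoke test, not taken from this commit.

import os
from langchain_huggingface import HuggingFaceEndpoint

# Illustrative smoke test (not part of the commit): construct the endpoint by
# repo_id and pass the token through the dedicated parameter instead of raw headers.
llm = HuggingFaceEndpoint(
    repo_id="google/flan-t5-base",        # example model; any hosted model works
    task="text2text-generation",          # flan-t5 is a seq2seq model
    huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
    temperature=0.1,
    max_new_tokens=256,
)

# Single call to confirm the endpoint and token are wired up correctly.
print(llm.invoke("Summarize what a retrieval-augmented chatbot does."))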