Update app.py
app.py CHANGED
@@ -8,11 +8,14 @@ from langchain_groq import ChatGroq
 from langchain_classic.chains import ConversationalRetrievalChain
 from langchain_classic.memory import ConversationBufferMemory

-# --- …
-…
+# --- NEW IMPORTS FOR HYBRID SEARCH ---
+from langchain_community.retrievers import BM25Retriever
+from langchain.retrievers import EnsembleRetriever
+
+# 1. SETUP API
 api_key = os.environ.get("GROQ_API")

-# …
+# 2. FILE LOADING LOGIC
 def load_any(path: str):
     p = path.lower()
     if p.endswith(".pdf"): return PyPDFLoader(path).load()
@@ -20,109 +23,89 @@ def load_any(path: str):
     if p.endswith(".docx"): return Docx2txtLoader(path).load()
     return []

-# …
-# This function runs when the user clicks "Build Chatbot"
+# 3. HYBRID PROCESSING FUNCTION
 def process_files(files):
-    if not files:
-        return None, "⚠️ …
-
-    if not api_key:
-        return None, "❌ Error: GROQ_API key not found in Secrets."
+    if not files or not api_key:
+        return None, "⚠️ Missing files or API key."

     try:
-        # Load …
+        # Load all documents
         docs = []
         for file_obj in files:
             docs.extend(load_any(file_obj.name))

         if not docs:
-            return None, "⚠️ No readable text found …
+            return None, "⚠️ No readable text found."

-        # Split
-        splitter = RecursiveCharacterTextSplitter(chunk_size=…
+        # Split into chunks
+        splitter = RecursiveCharacterTextSplitter(chunk_size=700, chunk_overlap=100)
         chunks = splitter.split_documents(docs)

-        # …
-        if not chunks:
-            return None, "❌ Error: The PDF contains no selectable text. It might be a scanned image. Please convert it to Word/Text first."
-
-        # Create Embeddings & Vector Store
+        # A. Semantic Search (FAISS)
         embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
-        …
-        …
+        faiss_db = FAISS.from_documents(chunks, embeddings)
+        faiss_retriever = faiss_db.as_retriever(search_kwargs={"k": 3})
+
+        # B. Keyword Search (BM25) - THIS IS THE MULTI-RETRIEVER ADDITION
+        bm25_retriever = BM25Retriever.from_documents(chunks)
+        bm25_retriever.k = 3

-        # …
-        llm = ChatGroq(
-            …
-            …
-            temperature=0
+        # C. Ensemble (Hybrid Search)
+        ensemble_retriever = EnsembleRetriever(
+            retrievers=[faiss_retriever, bm25_retriever],
+            weights=[0.6, 0.4]  # 60% Semantic, 40% Keyword
         )
-
+
+        # D. Classic Chain Setup
+        llm = ChatGroq(groq_api_key=api_key, model="llama-3.3-70b-versatile", temperature=0)
         memory = ConversationBufferMemory(
-            memory_key="chat_history",
-            return_messages=True,
+            memory_key="chat_history",
+            return_messages=True,
             output_key="answer"
         )
-
+
         chain = ConversationalRetrievalChain.from_llm(
             llm=llm,
-            retriever=…
+            retriever=ensemble_retriever,  # Use Hybrid Retriever
             memory=memory,
             return_source_documents=True,
             output_key="answer"
         )
-
-        return chain, f"✅ …
+
+        return chain, f"✅ Hybrid Multi-RAG Ready! ({len(chunks)} chunks)"

     except Exception as e:
         return None, f"❌ Error: {str(e)}"

-# …
+# 4. CHAT FUNCTION
 def chat_function(message, history, chain):
     if not chain:
-        return "⚠️ …
+        return "⚠️ Build the chatbot first."

-    try:
-        res = chain.invoke({"question": message})
-        answer = res["answer"]
-
-        # Format Sources
-        sources = []
-        for d in res.get("source_documents", []):
-            src = os.path.basename(d.metadata.get("source", "unknown"))
-            text = (d.page_content or "").replace("\n", " ")[:100] + "..."
-            sources.append(f"- {src}: {text}")
-
-        final_answer = answer + "\n\n---\n**Sources:**\n" + "\n".join(sources)
-        return final_answer
-    except Exception as e:
-        return f"❌ Error generating answer: {str(e)}"
-
-# --- 5. BUILD UI ---
-with gr.Blocks(title="RAG Chatbot") as demo:
-    gr.Markdown("# 📚 RAG Chatbot (LangChain + Groq)")
+    res = chain.invoke({"question": message})
+    answer = res["answer"]
+
+    # Format Sources
+    sources = list(set([os.path.basename(d.metadata.get("source", "unknown")) for d in res.get("source_documents", [])]))
+    source_text = "\n\n---\n**Sources:** " + ", ".join(sources)
+
+    return answer + source_text
+
+# 5. UI
+with gr.Blocks(title="Hybrid RAG") as demo:
+    gr.Markdown("# 🚀 Hybrid Multi-RAG Chatbot")

-    # …
     chain_state = gr.State(None)

     with gr.Row():
         with gr.Column(scale=1):
-            file_input = gr.File(file_count="multiple", label="Upload …
-            build_btn = gr.Button("Build …
-            status_output = gr.Textbox(…)
+            file_input = gr.File(file_count="multiple", label="Upload Docs")
+            build_btn = gr.Button("Build Hybrid RAG", variant="primary")
+            status = gr.Textbox(label="Status", interactive=False)

         with gr.Column(scale=2):
-            gr.ChatInterface(
-                fn=chat_function,
-                additional_inputs=[chain_state]  # Pass the chain to the chat function
-            )
+            gr.ChatInterface(fn=chat_function, additional_inputs=[chain_state])

-    build_btn.click(
-        fn=process_files,
-        inputs=[file_input],
-        outputs=[chain_state, status_output]
-    )
+    build_btn.click(process_files, inputs=[file_input], outputs=[chain_state, status])

 if __name__ == "__main__":
     demo.launch()
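The substantive change is step C: EnsembleRetriever fuses the FAISS ranking with the BM25 ranking instead of relying on the vector store alone. According to the LangChain documentation this fusion is weighted Reciprocal Rank Fusion; the sketch below illustrates that scheme on made-up document ids (rrf_merge and the constant c are illustrative names, not LangChain API):

# Illustrative sketch of weighted Reciprocal Rank Fusion (RRF).
def rrf_merge(ranked_lists, weights, c=60):
    """Fuse ranked lists of doc ids; higher fused score ranks first."""
    scores = {}
    for docs, weight in zip(ranked_lists, weights):
        for rank, doc_id in enumerate(docs, start=1):
            # Each list contributes weight / (c + rank): top ranks dominate,
            # and docs found by both retrievers accumulate score.
            scores[doc_id] = scores.get(doc_id, 0.0) + weight / (c + rank)
    return sorted(scores, key=scores.get, reverse=True)

semantic_hits = ["d1", "d2", "d3"]  # pretend FAISS order
keyword_hits = ["d3", "d4", "d1"]   # pretend BM25 order
print(rrf_merge([semantic_hits, keyword_hits], weights=[0.6, 0.4]))
# ['d1', 'd3', 'd2', 'd4']: d1 and d3 win because both retrievers found them.

With weights=[0.6, 0.4], the semantic list carries more of each contribution, but a chunk surfaced by both retrievers still tends to outrank one surfaced by either alone.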
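A minimal standalone sketch of steps A through C, useful for checking the hybrid retriever outside Gradio. It assumes the same stack the Space uses (faiss-cpu, rank_bm25, sentence-transformers); the import paths follow the current langchain-community / langchain-huggingface split and may differ in older versions, and the two toy documents are invented:

from langchain_core.documents import Document
from langchain_community.retrievers import BM25Retriever
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.retrievers import EnsembleRetriever

# Two toy chunks: one carrying an exact token, one phrased loosely.
chunks = [
    Document(page_content="Invoice QX-2024 totals 1,200 EUR."),
    Document(page_content="Customers may return goods within 30 days for a refund."),
]

embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
faiss_retriever = FAISS.from_documents(chunks, embeddings).as_retriever(search_kwargs={"k": 1})
bm25_retriever = BM25Retriever.from_documents(chunks)
bm25_retriever.k = 1

hybrid = EnsembleRetriever(retrievers=[faiss_retriever, bm25_retriever], weights=[0.6, 0.4])

# Exact identifiers like "QX-2024" play to BM25; paraphrases play to FAISS.
for doc in hybrid.invoke("How much is invoice QX-2024?"):
    print(doc.page_content)

Queries containing rare exact tokens such as "QX-2024" are where BM25 earns its 0.4 weight: a pure embedding retriever can miss identifiers that tokenize poorly.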
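Finally, a sketch of how the chain built by process_files behaves across turns, using only calls that appear in the diff. process_files only needs objects exposing a .name path (which is what gr.File passes in), so a SimpleNamespace stands in here, and the file name is invented:

from types import SimpleNamespace

# Stand-in for the gr.File upload objects (each exposes a .name path).
files = [SimpleNamespace(name="contract.pdf")]

chain, status = process_files(files)
print(status)  # e.g. "✅ Hybrid Multi-RAG Ready! (42 chunks)"

res = chain.invoke({"question": "What does the contract say about termination?"})
print(res["answer"])
for doc in res["source_documents"]:
    print("-", doc.metadata.get("source", "unknown"))

# ConversationBufferMemory keeps chat_history, so a follow-up can say "it".
res = chain.invoke({"question": "And what notice period does it require?"})
print(res["answer"])

Because the memory stores chat_history and ConversationalRetrievalChain condenses each follow-up question against it, the pronoun in the second turn can resolve to the contract from the first.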