import gradio as gr
import os
import tempfile
import shutil
from pathlib import Path
import logging
import zipfile
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Try importing LangChain components
try:
from langchain_community.document_loaders import PyPDFDirectoryLoader, PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
# Updated imports for LLM - try multiple import paths
try:
from langchain_community.llms import HuggingFaceHub
HUGGINGFACE_HUB_AVAILABLE = True
logger.info("Using HuggingFaceHub from langchain_community")
except ImportError:
try:
from langchain.llms import HuggingFaceHub
HUGGINGFACE_HUB_AVAILABLE = True
logger.info("Using HuggingFaceHub from langchain.llms")
except ImportError:
try:
from langchain_huggingface import HuggingFaceEndpoint
HUGGINGFACE_HUB_AVAILABLE = False # HuggingFaceEndpoint doesn't have the same interface as HuggingFaceHub
logger.info("Using HuggingFaceEndpoint as fallback")
except ImportError:
logger.error("No suitable HuggingFace LLM implementation found")
HUGGINGFACE_HUB_AVAILABLE = False
LANGCHAIN_AVAILABLE = True
except ImportError as e:
logger.error(f"LangChain import error: {e}")
LANGCHAIN_AVAILABLE = False
HUGGINGFACE_HUB_AVAILABLE = False
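# Rough dependency sketch (assumed, not pinned): gradio, langchain,
# langchain-community, sentence-transformers, faiss-cpu, and pypdf
# for the PDF loaders.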
# Create PDFs folder if it doesn't exist
PDF_FOLDER_PATH = "./pdfs"
os.makedirs(PDF_FOLDER_PATH, exist_ok=True)
# Global variables for the RAG system
vectorstore = None
retrieval_qa = None
embedding_model = None
# Check for pre-existing PDF folder
PRELOADED_PDFS = any(f.lower().endswith(".pdf") for f in os.listdir(PDF_FOLDER_PATH))
def initialize_models():
"""Initialize the embedding model and LLM"""
global embedding_model
try:
# Initialize embedding model
embedding_model = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-MiniLM-L6-v2",
model_kwargs={'device': 'cpu'}
)
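        # all-MiniLM-L6-v2 maps each text chunk to a 384-dimensional vector;
        # forcing CPU keeps the Space runnable without a GPU.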
# Get HuggingFace token from environment
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not hf_token:
return False, "❌ HuggingFace API token not found in environment variables. Please set HUGGINGFACEHUB_API_TOKEN."
        return True, "✅ Models initialized successfully"
except Exception as e:
logger.error(f"Model initialization error: {e}")
return False, f"❌ Error initializing models: {str(e)}"
def create_llm():
"""Create and return the LLM instance with proper Runnable interface"""
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not hf_token:
logger.error("HuggingFace API token not found for LLM creation.")
return create_fallback_llm()
try:
# Prioritize HuggingFaceHub as it's more stable with LangChain
if HUGGINGFACE_HUB_AVAILABLE:
models_to_try = [
"mistralai/Mistral-7B-Instruct-v0.2",
"microsoft/DialoGPT-medium",
"google/flan-t5-base",
"microsoft/DialoGPT-small",
"tiiuae/falcon-7b-instruct"
]
for model_id in models_to_try:
try:
llm = HuggingFaceHub(
repo_id=model_id,
huggingfacehub_api_token=hf_token,
model_kwargs={
"temperature": 0.7,
"max_new_tokens": 512,
"max_length": 512,
"do_sample": True,
"top_p": 0.9,
"top_k": 50
}
)
logger.info(f"Successfully initialized HuggingFaceHub with model: {model_id}")
return llm
except Exception as model_error:
logger.warning(f"Failed to initialize {model_id} with HuggingFaceHub: {model_error}")
continue
# Fallback to HuggingFaceEndpoint if HuggingFaceHub is not available or failed
try:
from langchain_huggingface import HuggingFaceEndpoint
models_to_try = [
"mistralai/Mistral-7B-Instruct-v0.2",
"microsoft/DialoGPT-medium",
"google/flan-t5-base"
]
for model_id in models_to_try:
try:
                    # HuggingFaceEndpoint treats sampling params as explicit fields and
                    # rejects duplicates passed via model_kwargs, so set them directly.
                    llm = HuggingFaceEndpoint(
                        repo_id=model_id,
                        temperature=0.7,
                        max_new_tokens=512,
                        do_sample=True,
                        top_p=0.9,
                        top_k=50,
                        huggingfacehub_api_token=hf_token,
                    )
logger.info(f"Successfully initialized HuggingFaceEndpoint with model: {model_id}")
return llm
except Exception as model_error:
logger.warning(f"Failed to initialize {model_id} with HuggingFaceEndpoint: {model_error}")
continue
except ImportError:
pass # HuggingFaceEndpoint not available
# If all else fails, return fallback
raise Exception("All HuggingFace model initialization attempts failed")
except Exception as e:
logger.error(f"LLM creation error: {e}")
return create_fallback_llm()
def create_fallback_llm():
"""Create a proper LangChain-compatible fallback LLM"""
try:
from langchain.llms.base import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun
from typing import Optional, List, Any
class FallbackLLM(LLM):
"""A simple fallback LLM that provides basic responses"""
@property
def _llm_type(self) -> str:
return "fallback"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Basic response generation"""
if "summarize" in prompt.lower():
return "I apologize, but I'm currently experiencing technical difficulties with the AI model. However, I can see that you're asking about content in your documents. Please try rephrasing your question or check if the model service is available."
elif "what" in prompt.lower() or "how" in prompt.lower():
return "I'm having trouble processing your question due to technical issues with the language model. The document search is working, but I cannot generate detailed responses right now. Please try again later."
else:
return "I apologize, but I'm experiencing technical difficulties with the language model. The document processing is working correctly, but response generation is currently unavailable. Please try again later or contact support."
return FallbackLLM()
except ImportError:
# If we can't even import the base LLM class, create a simple mock
logger.error("Cannot create proper fallback LLM - LangChain base classes not available")
class SimpleFallback:
def invoke(self, prompt):
return "System temporarily unavailable. Please try again later."
def __call__(self, prompt): # For compatibility with older LangChain chains
return self.invoke(prompt)
return SimpleFallback()
def load_preloaded_pdfs(chunk_size=1000, chunk_overlap=200):
"""Load PDFs from the pre-existing folder"""
global vectorstore, retrieval_qa, embedding_model
if not LANGCHAIN_AVAILABLE:
return "❌ LangChain is not available. Please check the installation."
if not PRELOADED_PDFS:
return "❌ No pre-loaded PDFs found in ./pdfs folder."
try:
# Initialize models if not already done
if embedding_model is None:
success, message = initialize_models()
if not success:
return message
# Load documents from pre-existing folder
loader = PyPDFDirectoryLoader(PDF_FOLDER_PATH)
documents = loader.load()
if not documents:
return "❌ No documents were loaded from the PDFs folder. Ensure the folder contains valid PDFs."
# Split documents into chunks
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=int(chunk_size),
chunk_overlap=int(chunk_overlap)
)
chunks = text_splitter.split_documents(documents)
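        # Each chunk is a Document; with the PyPDF loaders its metadata
        # typically includes the source file path and a zero-based page number.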
# Create vector store
vectorstore = FAISS.from_documents(chunks, embedding_model)
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
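        # k=5: each query retrieves the five chunks nearest the question embedding.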
# Setup prompt template
prompt_template = """
Use the following context to answer the question. If you cannot find the answer in the context, say "I don't have enough information to answer this question."
Context:
{context}
Question: {question}
Helpful Answer:
"""
prompt = PromptTemplate(
input_variables=["context", "question"],
template=prompt_template
)
# Initialize LLM using the updated function
llm = create_llm()
# Create RetrievalQA chain with better error handling
try:
retrieval_qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": prompt}
)
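            # chain_type="stuff" concatenates all retrieved chunks into the
            # {context} slot of the prompt above in a single LLM call.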
# Test the chain with a simple query to ensure it works
try:
test_result = retrieval_qa({"query": "test"})
logger.info("QA chain test successful")
except Exception as test_error:
logger.warning(f"QA chain test failed during initial run: {test_error}")
# Chain created but might have issues - continue anyway
except Exception as chain_error:
logger.error(f"Chain creation error: {chain_error}")
return f"❌ Error creating QA chain: {str(chain_error)}. Check LLM availability."
        pdf_files = [f for f in os.listdir(PDF_FOLDER_PATH) if f.lower().endswith('.pdf')]
        return f"✅ Successfully processed {len(documents)} documents from {len(pdf_files)} PDF files into {len(chunks)} chunks. Ready for questions!"
except Exception as e:
logger.error(f"Pre-loaded PDF processing error: {e}")
return f"❌ Error processing pre-loaded PDFs: {str(e)}"
def extract_zip_to_pdfs(zip_file):
"""Extract uploaded ZIP file to PDFs folder"""
if not zip_file:
return "❌ Please upload a ZIP file."
try:
# Create PDFs directory if it doesn't exist
os.makedirs(PDF_FOLDER_PATH, exist_ok=True)
# Extract ZIP file
with zipfile.ZipFile(zip_file, 'r') as zip_ref: # zip_file is now a filepath string
# Extract only PDF files
pdf_files = [f for f in zip_ref.namelist() if f.lower().endswith('.pdf')]
if not pdf_files:
return "❌ No PDF files found in the ZIP archive."
for pdf_file in pdf_files:
# Extract to PDFs folder
# Ensure the path is safe and doesn't lead to directory traversal
extracted_path = os.path.join(PDF_FOLDER_PATH, os.path.basename(pdf_file))
# Check if the extracted path is within the intended PDF_FOLDER_PATH
if not os.path.abspath(extracted_path).startswith(os.path.abspath(PDF_FOLDER_PATH)):
logger.warning(f"Attempted path traversal detected: {pdf_file}")
continue # Skip this file
# Extract the file
with open(extracted_path, "wb") as f:
f.write(zip_ref.read(pdf_file))
global PRELOADED_PDFS
PRELOADED_PDFS = True
return f"βœ… Successfully extracted {len(pdf_files)} PDF files. Now click 'Load Pre-existing PDFs' to process them."
except Exception as e:
return f"❌ Error extracting ZIP file: {str(e)}"
def process_pdfs(pdf_files, chunk_size, chunk_overlap):
"""Process uploaded PDF files and create vector store"""
global vectorstore, retrieval_qa, embedding_model
if not LANGCHAIN_AVAILABLE:
return "❌ LangChain is not available. Please check the installation."
if not pdf_files:
return "❌ Please upload at least one PDF file or use pre-loaded PDFs."
try:
# Initialize models if not already done
if embedding_model is None:
success, message = initialize_models()
if not success:
return message
# Create temporary directory for PDFs
temp_dir = tempfile.mkdtemp()
# Save uploaded files to temp directory
for pdf_file_path in pdf_files: # pdf_files is now a list of filepaths
if pdf_file_path is not None:
temp_path = os.path.join(temp_dir, os.path.basename(pdf_file_path)) # Use pdf_file_path directly
shutil.copy2(pdf_file_path, temp_path)
# Load documents
loader = PyPDFDirectoryLoader(temp_dir)
documents = loader.load()
if not documents:
return "❌ No documents were loaded. Please check your PDF files."
# Split documents into chunks
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=int(chunk_size),
chunk_overlap=int(chunk_overlap)
)
chunks = text_splitter.split_documents(documents)
# Create vector store
vectorstore = FAISS.from_documents(chunks, embedding_model)
retriever = vectorstore.as_retriever(search_kwargs={"k": 5})
# Setup prompt template
prompt_template = """
Use the following context to answer the question. If you cannot find the answer in the context, say "I don't have enough information to answer this question."
Context:
{context}
Question: {question}
Helpful Answer:
"""
prompt = PromptTemplate(
input_variables=["context", "question"],
template=prompt_template
)
# Initialize LLM using the updated function
llm = create_llm()
# Create RetrievalQA chain with better error handling
try:
retrieval_qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": prompt}
)
# Test the chain
try:
test_result = retrieval_qa({"query": "test"})
logger.info("QA chain test successful")
except Exception as test_error:
logger.warning(f"QA chain test failed during initial run: {test_error}")
except Exception as chain_error:
logger.error(f"Chain creation error: {chain_error}")
return f"❌ Error creating QA chain: {str(chain_error)}. Check LLM availability."
# Clean up temp directory
shutil.rmtree(temp_dir)
return f"βœ… Successfully processed {len(documents)} documents into {len(chunks)} chunks. Ready for questions!"
except Exception as e:
logger.error(f"PDF processing error: {e}")
return f"❌ Error processing PDFs: {str(e)}"
def answer_question(question):
"""Answer a question using the RAG system with improved error handling"""
global retrieval_qa
if not question.strip():
return "❌ Please enter a question.", ""
if retrieval_qa is None:
return "❌ Please upload and process PDF files first.", ""
try:
# Get answer from RAG system
result = retrieval_qa({"query": question})
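        # The chain returns a dict shaped roughly like:
        #   {"query": ..., "result": "<answer>", "source_documents": [Document, ...]}
        # (source_documents is present because return_source_documents=True).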
answer = result.get("result", "No answer generated")
# Format source documents
sources = []
for i, doc in enumerate(result.get("source_documents", []), 1):
source = doc.metadata.get("source", "Unknown")
page = doc.metadata.get("page", "Unknown")
content_preview = doc.page_content[:200] + "..." if len(doc.page_content) > 200 else doc.page_content
sources.append(f"**Source {i}:**\n- File: {Path(source).name}\n- Page: {page}\n- Preview: {content_preview}\n")
sources_text = "\n".join(sources) if sources else "No sources found."
return answer, sources_text
except Exception as e:
logger.error(f"Question answering error: {e}")
# Provide a fallback response using just the retriever if LLM fails
try:
if vectorstore is not None:
# Get relevant documents directly from vectorstore
docs = vectorstore.similarity_search(question, k=3)
fallback_answer = "I found some relevant content in your documents:\n\n"
sources = []
for i, doc in enumerate(docs, 1):
source = doc.metadata.get("source", "Unknown")
page = doc.metadata.get("page", "Unknown")
content_preview = doc.page_content[:300] + "..." if len(doc.page_content) > 300 else doc.page_content
fallback_answer += f"**Excerpt {i}:** {content_preview}\n\n"
sources.append(f"**Source {i}:**\n- File: {Path(source).name}\n- Page: {page}\n")
sources_text = "\n".join(sources)
return fallback_answer + "\n*Note: This is a direct search result due to a technical issue with the AI model.*", sources_text
else:
return f"❌ Error answering question: {str(e)}. Vector store not initialized.", ""
except Exception as fallback_error:
logger.error(f"Fallback error during question answering: {fallback_error}")
return f"❌ Critical error answering question: {str(e)}", ""
def create_interface():
"""Create the fully responsive Gradio interface"""
# Enhanced CSS for comprehensive responsiveness
custom_css = """
/* CSS Variables for consistent theming */
:root {
--primary-color: #2563eb;
--secondary-color: #10b981;
--accent-color: #f59e0b;
--text-primary: #1f2937;
--text-secondary: #6b7280;
--bg-primary: #ffffff;
--bg-secondary: #f9fafb;
--border-color: #e5e7eb;
--shadow-sm: 0 1px 2px 0 rgba(0, 0, 0, 0.05);
--shadow-md: 0 4px 6px -1px rgba(0, 0, 0, 0.1);
--shadow-lg: 0 10px 15px -3px rgba(0, 0, 0, 0.1);
--radius-sm: 0.375rem;
--radius-md: 0.5rem;
--radius-lg: 0.75rem;
}
/* Dark mode support */
@media (prefers-color-scheme: dark) {
:root {
--text-primary: #f9fafb;
--text-secondary: #d1d5db;
--bg-primary: #1f2937;
--bg-secondary: #111827;
--border-color: #374151;
}
}
/* Base container improvements */
.gradio-container {
max-width: 100% !important;
margin: 0 auto !important;
padding: clamp(0.5rem, 2vw, 1.5rem) !important;
font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
}
/* Responsive grid system */
.gr-row {
display: flex !important;
flex-wrap: wrap !important;
gap: clamp(0.75rem, 2vw, 1.5rem) !important;
margin-bottom: clamp(0.75rem, 2vw, 1.5rem) !important;
}
.gr-column {
flex: 1 1 auto !important;
min-width: 0 !important;
}
/* Remove any pre-existing or default Gradio styling that might conflict */
.gradio-container,
.gr-panel,
.gr-block,
.gr-group {
box-sizing: border-box !important;
min-width: 0 !important; /* Ensure elements can shrink */
}
/* Ensure images and media scale within their containers */
img, video {
max-width: 100% !important;
height: auto !important;
display: block !important;
}
/* Specific adjustments for file upload area text */
.gr-file .file-upload-text {
font-size: clamp(0.75rem, 3vw, 1rem) !important; /* Make text smaller on mobile */
line-height: 1.4 !important;
}
/* Ensure gr-markdown content is always visible and not hidden by overflow */
.gr-markdown {
overflow: visible !important; /* Prevent text from being clipped */
min-height: unset !important; /* Ensure it doesn't collapse */
width: 100% !important; /* Ensure it takes full available width */
box-sizing: border-box !important; /* Include padding and border in the element's total width and height */
padding: 1rem !important; /* Add some default padding to markdown blocks */
}
/* Ensure the main title and intro text are always at the top and visible */
.gradio-container > .gr-block:first-child {
/* This targets the first block inside the main container, which is usually the title Markdown */
display: block !important; /* Ensure it's not hidden by flex/grid */
width: 100% !important;
margin-bottom: 1.5rem !important; /* Add some space below the intro text */
padding: 0 0.5rem !important; /* Adjust padding for the top markdown block */
}
/* Adjust accordion header font size on small screens if it's too big */
@media (max-width: 640px) {
.gr-accordion-header {
font-size: 0.95rem !important; /* Slightly smaller for mobile headers */
padding: 0.75rem !important;
}
}
/* Mobile-first responsive breakpoints */
/* Small devices (phones, 320px and up) */
@media (max-width: 640px) {
.gradio-container {
padding: 0.75rem !important;
}
.gr-row {
flex-direction: column !important;
gap: 1rem !important;
}
.gr-column {
width: 100% !important;
flex: none !important;
}
/* Stack tabs vertically on very small screens */
.gr-tab-nav {
flex-direction: column !important;
gap: 0.25rem !important;
}
.gr-tab-nav > button {
width: 100% !important;
text-align: left !important;
padding: 0.75rem 1rem !important;
font-size: 0.875rem !important;
}
/* Improve button sizes for touch */
.gr-button {
width: 100% !important;
min-height: 48px !important;
font-size: 0.875rem !important;
padding: 0.75rem 1rem !important;
border-radius: var(--radius-md) !important;
font-weight: 500 !important;
}
/* Text inputs */
.gr-textbox textarea,
.gr-textbox input {
font-size: 16px !important; /* Prevents zoom on iOS */
padding: 0.75rem !important;
border-radius: var(--radius-md) !important;
border: 1px solid var(--border-color) !important;
}
/* File upload areas */
.gr-file {
min-height: 120px !important;
padding: 1rem !important;
border: 2px dashed var(--border-color) !important;
border-radius: var(--radius-lg) !important;
text-align: center !important;
}
/* Accordion improvements */
.gr-accordion {
border-radius: var(--radius-md) !important;
border: 1px solid var(--border-color) !important;
width: 100% !important; /* Force full width */
flex: none !important; /* Prevent flex issues */
}
/* Adjust spacing for accordions within columns */
.gr-column .gr-accordion {
margin-bottom: 1rem !important;
}
/* Ensure direct children of gradio-container also respond well */
.gradio-container > *:not(.gr-footer) { /* Exclude footer if it exists */
width: 100% !important;
margin-left: auto !important;
margin-right: auto !important;
}
/* Make sure all gradio components inside rows take full width */
.gr-row > .gr-block {
width: 100% !important;
}
/* Slider improvements */
.gr-slider {
margin: 1rem 0 !important;
}
.gr-slider input[type="range"] {
height: 32px !important;
}
/* Form spacing */
.gr-form > * {
margin-bottom: 1rem !important;
}
}
/* Medium devices (tablets, 641px and up) */
@media (min-width: 641px) and (max-width: 1024px) {
.gradio-container {
padding: 1.25rem !important;
}
.gr-row {
gap: 1.25rem !important;
}
.gr-button {
min-height: 44px !important;
padding: 0.625rem 1.25rem !important;
font-size: 0.875rem !important;
}
.gr-textbox textarea,
.gr-textbox input {
font-size: 15px !important;
padding: 0.625rem !important;
}
/* Two-column layout for medium screens */
.gr-column:first-child {
flex: 0 0 40% !important;
max-width: 40% !important;
}
.gr-column:last-child {
flex: 1 1 55% !important;
max-width: 55% !important;
}
.gr-row {
justify-content: space-between !important; /* Distribute space */
}
}
/* Large devices (desktops, 1025px and up) */
@media (min-width: 1025px) {
.gradio-container {
max-width: 1400px !important;
padding: 2rem !important;
}
.gr-row {
gap: 2rem !important;
}
.gr-button {
min-height: 42px !important;
padding: 0.625rem 1.5rem !important;
font-size: 0.875rem !important;
}
.gr-textbox textarea,
.gr-textbox input {
font-size: 14px !important;
padding: 0.625rem !important;
}
/* Optimal desktop layout */
.gr-column:first-child {
flex: 0 0 350px !important;
max-width: 350px !important;
}
.gr-column:last-child {
flex: 1 1 auto !important;
}
}
/* Typography improvements */
.gr-markdown h1 {
font-size: clamp(1.5rem, 4vw, 2.5rem) !important;
font-weight: 700 !important;
line-height: 1.2 !important;
margin-bottom: 1rem !important;
color: var(--text-primary) !important;
}
.gr-markdown h2 {
font-size: clamp(1.25rem, 3vw, 1.875rem) !important;
font-weight: 600 !important;
line-height: 1.3 !important;
margin: 1.5rem 0 0.75rem 0 !important;
color: var(--text-primary) !important;
}
.gr-markdown h3 {
font-size: clamp(1.125rem, 2.5vw, 1.5rem) !important;
font-weight: 600 !important;
line-height: 1.4 !important;
margin: 1.25rem 0 0.5rem 0 !important;
color: var(--text-primary) !important;
}
.gr-markdown p,
.gr-markdown li {
font-size: clamp(0.875rem, 2vw, 1rem) !important;
line-height: 1.6 !important;
color: var(--text-secondary) !important;
margin-bottom: 0.75rem !important;
}
/* Enhanced button styling */
.gr-button {
background: linear-gradient(135deg, var(--primary-color), #1d4ed8) !important;
color: white !important;
border: none !important;
border-radius: var(--radius-md) !important;
font-weight: 500 !important;
transition: all 0.2s ease !important;
cursor: pointer !important;
box-shadow: var(--shadow-sm) !important;
}
.gr-button:hover {
background: linear-gradient(135deg, #1d4ed8, var(--primary-color)) !important;
transform: translateY(-1px) !important;
box-shadow: var(--shadow-md) !important;
}
.gr-button:active {
transform: translateY(0) !important;
box-shadow: var(--shadow-sm) !important;
}
/* Secondary button variant */
.gr-button[variant="secondary"] {
background: linear-gradient(135deg, var(--secondary-color), #059669) !important;
}
.gr-button[variant="secondary"]:hover {
background: linear-gradient(135deg, #059669, var(--secondary-color)) !important;
}
/* Tab styling improvements */
.gr-tab-nav {
background: var(--bg-secondary) !important;
border-radius: var(--radius-md) !important;
padding: 0.25rem !important;
margin-bottom: 1rem !important;
display: flex !important;
gap: 0.25rem !important;
}
.gr-tab-nav > button {
background: transparent !important;
border: none !important;
padding: 0.5rem 1rem !important;
border-radius: var(--radius-sm) !important;
font-weight: 500 !important;
color: var(--text-secondary) !important;
transition: all 0.2s ease !important;
flex: 1 1 auto !important;
}
.gr-tab-nav > button.selected {
background: var(--bg-primary) !important;
color: var(--text-primary) !important;
box-shadow: var(--shadow-sm) !important;
}
.gr-tab-nav > button:hover {
color: var(--text-primary) !important;
background: rgba(255, 255, 255, 0.5) !important;
}
/* Input and textarea improvements */
.gr-textbox textarea,
.gr-textbox input {
border: 1px solid var(--border-color) !important;
border-radius: var(--radius-md) !important;
background: var(--bg-primary) !important;
color: var(--text-primary) !important;
transition: border-color 0.2s ease !important;
resize: vertical !important;
}
.gr-textbox textarea:focus,
.gr-textbox input:focus {
border-color: var(--primary-color) !important;
outline: none !important;
box-shadow: 0 0 0 3px rgba(37, 99, 235, 0.1) !important;
}
/* File upload styling */
.gr-file {
border: 2px dashed var(--border-color) !important;
border-radius: var(--radius-lg) !important;
background: var(--bg-secondary) !important;
padding: 2rem !important;
text-align: center !important;
transition: all 0.2s ease !important;
}
.gr-file:hover {
border-color: var(--primary-color) !important;
background: rgba(37, 99, 235, 0.05) !important;
}
/* Accordion styling */
.gr-accordion {
border: 1px solid var(--border-color) !important;
border-radius: var(--radius-md) !important;
background: var(--bg-primary) !important;
margin-bottom: 1rem !important;
}
.gr-accordion-header {
background: var(--bg-secondary) !important;
padding: 1rem !important;
font-weight: 600 !important;
color: var(--text-primary) !important;
border-bottom: 1px solid var(--border-color) !important;
}
/* Slider styling */
.gr-slider {
margin: 1rem 0 !important;
}
.gr-slider input[type="range"] {
appearance: none !important;
background: var(--bg-secondary) !important;
border-radius: var(--radius-lg) !important;
height: 8px !important;
}
.gr-slider input[type="range"]::-webkit-slider-thumb {
appearance: none !important;
width: 20px !important;
height: 20px !important;
border-radius: 50% !important;
background: var(--primary-color) !important;
cursor: pointer !important;
box-shadow: var(--shadow-sm) !important;
}
.gr-slider input[type="range"]::-moz-range-thumb {
width: 20px !important;
height: 20px !important;
border-radius: 50% !important;
background: var(--primary-color) !important;
cursor: pointer !important;
border: none !important;
box-shadow: var(--shadow-sm) !important;
}
/* Loading and status indicators */
.gr-loading {
display: flex !important;
align-items: center !important;
justify-content: center !important;
padding: 2rem !important;
color: var(--text-secondary) !important;
}
/* Scrollbar styling */
::-webkit-scrollbar {
width: 8px !important;
height: 8px !important;
}
::-webkit-scrollbar-track {
background: var(--bg-secondary) !important;
border-radius: var(--radius-sm) !important;
}
::-webkit-scrollbar-thumb {
background: var(--border-color) !important;
border-radius: var(--radius-sm) !important;
}
::-webkit-scrollbar-thumb:hover {
background: var(--text-secondary) !important;
}
/* Ensure good spacing for text outputs */
.gr-markdown {
padding: 1rem 0 !important;
}
"""
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
gr.Markdown(
"""
# RAG PDF Chat Interface
        Upload PDF documents and ask questions about their content.
        This interface lets you:
        - Upload PDF files or ZIP archives containing PDFs
        - Split documents into chunks and index them with sentence-transformer embeddings
        - Ask questions about your documents in natural language
        - Get answers with source citations
"""
)
# Main content area
with gr.Row():
with gr.Column(scale=1): # This column will contain processing options
with gr.Accordion("πŸ“ Pre-loaded PDFs", open=True):
gr.Markdown("### Option 1: Use pre-existing PDFs")
gr.Markdown("If you have PDFs in the `./pdfs` folder, click the button below to process them.")
                    load_preloaded_btn = gr.Button("🔄 Load Pre-existing PDFs", variant="secondary")
pre_load_status = gr.Textbox(label="Pre-load Status", interactive=False, value="No pre-loaded PDFs processed yet.")
with gr.Accordion("πŸ“¦ Upload ZIP Archive", open=False):
gr.Markdown("### Option 2: Upload ZIP Archive")
# Changed type from "file" to "filepath"
zip_file_input = gr.File(label="Upload ZIP File", type="filepath", file_count="single", file_types=[".zip"])
                    extract_zip_btn = gr.Button("📤 Extract ZIP Archive", variant="primary")
zip_status_output = gr.Textbox(label="ZIP Extraction Status", interactive=False)
with gr.Accordion("πŸ“„ Upload PDF Files", open=False):
gr.Markdown("### Option 3: Direct PDF upload")
gr.Markdown("Upload PDF files directly for processing.")
# Changed type from "file" to "filepath"
pdf_file_input = gr.File(label="Upload PDF Files", type="filepath", file_count="multiple", file_types=[".pdf"])
with gr.Accordion("βš™οΈ Processing Parameters", open=False):
chunk_size_slider = gr.Slider(
minimum=100,
maximum=2000,
value=1000,
step=50,
label="Chunk Size",
info="Size of text chunks for processing."
)
chunk_overlap_slider = gr.Slider(
minimum=0,
maximum=500,
value=200,
step=10,
label="Chunk Overlap",
info="Overlap between text chunks to maintain context."
)
                process_btn = gr.Button("🚀 Process Documents", variant="primary")
processing_status = gr.Textbox(label="Processing Status", interactive=False)
with gr.Column(scale=2): # This column will contain the chat interface
with gr.Accordion("πŸ’¬ Chat with Documents", open=True):
gr.Markdown("### Ask questions about your documents")
gr.Markdown("Once you've processed your PDFs, you can ask questions about their content. The AI will provide answers based on the information in your documents.")
question_input = gr.Textbox(label="Ask a question about your documents", placeholder="e.g., What is the main topic of the documents?")
answer_output = gr.Textbox(label="Answer", interactive=False)
sources_output = gr.Textbox(label="Sources & References", interactive=False)
                    ask_btn = gr.Button("🔍 Ask Question", variant="primary")
gr.Markdown("❓ Help & Tips: Ensure you have your HuggingFace API token set as an environment variable (HUGGINGFACEHUB_API_TOKEN) for the LLM to function properly.")
# Event listeners
load_preloaded_btn.click(
load_preloaded_pdfs,
inputs=[chunk_size_slider, chunk_overlap_slider], # Pass sliders to function
outputs=pre_load_status
)
extract_zip_btn.click(
extract_zip_to_pdfs,
inputs=zip_file_input,
outputs=zip_status_output
)
process_btn.click(
process_pdfs,
inputs=[pdf_file_input, chunk_size_slider, chunk_overlap_slider],
outputs=processing_status
)
ask_btn.click(
answer_question,
inputs=question_input,
outputs=[answer_output, sources_output]
)
        # Initial model check; initialize_models returns (ok, message), so
        # unwrap the message for the single status textbox
        demo.load(lambda: initialize_models()[1], outputs=pre_load_status)
return demo
if __name__ == "__main__":
demo = create_interface()
    # Keep share=False for local/Space use; share=True creates a public Gradio link
    demo.launch(show_api=False, inline=False, share=False)
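# Local run sketch (hf_xxx is a placeholder token):
#   export HUGGINGFACEHUB_API_TOKEN=hf_xxx && python app.py
# On a Hugging Face Space, set the token as a repository secret instead.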