# myavatar / app.py
# Author: pavanammm — "Update app.py" (commit 36c544d, verified)
import os
import json
from pypdf import PdfReader
from openai import OpenAI
import gradio as gr
# --- 1. PDF Data Processing Functions ---
def extract_text_from_pdf(pdf_path):
    """
    Extract the text content of every page of a PDF file.

    Args:
        pdf_path (str): The path to the PDF document.

    Returns:
        str: The concatenated text content from all pages of the PDF,
             joined by newlines. Returns an empty string if the file is
             not found or an error occurs while reading it.
    """
    if not os.path.exists(pdf_path):
        print(f"Error: PDF file not found at '{pdf_path}'")
        return ""
    try:
        reader = PdfReader(pdf_path)
        # extract_text() may return None or "" for pages with no extractable
        # text (e.g. scanned images); skip those so the join never sees None.
        page_texts = (page.extract_text() for page in reader.pages)
        return "\n".join(text for text in page_texts if text)
    except Exception as e:
        print(f"An error occurred while reading the PDF: {e}")
        return ""
def chunk_text(text, chunk_size=1000, chunk_overlap=200):
"""
Splits a given text into smaller, overlapping chunks.
Args:
text (str): The input text to be chunked.
chunk_size (int): The desired size of each chunk.
chunk_overlap (int): The number of characters to overlap between consecutive chunks.
Returns:
list: A list of text chunks.
"""
chunks = []
if not text:
return chunks
start_index = 0
while start_index < len(text):
end_index = min(start_index + chunk_size, len(text))
chunks.append(text[start_index:end_index])
if end_index == len(text):
break
start_index += chunk_size - chunk_overlap
return chunks
# --- 2. OpenRouter API Client Setup ---
# SECURITY: the API key must come exclusively from the environment. The
# previous version shipped a real-looking key as a hard-coded fallback in
# source control — that key must be considered leaked and revoked.
# For Hugging Face deployment, set OPENROUTER_API_KEY as a Space Secret;
# for local testing, export it in your shell environment.
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
# Fail fast at import time so a misconfigured deployment is obvious.
if not OPENROUTER_API_KEY:
    raise ValueError("OPENROUTER_API_KEY not found. Please set it as an environment variable or provide it in app.py for local testing.")
client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY,
)
# Model served via OpenRouter; used by both the generator and the evaluator.
OPENROUTER_MODEL_NAME = "google/gemma-2-9b-it"
# --- 3. Response Generation Agent ---
def generate_response(user_query, context_chunks):
    """
    Ask the LLM to answer `user_query` using only the supplied PDF context.

    Args:
        user_query (str): The user's question.
        context_chunks (list): Text chunks from the PDF to use as context.

    Returns:
        str: The model's answer, or an error message if the API call fails.
    """
    joined_context = "\n---\n".join(context_chunks)
    # The system prompt pins the model to the provided context only.
    system_prompt = (
        "You are a personal avatar chatbot. Your task is to answer the user's questions "
        "based *only* on the provided context. If the answer cannot be found in the context, "
        "state that you don't have enough information to answer. Do not make up information."
        f"\n\nContext:\n{joined_context}"
    )
    try:
        completion = client.chat.completions.create(
            model=OPENROUTER_MODEL_NAME,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_query},
            ],
            temperature=0.5,
            max_tokens=500,
        )
    except Exception as e:
        return f"An error occurred while generating response: {e}"
    return completion.choices[0].message.content
# --- 4. Response Evaluation Agent ---
def evaluate_response(user_query, generated_response, context_chunks):
    """
    Judge a generated answer for accuracy, relevance, and context adherence
    using a second LLM call via OpenRouter.

    Args:
        user_query (str): The original user's question.
        generated_response (str): The answer produced by the generator agent.
        context_chunks (list): The PDF text chunks used as context.

    Returns:
        tuple: (passed (bool), reasoning (str)). On any API or parsing
               failure, returns (False, <error description>).
    """
    joined_context = "\n---\n".join(context_chunks)
    system_msg = (
        "You are an evaluation agent. Your task is to assess a 'generated_response' "
        "based on a 'user_query' and provided 'context'.\n\n"
        "Your assessment should focus on the following criteria:\n"
        "1. **Accuracy**: Is the 'generated_response' factually correct according to the 'context'?\n"
        "2. **Relevance**: Does the 'generated_response' directly address the 'user_query'?\n"
        "3. **Context Adherence**: Does the 'generated_response' *only* use information present "
        "in the 'context'? If it brings in outside information or makes up facts, it fails this criterion.\n\n"
        "Based on these criteria, determine if the 'generated_response' is acceptable. "
        "If the response explicitly states it cannot answer based on the context, and it's true, consider it acceptable."
        "Return your evaluation as a JSON object with two keys: 'pass' (boolean: true if acceptable, false otherwise) "
        "and 'reasoning' (string: a brief explanation for your decision).\n\n"
        f"Context:\n{joined_context}"
    )
    user_msg = (
        f"User Query: {user_query}\n\n"
        f"Generated Response: {generated_response}\n\n"
        "Please evaluate this generated response according to the instructions."
    )
    try:
        api_response = client.chat.completions.create(
            model=OPENROUTER_MODEL_NAME,
            messages=[
                {"role": "system", "content": system_msg},
                {"role": "user", "content": user_msg},
            ],
            temperature=0.1,  # near-deterministic judging
            max_tokens=300,
            response_format={ "type": "json_object" },
        )
        raw_output = api_response.choices[0].message.content
        try:
            verdict = json.loads(raw_output)
        except json.JSONDecodeError:
            # The model ignored the JSON instruction; treat as a failed check.
            print(f"Warning: Could not decode JSON from evaluator: {raw_output}")
            return False, f"Evaluator returned malformed JSON: {raw_output}"
        return verdict.get('pass', False), verdict.get('reasoning', 'No reasoning provided.')
    except Exception as e:
        return False, f"An error occurred while evaluating response: {e}"
# --- 5. Chatbot Orchestration Logic ---
def chat_with_avatar(user_query, context_chunks, max_retries=3):
    """
    Run the generate → evaluate loop until an answer passes evaluation.

    Args:
        user_query (str): The user's question.
        context_chunks (list): Text chunks from the PDF used as context.
        max_retries (int): Maximum generation attempts before giving up.

    Returns:
        str: The first evaluator-approved answer, or an apology message
             if no attempt passes within `max_retries`.
    """
    attempts_left = max_retries
    while attempts_left > 0:
        candidate = generate_response(user_query, context_chunks)
        approved, _reasoning = evaluate_response(user_query, candidate, context_chunks)
        if approved:
            return candidate
        # Failed evaluation: simply retry with the same prompt for now.
        attempts_left -= 1
    return "I'm sorry, I couldn't generate an acceptable response after several attempts. Please try rephrasing your question."
# --- Initial Setup (Load PDF and Chunk) ---
# Path to the personal PDF document. For Hugging Face Spaces, upload the
# PDF to the Space repository so this relative path resolves at startup.
PDF_PATH = "Document2.pdf"  # Must match the uploaded filename

if os.path.exists(PDF_PATH):
    print(f"Extracting text from {PDF_PATH}...")
    PDF_CONTENT = extract_text_from_pdf(PDF_PATH)
    print("PDF content extracted.")
else:
    # Only reachable when the PDF is missing (mainly local dev/testing);
    # in a deployed HF Space the document should be present.
    print(f"Warning: PDF file not found at '{PDF_PATH}'. Using empty content.")
    PDF_CONTENT = ""

# Pre-chunk once at startup; every chat turn reuses these chunks.
TEXT_CHUNKS = chunk_text(PDF_CONTENT)
if not TEXT_CHUNKS:
    print("Warning: No text chunks were created from the PDF. The chatbot will not have context.")
# --- Gradio Interface ---
def respond(message, history):
    """
    Gradio chat callback: answer `message` using the preloaded PDF chunks.

    `history` is supplied by gr.ChatInterface but unused — every turn is
    answered independently from the PDF context.
    """
    global TEXT_CHUNKS  # Use the globally loaded text chunks
    if TEXT_CHUNKS:
        return chat_with_avatar(message, TEXT_CHUNKS)
    return "I am unable to answer questions as my knowledge base (PDF) could not be loaded or processed. Please check the PDF file."
# --- Gradio Interface ---
# Build the chat UI around `respond` and start the server.
demo = gr.ChatInterface(
    respond,
    title="Personal Avatar Chatbot",
    description="Ask me anything about Pavan Thakkallapalli! (Information based on provided PDF)",
    examples=[
        "What is Pavan Thakkallapalli's primary role and education?",
        "Tell me about Pavan's experience with MLOps and Machine Learning.",
        "What is Pavan's favorite movie?"
    ],
)
demo.launch()