# libAgent — Hugging Face Space entry point (app.py)
# Last updated by gk2410, commit 5f29551 (verified)
# stdlib (os) + HTTP client (requests) + UI toolkit (gradio)
import os, requests, gradio as gr
from huggingface_hub import InferenceClient

# Shared LLM client for the whole app; HF_TOKEN must be set in the Space secrets.
# NOTE(review): constructed at import time, so a missing/invalid token only
# surfaces on the first chat_completion call — confirm that is acceptable.
client = InferenceClient("meta-llama/Llama-3.3-70B-Instruct", token=os.getenv("HF_TOKEN"))
def safe_int(val, default=0):
    """Coerce *val* to ``int``, returning *default* for ``None`` or unconvertible input."""
    if val is None:
        return default
    try:
        return int(val)
    except (ValueError, TypeError):
        return default
def fetch_catalog_detailed(query, limit):
    """Search Open Library for *query* and return up to *limit* normalized records.

    Each record is a dict: title (str), author (comma-joined str),
    year (int, sentinel 1000 when missing), editions (int, 0 when missing).
    Returns [] on any network/HTTP/parse failure — this is a best-effort lookup.
    """
    try:
        # Pass the query via `params` so requests URL-encodes it; the original
        # f-string interpolation broke on spaces, '&', '#', etc.
        response = requests.get(
            "https://openlibrary.org/search.json",
            params={"q": query, "limit": limit},
            timeout=5,
        )
        # Surface 4xx/5xx instead of trying to JSON-parse an error page.
        response.raise_for_status()
        docs = response.json().get("docs", [])
    except (requests.RequestException, ValueError):
        # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt
        # and SystemExit). ValueError covers a malformed JSON body.
        return []
    return [{
        "title": d.get("title", "Unknown"),
        "author": ", ".join(d.get("author_name", ["Unknown"])),
        # Sentinel 1000 keeps "Newest First" sorting well-defined when the year is absent.
        "year": safe_int(d.get("first_publish_year"), 1000),
        "editions": safe_int(d.get("edition_count"), 0),
    } for d in docs]
def librarian_agent(message, history, session_state, sort_by, num_results):
    """Chat handler: age-gate first, then catalog search + LLM summarization.

    Parameters mirror the Gradio inputs list (textbox, chatbot, state,
    dropdown, slider). Returns the updated ``(history, session_state)`` pair.
    """
    # Guard: slider/dropdown change events fire with an empty textbox after the
    # post-submit clear — do nothing instead of re-querying or crashing.
    if not message or message.strip() == "":
        return history, session_state
    # FIX: history was never guarded against None, unlike session_state.
    if history is None:
        history = []
    if session_state is None:
        session_state = {"verified": False, "age": None}

    # --- Age verification gate ---
    if not session_state["verified"]:
        # FIX: append the user's message here too, so the transcript is
        # consistent with the main path (it previously showed only the
        # assistant's side of the age exchange).
        history.append({"role": "user", "content": message})
        age_digits = "".join(filter(str.isdigit, message))
        if age_digits:
            session_state["age"] = int(age_digits)
            session_state["verified"] = True
            reply = "✅ Verified. What topic are we researching?"
        else:
            reply = "👋 Please enter your age to start."
        history.append({"role": "assistant", "content": reply})
        return history, session_state

    # --- Core RAG logic ---
    raw_books = fetch_catalog_detailed(message, int(num_results))
    if sort_by == "Newest First":
        raw_books = sorted(raw_books, key=lambda b: b['year'], reverse=True)
    elif sort_by == "Popularity":
        raw_books = sorted(raw_books, key=lambda b: b['editions'], reverse=True)
    # "Relevance" keeps the API's native ordering.
    catalog_summary = "\n".join(
        f"BOOK: {b['title']} | YEAR: {b['year']} | EDITIONS: {b['editions']}" for b in raw_books
    )
    llm_messages = [
        {"role": "system", "content": "You are a librarian. Output ONLY a Markdown table: | Book & Author | Year | Editions | Summary |."},
        {"role": "user", "content": f"Catalog:\n{catalog_summary}\n\nUser: {message}"},
    ]
    try:
        output = client.chat_completion(messages=llm_messages, max_tokens=1000)
        bot_res = output.choices[0].message.content
    except Exception as e:
        # Best-effort: surface the failure in-chat rather than crashing the UI.
        bot_res = f"Error: {str(e)}"
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": bot_res})
    return history, session_state
# --- GRADIO UI ---
with gr.Blocks() as demo:
    gr.Markdown("## 📚 Fully Dynamic AI Librarian")
    # Per-session verification state; librarian_agent initializes it to
    # {"verified": False, "age": None} on first use.
    state = gr.State(None)
    with gr.Row():
        sort_option = gr.Dropdown(["Relevance", "Newest First", "Popularity"], label="Sort", value="Relevance")
        result_count = gr.Slider(minimum=3, maximum=15, step=1, value=5, label="Results Count")
    # Chatbot seeded with the age prompt; messages are {"role", "content"} dicts.
    chatbot = gr.Chatbot(label="Catalog", value=[{"role": "assistant", "content": "👋 Enter your **age** to begin."}])
    msg = gr.Textbox(label="Query", placeholder="Search books...")
    # Pack inputs for reuse
    inputs_list = [msg, chatbot, state, sort_option, result_count]
    outputs_list = [chatbot, state]
    # EVENT 1: Submit on Enter
    # The chained .then(...) clears the textbox after the agent responds.
    msg.submit(librarian_agent, inputs_list, outputs_list).then(lambda: "", None, msg)
    # EVENT 2: DYNAMIC REFRESH
    # When slider or dropdown changes, re-trigger the agent automatically
    # (the agent no-ops when the textbox is empty, e.g. right after a submit).
    result_count.change(librarian_agent, inputs_list, outputs_list)
    sort_option.change(librarian_agent, inputs_list, outputs_list)
demo.launch()