File size: 3,756 Bytes
91a7459
dea41fe
 
 
 
d662268
 
 
 
 
 
9188812
dea41fe
6e14405
4976770
 
5f29551
 
 
 
 
 
 
cb5040b
b5401b5
9188812
5f29551
 
4976770
 
96828e9
 
 
 
6e14405
 
 
cb5040b
5f29551
96828e9
5f29551
6168601
9ee857a
96828e9
5f29551
6e14405
d662268
cb5040b
91a7459
cb5040b
9188812
cb5040b
5f29551
cb5040b
9fb864c
5f29551
 
2901bbc
96828e9
2d6b1ed
5f29551
6168601
2d6b1ed
5f29551
96828e9
6168601
 
9ee857a
 
5f29551
 
 
96828e9
 
9188812
5f29551
 
91a7459
5f29551
 
 
 
 
 
 
 
 
 
 
 
 
 
2d6b1ed
9fb864c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import os, requests, gradio as gr
from huggingface_hub import InferenceClient

# Hosted LLM endpoint used for all chat completions; reads HF_TOKEN from the
# environment (a missing token yields None and falls back to anonymous access).
client = InferenceClient("meta-llama/Llama-3.3-70B-Instruct", token=os.getenv("HF_TOKEN"))

def safe_int(val, default=0):
    """Coerce *val* to ``int``, returning *default* for ``None`` or unparseable input."""
    if val is None:
        return default
    try:
        return int(val)
    except (ValueError, TypeError):
        return default

def fetch_catalog_detailed(query, limit):
    """Search the Open Library catalog for *query*, returning at most *limit* hits.

    Returns a list of dicts with keys ``title``, ``author``, ``year``,
    ``editions``. Best-effort: any network, HTTP, or JSON-decoding failure
    yields an empty list so the caller can degrade gracefully.
    """
    try:
        response = requests.get(
            "https://openlibrary.org/search.json",
            # params= URL-encodes the query; the old f-string URL broke on
            # spaces, '&', '#', etc. in user input.
            params={"q": query, "limit": limit},
            timeout=5,
        )
        # Surface HTTP errors instead of trying to parse an error page.
        response.raise_for_status()
        docs = response.json().get("docs", [])
    except (requests.RequestException, ValueError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; ValueError covers requests' JSONDecodeError.
        return []
    return [{
        "title": d.get("title", "Unknown"),
        "author": ", ".join(d.get("author_name", ["Unknown"])),
        # 1000 sentinel keeps "Newest First" sorting stable for missing years.
        "year": safe_int(d.get("first_publish_year"), 1000),
        "editions": safe_int(d.get("edition_count"), 0),
    } for d in docs]

def librarian_agent(message, history, session_state, sort_by, num_results):
    """Chat handler: verify the user's age once, then answer book queries via RAG.

    Args:
        message: Raw textbox content (may be empty when UI controls re-trigger).
        history: Chat transcript as a list of {"role", "content"} dicts.
        session_state: Per-session dict {"verified": bool, "age": int|None},
            or None on the very first call.
        sort_by: One of "Relevance" / "Newest First" / "Popularity".
        num_results: Catalog fetch size (slider value; may arrive as float).

    Returns:
        (history, session_state) — the updated transcript and session dict.
    """
    history = history or []  # tolerate a None/unset chatbot value

    # Prevent crashing if user moves slider before typing a query.
    if not message or not message.strip():
        return history, session_state

    if session_state is None:
        session_state = {"verified": False, "age": None}

    if not session_state["verified"]:
        # Record the user's turn too — previously only the assistant reply was
        # appended here, leaving orphaned bot messages in the transcript.
        history.append({"role": "user", "content": message})
        age_digits = "".join(filter(str.isdigit, message))
        if age_digits:
            session_state["age"] = int(age_digits)
            session_state["verified"] = True
            reply = "βœ… Verified. What topic are we researching?"
        else:
            reply = "πŸ‘‹ Please enter your age to start."
        history.append({"role": "assistant", "content": reply})
        return history, session_state

    # Core RAG logic: fetch, optionally re-sort, then summarize via the LLM.
    raw_books = fetch_catalog_detailed(message, int(num_results))

    if sort_by == "Newest First":
        raw_books = sorted(raw_books, key=lambda x: x['year'], reverse=True)
    elif sort_by == "Popularity":
        raw_books = sorted(raw_books, key=lambda x: x['editions'], reverse=True)
    # "Relevance" keeps the API's native ordering.

    catalog_summary = "\n".join([f"BOOK: {b['title']} | YEAR: {b['year']} | EDITIONS: {b['editions']}" for b in raw_books])

    llm_messages = [
        # Plain string (was a pointless f-string with no placeholders).
        {"role": "system", "content": "You are a librarian. Output ONLY a Markdown table: | Book & Author | Year | Editions | Summary |."},
        {"role": "user", "content": f"Catalog:\n{catalog_summary}\n\nUser: {message}"}
    ]

    try:
        output = client.chat_completion(messages=llm_messages, max_tokens=1000)
        bot_res = output.choices[0].message.content
    except Exception as e:
        # Boundary handler: show the failure in-chat rather than crashing the UI.
        bot_res = f"Error: {str(e)}"

    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": bot_res})
    return history, session_state

# --- GRADIO UI ---
with gr.Blocks() as demo:
    gr.Markdown("## πŸ“š Fully Dynamic AI Librarian")
    # Per-session verification dict; None until librarian_agent initializes it.
    state = gr.State(None)

    with gr.Row():
        sort_option = gr.Dropdown(["Relevance", "Newest First", "Popularity"], label="Sort", value="Relevance")
        result_count = gr.Slider(minimum=3, maximum=15, step=1, value=5, label="Results Count")

    # type="messages" is required because librarian_agent produces openai-style
    # role/content dicts; the legacy tuple format is Gradio's default and
    # rejects these dicts on current versions.
    chatbot = gr.Chatbot(
        label="Catalog",
        type="messages",
        value=[{"role": "assistant", "content": "πŸ‘‹ Enter your **age** to begin."}],
    )
    msg = gr.Textbox(label="Query", placeholder="Search books...")

    # Pack inputs for reuse across the three event bindings below.
    inputs_list = [msg, chatbot, state, sort_option, result_count]
    outputs_list = [chatbot, state]

    # EVENT 1: Submit on Enter, then clear the textbox.
    msg.submit(librarian_agent, inputs_list, outputs_list).then(lambda: "", None, msg)

    # EVENT 2: DYNAMIC REFRESH — re-trigger the agent when sort/count change.
    # NOTE(review): the textbox is cleared after each submit, so these refresh
    # events no-op (empty-message guard) until the user types again; consider
    # caching the last query in `state` if live re-sorting is intended.
    result_count.change(librarian_agent, inputs_list, outputs_list)
    sort_option.change(librarian_agent, inputs_list, outputs_list)

demo.launch()