gk2410 committed on
Commit
cb5040b
·
verified ·
1 Parent(s): 9fb864c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -44
app.py CHANGED
@@ -3,55 +3,69 @@ import requests
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
5
 
6
- # Initialize Inference Client
7
  client = InferenceClient("meta-llama/Llama-3.3-70B-Instruct", token=os.getenv("HF_TOKEN"))
8
 
9
def fetch_catalog(query):
    """Search Open Library for up to 3 books matching *query*.

    Returns a newline-separated "- Title by Author" listing, the string
    "No matches found." when the search comes back empty, or
    "Search service offline." when the request or JSON parsing fails.
    """
    try:
        # Pass the query via ``params`` so requests URL-encodes it;
        # interpolating raw user text into the URL breaks on spaces/&/#.
        response = requests.get(
            "https://openlibrary.org/search.json",
            params={"q": query, "limit": 3},
            timeout=5,
        )
        response.raise_for_status()  # treat HTTP error pages as "offline" too
        data = response.json().get("docs", [])
        if not data:
            return "No matches found."
        return "\n".join(
            f"- {d.get('title')} by {', '.join(d.get('author_name', ['Unknown']))}"
            for d in data
        )
    # Narrowed from a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit and hid real bugs.  ValueError covers
    # JSON decode failures (requests' JSONDecodeError subclasses it).
    except (requests.RequestException, ValueError):
        return "Search service offline."
19
 
20
- def librarian_agent(message, history, session_state):
21
- # Standardize state initialization
22
  if session_state is None:
23
  session_state = {"verified": False, "age": None}
24
 
25
- # --- PHASE 1: ETHICAL GATE ---
26
  if not session_state["verified"]:
27
  age_input = "".join(filter(str.isdigit, message))
28
  if age_input:
29
- age = int(age_input)
30
- if age < 13:
31
- reply = "πŸ›‘οΈ Safety: Access restricted for users under 13."
32
- else:
33
- session_state["age"] = age
34
- session_state["verified"] = True
35
- reply = f"βœ… Verified (Age {age}). How can I help you today?"
36
  else:
37
- reply = "πŸ‘‹ Please enter your age as a number to start."
38
-
39
- # history is a list of {"role": "...", "content": "..."}
40
- history.append({"role": "user", "content": message})
41
  history.append({"role": "assistant", "content": reply})
42
  return history, session_state
43
 
44
- # --- PHASE 2: RAG PIPELINE ---
45
- catalog_context = fetch_catalog(message)
46
-
47
- # Building the internal LLM prompt
 
 
 
 
 
 
 
 
 
48
  llm_messages = [
49
- {"role": "system", "content": f"You are a librarian for a {session_state['age']} year old. Use: {catalog_context}"},
50
- {"role": "user", "content": message}
 
 
 
 
 
 
 
51
  ]
52
 
53
  try:
54
- output = client.chat_completion(messages=llm_messages, max_tokens=1000)
55
  bot_res = output.choices[0].message.content
56
  except Exception as e:
57
  bot_res = f"Service Error: {str(e)}"
@@ -60,27 +74,26 @@ def librarian_agent(message, history, session_state):
60
  history.append({"role": "assistant", "content": bot_res})
61
  return history, session_state
62
 
63
# --- UI BLOCK (Minimalist) ---
with gr.Blocks() as demo:
    # Page banner.
    gr.Markdown("## πŸ“š AI Librarian Agent")

    # Per-visitor session dict; starts as None and is filled by the agent.
    state = gr.State(None)

    # Chat panel seeded with a greeting in the dict-message format.
    # NOTE: 'type="messages"' is deliberately omitted — this Gradio
    # version rejects the keyword and auto-detects the format instead.
    greeting = {"role": "assistant", "content": "πŸ‘‹ Welcome! Please enter your **age** to begin."}
    chatbot = gr.Chatbot(
        label="Librarian Consultation",
        value=[greeting],
    )

    msg = gr.Textbox(label="Input", placeholder="Enter age (e.g. 25)")

    def respond(text, chat_log, sess):
        """Delegate to the agent, then blank the input box."""
        new_log, new_sess = librarian_agent(text, chat_log, sess)
        return "", new_log, new_sess

    msg.submit(respond, [msg, chatbot, state], [msg, chatbot, state])

# Launch without passing theme to Blocks, handle it here if needed
demo.launch()
 
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
5
 
 
6
  client = InferenceClient("meta-llama/Llama-3.3-70B-Instruct", token=os.getenv("HF_TOKEN"))
7
 
8
def fetch_catalog_detailed(query):
    """Search Open Library for up to 5 books matching *query*.

    Returns a list of dicts with keys ``title``, ``author`` (comma-joined
    author names), ``year`` (first publish year or "N/A") and
    ``popularity`` (edition count).  Returns an empty list on any
    network or parse failure so the caller can degrade gracefully.
    """
    try:
        # Let requests URL-encode the query instead of interpolating raw
        # user text into the URL (spaces, '&', '#' would corrupt it).
        response = requests.get(
            "https://openlibrary.org/search.json",
            params={"q": query, "limit": 5},
            timeout=5,
        )
        response.raise_for_status()  # surface HTTP errors as a failure too
        docs = response.json().get("docs", [])
        # Comprehension instead of a manual append loop.
        return [
            {
                "title": d.get("title", "Unknown"),
                "author": ", ".join(d.get("author_name", ["Unknown"])),
                "year": d.get("first_publish_year", "N/A"),
                "popularity": d.get("edition_count", 0),
            }
            for d in docs
        ]
    # Narrowed from a bare ``except:`` which also swallowed
    # KeyboardInterrupt/SystemExit.  ValueError covers JSON decode errors.
    except (requests.RequestException, ValueError):
        return []
25
 
26
+ def librarian_agent(message, history, session_state, sort_by):
 
27
  if session_state is None:
28
  session_state = {"verified": False, "age": None}
29
 
30
+ # --- ETHICAL GATE ---
31
  if not session_state["verified"]:
32
  age_input = "".join(filter(str.isdigit, message))
33
  if age_input:
34
+ session_state["age"] = int(age_input)
35
+ session_state["verified"] = True
36
+ reply = "βœ… Verified. What topic are you interested in?"
 
 
 
 
37
  else:
38
+ reply = "πŸ‘‹ Please enter your age to start."
 
 
 
39
  history.append({"role": "assistant", "content": reply})
40
  return history, session_state
41
 
42
+ # --- RETRIEVAL & SORTING ---
43
+ raw_books = fetch_catalog_detailed(message)
44
+ if sort_by == "Newest First":
45
+ raw_books.sort(key=lambda x: x['year'] if isinstance(x['year'], int) else 0, reverse=True)
46
+ elif sort_by == "Popularity":
47
+ raw_books.sort(key=lambda x: x['popularity'], reverse=True)
48
+
49
+ # Creating the data string for the LLM
50
+ catalog_str = ""
51
+ for b in raw_books:
52
+ catalog_str += f"Title: {b['title']}, Author: {b['author']}, Year: {b['year']}\n"
53
+
54
+ # --- INSTRUCTING THE LLM FOR TABULAR SUMMARY ---
55
  llm_messages = [
56
+ {
57
+ "role": "system",
58
+ "content": (
59
+ f"You are a helpful librarian for a {session_state['age']}-year-old. "
60
+ "Output a Markdown table with EXACTLY these columns: | Book & Author | Year | Summary |."
61
+ "The Summary must be 1-2 concise sentences. Be professional and age-appropriate."
62
+ )
63
+ },
64
+ {"role": "user", "content": f"Books found:\n{catalog_str}\n\nUser Question: {message}"}
65
  ]
66
 
67
  try:
68
+ output = client.chat_completion(messages=llm_messages, max_tokens=1200)
69
  bot_res = output.choices[0].message.content
70
  except Exception as e:
71
  bot_res = f"Service Error: {str(e)}"
 
74
  history.append({"role": "assistant", "content": bot_res})
75
  return history, session_state
76
 
77
# --- UI CONFIGURATION ---
with gr.Blocks() as demo:
    # Page banner.
    gr.Markdown("## ⚑ Fast-Track Librarian with AI Summaries")

    # Holds the per-session verification dict; populated by the agent.
    state = gr.State(None)

    # Sort selector sits in its own row above the chat panel.
    with gr.Row():
        sort_option = gr.Radio(
            choices=["Relevance", "Newest First", "Popularity"],
            label="Sort by",
            value="Relevance",
        )

    # Chat panel seeded with the age prompt in dict-message format.
    opening = {"role": "assistant", "content": "πŸ‘‹ Enter your **age** to begin."}
    chatbot = gr.Chatbot(
        label="Quick Catalog",
        value=[opening],
    )

    msg = gr.Textbox(
        label="Search Query",
        placeholder="e.g. Science Fiction or History of Rome",
    )

    # First handler runs the agent; the second clears the textbox.
    msg.submit(librarian_agent, [msg, chatbot, state, sort_option], [chatbot, state])
    msg.submit(lambda: "", None, msg)

demo.launch()