ZENLLC committed on
Commit
e32b16d
·
verified ·
1 Parent(s): 13db0a0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -83
app.py CHANGED
@@ -8,56 +8,45 @@ Skills
8
  • /translate_es <text> — English → Spanish
9
  • /ascii <text> — FIGlet ASCII art
10
  • Any question ending with “?” → live Wikipedia answer
11
- ↳ remembers the last topic, so “Who discovered it?” now works.
12
- Model: facebook/blenderbot-400M-distill (~720 MB, public)
13
 
14
- Copy-paste this file into your Space, keep the same requirements.txt, commit, done.
15
  """
16
 
17
  import ast, math, re, gc, traceback, torch, wikipedia, pyfiglet
18
  import gradio as gr
19
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
20
 
21
- # ────────────────────────────────────────────────────────────────
22
- # 0 · helpers
23
- # ────────────────────────────────────────────────────────────────
24
  def log_err(e: Exception) -> str:
25
- print("=== ZEN-Bot ERROR =====================")
26
  traceback.print_exc()
27
- print("=======================================\n")
28
  return f"⚠️ {type(e).__name__}: {e}"
29
 
30
- # ────────────────────────────────────────────────────────────────
31
- # 1 · model (public, small enough for free CPU/basic)
32
- # ────────────────────────────────────────────────────────────────
33
- MODEL = "facebook/blenderbot-400M-distill"
34
 
35
- tokenizer = AutoTokenizer.from_pretrained(MODEL)
36
- model = AutoModelForSeq2SeqLM.from_pretrained(MODEL)
37
  model.eval(); torch.set_grad_enabled(False)
38
 
39
- GEN_ARGS = dict(max_new_tokens=64, do_sample=False) # greedy, fast
40
 
41
  def llm(prompt: str) -> str:
42
- """Generate with BlenderBot-distill, catching any runtime errors."""
43
  try:
44
- inputs = tokenizer(
45
- prompt, return_tensors="pt",
46
- truncation=True, max_length=1024
47
- ).input_ids
48
  with torch.no_grad():
49
- out = model.generate(inputs, **GEN_ARGS)
50
- answer_ids = out[0, inputs.shape[-1]:]
51
- return tokenizer.decode(answer_ids,
52
- skip_special_tokens=True).strip()
53
  except Exception as e:
54
  return log_err(e)
55
 
56
- # ────────────────────────────────────────────────────────────────
57
- # 2 · safe /math
58
- # ────────────────────────────────────────────────────────────────
59
- _ALLOWED = {k: getattr(math, k) for k in dir(math) if not k.startswith("_")}
60
- _ALLOWED.update({"abs": abs, "round": round})
61
 
62
  def safe_math(expr: str) -> str:
63
  try:
@@ -65,102 +54,88 @@ def safe_math(expr: str) -> str:
65
 
66
  def ok(n):
67
  match n:
68
- case ast.Num(): return True
69
- case ast.UnaryOp(): return ok(n.operand)
70
- case ast.BinOp(): return ok(n.left) and ok(n.right)
71
  case ast.Call():
72
  return (isinstance(n.func, ast.Name)
73
- and n.func.id in _ALLOWED
74
  and all(ok(a) for a in n.args))
75
- case _: return False
76
 
77
  if not ok(node.body):
78
- return "⛔️ Only basic maths & trig/log functions allowed."
79
  return str(eval(compile(node, "<expr>", "eval"),
80
- {"__builtins__": {}}, _ALLOWED))
81
  except Exception as e:
82
  return log_err(e)
83
 
84
- # ────────────────────────────────────────────────────────────────
85
- # 3 · Wikipedia Q&A (with last-topic memory)
86
- # ────────────────────────────────────────────────────────────────
87
- last_topic: dict[str, str] = {} # {session_hash: "Penicillin"}
88
 
89
- def wiki_answer(q: str, session_id: str) -> str | None:
90
- """Return a 3-sentence Wikipedia summary or None if not found."""
 
91
  try:
92
- # If the question uses 'it', swap with last remembered topic
93
  query = q.strip()
94
- if last_topic.get(session_id) and re.search(r"\bit\b", query, re.I):
95
- query = re.sub(r"\bit\b", last_topic[session_id], query, flags=re.I)
96
 
97
  wikipedia.set_lang("en")
98
  page = wikipedia.page(query, auto_suggest=True, redirect=True)
99
- last_topic[session_id] = page.title # remember for pronouns
100
- summary = wikipedia.summary(
101
- page.title, sentences=3, auto_suggest=False
102
- )
103
  return f"**{page.title}** — {summary}"
104
  except (wikipedia.DisambiguationError, wikipedia.PageError):
105
- return None # let LLM try instead
106
  except Exception as e:
107
  return log_err(e)
108
 
109
- # ────────────────────────────────────────────────────────────────
110
- # 4 · ASCII art
111
- # ────────────────────────────────────────────────────────────────
112
- def ascii_art(txt: str) -> str:
113
  try:
114
- return pyfiglet.figlet_format(txt, width=120)
115
  except Exception as e:
116
  return log_err(e)
117
 
118
- # ────────────────────────────────────────────────────────────────
119
- # 5 · router ↔ ChatInterface callback
120
- # ────────────────────────────────────────────────────────────────
121
  CMD = re.compile(r"^/(math|summarize|translate_es|ascii)\s+(.+)", re.S | re.I)
122
 
123
- def respond(message: str,
124
- history: list[list[str, str]],
125
- request: gr.Request) -> str:
126
- session_id = request.session_hash or "anon"
127
-
128
- # 5-A · Command shortcuts
129
- if (m := CMD.match(message.strip())):
130
  cmd, body = m.group(1).lower(), m.group(2).strip()
131
  if cmd == "math": return safe_math(body)
132
  if cmd == "ascii": return ascii_art(body)
133
- if cmd == "summarize": return llm(f"Summarise in two concise sentences:\n\n{body}\n\nSummary:")
134
  if cmd == "translate_es": return llm(f"Translate into Spanish (natural):\n\n{body}\n\nSpanish:")
135
 
136
- # 5-B · Wikipedia factual Q&A
137
- if message.strip().endswith("?") and len(message.split()) > 2:
138
- wiki = wiki_answer(message, session_id)
139
  if wiki: return wiki
140
 
141
- # 5-C · Normal chat (keep last 6 turns for speed)
142
- prompt = ("You are ZEN-Bot, a concise, friendly tutor for young AI pioneers.\n\n")
143
  for u, b in history[-6:]:
144
  prompt += f"User: {u}\nAssistant: {b}\n"
145
- prompt += f"User: {message}\nAssistant:"
146
  return llm(prompt)
147
 
148
- # ────────────────────────────────────────────────────────────────
149
- # 6 · Gradio UI
150
- # ────────────────────────────────────────────────────────────────
151
  demo = gr.ChatInterface(
152
- fn = respond,
153
- title = "🚀 ZEN-Bot Ultimate (Key-Free)",
154
- description = (
155
  "**Commands** \n"
156
- "• Normal chat \n"
157
  "• `/math 2**5 / (sin(0.5)+1)` \n"
158
  "• `/summarize <text>` \n"
159
  "• `/translate_es Hello!` \n"
160
  "• `/ascii ZEN` \n"
161
- "• Ask factual questions ending with `?` (remembers topic for pronouns)"
162
  ),
163
- examples = [
164
  "Who discovered penicillin?",
165
  "/ascii AI ROCKS",
166
  "/math sqrt(144)+log(100,10)",
@@ -168,8 +143,9 @@ demo = gr.ChatInterface(
168
  "/translate_es Good evening, friends!",
169
  "Who discovered it?",
170
  ],
171
- fill_height = True,
172
- theme = "soft",
 
173
  )
174
 
175
  if __name__ == "__main__":
 
8
  • /translate_es <text> — English → Spanish
9
  • /ascii <text> — FIGlet ASCII art
10
  • Any question ending with “?” → live Wikipedia answer
11
+ ↳ remembers last topic, so “Who discovered it?” works.
 
12
 
13
+ Model: facebook/blenderbot-400M-distill (public, ~720 MB)
14
  """
15
 
16
  import ast, math, re, gc, traceback, torch, wikipedia, pyfiglet
17
  import gradio as gr
18
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
19
 
20
# ───────────────────────── helpers ──────────────────────────
def log_err(e: Exception) -> str:
    """Dump the active traceback between banner lines and return a short
    user-facing "⚠️ ExcName: message" tag for display in the chat UI."""
    tag = f"⚠️ {type(e).__name__}: {e}"
    print("=== ZEN-Bot ERROR ===")
    traceback.print_exc()
    print("=====================\n")
    return tag
26
 
27
# ─────────────────────── model loading ──────────────────────
# BlenderBot-400M-distill: a public seq2seq chat model, small enough for
# the free CPU tier (~720 MB per the module docstring).
MODEL_ID = "facebook/blenderbot-400M-distill"  # always public

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
model.eval(); torch.set_grad_enabled(False)  # inference only — disable autograd globally

# Shared generation settings for every llm() call.
GEN_KW = dict(max_new_tokens=64, do_sample=False)  # deterministic → faster
35
 
36
def llm(prompt: str) -> str:
    """Run *prompt* through BlenderBot and return the decoded reply.

    Any runtime failure (tokenizer error, OOM, …) is logged via log_err
    and returned as a "⚠️ …" string instead of crashing the UI.
    """
    try:
        ids_in = tokenizer(prompt, return_tensors="pt",
                           truncation=True, max_length=1024).input_ids
        with torch.no_grad():
            ids_out = model.generate(ids_in, **GEN_KW)
        # BlenderBot is an encoder-decoder model: generate() returns ONLY
        # decoder tokens, so the whole sequence is the reply. Slicing off
        # the prompt length (the causal-LM idiom `ids_out[0, ids_in.shape[-1]:]`)
        # would truncate or empty the answer.
        return tokenizer.decode(ids_out[0], skip_special_tokens=True).strip()
    except Exception as e:
        return log_err(e)
46
 
47
# ──────────────────────── /math safe-eval ───────────────────
# Whitelist for /math: every public name from the math module (sin, log,
# sqrt, pi, …) plus two safe builtins. Used both to vet the parsed AST
# and as the locals mapping handed to eval().
_MATH = {k: getattr(math, k) for k in dir(math) if not k.startswith("_")}
_MATH.update({"abs": abs, "round": round})
 
 
50
 
51
  def safe_math(expr: str) -> str:
52
  try:
 
54
 
55
  def ok(n):
56
  match n:
57
+ case ast.Num(): return True
58
+ case ast.UnaryOp(): return ok(n.operand)
59
+ case ast.BinOp(): return ok(n.left) and ok(n.right)
60
  case ast.Call():
61
  return (isinstance(n.func, ast.Name)
62
+ and n.func.id in _MATH
63
  and all(ok(a) for a in n.args))
64
+ case _: return False
65
 
66
  if not ok(node.body):
67
+ return "⛔️ Only basic math / trig / log functions allowed."
68
  return str(eval(compile(node, "<expr>", "eval"),
69
+ {"__builtins__": {}}, _MATH))
70
  except Exception as e:
71
  return log_err(e)
72
 
73
# ──────────────────────── Wikipedia Q&A ─────────────────────
# Last successfully-resolved article title, used to rewrite the pronoun
# "it" in follow-up questions. NOTE(review): module-level, so it is shared
# by every visitor of the single free CPU Space — acceptable for a demo.
last_topic: str | None = None  # shared across the single free CPU Space

def wiki_answer(q: str) -> str | None:
    """Return a 3-sentence Wikipedia answer for *q*; None if no page fits.

    Side effect: remembers the resolved page title in `last_topic` so a
    later question containing "it" can be rewritten against that topic.
    Disambiguation/missing-page errors yield None (caller falls back to
    the LLM); any other error is logged and returned as a "⚠️ …" string.
    """
    global last_topic
    try:
        query = q.strip()
        # Pronoun resolution: substitute the remembered topic for "it".
        if last_topic and re.search(r"\bit\b", query, re.I):
            query = re.sub(r"\bit\b", last_topic, query, flags=re.I)

        wikipedia.set_lang("en")
        page = wikipedia.page(query, auto_suggest=True, redirect=True)
        last_topic = page.title  # remember for next turn
        summary = wikipedia.summary(page.title, sentences=3, auto_suggest=False)
        return f"**{page.title}** — {summary}"
    except (wikipedia.DisambiguationError, wikipedia.PageError):
        return None  # no unambiguous page — let the LLM try instead
    except Exception as e:
        return log_err(e)
93
 
94
# ───────────────────────── ASCII art ─────────────────────────
def ascii_art(text: str) -> str:
    """Render *text* as FIGlet banner art on a 120-column canvas.

    On any pyfiglet failure the error is logged and a short "⚠️ …"
    string is returned instead of raising.
    """
    try:
        banner = pyfiglet.figlet_format(text, width=120)
    except Exception as e:
        return log_err(e)
    return banner
100
 
101
# ────────────────── main router / callback ───────────────────
# Slash-command grammar: /math, /summarize, /translate_es, /ascii + body.
CMD = re.compile(r"^/(math|summarize|translate_es|ascii)\s+(.+)", re.S | re.I)

def respond(msg: str, history: list[list[str]]) -> str:
    """Route one chat turn: slash-command → Wikipedia Q&A → LLM chat.

    *history* is Gradio's [[user, bot], …] turn list. Returns the reply text.
    """
    text = msg.strip()

    # A · command shortcuts
    if (m := CMD.match(text)):
        cmd, body = m.group(1).lower(), m.group(2).strip()
        if cmd == "math":
            return safe_math(body)
        if cmd == "ascii":
            return ascii_art(body)
        if cmd == "summarize":
            return llm(f"Summarize in two concise sentences:\n\n{body}\n\nSummary:")
        if cmd == "translate_es":
            return llm(f"Translate into Spanish (natural):\n\n{body}\n\nSpanish:")

    # B · live Wikipedia for factual questions. Check the stripped text so
    # a trailing space/newline after "?" doesn't skip the wiki path.
    if text.endswith("?") and len(text.split()) > 2:
        wiki = wiki_answer(msg)
        if wiki:
            return wiki

    # C · normal chat (keep last 6 turns for speed)
    turns = ["You are ZEN-Bot, a concise, friendly tutor for young AI pioneers.\n\n"]
    for u, b in history[-6:]:
        turns.append(f"User: {u}\nAssistant: {b}\n")
    turns.append(f"User: {msg}\nAssistant:")
    return llm("".join(turns))
124
 
125
+ # ────────────────────── Gradio Chat UI ───────────────────────
 
 
126
  demo = gr.ChatInterface(
127
+ fn = respond,
128
+ title = "🚀 ZEN-Bot Ultimate (Key-Free)",
129
+ description = (
130
  "**Commands** \n"
131
+ "• normal chat \n"
132
  "• `/math 2**5 / (sin(0.5)+1)` \n"
133
  "• `/summarize <text>` \n"
134
  "• `/translate_es Hello!` \n"
135
  "• `/ascii ZEN` \n"
136
+ "• Ask factual questions ending with `?` (remembers topic for 'it')"
137
  ),
138
+ examples = [
139
  "Who discovered penicillin?",
140
  "/ascii AI ROCKS",
141
  "/math sqrt(144)+log(100,10)",
 
143
  "/translate_es Good evening, friends!",
144
  "Who discovered it?",
145
  ],
146
+ cache_examples = False, # ← avoids the startup pre-run
147
+ theme = "soft",
148
+ fill_height = True,
149
  )
150
 
151
  if __name__ == "__main__":