ZENLLC committed on
Commit
42ad35c
·
verified ·
1 Parent(s): b15a92e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +51 -55
app.py CHANGED
@@ -1,111 +1,107 @@
1
  """
2
  ZEN-Bot Ultimate — key-free Hugging Face Space (free CPU)
3
 
4
- Skills (type these commands – or just ask naturally):
5
  • Normal chat
6
  • /math <expr> — safe calculator
7
  • /summarize <text> — 2-sentence TL;DR
8
  • /translate_es <text> — English → Spanish
9
  • /ascii <text> — big FIGlet ASCII art
10
  • Any question ending with “?” → live Wikipedia answer
11
- Dependencies: transformers, torch, gradio, wikipedia, pyfiglet
12
  """
13
 
14
  import ast, math, re, gc, traceback, torch, wikipedia, pyfiglet
15
  import gradio as gr
16
- from transformers import AutoTokenizer, AutoModelForCausalLM
17
 
18
- # ---------------------------------------------------------------------
19
- # 0 · Tiny helpers
20
- # ---------------------------------------------------------------------
21
- def err_msg(e: Exception) -> str:
22
- print("=== ZEN-Bot ERROR ===")
23
  traceback.print_exc()
24
- print("=====================")
25
  return f"⚠️ {type(e).__name__}: {e}"
26
 
27
- # ---------------------------------------------------------------------
28
- # 1 · Load a light, coherent open-weights chat model (fits free CPU)
29
- # ---------------------------------------------------------------------
30
- MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat" # ≈1 GB → ~4 GB RAM
31
 
32
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
33
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
34
  model.eval(); torch.set_grad_enabled(False)
35
 
36
  GEN_KW = dict(
37
  max_new_tokens = 64,
38
- do_sample = False, # fast & deterministic
39
- pad_token_id = tokenizer.eos_token_id,
40
  )
41
 
42
  def llm(prompt: str) -> str:
43
  try:
44
- ids_in = tokenizer(prompt, return_tensors="pt").input_ids[:, -1024:]
 
45
  with torch.no_grad():
46
  ids_out = model.generate(ids_in, **GEN_KW)
47
  reply_ids = ids_out[0, ids_in.shape[-1]:]
48
  return tokenizer.decode(reply_ids, skip_special_tokens=True).strip()
49
  except Exception as e:
50
- return err_msg(e)
51
 
52
- # ---------------------------------------------------------------------
53
- # 2 · /math safe evaluator
54
- # ---------------------------------------------------------------------
55
  _ALLOWED = {k: getattr(math, k) for k in dir(math) if not k.startswith("__")}
56
  _ALLOWED.update({"abs": abs, "round": round})
57
 
58
  def safe_math(expr: str) -> str:
59
  try:
60
  node = ast.parse(expr, mode="eval")
61
-
62
  def ok(n):
63
  if isinstance(n, ast.Num): return True
64
  if isinstance(n, ast.UnaryOp): return ok(n.operand)
65
  if isinstance(n, ast.BinOp): return ok(n.left) and ok(n.right)
66
  if isinstance(n, ast.Call):
67
- return (isinstance(n.func, ast.Name) and
68
- n.func.id in _ALLOWED and
69
- all(ok(a) for a in n.args))
70
  return False
71
-
72
  if not ok(node.body):
73
- return "⛔️ Only basic math functions are allowed."
74
  return str(eval(compile(node, "<expr>", "eval"),
75
  {"__builtins__": {}}, _ALLOWED))
76
  except Exception as e:
77
- return err_msg(e)
78
 
79
- # ---------------------------------------------------------------------
80
- # 3 · Wikipedia live answer
81
- # ---------------------------------------------------------------------
82
  def wiki_answer(q: str) -> str | None:
83
  try:
84
  wikipedia.set_lang("en")
85
- page = wikipedia.page(q, auto_suggest=True, redirect=True)
86
- summ = wikipedia.summary(page.title, sentences=3, auto_suggest=False)
87
  return f"**{page.title}** — {summ}"
88
  except (wikipedia.DisambiguationError, wikipedia.PageError):
89
  return None
90
  except Exception as e:
91
- return err_msg(e)
92
 
93
- # ---------------------------------------------------------------------
94
  # 4 · ASCII art
95
- # ---------------------------------------------------------------------
96
  def ascii_art(txt: str) -> str:
97
- try:
98
- return pyfiglet.figlet_format(txt, width=120)
99
- except Exception as e:
100
- return err_msg(e)
101
 
102
- # ---------------------------------------------------------------------
103
- # 5 · Main router
104
- # ---------------------------------------------------------------------
105
  CMD = re.compile(r"^/(math|summarize|translate_es|ascii)\s+(.+)", re.S | re.I)
106
 
107
- def respond(msg: str, chat_hist: list[list[str, str]]) -> str:
108
- # 5-A · Commands
109
  m = CMD.match(msg.strip())
110
  if m:
111
  cmd, body = m.group(1).lower(), m.group(2).strip()
@@ -114,39 +110,39 @@ def respond(msg: str, chat_hist: list[list[str, str]]) -> str:
114
  if cmd == "summarize": return llm(f"Summarize in two concise sentences:\n\n{body}\n\nSummary:")
115
  if cmd == "translate_es": return llm(f"Translate into Spanish (natural):\n\n{body}\n\nSpanish:")
116
 
117
- # 5-B · “Question about the world?” → Wikipedia first
118
  if msg.strip().endswith("?") and len(msg.split()) > 2:
119
  wiki = wiki_answer(msg)
120
  if wiki: return wiki
121
 
122
- # 5-C · Normal chat with memory of last 6 turns
123
  prompt = "You are ZEN-Bot, a concise, helpful tutor for young AI pioneers.\n\n"
124
- for u, b in chat_hist[-6:]:
125
  prompt += f"User: {u}\nAssistant: {b}\n"
126
  prompt += f"User: {msg}\nAssistant:"
127
  return llm(prompt)
128
 
129
- # ---------------------------------------------------------------------
130
  # 6 · Gradio UI
131
- # ---------------------------------------------------------------------
132
  demo = gr.ChatInterface(
133
  fn = respond,
134
  title = "🚀 ZEN-Bot Ultimate (Key-Free)",
135
  description = (
136
- "**What I can do** \n"
137
- "• Normal chat \n"
138
  "• `/math 2**5 / (sin(0.5)+1)` \n"
139
  "• `/summarize <text>` \n"
140
- "• `/translate_es Hello world!` \n"
141
  "• `/ascii ZEN` \n"
142
- "• Ask any fact question (ends with `?`) I’ll fetch live from Wikipedia"
143
  ),
144
  theme = "soft",
145
  fill_height = True,
146
  examples = [
147
  "Who discovered penicillin?",
148
  "/ascii AI ROCKS",
149
- "/math sqrt(144) + log(100,10)",
150
  "/summarize The Industrial Revolution began in Britain...",
151
  "/translate_es Good evening, friends!",
152
  ],
 
1
  """
2
  ZEN-Bot Ultimate — key-free Hugging Face Space (free CPU)
3
 
4
+ Skills
5
  • Normal chat
6
  • /math <expr> — safe calculator
7
  • /summarize <text> — 2-sentence TL;DR
8
  • /translate_es <text> — English → Spanish
9
  • /ascii <text> — big FIGlet ASCII art
10
  • Any question ending with “?” → live Wikipedia answer
11
+ Model: facebook/blenderbot-400M-distill (public, ~720 MB weights)
12
  """
13
 
14
  import ast, math, re, gc, traceback, torch, wikipedia, pyfiglet
15
  import gradio as gr
16
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
17
 
18
+ # ──────────────────────────────────────────────────────────────────────────────
19
+ # 0 · helpers
20
+ # ──────────────────────────────────────────────────────────────────────────────
21
def err(e: Exception) -> str:
    """Dump the active traceback to stdout and return a compact error label.

    Used as the uniform failure path everywhere in the app: the full stack
    trace goes to the Space logs, while the caller gets a short one-line
    string suitable for showing in the chat window.
    """
    banner_open = "\n=== ZEN-Bot ERROR ==="
    banner_close = "====================\n"
    print(banner_open)
    traceback.print_exc()
    print(banner_close)
    return "⚠️ {}: {}".format(type(e).__name__, e)
26
 
27
# ──────────────────────────────────────────────────────────────────────────────
# 1 · model (fits free CPU tier)
# ──────────────────────────────────────────────────────────────────────────────
# Public encoder-decoder chat model; small enough for the free HF CPU tier.
MODEL_NAME = "facebook/blenderbot-400M-distill"  # always public

# Downloads weights on first import; subsequent runs hit the HF cache.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
# Inference only: eval mode plus a process-wide no-grad switch keeps memory flat.
model.eval(); torch.set_grad_enabled(False)

# Shared generation settings for every llm() call.
GEN_KW = dict(
    max_new_tokens = 64,
    do_sample = False,  # fast & deterministic
)
 
41
def llm(prompt: str) -> str:
    """Run the chat model on *prompt* and return the decoded reply.

    The prompt is truncated before encoding; generation uses the shared
    GEN_KW settings. On any failure the formatted message from err() is
    returned instead of raising, so the UI never crashes.
    """
    try:
        # NOTE(review): BlenderBot's own context window is much smaller than
        # 1024 tokens — confirm max_length against tokenizer.model_max_length.
        ids_in = tokenizer(prompt, return_tensors="pt",
                           truncation=True, max_length=1024).input_ids
        with torch.no_grad():
            ids_out = model.generate(ids_in, **GEN_KW)
        # BUGFIX: AutoModelForSeq2SeqLM.generate() returns *decoder-only*
        # token ids — the encoder input is not echoed back, so the old
        # causal-LM slice `ids_out[0, ids_in.shape[-1]:]` chopped off the
        # reply (usually leaving an empty string). Decode the whole output.
        return tokenizer.decode(ids_out[0], skip_special_tokens=True).strip()
    except Exception as e:
        return err(e)
51
 
52
# ──────────────────────────────────────────────────────────────────────────────
# 2 · /math (safe)
# ──────────────────────────────────────────────────────────────────────────────
# Whitelist usable inside /math expressions: every public name from the math
# module (functions AND constants such as pi/e/tau) plus abs/round.
_ALLOWED = {k: getattr(math, k) for k in dir(math) if not k.startswith("__")}
_ALLOWED.update({"abs": abs, "round": round})

def safe_math(expr: str) -> str:
    """Evaluate a restricted arithmetic expression and return the result as text.

    Accepts numeric literals, unary/binary operators, calls to whitelisted
    functions, and (new) bare whitelisted constant names like ``pi``.
    Anything else yields a rejection message; runtime errors are reported
    through err() rather than raised.
    """
    try:
        node = ast.parse(expr, mode="eval")

        def ok(n: ast.AST) -> bool:
            # ast.Constant replaces the deprecated ast.Num (deprecated since
            # 3.8, slated for removal); accept only numeric constant values
            # so strings/None/True never slip through.
            if isinstance(n, ast.Constant):
                return isinstance(n.value, (int, float, complex))
            if isinstance(n, ast.UnaryOp):
                return ok(n.operand)
            if isinstance(n, ast.BinOp):
                return ok(n.left) and ok(n.right)
            # Generalization: bare whitelisted names, so "/math 2*pi" works.
            if isinstance(n, ast.Name):
                return n.id in _ALLOWED
            if isinstance(n, ast.Call):
                return (isinstance(n.func, ast.Name)
                        and n.func.id in _ALLOWED
                        and all(ok(a) for a in n.args))
            return False

        if not ok(node.body):
            return "⛔️ Expression not allowed."
        # Safe: the AST was validated above and builtins are stripped from
        # the eval environment.
        return str(eval(compile(node, "<expr>", "eval"),
                        {"__builtins__": {}}, _ALLOWED))
    except Exception as e:
        return err(e)
76
 
77
# ──────────────────────────────────────────────────────────────────────────────
# 3 · Wikipedia Q&A
# ──────────────────────────────────────────────────────────────────────────────
def wiki_answer(q: str) -> str | None:
    """Look up *q* on English Wikipedia.

    Returns a markdown "**Title** — summary" line on success, None when the
    query is ambiguous or has no matching page (so the caller can fall back
    to the chat model), and err()'s message on any other failure.
    """
    try:
        wikipedia.set_lang("en")
        hit = wikipedia.page(q, auto_suggest=True, redirect=True)
        blurb = wikipedia.summary(hit.title, sentences=3, auto_suggest=False)
    except (wikipedia.DisambiguationError, wikipedia.PageError):
        return None
    except Exception as e:
        return err(e)
    return f"**{hit.title}** — {blurb}"
90
 
91
# ──────────────────────────────────────────────────────────────────────────────
# 4 · ASCII art
# ──────────────────────────────────────────────────────────────────────────────
def ascii_art(txt: str) -> str:
    """Render *txt* as FIGlet ASCII art, 120 columns wide.

    Any rendering failure is converted to err()'s one-line message instead
    of propagating.
    """
    try:
        banner = pyfiglet.figlet_format(txt, width=120)
    except Exception as e:
        return err(e)
    return banner
 
 
97
 
98
# ──────────────────────────────────────────────────────────────────────────────
# 5 · main router
# ──────────────────────────────────────────────────────────────────────────────
# Slash-command syntax: "/<cmd> <body>"; DOTALL so the body may span lines,
# IGNORECASE so "/Math" works too.
CMD = re.compile(r"^/(math|summarize|translate_es|ascii)\s+(.+)", re.S | re.I)

def respond(msg: str, hist: list[list[str]]) -> str:
    """Route one chat turn: slash-commands first, then Wikipedia, then the LLM.

    Args:
        msg:  the user's raw message.
        hist: Gradio chat history as [[user, bot], ...] pairs; only the last
              six turns are replayed into the chat prompt.
              (FIX: annotation was ``list[list[str, str]]`` — ``list[]``
              takes exactly one type argument.)

    Returns the bot's reply string.
    """
    # 5-A · commands
    m = CMD.match(msg.strip())
    if m:
        cmd, body = m.group(1).lower(), m.group(2).strip()
        # NOTE(review): the math/ascii dispatch lines were hidden as unchanged
        # context in the diff — reconstructed here; verify against the full file.
        if cmd == "math":         return safe_math(body)
        if cmd == "ascii":        return ascii_art(body)
        if cmd == "summarize":    return llm(f"Summarize in two concise sentences:\n\n{body}\n\nSummary:")
        if cmd == "translate_es": return llm(f"Translate into Spanish (natural):\n\n{body}\n\nSpanish:")

    # 5-B · Wikipedia for factual questions
    if msg.strip().endswith("?") and len(msg.split()) > 2:
        wiki = wiki_answer(msg)
        if wiki: return wiki

    # 5-C · normal chat (keep last 6 turns)
    prompt = "You are ZEN-Bot, a concise, helpful tutor for young AI pioneers.\n\n"
    for u, b in hist[-6:]:
        prompt += f"User: {u}\nAssistant: {b}\n"
    prompt += f"User: {msg}\nAssistant:"
    return llm(prompt)
124
 
125
+ # ──────────────────────────────────────────────────────────────────────────────
126
  # 6 · Gradio UI
127
+ # ──────────────────────────────────────────────────────────────────────────────
128
  demo = gr.ChatInterface(
129
  fn = respond,
130
  title = "🚀 ZEN-Bot Ultimate (Key-Free)",
131
  description = (
132
+ "**Commands** \n"
133
+ "• normal chat \n"
134
  "• `/math 2**5 / (sin(0.5)+1)` \n"
135
  "• `/summarize <text>` \n"
136
+ "• `/translate_es Hello!` \n"
137
  "• `/ascii ZEN` \n"
138
+ "• Ask any factual question ending with `?` for live Wikipedia answer"
139
  ),
140
  theme = "soft",
141
  fill_height = True,
142
  examples = [
143
  "Who discovered penicillin?",
144
  "/ascii AI ROCKS",
145
+ "/math sqrt(144)+log(100,10)",
146
  "/summarize The Industrial Revolution began in Britain...",
147
  "/translate_es Good evening, friends!",
148
  ],