ZENLLC committed on
Commit
87639c8
·
verified ·
1 Parent(s): 0bf62b4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +100 -78
app.py CHANGED
@@ -1,27 +1,38 @@
1
  """
2
- ZEN-Bot Lite Key-free chatbot for HF Spaces (free CPU)
3
  Skills
4
- Natural chat
5
  • /math – safe calculator
6
- • /summarize – 2-sentence abstract
7
- • /translate_es – English Spanish
8
- Only one open-weights model (facebook/blenderbot-400M-distill).
9
  """
10
 
11
- import ast, math, re, gc
12
  import gradio as gr
13
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
14
 
15
  # ---------------------------------------------------------------------
16
- # 1 · Model small enough for free CPU
17
  # ---------------------------------------------------------------------
18
- MODEL_NAME = "facebook/blenderbot-400M-distill" # ~720 MB on disk, loads in ≈3 GB RAM
 
 
 
 
 
 
 
 
 
19
 
20
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
21
- model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
 
 
22
 
23
  # ---------------------------------------------------------------------
24
- # 2 · Safe /math evaluator (same as before)
25
  # ---------------------------------------------------------------------
26
  _ALLOWED = {k: getattr(math, k) for k in dir(math) if not k.startswith("__")}
27
  _ALLOWED.update({"abs": abs, "round": round})
@@ -31,101 +42,112 @@ def safe_math(expr: str) -> str:
31
  node = ast.parse(expr, mode="eval")
32
 
33
  def ok(n):
34
- if isinstance(n, ast.Num):
35
  return True
36
- if isinstance(n, ast.UnaryOp):
37
  return ok(n.operand)
38
- if isinstance(n, ast.BinOp):
39
  return ok(n.left) and ok(n.right)
40
- if isinstance(n, ast.Call):
41
- return (isinstance(n.func, ast.Name) and
42
- n.func.id in _ALLOWED and
43
- all(ok(a) for a in n.args))
44
  return False
45
 
46
  if not ok(node.body):
47
- return "⛔️ Expression not allowed"
48
  return str(eval(compile(node, "<expr>", "eval"),
49
  {"__builtins__": {}}, _ALLOWED))
50
  except Exception as e:
51
- return f"⚠️ Error: {e}"
52
 
53
  # ---------------------------------------------------------------------
54
- # 3 · Generation helper
55
  # ---------------------------------------------------------------------
56
  GEN_KW = dict(
57
- max_new_tokens = 128,
58
- do_sample = True,
59
- top_p = 0.92,
60
- temperature = 0.7,
61
  )
62
 
63
  def llm(prompt: str) -> str:
64
- input_ids = tokenizer(prompt, return_tensors="pt").input_ids
65
- output_ids = model.generate(**GEN_KW, input_ids=input_ids)
66
- reply_ids = output_ids[0, input_ids.shape[-1]:]
67
- return tokenizer.decode(reply_ids, skip_special_tokens=True).strip()
 
 
 
 
68
 
69
  # ---------------------------------------------------------------------
70
- # 4 · Regex router + lightweight name memory (stored inside history)
71
  # ---------------------------------------------------------------------
72
- CMD = re.compile(r"^/(math|summarize|translate_es)\s+(.+)", re.S | re.I)
73
-
74
- def reply(message: str, history: list[list[str, str]]) -> str:
75
- # 4.1 · Command handling first
76
- m = CMD.match(message.strip())
77
- if m:
78
- cmd, payload = m.group(1).lower(), m.group(2).strip()
79
- if cmd == "math":
80
- return safe_math(payload)
81
-
82
- if cmd == "summarize":
83
- prompt = ( "Summarize in two concise sentences:\n\n"
84
- f"{payload}\n\nSummary:" )
85
- return llm(prompt)
86
-
87
- if cmd == "translate_es":
88
- prompt = ( "Translate the following text from English to Spanish "
89
- "(natural, not literal):\n\n"
90
- f"{payload}\n\nSpanish:" )
91
- return llm(prompt)
92
-
93
- # 4.2 · Grab user’s name if volunteered
94
- name_match = re.search(r"\bmy name is (\w+)", message, re.I)
95
- name = name_match.group(1).capitalize() if name_match else None
96
-
97
- # 4.3 · Build dialogue prompt
98
- system = "You are ZEN-Bot, a concise, friendly tutor for young AI pioneers."
99
- if name:
100
- system += f" The user's name is {name}."
101
- prompt = system + "\n\n"
102
- for u, b in history:
103
- prompt += f"User: {u}\nAssistant: {b}\n"
104
- prompt += f"User: {message}\nAssistant:"
105
-
106
- return llm(prompt)
 
 
 
 
 
 
 
 
107
 
108
  # ---------------------------------------------------------------------
109
  # 5 · Gradio UI
110
  # ---------------------------------------------------------------------
111
  demo = gr.ChatInterface(
112
- fn = reply,
113
- title = "🔧 ZEN-Bot Lite (Key-Free)",
114
  description = (
115
- "**Try me**\n"
116
- "• normal chat\n"
117
- "• `/math 3*(sin(0.5)+1)`\n"
118
- "• `/summarize Industrial Revolution text...`\n"
119
- "• `/translate_es Good morning!`"
120
  ),
121
- examples = [
122
- "Hi, my name is Jordan!",
123
- "/math 2**6 / 3",
124
- "/summarize The water cycle begins when...",
125
- "/translate_es We are building awesome AI projects.",
126
- ],
127
  theme = "soft",
128
  fill_height = True,
 
 
 
 
 
 
129
  )
130
 
131
  if __name__ == "__main__":
 
1
  """
2
+ ZEN-Bot (stable build) Hugging Face Space, no API keys
3
  Skills
4
+ Friendly chat
5
  • /math – safe calculator
6
+ • /summarize – 2-sentence TL;DR
7
+ • /translate_es – English Spanish
8
+ Model: microsoft/DialoGPT-medium (~762 MB weights, runs on free CPU)
9
  """
10
 
11
+ import ast, math, re, gc, torch, traceback
12
  import gradio as gr
13
+ from transformers import AutoTokenizer, AutoModelForCausalLM
14
 
15
  # ---------------------------------------------------------------------
16
+ # 0 · Small helpers
17
  # ---------------------------------------------------------------------
18
+ def log_err(e: Exception) -> str:
19
+ print("=== ZEN-Bot ERROR ===")
20
+ traceback.print_exc()
21
+ print("=====================")
22
+ return f"⚠️ {type(e).__name__}: {e}"
23
+
24
+ # ---------------------------------------------------------------------
25
+ # 1 · Load model & tokenizer (fits free CPU tier)
26
+ # ---------------------------------------------------------------------
27
+ MODEL_NAME = "microsoft/DialoGPT-medium"
28
 
29
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
30
+ model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
31
+ model.eval() # we’re only generating, not training
32
+ torch.set_grad_enabled(False) # global no-grad just in case
33
 
34
  # ---------------------------------------------------------------------
35
+ # 2 · Safe /math evaluator
36
  # ---------------------------------------------------------------------
37
  _ALLOWED = {k: getattr(math, k) for k in dir(math) if not k.startswith("__")}
38
  _ALLOWED.update({"abs": abs, "round": round})
 
42
  node = ast.parse(expr, mode="eval")
43
 
44
  def ok(n):
45
+ if isinstance(n, ast.Num): # numbers
46
  return True
47
+ if isinstance(n, ast.UnaryOp): # -x
48
  return ok(n.operand)
49
+ if isinstance(n, ast.BinOp): # x + y
50
  return ok(n.left) and ok(n.right)
51
+ if isinstance(n, ast.Call): # sin(x)
52
+ return (isinstance(n.func, ast.Name)
53
+ and n.func.id in _ALLOWED
54
+ and all(ok(a) for a in n.args))
55
  return False
56
 
57
  if not ok(node.body):
58
+ return "⛔️ Only basic math functions are allowed."
59
  return str(eval(compile(node, "<expr>", "eval"),
60
  {"__builtins__": {}}, _ALLOWED))
61
  except Exception as e:
62
+ return log_err(e)
63
 
64
  # ---------------------------------------------------------------------
65
+ # 3 · LLM generation helper
66
  # ---------------------------------------------------------------------
67
  GEN_KW = dict(
68
+ max_new_tokens = 64, # fast & HF 30-sec-safe
69
+ do_sample = False, # deterministic, fewer nonsense tokens
70
+ pad_token_id = tokenizer.eos_token_id,
 
71
  )
72
 
73
  def llm(prompt: str) -> str:
74
+ try:
75
+ input_ids = tokenizer(prompt, return_tensors="pt").input_ids
76
+ with torch.no_grad():
77
+ output_ids = model.generate(**GEN_KW, input_ids=input_ids)
78
+ reply_ids = output_ids[0, input_ids.shape[-1]:]
79
+ return tokenizer.decode(reply_ids, skip_special_tokens=True).strip()
80
+ except Exception as e:
81
+ return log_err(e)
82
 
83
  # ---------------------------------------------------------------------
84
+ # 4 · Command router + tiny name memory (history-local)
85
  # ---------------------------------------------------------------------
86
+ COMMAND = re.compile(r"^/(math|summarize|translate_es)\s+(.+)", re.S | re.I)
87
+
88
+ def respond(message: str, history: list[list[str, str]]) -> str:
89
+ try:
90
+ # 4.1 · Commands
91
+ m = COMMAND.match(message.strip())
92
+ if m:
93
+ cmd, payload = m.group(1).lower(), m.group(2).strip()
94
+ if cmd == "math":
95
+ return safe_math(payload)
96
+
97
+ if cmd == "summarize":
98
+ prompt = (
99
+ "Summarize the following text in two concise sentences:\n\n"
100
+ f"{payload}\n\nSummary:"
101
+ )
102
+ return llm(prompt)
103
+
104
+ if cmd == "translate_es":
105
+ prompt = (
106
+ "Translate the following text from English to Spanish "
107
+ "(natural, not literal):\n\n"
108
+ f"{payload}\n\nSpanish:"
109
+ )
110
+ return llm(prompt)
111
+
112
+ # 4.2 · Capture user’s name (per session)
113
+ name_match = re.search(r"\bmy name is (\w+)", message, re.I)
114
+ name_line = ""
115
+ if name_match:
116
+ name_line = f" The user's name is {name_match.group(1).capitalize()}."
117
+
118
+ # 4.3 · Build chat prompt
119
+ system = "You are ZEN-Bot, a concise, friendly tutor for young AI pioneers." + name_line
120
+ prompt = system + "\n\n"
121
+ for u, b in history[-6:]: # keep last 6 turns for speed
122
+ prompt += f"User: {u}\nAssistant: {b}\n"
123
+ prompt += f"User: {message}\nAssistant:"
124
+
125
+ return llm(prompt)
126
+
127
+ except Exception as e:
128
+ return log_err(e)
129
 
130
  # ---------------------------------------------------------------------
131
  # 5 · Gradio UI
132
  # ---------------------------------------------------------------------
133
  demo = gr.ChatInterface(
134
+ fn = respond,
135
+ title = " ZEN-Bot (Key-Free Edition)",
136
  description = (
137
+ "**What I can do**\n"
138
+ "• Just chat normally\n"
139
+ "• `/math 2**5 / (sin(0.5)+1)`\n"
140
+ "• `/summarize <any text>`\n"
141
+ "• `/translate_es Hello, how are you?`"
142
  ),
 
 
 
 
 
 
143
  theme = "soft",
144
  fill_height = True,
145
+ examples = [
146
+ "Hi, my name is Sam!",
147
+ "/math log(10) + sqrt(16)",
148
+ "/summarize The Internet began as a US-funded research project in 1969…",
149
+ "/translate_es Artificial intelligence is transforming education.",
150
+ ],
151
  )
152
 
153
  if __name__ == "__main__":