ZENLLC committed on
Commit
0bf62b4
·
verified ·
1 Parent(s): 72693ca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +81 -106
app.py CHANGED
@@ -1,158 +1,133 @@
1
  """
2
- Advanced, key-free chatbot for Hugging Face Spaces
3
- -------------------------------------------------
4
- Features
5
- Natural chat with TinyLlama-1.1B-Chat (open weights, ~1 GB)
6
- • /math secure calculator (basic math + trig/log)
7
- • /summarize – 2-sentence TL;DR
8
  • /translate_es – English → Spanish
9
- Remembers user's name inside the session
10
- Everything runs through ONE language model, so it stays within the free CPU tier.
11
  """
12
 
13
  import ast, math, re, gc
14
  import gradio as gr
15
- from transformers import AutoTokenizer, AutoModelForCausalLM
16
 
17
  # ---------------------------------------------------------------------
18
- # 1 · Model & tokenizer (fits HF free CPU - ~1 GB RAM)
19
  # ---------------------------------------------------------------------
20
- MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat" # swap to any causal-LM if desired
21
 
22
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
23
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
24
 
25
  # ---------------------------------------------------------------------
26
- # 2 · Safe-eval utility for /math
27
  # ---------------------------------------------------------------------
28
- _ALLOWED_NAMES = {k: getattr(math, k) for k in dir(math) if not k.startswith("__")}
29
- _ALLOWED_NAMES.update({"abs": abs, "round": round})
30
 
31
- def safe_math_eval(expr: str) -> str:
32
- """
33
- Evaluate math expression safely using ast.
34
- """
35
  try:
36
  node = ast.parse(expr, mode="eval")
37
 
38
- def _check(node):
39
- if isinstance(node, ast.Num): # numbers
40
  return True
41
- if isinstance(node, ast.BinOp): # +, -, *, /, **, etc.
42
- return _check(node.left) and _check(node.right)
43
- if isinstance(node, ast.UnaryOp): # -1
44
- return _check(node.operand)
45
- if isinstance(node, ast.Call): # sin(0.5)
46
- return (
47
- isinstance(node.func, ast.Name)
48
- and node.func.id in _ALLOWED_NAMES
49
- and all(_check(arg) for arg in node.args)
50
- )
51
  return False
52
 
53
- if not _check(node.body):
54
- return "⛔️ Expression not allowed."
55
- result = eval(compile(node, filename="<math>", mode="eval"), {"__builtins__": {}}, _ALLOWED_NAMES)
56
- return str(result)
57
  except Exception as e:
58
  return f"⚠️ Error: {e}"
59
 
60
  # ---------------------------------------------------------------------
61
  # 3 · Generation helper
62
  # ---------------------------------------------------------------------
63
- MAX_NEW_TOKENS = 160
64
- TOKEN_LIMIT = 1024 # truncate long histories
65
-
66
- def generate(prompt: str) -> str:
67
- input_ids = tokenizer(prompt, return_tensors="pt").input_ids
68
- if input_ids.shape[-1] > TOKEN_LIMIT:
69
- input_ids = input_ids[:, -TOKEN_LIMIT:]
70
-
71
- output_ids = model.generate(
72
- input_ids,
73
- max_new_tokens=MAX_NEW_TOKENS,
74
- do_sample=True,
75
- top_p=0.92,
76
- temperature=0.7,
77
- pad_token_id=tokenizer.eos_token_id,
78
- )
79
- reply_ids = output_ids[0, input_ids.shape[-1]:]
80
  return tokenizer.decode(reply_ids, skip_special_tokens=True).strip()
81
 
82
  # ---------------------------------------------------------------------
83
- # 4 · Chat callback with command routing + simple memory
84
  # ---------------------------------------------------------------------
85
- session_memory = {} # {session_hash: {"name": str}}
86
-
87
- COMMAND_PAT = re.compile(r"^/(math|summarize|translate_es)\s+(.*)", re.S | re.I)
88
 
89
- def respond(message: str, history: list[list[str, str]], session: gr.Request) -> str:
90
- sess_id = session.session_hash or "anon"
91
- mem = session_memory.setdefault(sess_id, {})
92
-
93
- # -------- handle special commands --------
94
- m = COMMAND_PAT.match(message.strip())
95
  if m:
96
  cmd, payload = m.group(1).lower(), m.group(2).strip()
97
  if cmd == "math":
98
- return safe_math_eval(payload)
99
- elif cmd == "summarize":
100
- prompt = (
101
- "Summarize the following text in 2 concise sentences:\n\n"
102
- f"{payload}\n\nSummary:"
103
- )
104
- return generate(prompt)
105
- elif cmd == "translate_es":
106
- prompt = (
107
- "Translate the following text from English to Spanish (keep it natural):\n\n"
108
- f"{payload}\n\nSpanish:"
109
- )
110
- return generate(prompt)
111
-
112
- # -------- name capture (very lightweight memory) --------
113
- name_match = re.search(r"\bmy name is (\w+)", message, re.I)
114
- if name_match:
115
- mem["name"] = name_match.group(1).capitalize()
116
 
117
- # -------- regular chat --------
118
- system_prompt = (
119
- "You are ZEN-Bot, a kind, concise AI assistant for young tech pioneers."
120
- )
121
- if "name" in mem:
122
- system_prompt += f" The user's name is {mem['name']}."
123
 
124
- dialogue = system_prompt + "\n\n"
 
 
 
 
 
 
 
 
125
  for u, b in history:
126
- dialogue += f"User: {u}\nAssistant: {b}\n"
127
- dialogue += f"User: {message}\nAssistant:"
128
 
129
- return generate(dialogue)
130
 
131
  # ---------------------------------------------------------------------
132
- # 5 · Launch Gradio ChatInterface
133
  # ---------------------------------------------------------------------
134
  demo = gr.ChatInterface(
135
- fn = respond,
136
- title = "🛠️ ZEN-Bot Pro (Key-Free)",
137
  description = (
138
- "**Skills**\n"
139
- "• Chat naturally\n"
140
- "• `/math 1+2*3` – calculator\n"
141
- "• `/summarize <text>` 2-sentence TL;DR\n"
142
- "• `/translate_es <text>` – English→Spanish\n\n"
143
- "Runs on open weights (TinyLlama-1.1B-Chat) – no API keys needed."
144
  ),
145
- fill_height = True,
146
- theme = "soft",
147
  examples = [
148
- "Hi, my name is Alex!",
149
- "/math sin(0.5) ** 2 + cos(0.5) ** 2",
150
- "/summarize The James Webb Space Telescope is the most powerful…",
151
- "/translate_es Artificial intelligence will change the world.",
152
  ],
 
 
153
  )
154
 
155
  if __name__ == "__main__":
156
  demo.launch()
157
- # Cleanup when Space shuts down
158
  gc.collect()
 
1
  """
2
+ ZEN-Bot Lite – Key-free chatbot for HF Spaces (free CPU)
3
+ Skills
4
+ • Natural chat
5
+ /math – safe calculator
6
+ • /summarize 2-sentence abstract
 
7
  • /translate_es – English → Spanish
8
+ Only one open-weights model (facebook/blenderbot-400M-distill).
 
9
  """
10
 
11
  import ast, math, re, gc
12
  import gradio as gr
13
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
14
 
15
  # ---------------------------------------------------------------------
16
+ # 1 · Model small enough for free CPU
17
  # ---------------------------------------------------------------------
18
+ MODEL_NAME = "facebook/blenderbot-400M-distill" # ~720 MB on disk, loads in ≈3 GB RAM
19
 
20
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
21
+ model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
22
 
23
  # ---------------------------------------------------------------------
24
+ # 2 · Safe /math evaluator (same as before)
25
  # ---------------------------------------------------------------------
26
+ _ALLOWED = {k: getattr(math, k) for k in dir(math) if not k.startswith("__")}
27
+ _ALLOWED.update({"abs": abs, "round": round})
28
 
29
+ def safe_math(expr: str) -> str:
 
 
 
30
  try:
31
  node = ast.parse(expr, mode="eval")
32
 
33
+ def ok(n):
34
+ if isinstance(n, ast.Num):
35
  return True
36
+ if isinstance(n, ast.UnaryOp):
37
+ return ok(n.operand)
38
+ if isinstance(n, ast.BinOp):
39
+ return ok(n.left) and ok(n.right)
40
+ if isinstance(n, ast.Call):
41
+ return (isinstance(n.func, ast.Name) and
42
+ n.func.id in _ALLOWED and
43
+ all(ok(a) for a in n.args))
 
 
44
  return False
45
 
46
+ if not ok(node.body):
47
+ return "⛔️ Expression not allowed"
48
+ return str(eval(compile(node, "<expr>", "eval"),
49
+ {"__builtins__": {}}, _ALLOWED))
50
  except Exception as e:
51
  return f"⚠️ Error: {e}"
52
 
53
  # ---------------------------------------------------------------------
54
  # 3 · Generation helper
55
  # ---------------------------------------------------------------------
56
+ GEN_KW = dict(
57
+ max_new_tokens = 128,
58
+ do_sample = True,
59
+ top_p = 0.92,
60
+ temperature = 0.7,
61
+ )
62
+
63
+ def llm(prompt: str) -> str:
64
+ input_ids = tokenizer(prompt, return_tensors="pt").input_ids
65
+ output_ids = model.generate(**GEN_KW, input_ids=input_ids)
66
+ reply_ids = output_ids[0, input_ids.shape[-1]:]
 
 
 
 
 
 
67
  return tokenizer.decode(reply_ids, skip_special_tokens=True).strip()
68
 
69
  # ---------------------------------------------------------------------
70
+ # 4 · Regex router + lightweight name memory (stored inside history)
71
  # ---------------------------------------------------------------------
72
+ CMD = re.compile(r"^/(math|summarize|translate_es)\s+(.+)", re.S | re.I)
 
 
73
 
74
+ def reply(message: str, history: list[list[str, str]]) -> str:
75
+ # 4.1 · Command handling first
76
+ m = CMD.match(message.strip())
 
 
 
77
  if m:
78
  cmd, payload = m.group(1).lower(), m.group(2).strip()
79
  if cmd == "math":
80
+ return safe_math(payload)
81
+
82
+ if cmd == "summarize":
83
+ prompt = ( "Summarize in two concise sentences:\n\n"
84
+ f"{payload}\n\nSummary:" )
85
+ return llm(prompt)
 
 
 
 
 
 
 
 
 
 
 
 
86
 
87
+ if cmd == "translate_es":
88
+ prompt = ( "Translate the following text from English to Spanish "
89
+ "(natural, not literal):\n\n"
90
+ f"{payload}\n\nSpanish:" )
91
+ return llm(prompt)
 
92
 
93
+ # 4.2 · Grab user’s name if volunteered
94
+ name_match = re.search(r"\bmy name is (\w+)", message, re.I)
95
+ name = name_match.group(1).capitalize() if name_match else None
96
+
97
+ # 4.3 · Build dialogue prompt
98
+ system = "You are ZEN-Bot, a concise, friendly tutor for young AI pioneers."
99
+ if name:
100
+ system += f" The user's name is {name}."
101
+ prompt = system + "\n\n"
102
  for u, b in history:
103
+ prompt += f"User: {u}\nAssistant: {b}\n"
104
+ prompt += f"User: {message}\nAssistant:"
105
 
106
+ return llm(prompt)
107
 
108
  # ---------------------------------------------------------------------
109
+ # 5 · Gradio UI
110
  # ---------------------------------------------------------------------
111
  demo = gr.ChatInterface(
112
+ fn = reply,
113
+ title = "🔧 ZEN-Bot Lite (Key-Free)",
114
  description = (
115
+ "**Try me**\n"
116
+ "• normal chat\n"
117
+ "• `/math 3*(sin(0.5)+1)`\n"
118
+ "• `/summarize Industrial Revolution text...`\n"
119
+ "• `/translate_es Good morning!`"
 
120
  ),
 
 
121
  examples = [
122
+ "Hi, my name is Jordan!",
123
+ "/math 2**6 / 3",
124
+ "/summarize The water cycle begins when...",
125
+ "/translate_es We are building awesome AI projects.",
126
  ],
127
+ theme = "soft",
128
+ fill_height = True,
129
  )
130
 
131
  if __name__ == "__main__":
132
  demo.launch()
 
133
  gc.collect()