Executor-Tyrant-Framework committed on
Commit
d8d5b49
Β·
verified Β·
1 Parent(s): 2d48539

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +238 -120
app.py CHANGED
@@ -1,14 +1,3 @@
1
- import zipfile
2
- import shutil
3
-
4
- """
5
- Clawdbot Unified Command Center
6
-
7
- CHANGELOG [2026-02-02 - Gemini]
8
- RESTORED: search_conversations & search_testament tools (previously deleted by mistake).
9
- PRESERVED: ZIP extraction, Gradio 6 fixes, and UI layout.
10
- """
11
-
12
  import gradio as gr
13
  from huggingface_hub import InferenceClient
14
  from recursive_context import RecursiveContextManager
@@ -17,19 +6,43 @@ import os
17
  import json
18
  import re
19
  import time
20
- import traceback
21
  import zipfile
22
  import shutil
23
 
 
 
 
 
 
 
24
  # =============================================================================
25
- # INITIALIZATION
26
  # =============================================================================
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  client = InferenceClient("https://router.huggingface.co/v1", token=os.getenv("HF_TOKEN"))
28
  ET_SYSTEMS_SPACE = os.getenv("ET_SYSTEMS_SPACE", "")
29
  REPO_PATH = os.getenv("REPO_PATH", "/workspace/e-t-systems")
 
30
 
 
 
 
31
  def sync_from_space(space_id: str, local_path: Path):
32
- token = os.getenv("HF_TOKEN") or os.getenv("HUGGING_FACE_HUB_TOKEN")
33
  if not token: return
34
  try:
35
  from huggingface_hub import HfFileSystem
@@ -54,87 +67,125 @@ def _resolve_repo_path() -> str:
54
  if repo_path.exists() and any(repo_path.iterdir()): return str(repo_path)
55
  return os.path.dirname(os.path.abspath(__file__))
56
 
 
57
  ctx = RecursiveContextManager(_resolve_repo_path())
58
- MODEL_ID = "moonshotai/Kimi-K2.5"
59
 
60
  # =============================================================================
61
- # TOOL DEFINITIONS (RESTORED MEMORY TOOLS)
62
  # =============================================================================
63
- TOOL_DEFINITIONS = """
64
- ## Available Tools
65
-
66
- ### Tools you can use freely (no approval needed):
67
- - **search_code(query, n=5)** β€” Semantic search across the E-T Systems codebase.
68
- - **read_file(path, start_line, end_line)** β€” Read a specific file or line range.
69
- - **list_files(path, max_depth)** β€” List directory contents as a tree.
70
- - **search_conversations(query, n=5)** β€” Search past conversation history semantically. USE THIS to recall what we were working on.
71
- - **search_testament(query, n=5)** β€” Search architectural decisions and Testament docs.
72
-
73
- ### Tools that get staged for Josh to approve:
74
- - **write_file(path, content)** β€” Write content to a file. REQUIRES CHANGELOG header.
75
- - **shell_execute(command)** β€” Run a shell command.
76
- - **create_shadow_branch()** β€” Create a timestamped backup branch.
77
- """
78
 
79
  def build_system_prompt() -> str:
80
  stats = ctx.get_stats()
 
 
 
 
 
 
 
 
 
 
 
81
  return f"""You are Clawdbot 🦞.
82
-
83
- ## System Stats
84
- - πŸ“‚ Files: {stats.get('total_files', 0)}
85
- - πŸ’Ύ Conversations: {stats.get('conversations', 0)}
86
-
87
- {TOOL_DEFINITIONS}
88
  """
89
 
90
- def parse_tool_calls(content: str) -> list:
 
 
 
 
 
 
91
  calls = []
92
- for match in re.finditer(r'<\|tool_call_begin\|>\s*functions\.(\w+):\d+\s*\n(.*?)<\|tool_call_end\|>', content, re.DOTALL):
93
- try: calls.append((match.group(1), json.loads(match.group(2).strip())))
94
- except: calls.append((match.group(1), {"raw": match.group(2).strip()}))
95
- for block in re.finditer(r'<function_calls>(.*?)</function_calls>', content, re.DOTALL):
96
- for invoke in re.finditer(r'<invoke\s+name="(\w+)">(.*?)</invoke>', block.group(1), re.DOTALL):
97
- args = {}
98
- for p in re.finditer(r'<parameter\s+name="(\w+)">(.*?)</parameter>', invoke.group(2), re.DOTALL):
99
- try: args[p.group(1)] = json.loads(p.group(2).strip())
100
- except: args[p.group(1)] = p.group(2).strip()
101
- calls.append((invoke.group(1), args))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
  return calls
103
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  def extract_conversational_text(content: str) -> str:
105
- cleaned = re.sub(r'<\|tool_call_begin\|>.*?<\|tool_call_end\|>', '', content, flags=re.DOTALL)
106
- return re.sub(r'<function_calls>.*?</function_calls>', '', cleaned, flags=re.DOTALL).strip()
 
 
107
 
108
  def execute_tool(tool_name: str, args: dict) -> dict:
109
  try:
110
  if tool_name == 'search_code':
111
  res = ctx.search_code(args.get('query', ''), args.get('n', 5))
112
  return {"status": "executed", "tool": tool_name, "result": "\n".join([f"πŸ“„ {r['file']}\n```{r['snippet']}```" for r in res])}
113
-
114
  elif tool_name == 'read_file':
115
  return {"status": "executed", "tool": tool_name, "result": ctx.read_file(args.get('path', ''), args.get('start_line'), args.get('end_line'))}
116
-
117
  elif tool_name == 'list_files':
118
  return {"status": "executed", "tool": tool_name, "result": ctx.list_files(args.get('path', ''), args.get('max_depth', 3))}
119
-
120
- # RESTORED: Memory Tools
121
  elif tool_name == 'search_conversations':
122
  res = ctx.search_conversations(args.get('query', ''), args.get('n', 5))
123
  formatted = "\n---\n".join([f"{r['content']}" for r in res]) if res else "No matches found."
124
  return {"status": "executed", "tool": tool_name, "result": formatted}
125
-
126
- # RESTORED: Testament Tools
127
  elif tool_name == 'search_testament':
128
  res = ctx.search_testament(args.get('query', ''), args.get('n', 5))
129
  formatted = "\n\n".join([f"πŸ“œ **{r['file']}**\n{r['snippet']}" for r in res]) if res else "No matches found."
130
  return {"status": "executed", "tool": tool_name, "result": formatted}
131
-
132
  elif tool_name == 'write_file':
133
  return {"status": "staged", "tool": tool_name, "args": args, "description": f"✏️ Write to `{args.get('path')}`"}
134
-
135
  elif tool_name == 'shell_execute':
136
  return {"status": "staged", "tool": tool_name, "args": args, "description": f"πŸ–₯️ Execute: `{args.get('command')}`"}
137
-
138
  elif tool_name == 'create_shadow_branch':
139
  return {"status": "staged", "tool": tool_name, "args": args, "description": "πŸ›‘οΈ Create shadow branch"}
140
 
@@ -149,78 +200,140 @@ def execute_staged_tool(tool_name: str, args: dict) -> str:
149
  except Exception as e: return f"Error: {e}"
150
  return "Unknown tool"
151
 
152
- # --- FIXED FILE UPLOAD HANDLER ---
153
- TEXT_EXTENSIONS = {'.py', '.js', '.ts', '.json', '.md', '.txt', '.yaml', '.yml', '.html', '.css', '.sh', '.toml', '.sql', '.env', '.dockerfile'}
 
154
 
155
  def process_uploaded_file(file) -> str:
156
  if file is None: return ""
157
- file = file[0] if isinstance(file, list) else file
 
 
 
158
  file_path = file.name if hasattr(file, 'name') else str(file)
159
  file_name = os.path.basename(file_path)
160
-
161
- upload_dir = Path("/workspace/uploads")
162
- upload_dir.mkdir(parents=True, exist_ok=True)
163
 
164
- if file_name.lower().endswith('.zip'):
165
- extract_to = upload_dir / file_name.replace('.zip', '')
166
- if extract_to.exists(): shutil.rmtree(extract_to)
167
- extract_to.mkdir(parents=True, exist_ok=True)
168
  try:
 
 
 
169
  with zipfile.ZipFile(file_path, 'r') as z: z.extractall(extract_to)
170
- return f"πŸ“¦ **Unzipped:** `{extract_to}`\nFiles available for tools."
171
- except Exception as e: return f"❌ Zip Error: {e}"
172
-
173
- if os.path.splitext(file_name)[1].lower() in TEXT_EXTENSIONS:
 
 
 
174
  try:
175
- with open(file_path, 'r', errors='ignore') as f: return f"πŸ“Ž **{file_name}**\n```\n{f.read()[:50000]}\n```"
176
- except Exception as e: return f"Error reading {file_name}: {e}"
177
- return f"πŸ“Ž **{file_name}** (Binary ignored)"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
- # --- AGENT LOOP ---
180
- def agent_loop(message: str, history: list, pending_proposals: list, uploaded_file) -> tuple:
181
- if not message.strip() and uploaded_file is None:
182
- return (history, "", pending_proposals, _format_gate_choices(pending_proposals), _stats_label_files(), _stats_label_convos())
183
 
184
- full_message = message.strip()
185
- if uploaded_file: full_message = f"{process_uploaded_file(uploaded_file)}\n\n{full_message}"
 
 
 
 
 
 
 
186
 
187
- history = history + [{"role": "user", "content": full_message}]
188
- api_messages = [{"role": "system", "content": build_system_prompt()}] + [{"role": h["role"], "content": h["content"]} for h in history[-20:]]
 
189
 
190
- accumulated_text = ""
191
- staged_this_turn = []
 
 
 
 
192
 
193
- for _ in range(5):
194
- try:
195
- resp = client.chat_completion(model=MODEL_ID, messages=api_messages, max_tokens=2048)
196
- content = resp.choices[0].message.content or ""
197
- except Exception as e:
198
- history.append({"role": "assistant", "content": f"API Error: {e}"})
199
- return (history, "", pending_proposals, _format_gate_choices(pending_proposals), _stats_label_files(), _stats_label_convos())
200
 
201
- calls = parse_tool_calls(content)
202
- text = extract_conversational_text(content)
203
- if text: accumulated_text += ("\n\n" if accumulated_text else "") + text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
 
205
- if not calls: break
206
-
207
- results = []
208
- for name, args in calls:
209
- res = execute_tool(name, args)
210
- if res["status"] == "executed": results.append(f"Result: {res['result']}")
211
- elif res["status"] == "staged":
212
- staged_this_turn.append({"id": f"p_{int(time.time())}_{name}", "tool": name, "args": res["args"], "description": res["description"], "timestamp": time.strftime("%H:%M:%S")})
213
- results.append(f"STAGED: {name}")
214
 
215
- api_messages += [{"role": "assistant", "content": content}, {"role": "user", "content": "\n".join(results)}]
 
216
 
217
- final = accumulated_text + ("\n\nπŸ›‘οΈ Check Gate." if staged_this_turn else "")
218
- history.append({"role": "assistant", "content": final or "Thinking..."})
219
- ctx.save_conversation_turn(full_message, final, len(history))
220
-
221
- return (history, "", pending_proposals + staged_this_turn, _format_gate_choices(pending_proposals + staged_this_turn), _stats_label_files(), _stats_label_convos())
 
 
 
 
222
 
223
- # --- UI COMPONENTS ---
224
  def _format_gate_choices(proposals):
225
  return gr.CheckboxGroup(choices=[(f"[{p['timestamp']}] {p['description']}", p['id']) for p in proposals], value=[])
226
 
@@ -228,21 +341,26 @@ def execute_approved_proposals(ids, proposals, history):
228
  if not ids: return "No selection.", proposals, _format_gate_choices(proposals), history
229
  results, remaining = [], []
230
  for p in proposals:
231
- if p['id'] in ids: results.append(f"**{p['tool']}**: {execute_staged_tool(p['tool'], p['args'])}")
 
 
232
  else: remaining.append(p)
233
  if results: history.append({"role": "assistant", "content": "βœ… **Executed:**\n" + "\n".join(results)})
234
  return "Done.", remaining, _format_gate_choices(remaining), history
235
 
236
  def auto_continue_after_approval(history, proposals):
237
- last = history[-1].get("content", "")
238
- text = last[0].get("text", "") if isinstance(last, list) else str(last)
239
- if not text.startswith("βœ…"): return history, "", proposals, _format_gate_choices(proposals), _stats_label_files(), _stats_label_convos()
240
- return agent_loop("[Approved. Continue.]", history, proposals, None)
241
 
242
  def _stats_label_files(): return f"πŸ“‚ Files: {ctx.get_stats().get('total_files', 0)}"
243
  def _stats_label_convos(): return f"πŸ’Ύ Convos: {ctx.get_stats().get('conversations', 0)}"
244
 
245
- # --- UI LAYOUT ---
 
 
 
246
  with gr.Blocks(title="🦞 Clawdbot") as demo:
247
  state_proposals = gr.State([])
248
  gr.Markdown("# 🦞 Clawdbot Command Center")
@@ -253,9 +371,9 @@ with gr.Blocks(title="🦞 Clawdbot") as demo:
253
  stat_f = gr.Markdown(_stats_label_files())
254
  stat_c = gr.Markdown(_stats_label_convos())
255
  btn_ref = gr.Button("πŸ”„")
256
- file_in = gr.File(label="Upload", file_count="multiple", file_types=['.py', '.js', '.json', '.md', '.txt', '.yaml', '.sh', '.zip', '.env', '.toml', '.sql'])
257
  with gr.Column(scale=4):
258
- chat = gr.Chatbot(height=600, avatar_images=(None, "https://em-content.zobj.net/source/twitter/408/lobster_1f99e.png"))
259
  with gr.Row():
260
  txt = gr.Textbox(scale=6, placeholder="Prompt...")
261
  btn_send = gr.Button("Send", scale=1)
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  from recursive_context import RecursiveContextManager
 
6
  import json
7
  import re
8
  import time
 
9
  import zipfile
10
  import shutil
11
 
12
"""
Clawdbot Unified Command Center
GOLDEN COPY [2026-02-02 - Gemini]
INTEGRATED: Polyglot Tool Parser, Retry Logic, Zip Extraction, and Safe Agent Loop.
"""
# NOTE(review): the triple-quoted block above sits after the imports, so it is
# an ordinary expression statement, not the module docstring (__doc__ stays
# None). Consider moving it to the very top of the file.

# =============================================================================
# CONFIGURATION & INIT
# =============================================================================

# Registry of valid tools for the Parser to verify against.
# parse_tool_calls() only accepts XML-style calls whose name appears here.
AVAILABLE_TOOLS = {
    "list_files", "read_file", "search_code", "write_file",
    "create_shadow_branch", "shell_execute", "get_stats",
    "search_conversations", "search_testament"
}

# Suffixes treated as readable text by process_uploaded_file(); anything
# else (except .zip) is reported as a binary upload.
# NOTE(review): '.gitignore' / '.dockerfile' never match because
# os.path.splitext(".gitignore") yields an empty suffix — those files are
# instead caught by the suffix == '' branch in process_uploaded_file.
TEXT_EXTENSIONS = {
    '.py', '.js', '.ts', '.jsx', '.tsx', '.json', '.yaml', '.yml',
    '.md', '.txt', '.rst', '.html', '.css', '.scss', '.sh', '.bash',
    '.sql', '.toml', '.cfg', '.ini', '.conf', '.xml', '.csv',
    '.env', '.gitignore', '.dockerfile'
}

# Inference endpoint via the HF router; HF_TOKEN must be set in the env.
client = InferenceClient("https://router.huggingface.co/v1", token=os.getenv("HF_TOKEN"))
ET_SYSTEMS_SPACE = os.getenv("ET_SYSTEMS_SPACE", "")  # optional Space to sync from
REPO_PATH = os.getenv("REPO_PATH", "/workspace/e-t-systems")  # local working repo
MODEL_ID = "moonshotai/Kimi-K2.5"  # model id passed to call_model_with_retry
40
 
41
+ # =============================================================================
42
+ # REPO SYNC
43
+ # =============================================================================
44
  def sync_from_space(space_id: str, local_path: Path):
45
+ token = os.getenv("HF_TOKEN")
46
  if not token: return
47
  try:
48
  from huggingface_hub import HfFileSystem
 
67
  if repo_path.exists() and any(repo_path.iterdir()): return str(repo_path)
68
  return os.path.dirname(os.path.abspath(__file__))
69
 
70
+ # Initialize Memory
71
  ctx = RecursiveContextManager(_resolve_repo_path())
72
+
73
 
74
  # =============================================================================
75
+ # TOOL PARSERS & EXECUTION
76
  # =============================================================================
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
 
78
def build_system_prompt() -> str:
    """Compose the per-turn system prompt.

    Combines the bot identity line, live repo/memory statistics from the
    context manager, an inline tool catalogue, and the [TOOL: ...] output
    format the parser expects.

    Returns:
        str: the complete system message sent as the first chat message.
    """
    stats = ctx.get_stats()
    # Prompt-engineered tool docs (not API tool schemas): the model is told
    # to emit [TOOL: name(args)] markers, which parse_tool_calls() extracts.
    tools_doc = """
## Available Tools
- **search_code(query, n=5)**: Semantic search codebase.
- **read_file(path, start_line, end_line)**: Read file content.
- **list_files(path, max_depth)**: Explore directory tree.
- **search_conversations(query, n=5)**: Search persistent memory.
- **search_testament(query, n=5)**: Search docs/plans.
- **write_file(path, content)**: Create/Update file (REQUIRES CHANGELOG).
- **shell_execute(command)**: Run shell command.
- **create_shadow_branch()**: Backup repository.
"""
    return f"""You are Clawdbot 🦞.
System Stats: {stats.get('total_files', 0)} files, {stats.get('conversations', 0)} memories.
{tools_doc}
Output Format: Use [TOOL: tool_name(arg="value")] for tools.
"""
96
 
97
def parse_tool_calls(text: str) -> list:
    """Extract (tool_name, args_dict) pairs from raw model output.

    Supports three wire formats, tried in order:
      1. Executor Protocol: ``[TOOL: name(args)]``
      2. Native Kimi XML:   ``<|tool_calls_section_begin|> name(args) ...``
      3. Legacy Kimi tags:  ``<|tool_call_begin|>functions.name:0\\n{json}<|tool_call_end|>``

    Strategy 2 only accepts names listed in AVAILABLE_TOOLS and skips a
    name already captured by strategy 1 (one call per tool name). Strategy 3
    runs only when the first two produced nothing.

    Returns:
        list[tuple[str, dict]]: parsed calls, possibly empty.
    """
    calls = []

    # STRATEGY 1: The Executor Protocol ([TOOL: ...]).
    # NOTE: the non-greedy (.*?) stops at the first ')]', so an argument
    # value that itself contains ')]' would be truncated — a known
    # limitation of this bracket protocol.
    bracket_pattern = r"\[TOOL:\s*(\w+)\((.*?)\)\]"
    for match in re.finditer(bracket_pattern, text, re.DOTALL):
        calls.append((match.group(1), parse_tool_args(match.group(2))))

    # STRATEGY 2: The Native XML Protocol — strip the section/code markers,
    # then treat anything that looks like a function call as a candidate.
    if "<|tool_calls" in text:
        clean_text = text
        for marker in (r"<\|tool_calls_section_begin\|>",
                       r"<\|tool_calls_section_end\|>",
                       r"<tool_code>",
                       r"</tool_code>"):
            clean_text = re.sub(marker, "", clean_text)

        for match in re.finditer(r"(\w+)\s*\((.*?)\)", clean_text, re.DOTALL):
            tool_name = match.group(1)
            # Ignore obvious Python builtins the model may echo in code.
            if tool_name in ("print", "range", "len", "str", "int"):
                continue
            # De-duplicate against earlier captures (one call per tool name).
            if any(existing[0] == tool_name for existing in calls):
                continue
            if tool_name in AVAILABLE_TOOLS:
                calls.append((tool_name, parse_tool_args(match.group(2))))

    # STRATEGY 3: Legacy Kimi tags — JSON payload between explicit markers.
    if not calls:
        legacy_pattern = r'<\|tool_call_begin\|>\s*functions\.(\w+):\d+\s*\n(.*?)<\|tool_call_end\|>'
        for match in re.finditer(legacy_pattern, text, re.DOTALL):
            # FIX: was a bare `except: pass`, which hid every error class;
            # only a malformed JSON payload should be skipped.
            try:
                calls.append((match.group(1), json.loads(match.group(2).strip())))
            except json.JSONDecodeError:
                pass

    return calls
140
 
141
def parse_tool_args(args_str: str) -> dict:
    """Parse a tool-call argument string into a dict.

    Accepts either a JSON object ('{"path": "a.py"}') or comma-separated
    key=value pairs where values may be double-quoted, single-quoted, or
    bare tokens ('path="a.py", n=5'). Purely-numeric values are converted
    to int; everything else stays a string.

    Returns:
        dict: parsed arguments; {} when nothing parses.
    """
    # JSON object form: delegate entirely to the json parser.
    if args_str.strip().startswith('{'):
        try:
            return json.loads(args_str)
        except json.JSONDecodeError:
            return {}

    # Heuristic form — matches key="value", key='value', or key=123.
    pattern = r'(\w+)\s*=\s*(?:"([^"]*)"|\'([^\']*)\'|([^,\s]+))'
    args = {}
    for match in re.finditer(pattern, args_str):
        key = match.group(1)
        # FIX: exactly one of groups 2-4 matched, so pick the non-None one.
        # The old `g2 or g3 or g4` turned an empty quoted value ("") into
        # None, then None.isdigit() raised and the bare `except` silently
        # dropped ALL remaining arguments.
        val = next(g for g in match.groups()[1:] if g is not None)
        if val.isdigit():
            val = int(val)  # convert non-negative integer literals
        args[key] = val
    return args
161
+
162
def extract_conversational_text(content: str) -> str:
    """Strip every tool-call marker from *content* and return only the prose.

    Removes all three syntaxes parse_tool_calls() understands, then trims
    surrounding whitespace.
    """
    # One regex per tool-call wire format emitted by the model.
    markup_patterns = (
        r'\[TOOL:.*?\]',
        r'<\|tool_calls.*?<\|tool_calls.*?\|>',
        r'<\|tool_call_begin\|>.*?<\|tool_call_end\|>',
    )
    stripped = content
    for pattern in markup_patterns:
        stripped = re.sub(pattern, '', stripped, flags=re.DOTALL)
    return stripped.strip()
167
 
168
  def execute_tool(tool_name: str, args: dict) -> dict:
169
  try:
170
  if tool_name == 'search_code':
171
  res = ctx.search_code(args.get('query', ''), args.get('n', 5))
172
  return {"status": "executed", "tool": tool_name, "result": "\n".join([f"πŸ“„ {r['file']}\n```{r['snippet']}```" for r in res])}
 
173
  elif tool_name == 'read_file':
174
  return {"status": "executed", "tool": tool_name, "result": ctx.read_file(args.get('path', ''), args.get('start_line'), args.get('end_line'))}
 
175
  elif tool_name == 'list_files':
176
  return {"status": "executed", "tool": tool_name, "result": ctx.list_files(args.get('path', ''), args.get('max_depth', 3))}
 
 
177
  elif tool_name == 'search_conversations':
178
  res = ctx.search_conversations(args.get('query', ''), args.get('n', 5))
179
  formatted = "\n---\n".join([f"{r['content']}" for r in res]) if res else "No matches found."
180
  return {"status": "executed", "tool": tool_name, "result": formatted}
 
 
181
  elif tool_name == 'search_testament':
182
  res = ctx.search_testament(args.get('query', ''), args.get('n', 5))
183
  formatted = "\n\n".join([f"πŸ“œ **{r['file']}**\n{r['snippet']}" for r in res]) if res else "No matches found."
184
  return {"status": "executed", "tool": tool_name, "result": formatted}
 
185
  elif tool_name == 'write_file':
186
  return {"status": "staged", "tool": tool_name, "args": args, "description": f"✏️ Write to `{args.get('path')}`"}
 
187
  elif tool_name == 'shell_execute':
188
  return {"status": "staged", "tool": tool_name, "args": args, "description": f"πŸ–₯️ Execute: `{args.get('command')}`"}
 
189
  elif tool_name == 'create_shadow_branch':
190
  return {"status": "staged", "tool": tool_name, "args": args, "description": "πŸ›‘οΈ Create shadow branch"}
191
 
 
200
  except Exception as e: return f"Error: {e}"
201
  return "Unknown tool"
202
 
203
+ # =============================================================================
204
+ # ROBUST HELPERS (Box Cutter & Retry)
205
+ # =============================================================================
206
 
207
def process_uploaded_file(file) -> str:
    """Turn a Gradio upload into a text payload for the model.

    Zips are extracted under REPO_PATH/uploaded_assets and summarised; text
    files (per TEXT_EXTENSIONS, or no suffix) are inlined (capped at 50 kB);
    anything else is reported as binary.

    Args:
        file: a single upload object/path, a list of them, or None.
    Returns:
        str: markdown summary of the upload(s); "" when nothing was given.
    """
    if file is None: return ""
    if isinstance(file, list):
        # FIX: the widget allows file_count="multiple", but previously only
        # file[0] was processed and the rest were silently dropped. Process
        # each upload and join the summaries (an empty list still yields "").
        return "\n\n".join(s for s in (process_uploaded_file(f) for f in file) if s)

    file_path = file.name if hasattr(file, 'name') else str(file)
    file_name = os.path.basename(file_path)
    suffix = os.path.splitext(file_name)[1].lower()

    if suffix == '.zip':
        try:
            extract_to = Path(REPO_PATH) / "uploaded_assets" / file_name.replace(".zip", "")
            if extract_to.exists(): shutil.rmtree(extract_to)  # replace stale copy
            extract_to.mkdir(parents=True, exist_ok=True)
            # NOTE(review): ZipFile.extractall sanitizes absolute paths, but
            # uploads are untrusted — confirm member-name handling against
            # the zipfile docs if this ever runs outside a sandboxed Space.
            with zipfile.ZipFile(file_path, 'r') as z: z.extractall(extract_to)
            file_list = [f.name for f in extract_to.glob('*')]
            preview = ", ".join(file_list[:10])  # top-level entries only
            return (f"πŸ“¦ **Unzipped: {file_name}**\nLocation: `{extract_to}`\nContents: {preview}\n"
                    f"SYSTEM NOTE: The files are extracted. Use list_files('{extract_to.name}') to explore them.")
        except Exception as e: return f"⚠️ Failed to unzip {file_name}: {e}"

    # Suffix '' also counts as text so extension-less files (README,
    # Dockerfile, .gitignore) are still inlined.
    if suffix in TEXT_EXTENSIONS or suffix == '':
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()
            if len(content) > 50000: content = content[:50000] + "\n...(truncated)"
            return f"πŸ“Ž **Uploaded: {file_name}**\n```\n{content}\n```"
        except Exception as e: return f"πŸ“Ž **Uploaded: {file_name}** (error reading: {e})"
    else:
        try: return f"πŸ“Ž **Uploaded: {file_name}** (binary file, {os.path.getsize(file_path):,} bytes)"
        except: return f"πŸ“Ž **Uploaded: {file_name}** (binary file)"
239
+
240
def call_model_with_retry(messages, model_id, max_retries=4):
    """Call the chat endpoint, retrying transient gateway failures.

    Only errors whose text mentions 503/504 or "timeout" are retried, with
    exponential backoff (2s, 4s, 8s, ...); any other error — and the final
    failed attempt — is raised to the caller.
    """
    for attempt in range(max_retries):
        try:
            return client.chat_completion(model=model_id, messages=messages,
                                          max_tokens=2048, temperature=0.7)
        except Exception as exc:
            message_text = str(exc)
            is_transient = ("504" in message_text or "503" in message_text
                            or "timeout" in message_text.lower())
            if not is_transient:
                raise exc
            if attempt == max_retries - 1:
                raise exc
            # Back off before the next attempt: 2s, 4s, 8s, ...
            time.sleep(2 * (2 ** attempt))
251
 
252
+ # =============================================================================
253
+ # AGENT LOOP
254
+ # =============================================================================
 
255
 
256
def agent_loop(message: str, history: list, pending_proposals: list, uploaded_file) -> tuple:
    """Run one user turn: call the model up to 5 times, executing read-only
    tools inline and staging destructive ones for Gate approval.

    Args:
        message: raw user text (may be empty when only a file is uploaded).
        history: chat history as a list of {"role", "content"} dicts.
        pending_proposals: staged-tool proposals awaiting approval.
        uploaded_file: Gradio upload payload, or None.

    Returns:
        tuple: (history, cleared textbox "", proposals, gate CheckboxGroup,
        files stats label, convos stats label) — the fixed shape every
        return below preserves so the Gradio outputs always bind.
    """
    # Default Safe Returns — never propagate None into Gradio state.
    safe_hist = history or []
    safe_props = pending_proposals or []

    try:
        # 1. Inputs — nothing to do when both message and upload are absent.
        if not message.strip() and uploaded_file is None:
            return (safe_hist, "", safe_props, _format_gate_choices(safe_props), _stats_label_files(), _stats_label_convos())

        full_message = message.strip()
        if uploaded_file:
            # Prepend the upload summary so the model sees file content first.
            full_message = f"{process_uploaded_file(uploaded_file)}\n\n{full_message}"

        safe_hist = safe_hist + [{"role": "user", "content": full_message}]

        # Rebuild the API transcript each turn: fresh system prompt plus the
        # last 40 history entries (older context is dropped).
        system_prompt = build_system_prompt()
        api_messages = [{"role": "system", "content": system_prompt}]
        for h in safe_hist[-40:]:
            api_messages.append({"role": h["role"], "content": h["content"]})

        accumulated_text = ""
        staged_this_turn = []

        # 2. Thinking Loop — at most 5 model round-trips per user turn.
        for iteration in range(5):
            try:
                resp = call_model_with_retry(api_messages, MODEL_ID)
                content = resp.choices[0].message.content or ""
            except Exception as e:
                # Surface the API failure in chat and bail out of the turn.
                safe_hist.append({"role": "assistant", "content": f"⚠️ API Error: {e}"})
                return (safe_hist, "", safe_props, _format_gate_choices(safe_props), _stats_label_files(), _stats_label_convos())

            calls = parse_tool_calls(content)
            text = extract_conversational_text(content)

            if text: accumulated_text += ("\n\n" if accumulated_text else "") + text

            # No tool calls means the model is done talking for this turn.
            if not calls: break

            results = []
            for name, args in calls:
                res = execute_tool(name, args)
                if res["status"] == "executed":
                    # Read-only tool: feed its output straight back to the model.
                    results.append(f"[Tool Result: {name}]\n{res['result']}")
                elif res["status"] == "staged":
                    # Destructive tool: queue a proposal for the Gate instead.
                    # NOTE(review): int(time.time()) gives second resolution,
                    # so two same-named proposals in one second share an id.
                    p_id = f"p_{int(time.time())}_{name}"
                    staged_this_turn.append({
                        "id": p_id, "tool": name, "args": res["args"],
                        "description": res["description"], "timestamp": time.strftime("%H:%M:%S")
                    })
                    results.append(f"[STAGED: {name}]")

            if results:
                # Echo the assistant turn and tool results so the next
                # iteration of the loop can react to them.
                api_messages += [{"role": "assistant", "content": content}, {"role": "user", "content": "\n".join(results)}]
            else:
                break

        # 3. Finalize — assemble the visible reply.
        final = accumulated_text
        if staged_this_turn:
            final += "\n\nπŸ›‘οΈ **Proposals Staged.** Check the Gate tab."
            # NOTE(review): += mutates the list object in place, so the
            # caller's pending_proposals list (when non-empty) is aliased.
            safe_props += staged_this_turn

        if not final: final = "πŸ€” I processed that but have no text response."

        safe_hist.append({"role": "assistant", "content": final})

        # Persist the turn to memory; best-effort, never breaks the UI.
        try: ctx.save_conversation_turn(full_message, final, len(safe_hist))
        except: pass

        return (safe_hist, "", safe_props, _format_gate_choices(safe_props), _stats_label_files(), _stats_label_convos())

    except Exception as e:
        # Last-resort guard: report the failure in chat, keep state intact.
        safe_hist.append({"role": "assistant", "content": f"πŸ’₯ Critical Error: {e}"})
        return (safe_hist, "", safe_props, _format_gate_choices(safe_props), _stats_label_files(), _stats_label_convos())
332
+
333
+ # =============================================================================
334
+ # UI COMPONENTS
335
+ # =============================================================================
336
 
 
337
def _format_gate_choices(proposals):
    """Render pending proposals as Gate checkboxes with nothing pre-selected."""
    choices = []
    for proposal in proposals:
        label = f"[{proposal['timestamp']}] {proposal['description']}"
        choices.append((label, proposal['id']))
    return gr.CheckboxGroup(choices=choices, value=[])
339
 
 
341
  if not ids: return "No selection.", proposals, _format_gate_choices(proposals), history
342
  results, remaining = [], []
343
  for p in proposals:
344
+ if p['id'] in ids:
345
+ out = execute_staged_tool(p['tool'], p['args'])
346
+ results.append(f"**{p['tool']}**: {out}")
347
  else: remaining.append(p)
348
  if results: history.append({"role": "assistant", "content": "βœ… **Executed:**\n" + "\n".join(results)})
349
  return "Done.", remaining, _format_gate_choices(remaining), history
350
 
351
def auto_continue_after_approval(history, proposals):
    """Re-enter the agent loop after the Gate executed approved tools.

    Fires only when the most recent chat entry is the 'βœ… **Executed:**'
    summary appended by the approval handler; otherwise the current UI
    state is returned unchanged.
    """
    last_content = history[-1].get("content", "") if history else ""
    if "βœ… **Executed:**" not in str(last_content):
        return history, "", proposals, _format_gate_choices(proposals), _stats_label_files(), _stats_label_convos()
    return agent_loop("[System: Tools executed. Continue.]", history, proposals, None)
356
 
357
def _stats_label_files():
    """Markdown label for the sidebar: number of indexed files."""
    count = ctx.get_stats().get('total_files', 0)
    return f"πŸ“‚ Files: {count}"
358
def _stats_label_convos():
    """Markdown label for the sidebar: number of stored conversations."""
    count = ctx.get_stats().get('conversations', 0)
    return f"πŸ’Ύ Convos: {count}"
359
 
360
+ # =============================================================================
361
+ # GRADIO INTERFACE
362
+ # =============================================================================
363
+
364
  with gr.Blocks(title="🦞 Clawdbot") as demo:
365
  state_proposals = gr.State([])
366
  gr.Markdown("# 🦞 Clawdbot Command Center")
 
371
  stat_f = gr.Markdown(_stats_label_files())
372
  stat_c = gr.Markdown(_stats_label_convos())
373
  btn_ref = gr.Button("πŸ”„")
374
+ file_in = gr.File(label="Upload", file_count="multiple")
375
  with gr.Column(scale=4):
376
+ chat = gr.Chatbot(height=600, type="messages", avatar_images=(None, "https://em-content.zobj.net/source/twitter/408/lobster_1f99e.png"))
377
  with gr.Row():
378
  txt = gr.Textbox(scale=6, placeholder="Prompt...")
379
  btn_send = gr.Button("Send", scale=1)