Executor-Tyrant-Framework commited on
Commit
eef10f6
·
verified ·
1 Parent(s): a5ce75e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -59
app.py CHANGED
@@ -12,19 +12,16 @@ import traceback
12
 
13
  """
14
  Clawdbot Unified Command Center
15
- PLATINUM COPY [2026-02-03]
16
- FEATURES:
17
- - Stamina: 15 Turns + Forced Summarization (No more silence)
18
- - Polyglot: Understands [TOOL:...] and XML formats
19
- - Robustness: Retry logic for API timeouts
20
- - Compatibility: Gradio 6.x native
21
  """
22
 
23
  # =============================================================================
24
  # CONFIGURATION & INIT
25
  # =============================================================================
26
 
27
- # Registry of valid tools for validation
28
  AVAILABLE_TOOLS = {
29
  "list_files", "read_file", "search_code", "write_file",
30
  "create_shadow_branch", "shell_execute", "get_stats",
@@ -100,15 +97,8 @@ Output Format: Use [TOOL: tool_name(arg="value")] for tools.
100
  """
101
 
102
  def parse_tool_calls(text: str) -> list:
103
- """
104
- Parses tool calls from the model output.
105
- Supports TWO formats:
106
- 1. The Executor Protocol: [TOOL: name(args)]
107
- 2. The Native Kimi XML: <|tool_calls...> name(args) <|tool_calls_end...>
108
- """
109
  calls = []
110
-
111
- # STRATEGY 1: The Executor Protocol ([TOOL: ...])
112
  bracket_pattern = r"\[TOOL:\s*(\w+)\((.*?)\)\]"
113
  for match in re.finditer(bracket_pattern, text, re.DOTALL):
114
  tool_name = match.group(1)
@@ -116,52 +106,38 @@ def parse_tool_calls(text: str) -> list:
116
  args = parse_tool_args(args_str)
117
  calls.append((tool_name, args))
118
 
119
- # STRATEGY 2: The Native XML Protocol (The Translator)
120
  if "<|tool_calls" in text:
121
  clean_text = re.sub(r"<\|tool_calls_section_begin\|>", "", text)
122
  clean_text = re.sub(r"<\|tool_calls_section_end\|>", "", clean_text)
123
  clean_text = re.sub(r"<tool_code>", "", clean_text)
124
  clean_text = re.sub(r"</tool_code>", "", clean_text)
125
-
126
- xml_func_pattern = r"(\w+)\s*\((.*?)\)"
127
- xml_matches = re.finditer(xml_func_pattern, clean_text, re.DOTALL)
128
-
129
  for match in xml_matches:
130
  tool_name = match.group(1)
131
  if tool_name in ["print", "range", "len", "str", "int"]: continue
132
  if any(existing[0] == tool_name for existing in calls): continue
133
- args_str = match.group(2)
134
  if tool_name in AVAILABLE_TOOLS:
135
- args = parse_tool_args(args_str)
136
- calls.append((tool_name, args))
137
 
138
- # STRATEGY 3: Legacy Kimi Tags (from your old code)
139
  if not calls:
140
  for match in re.finditer(r'<\|tool_call_begin\|>\s*functions\.(\w+):\d+\s*\n(.*?)<\|tool_call_end\|>', text, re.DOTALL):
141
  try: calls.append((match.group(1), json.loads(match.group(2).strip())))
142
  except: pass
143
-
144
  return calls
145
 
146
  def parse_tool_args(args_str: str) -> dict:
147
- """Helper to safely parse 'key="value", n=5' strings into a dict."""
148
  args = {}
149
  try:
150
- # Try JSON first if it looks like JSON
151
- if args_str.strip().startswith('{'):
152
- return json.loads(args_str)
153
-
154
- # Heuristic parsing for key="value"
155
- # Matches key='value' or key="value" or key=123
156
  pattern = r'(\w+)\s*=\s*(?:"([^"]*)"|\'([^\']*)\'|([^,\s]+))'
157
  for match in re.finditer(pattern, args_str):
158
  key = match.group(1)
159
  val = match.group(2) or match.group(3) or match.group(4)
160
- # Convert numbers
161
  if val.isdigit(): val = int(val)
162
  args[key] = val
163
- except:
164
- pass
165
  return args
166
 
167
  def extract_conversational_text(content: str) -> str:
@@ -193,7 +169,6 @@ def execute_tool(tool_name: str, args: dict) -> dict:
193
  return {"status": "staged", "tool": tool_name, "args": args, "description": f"🖥️ Execute: `{args.get('command')}`"}
194
  elif tool_name == 'create_shadow_branch':
195
  return {"status": "staged", "tool": tool_name, "args": args, "description": "🛡️ Create shadow branch"}
196
-
197
  return {"status": "error", "result": f"Unknown tool: {tool_name}"}
198
  except Exception as e: return {"status": "error", "result": str(e)}
199
 
@@ -206,15 +181,14 @@ def execute_staged_tool(tool_name: str, args: dict) -> str:
206
  return "Unknown tool"
207
 
208
  # =============================================================================
209
- # ROBUST HELPERS (Box Cutter & Retry)
210
  # =============================================================================
211
 
212
  def process_uploaded_file(file) -> str:
213
  if file is None: return ""
214
- if isinstance(file, list):
215
- if len(file) == 0: return ""
216
- file = file[0]
217
-
218
  file_path = file.name if hasattr(file, 'name') else str(file)
219
  file_name = os.path.basename(file_path)
220
  suffix = os.path.splitext(file_name)[1].lower()
@@ -238,14 +212,14 @@ def process_uploaded_file(file) -> str:
238
  if len(content) > 50000: content = content[:50000] + "\n...(truncated)"
239
  return f"📎 **Uploaded: {file_name}**\n```\n{content}\n```"
240
  except Exception as e: return f"📎 **Uploaded: {file_name}** (error reading: {e})"
241
- else:
242
- try: return f"📎 **Uploaded: {file_name}** (binary file, {os.path.getsize(file_path):,} bytes)"
243
- except: return f"📎 **Uploaded: {file_name}** (binary file)"
244
 
 
245
  def call_model_with_retry(messages, model_id, max_retries=4):
246
  for attempt in range(max_retries):
247
  try:
248
- return client.chat_completion(model=model_id, messages=messages, max_tokens=2048, temperature=0.7)
 
249
  except Exception as e:
250
  error_str = str(e)
251
  if "504" in error_str or "503" in error_str or "timeout" in error_str.lower():
@@ -259,12 +233,10 @@ def call_model_with_retry(messages, model_id, max_retries=4):
259
  # =============================================================================
260
 
261
  def agent_loop(message: str, history: list, pending_proposals: list, uploaded_file) -> tuple:
262
- # Default Safe Returns
263
  safe_hist = history or []
264
  safe_props = pending_proposals or []
265
 
266
  try:
267
- # 1. Inputs
268
  if not message.strip() and uploaded_file is None:
269
  return (safe_hist, "", safe_props, _format_gate_choices(safe_props), _stats_label_files(), _stats_label_convos())
270
 
@@ -282,20 +254,16 @@ def agent_loop(message: str, history: list, pending_proposals: list, uploaded_fi
282
  accumulated_text = ""
283
  staged_this_turn = []
284
 
285
- # FEATURE: Stamina Increased to 15
286
  MAX_ITERATIONS = 15
287
 
288
- # 2. Thinking Loop
289
  for iteration in range(MAX_ITERATIONS):
290
  try:
291
- # FEATURE: Forced Surface Protocol
292
  if iteration == MAX_ITERATIONS - 1:
293
- print("⚠️ Max iterations reached. Forcing summary.")
294
- api_messages.append({
295
- "role": "system",
296
- "content": "SYSTEM ALERT: You have reached the maximum number of tool steps. STOP using tools. Immediately summarize your findings and answer the user's request based on what you know now."
297
- })
298
 
 
299
  resp = call_model_with_retry(api_messages, MODEL_ID)
300
  content = resp.choices[0].message.content or ""
301
  except Exception as e:
@@ -327,16 +295,14 @@ def agent_loop(message: str, history: list, pending_proposals: list, uploaded_fi
327
  else:
328
  break
329
 
330
- # 3. Finalize
331
  final = accumulated_text
332
  if staged_this_turn:
333
  final += "\n\n🛡️ **Proposals Staged.** Check the Gate tab."
334
  safe_props += staged_this_turn
335
 
336
- if not final: final = "🤔 I processed that but have no text response. (Check logs for tool outputs)"
337
 
338
  safe_hist.append({"role": "assistant", "content": final})
339
-
340
  try: ctx.save_conversation_turn(full_message, final, len(safe_hist))
341
  except: pass
342
 
@@ -389,7 +355,6 @@ with gr.Blocks(title="🦞 Clawdbot") as demo:
389
  btn_ref = gr.Button("🔄")
390
  file_in = gr.File(label="Upload", file_count="multiple")
391
  with gr.Column(scale=4):
392
- # FIX: Removed 'type="messages"'
393
  chat = gr.Chatbot(height=600, avatar_images=(None, "https://em-content.zobj.net/source/twitter/408/lobster_1f99e.png"))
394
  with gr.Row():
395
  txt = gr.Textbox(scale=6, placeholder="Prompt...")
 
12
 
13
  """
14
  Clawdbot Unified Command Center
15
+ DIAMOND COPY [2026-02-03]
16
+ FIXED: Added missing retry logic.
17
+ FIXED: Increased Max Tokens to 8192 (Prevents truncation).
18
+ FIXED: Increased Loop Stamina to 15 (Prevents silence).
 
 
19
  """
20
 
21
  # =============================================================================
22
  # CONFIGURATION & INIT
23
  # =============================================================================
24
 
 
25
  AVAILABLE_TOOLS = {
26
  "list_files", "read_file", "search_code", "write_file",
27
  "create_shadow_branch", "shell_execute", "get_stats",
 
97
  """
98
 
99
  def parse_tool_calls(text: str) -> list:
 
 
 
 
 
 
100
  calls = []
101
+ # 1. Bracket Format
 
102
  bracket_pattern = r"\[TOOL:\s*(\w+)\((.*?)\)\]"
103
  for match in re.finditer(bracket_pattern, text, re.DOTALL):
104
  tool_name = match.group(1)
 
106
  args = parse_tool_args(args_str)
107
  calls.append((tool_name, args))
108
 
109
+ # 2. XML Format (Translator)
110
  if "<|tool_calls" in text:
111
  clean_text = re.sub(r"<\|tool_calls_section_begin\|>", "", text)
112
  clean_text = re.sub(r"<\|tool_calls_section_end\|>", "", clean_text)
113
  clean_text = re.sub(r"<tool_code>", "", clean_text)
114
  clean_text = re.sub(r"</tool_code>", "", clean_text)
115
+ xml_matches = re.finditer(r"(\w+)\s*\((.*?)\)", clean_text, re.DOTALL)
 
 
 
116
  for match in xml_matches:
117
  tool_name = match.group(1)
118
  if tool_name in ["print", "range", "len", "str", "int"]: continue
119
  if any(existing[0] == tool_name for existing in calls): continue
 
120
  if tool_name in AVAILABLE_TOOLS:
121
+ calls.append((tool_name, parse_tool_args(match.group(2))))
 
122
 
123
+ # 3. Legacy Kimi Tags
124
  if not calls:
125
  for match in re.finditer(r'<\|tool_call_begin\|>\s*functions\.(\w+):\d+\s*\n(.*?)<\|tool_call_end\|>', text, re.DOTALL):
126
  try: calls.append((match.group(1), json.loads(match.group(2).strip())))
127
  except: pass
 
128
  return calls
129
 
130
  def parse_tool_args(args_str: str) -> dict:
 
131
  args = {}
132
  try:
133
+ if args_str.strip().startswith('{'): return json.loads(args_str)
 
 
 
 
 
134
  pattern = r'(\w+)\s*=\s*(?:"([^"]*)"|\'([^\']*)\'|([^,\s]+))'
135
  for match in re.finditer(pattern, args_str):
136
  key = match.group(1)
137
  val = match.group(2) or match.group(3) or match.group(4)
 
138
  if val.isdigit(): val = int(val)
139
  args[key] = val
140
+ except: pass
 
141
  return args
142
 
143
  def extract_conversational_text(content: str) -> str:
 
169
  return {"status": "staged", "tool": tool_name, "args": args, "description": f"🖥️ Execute: `{args.get('command')}`"}
170
  elif tool_name == 'create_shadow_branch':
171
  return {"status": "staged", "tool": tool_name, "args": args, "description": "🛡️ Create shadow branch"}
 
172
  return {"status": "error", "result": f"Unknown tool: {tool_name}"}
173
  except Exception as e: return {"status": "error", "result": str(e)}
174
 
 
181
  return "Unknown tool"
182
 
183
  # =============================================================================
184
+ # ROBUST HELPERS
185
  # =============================================================================
186
 
187
  def process_uploaded_file(file) -> str:
188
  if file is None: return ""
189
+ if isinstance(file, list): file = file[0] if len(file) > 0 else None
190
+ if file is None: return ""
191
+
 
192
  file_path = file.name if hasattr(file, 'name') else str(file)
193
  file_name = os.path.basename(file_path)
194
  suffix = os.path.splitext(file_name)[1].lower()
 
212
  if len(content) > 50000: content = content[:50000] + "\n...(truncated)"
213
  return f"📎 **Uploaded: {file_name}**\n```\n{content}\n```"
214
  except Exception as e: return f"📎 **Uploaded: {file_name}** (error reading: {e})"
215
+ return f"📎 **Uploaded: {file_name}** (binary file, {os.path.getsize(file_path):,} bytes)"
 
 
216
 
217
+ # NEW FUNCTION: This is what was missing!
218
  def call_model_with_retry(messages, model_id, max_retries=4):
219
  for attempt in range(max_retries):
220
  try:
221
+ # KEY FIX: max_tokens=8192 allows writing large files without cutoff
222
+ return client.chat_completion(model=model_id, messages=messages, max_tokens=8192, temperature=0.7)
223
  except Exception as e:
224
  error_str = str(e)
225
  if "504" in error_str or "503" in error_str or "timeout" in error_str.lower():
 
233
  # =============================================================================
234
 
235
  def agent_loop(message: str, history: list, pending_proposals: list, uploaded_file) -> tuple:
 
236
  safe_hist = history or []
237
  safe_props = pending_proposals or []
238
 
239
  try:
 
240
  if not message.strip() and uploaded_file is None:
241
  return (safe_hist, "", safe_props, _format_gate_choices(safe_props), _stats_label_files(), _stats_label_convos())
242
 
 
254
  accumulated_text = ""
255
  staged_this_turn = []
256
 
257
+ # KEY FIX: Stamina increased to 15 turns
258
  MAX_ITERATIONS = 15
259
 
 
260
  for iteration in range(MAX_ITERATIONS):
261
  try:
262
+ # KEY FIX: Forced Surface logic
263
  if iteration == MAX_ITERATIONS - 1:
264
+ api_messages.append({"role": "system", "content": "SYSTEM ALERT: Max steps reached. STOP using tools. Summarize findings immediately."})
 
 
 
 
265
 
266
+ # KEY FIX: Use the new wrapper function instead of direct client call
267
  resp = call_model_with_retry(api_messages, MODEL_ID)
268
  content = resp.choices[0].message.content or ""
269
  except Exception as e:
 
295
  else:
296
  break
297
 
 
298
  final = accumulated_text
299
  if staged_this_turn:
300
  final += "\n\n🛡️ **Proposals Staged.** Check the Gate tab."
301
  safe_props += staged_this_turn
302
 
303
+ if not final: final = "🤔 I processed that but have no text response."
304
 
305
  safe_hist.append({"role": "assistant", "content": final})
 
306
  try: ctx.save_conversation_turn(full_message, final, len(safe_hist))
307
  except: pass
308
 
 
355
  btn_ref = gr.Button("🔄")
356
  file_in = gr.File(label="Upload", file_count="multiple")
357
  with gr.Column(scale=4):
 
358
  chat = gr.Chatbot(height=600, avatar_images=(None, "https://em-content.zobj.net/source/twitter/408/lobster_1f99e.png"))
359
  with gr.Row():
360
  txt = gr.Textbox(scale=6, placeholder="Prompt...")