webxos committed on
Commit
8ae33a6
·
verified ·
1 Parent(s): 6d4d0dd

Upload 3 files

Browse files
Files changed (3) hide show
  1. config.json +20 -0
  2. pygmyclaw.py +774 -0
  3. pygmyclaw_multitool.py +324 -0
config.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model": "qwen2.5:0.5b",
3
+ "endpoint": "http://localhost:11434/api/generate",
4
+ "workspace": ".",
5
+ "debug": false,
6
+ "multi_instance": {
7
+ "enabled": true,
8
+ "ports": [11434, 11435, 11436, 11437]
9
+ },
10
+ "queue": {
11
+ "type": "redis",
12
+ "redis_host": "localhost",
13
+ "redis_port": 6379,
14
+ "queue_name": "grok_tasks"
15
+ },
16
+ "scheduler": {
17
+ "enabled": true,
18
+ "check_interval": 60
19
+ }
20
+ }
pygmyclaw.py ADDED
@@ -0,0 +1,774 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ PygmyClaw – Compact AI Agent with multi‑instance speculative decoding,
4
+ persistent queue, and integrated scheduler for cron‑like tasks.
5
+ """
6
import json
import os
import queue
import shlex
import socket
import subprocess
import sys
import threading
import time
import urllib.error
import urllib.request
from datetime import datetime
from pathlib import Path
18
+
19
+ # Optional Redis support
20
+ try:
21
+ import redis
22
+ REDIS_AVAILABLE = True
23
+ except ImportError:
24
+ REDIS_AVAILABLE = False
25
+
26
+ # Optional Ollama Python client (for multi‑instance drafts)
27
+ try:
28
+ import ollama
29
+ OLLAMA_CLIENT_AVAILABLE = True
30
+ except ImportError:
31
+ OLLAMA_CLIENT_AVAILABLE = False
32
+
33
+ # For time parsing in scheduler (optional)
34
+ try:
35
+ import dateparser
36
+ DATEPARSER_AVAILABLE = True
37
+ except ImportError:
38
+ DATEPARSER_AVAILABLE = False
39
+
40
+ SCRIPT_DIR = Path(__file__).parent.resolve()
41
+ CONFIG_FILE = SCRIPT_DIR / "config.json"
42
+ ERROR_LOG = None
43
+ MAX_LOG_ENTRIES = 1000
44
+ SCHEDULED_JOBS_FILE = SCRIPT_DIR / "scheduled_jobs.json"
45
+
46
+ DEFAULT_MODEL = "qwen2.5:0.5b"
47
+ DEFAULT_ENDPOINT = "http://localhost:11434/api/generate"
48
+ DEBUG = os.environ.get("PYGMYCLAW_DEBUG", "").lower() in ("1", "true", "yes")
49
+
50
+ # ----------------------------------------------------------------------
51
+ # Multi‑instance / speculative decoding globals
52
+ INSTANCE_PROCESSES = [] # list of Popen objects for each Ollama serve
53
+ DRAFT_BATCH_SIZE = 6 # tokens per draft
54
+ USE_REDIS = False # set by config
55
+ REDIS_CLIENT = None
56
+ QUEUE_NAME = "grok_tasks"
57
+ TASK_QUEUE = None # fallback Python queue
58
+ QUEUE_PROCESSOR_EVENT = threading.Event()
59
+
60
+ # Scheduler globals
61
+ SCHEDULER_THREAD = None
62
+ SCHEDULER_EVENT = threading.Event()
63
+ SCHEDULER_INTERVAL = 60 # seconds between checks
64
+
65
+ # ----------------------------------------------------------------------
66
+ class PygmyClaw:
67
    def __init__(self):
        """Set up the agent end to end.

        Order matters: config is loaded first (it can change workspace,
        model, endpoint), then the multitool and Ollama are verified, the
        model is probed/warmed, the tool roster is fetched, and finally the
        optional background workers (queue processor, scheduler) are started.
        """
        self.workspace = SCRIPT_DIR
        self.model = DEFAULT_MODEL
        self.endpoint = DEFAULT_ENDPOINT
        self.multi_instance = None
        self.queue_config = None
        self.scheduler_config = None
        self.load_config()
        # Multitool lives next to the (possibly config-overridden) workspace.
        self.multitool = self.workspace / "pygmyclaw_multitool.py"
        self.check_prerequisites()
        self._ensure_model_ready()
        self._warmup_model()

        # Fetch the Python tool list from the companion multitool script.
        py_resp = self.call_multitool("list_tools_detailed")
        self.python_tools = self._extract_tool_list(py_resp)

        self.system_prompt = self.build_system_prompt()

        # Queue backend + worker only when multi-instance mode is enabled.
        if self.multi_instance and self.multi_instance.get("enabled"):
            self._init_queue()
            self.start_queue_processor()

        # Cron-like scheduler, opt-in via config.
        if self.scheduler_config and self.scheduler_config.get("enabled", False):
            self.start_scheduler()
95
+ def _extract_tool_list(self, resp):
96
+ """Extract list of tools from JSON response."""
97
+ if not isinstance(resp, dict):
98
+ return []
99
+ if "error" in resp:
100
+ print(f"⚠️ Tool list error: {resp['error']}", file=sys.stderr)
101
+ return []
102
+ if "result" in resp and isinstance(resp["result"], dict):
103
+ inner = resp["result"]
104
+ if "tools" in inner and isinstance(inner["tools"], list):
105
+ return inner["tools"]
106
+ if "tools" in resp and isinstance(resp["tools"], list):
107
+ return resp["tools"]
108
+ return []
109
+
110
    def load_config(self):
        """Load config.json (if present) and apply overrides to this agent.

        Recognized keys: model, endpoint, workspace, debug, multi_instance,
        queue, scheduler. A malformed file is reported and otherwise ignored
        so the built-in defaults stay in effect. Also derives the error-log
        path from the (possibly overridden) workspace.
        """
        if CONFIG_FILE.exists():
            try:
                with open(CONFIG_FILE) as f:
                    cfg = json.load(f)
                self.model = cfg.get("model", self.model)
                self.endpoint = cfg.get("endpoint", self.endpoint)
                if "workspace" in cfg:
                    self.workspace = Path(cfg["workspace"]).resolve()
                if cfg.get("debug", False):
                    # Config can only switch debug ON; it never overrides an
                    # already-enabled PYGMYCLAW_DEBUG environment setting.
                    global DEBUG
                    DEBUG = True
                self.multi_instance = cfg.get("multi_instance")
                self.queue_config = cfg.get("queue")
                self.scheduler_config = cfg.get("scheduler")
            except Exception as e:
                print(f"⚠️ Warning: Could not load config.json: {e}")
        global ERROR_LOG
        ERROR_LOG = self.workspace / "error_log.json"
130
+ def check_prerequisites(self):
131
+ if not self.multitool.exists():
132
+ print(f"❌ Python multitool not found at {self.multitool}")
133
+ sys.exit(1)
134
+
135
+ try:
136
+ with urllib.request.urlopen("http://localhost:11434/api/tags", timeout=5) as resp:
137
+ data = json.loads(resp.read())
138
+ models = [m["name"] for m in data.get("models", [])]
139
+ if self.model not in models and not any(m.startswith(self.model) for m in models):
140
+ print(f"⚠️ Model '{self.model}' not found in local list.")
141
+ else:
142
+ print(f"✅ Model '{self.model}' found locally.")
143
+ except Exception as e:
144
+ print(f"❌ Cannot reach Ollama at {self.endpoint}: {e}")
145
+ print(" Make sure Ollama is running (try 'ollama serve' in another terminal).")
146
+ sys.exit(1)
147
+
148
    def _ensure_model_ready(self):
        """Block until the model answers a one-token probe, or exit.

        The first request to Ollama may trigger a model load, hence the
        generous 300 s timeout. On any failure the process exits with
        troubleshooting hints.
        """
        print(f"⏳ Ensuring model '{self.model}' is ready...")
        test_payload = {
            "model": self.model,
            "prompt": "hello",
            "stream": False,
            "options": {"num_predict": 1}  # one token is enough to prove readiness
        }
        try:
            req = urllib.request.Request(
                self.endpoint,
                data=json.dumps(test_payload).encode('utf-8'),
                headers={'Content-Type': 'application/json'},
                method='POST'
            )
            with urllib.request.urlopen(req, timeout=300) as resp:
                resp_data = json.loads(resp.read())
                if "response" in resp_data:
                    print("✅ Model is ready.")
                else:
                    print("⚠️ Unexpected response from Ollama.")
        except Exception as e:
            print(f"❌ Failed to communicate with model '{self.model}': {e}")
            print("\nPossible solutions:")
            print("1. Ensure Ollama is running: `ollama serve`")
            print("2. Pull the model manually: `ollama pull {}`".format(self.model))
            sys.exit(1)
176
+ def _warmup_model(self):
177
+ try:
178
+ warmup = {
179
+ "model": self.model,
180
+ "prompt": ".",
181
+ "stream": False,
182
+ "options": {"num_predict": 1}
183
+ }
184
+ req = urllib.request.Request(
185
+ self.endpoint,
186
+ data=json.dumps(warmup).encode(),
187
+ headers={'Content-Type': 'application/json'},
188
+ method='POST'
189
+ )
190
+ with urllib.request.urlopen(req, timeout=10) as resp:
191
+ pass
192
+ except Exception:
193
+ pass
194
+
195
+ # ------------------------------------------------------------------
196
+ # Multi‑instance management (unchanged)
197
    def start_instances(self):
        """Launch one `ollama serve` process per configured port.

        Each child gets OLLAMA_HOST pinned to 127.0.0.1:<port> and
        OLLAMA_NUM_PARALLEL=1. When CUDA_VISIBLE_DEVICES lists multiple
        GPUs, instance i (i > 0) is pinned to GPU i if one exists, otherwise
        its GPU visibility is removed entirely.
        NOTE(review): instance 0 keeps the full inherited GPU set — confirm
        that is intended.
        """
        if not self.multi_instance or not self.multi_instance.get("enabled"):
            print("Multi‑instance not enabled in config.")
            return

        ports = self.multi_instance.get("ports", [11434, 11435, 11436, 11437])
        global INSTANCE_PROCESSES
        for i, port in enumerate(ports):
            env = os.environ.copy()
            env['OLLAMA_HOST'] = f'127.0.0.1:{port}'
            env['OLLAMA_NUM_PARALLEL'] = '1'
            if i > 0 and 'CUDA_VISIBLE_DEVICES' in env:
                gpu_ids = env['CUDA_VISIBLE_DEVICES'].split(',')
                if len(gpu_ids) > i:
                    env['CUDA_VISIBLE_DEVICES'] = gpu_ids[i]
                else:
                    env.pop('CUDA_VISIBLE_DEVICES', None)
            proc = subprocess.Popen(['ollama', 'serve'], env=env)
            INSTANCE_PROCESSES.append(proc)
            print(f"Started Ollama on port {port} (PID {proc.pid})")
            time.sleep(2)  # give each server a moment to bind its port
220
+ def stop_instances(self):
221
+ global INSTANCE_PROCESSES
222
+ for proc in INSTANCE_PROCESSES:
223
+ proc.terminate()
224
+ INSTANCE_PROCESSES.clear()
225
+ print("All instances stopped.")
226
+
227
+ # ------------------------------------------------------------------
228
+ # Tokenization helper
229
+ def _tokenize(self, text):
230
+ """Return list of token strings for the given text."""
231
+ url = self.endpoint.replace("/generate", "/tokenize")
232
+ payload = {"model": self.model, "prompt": text}
233
+ try:
234
+ req = urllib.request.Request(
235
+ url,
236
+ data=json.dumps(payload).encode('utf-8'),
237
+ headers={'Content-Type': 'application/json'},
238
+ method='POST'
239
+ )
240
+ with urllib.request.urlopen(req, timeout=10) as resp:
241
+ data = json.loads(resp.read())
242
+ return data.get("tokens", [])
243
+ except Exception as e:
244
+ if DEBUG:
245
+ print(f"[DEBUG] Tokenization failed: {e}", file=sys.stderr)
246
+ return []
247
+
248
+ def _count_tokens(self, text):
249
+ return len(self._tokenize(text))
250
+
251
+ # ------------------------------------------------------------------
252
+ # Speculative decoding (unchanged)
253
+ def _draft_gen_ollama(self, host, prompt, batch_size):
254
+ if not OLLAMA_CLIENT_AVAILABLE:
255
+ raise RuntimeError("Ollama Python client not installed.")
256
+ client = ollama.Client(host=host)
257
+ resp = client.generate(
258
+ model=self.model,
259
+ prompt=prompt,
260
+ options={'num_predict': batch_size, 'temperature': 0.6}
261
+ )
262
+ return resp['response']
263
+
264
+ def generate_with_ssd(self, prompt, max_tokens=100):
265
+ """
266
+ Speculative decoding:
267
+ - 3 drafters generate parallel drafts (each a string of batch_size tokens)
268
+ - Verifier checks the last token of the longest draft
269
+ """
270
+ if not self.multi_instance or not self.multi_instance.get("enabled"):
271
+ return self.ask_ollama(prompt)
272
+
273
+ if not OLLAMA_CLIENT_AVAILABLE:
274
+ print("⚠️ Ollama client not available – falling back to single instance.")
275
+ return self.ask_ollama(prompt)
276
+
277
+ ports = self.multi_instance.get("ports")
278
+ if len(ports) < 4:
279
+ raise RuntimeError("Need at least 4 ports for SSD")
280
+
281
+ output = ""
282
+ current_prompt = prompt
283
+ tokens_generated = 0
284
+
285
+ while tokens_generated < max_tokens:
286
+ drafts = [None] * 3
287
+ threads = []
288
+
289
+ def draft_worker(idx):
290
+ host = f'http://127.0.0.1:{ports[idx]}'
291
+ try:
292
+ drafts[idx] = self._draft_gen_ollama(host, current_prompt, DRAFT_BATCH_SIZE)
293
+ except Exception as e:
294
+ if DEBUG:
295
+ print(f"[DEBUG] Drafter {idx} failed: {e}", file=sys.stderr)
296
+
297
+ for i in range(3):
298
+ t = threading.Thread(target=draft_worker, args=(i,))
299
+ t.start()
300
+ threads.append(t)
301
+
302
+ for t in threads:
303
+ t.join()
304
+
305
+ valid_drafts = [d for d in drafts if d]
306
+ if not valid_drafts:
307
+ return self.ask_ollama(prompt)
308
+
309
+ best_draft = max(valid_drafts, key=len)
310
+
311
+ draft_tokens = self._tokenize(best_draft)
312
+ if not draft_tokens:
313
+ accepted = best_draft
314
+ else:
315
+ last_draft_token = draft_tokens[-1]
316
+ verify_host = f'http://127.0.0.1:{ports[3]}'
317
+ vclient = ollama.Client(host=verify_host)
318
+ prompt_with_draft = current_prompt + best_draft[:-len(last_draft_token)]
319
+ try:
320
+ vresp = vclient.generate(
321
+ model=self.model,
322
+ prompt=prompt_with_draft,
323
+ options={'num_predict': 1}
324
+ )
325
+ verifier_text = vresp['response']
326
+ except Exception as e:
327
+ if DEBUG:
328
+ print(f"[DEBUG] Verifier failed: {e}", file=sys.stderr)
329
+ accepted = best_draft
330
+ else:
331
+ verifier_tokens = self._tokenize(verifier_text)
332
+ if verifier_tokens and verifier_tokens[0] == last_draft_token:
333
+ accepted = best_draft
334
+ else:
335
+ accepted = verifier_text
336
+ if DEBUG:
337
+ print("Grok twist: Draft rejected – too boring!")
338
+
339
+ output += accepted
340
+ current_prompt += accepted
341
+ tokens_generated += self._count_tokens(accepted)
342
+
343
+ return output
344
+
345
+ # ------------------------------------------------------------------
346
+ # Persistent queue (unchanged except adding scheduler integration)
347
    def _init_queue(self):
        """Choose the task-queue backend.

        Prefers Redis when configured and reachable; otherwise falls back to
        an in-memory queue.Queue seeded from task_queue.json (persisted by a
        previous run).
        """
        global USE_REDIS, REDIS_CLIENT, TASK_QUEUE, QUEUE_NAME
        qcfg = self.queue_config
        if qcfg and qcfg.get("type") == "redis" and REDIS_AVAILABLE:
            try:
                REDIS_CLIENT = redis.Redis(
                    host=qcfg.get("redis_host", "localhost"),
                    port=qcfg.get("redis_port", 6379),
                    db=0
                )
                REDIS_CLIENT.ping()  # fail fast when the server is unreachable
                USE_REDIS = True
                QUEUE_NAME = qcfg.get("queue_name", "grok_tasks")
                print("Using Redis queue.")
            except Exception as e:
                print(f"Redis unavailable, falling back to file queue: {e}")
                USE_REDIS = False
        else:
            USE_REDIS = False

        if not USE_REDIS:
            TASK_QUEUE = queue.Queue()
            # Re-load any tasks persisted by a previous run.
            qfile = self.workspace / "task_queue.json"
            if qfile.exists():
                try:
                    with open(qfile) as f:
                        tasks = json.load(f)
                    for t in tasks:
                        TASK_QUEUE.put(t)
                except Exception:
                    # Corrupt persistence file: start with an empty queue.
                    pass
379
+ def _save_file_queue(self):
380
+ if USE_REDIS:
381
+ return
382
+ qfile = self.workspace / "task_queue.json"
383
+ tasks = list(TASK_QUEUE.queue)
384
+ try:
385
+ with open(qfile, 'w') as f:
386
+ json.dump(tasks, f)
387
+ except Exception:
388
+ pass
389
+
390
+ def add_task(self, prompt):
391
+ task = {"id": str(time.time()), "prompt": prompt}
392
+ if USE_REDIS:
393
+ REDIS_CLIENT.rpush(QUEUE_NAME, json.dumps(task))
394
+ else:
395
+ TASK_QUEUE.put(task)
396
+ self._save_file_queue()
397
+ print(f"Task {task['id']} added to queue.")
398
+
399
    def process_queue(self):
        """Worker loop: pop tasks and run them until the event is cleared.

        Each task's speculative-decoding result is written to
        task_<id>.out in the workspace. Failures are printed, not retried.
        """
        print("Queue processor started.")
        while QUEUE_PROCESSOR_EVENT.is_set():
            task = None
            if USE_REDIS:
                data = REDIS_CLIENT.lpop(QUEUE_NAME)
                if data:
                    task = json.loads(data)
            else:
                try:
                    task = TASK_QUEUE.get(timeout=1)
                except queue.Empty:
                    pass

            if task:
                print(f"Processing task {task['id']}: {task['prompt']}")
                try:
                    result = self.generate_with_ssd(task['prompt'], max_tokens=150)
                    out_file = self.workspace / f"task_{task['id']}.out"
                    with open(out_file, 'w') as f:
                        f.write(result)
                    print(f"Task {task['id']} completed -> {out_file}")
                except Exception as e:
                    print(f"Task {task['id']} failed: {e}")
            else:
                time.sleep(1)  # idle backoff (the Redis path has no blocking pop)
427
+ def start_queue_processor(self):
428
+ QUEUE_PROCESSOR_EVENT.set()
429
+ thr = threading.Thread(target=self.process_queue, daemon=True)
430
+ thr.start()
431
+
432
+ def stop_queue_processor(self):
433
+ QUEUE_PROCESSOR_EVENT.clear()
434
+
435
+ # ------------------------------------------------------------------
436
+ # Scheduler (new)
437
+ def _scheduler_loop(self):
438
+ """Background thread that checks scheduled jobs and runs due ones."""
439
+ print("Scheduler started.")
440
+ while SCHEDULER_EVENT.is_set():
441
+ self._run_due_jobs()
442
+ time.sleep(SCHEDULER_INTERVAL)
443
+
444
    def _run_due_jobs(self):
        """Execute every job in scheduled_jobs.json whose due time has passed.

        Executed one-time jobs are dropped from the file; jobs whose time
        spec cannot be parsed (including "every ..." recurring specs) stay
        pending. Read/write errors abort the pass without raising.
        """
        if not SCHEDULED_JOBS_FILE.exists():
            return
        try:
            with open(SCHEDULED_JOBS_FILE) as f:
                jobs = json.load(f)
        except Exception as e:
            print(f"Scheduler error reading jobs: {e}")
            return

        now = time.time()
        still_pending = []
        for job in jobs:
            due_time = self._parse_time_spec(job.get("time_spec"), job.get("created"))
            if due_time is None:
                # If parsing fails, keep it (maybe it's a recurring spec we can't handle yet)
                still_pending.append(job)
                continue

            if now >= due_time:
                cmd = job.get("command")
                print(f"Scheduler executing: {cmd}")
                try:
                    # SECURITY: shell=True runs whatever string is stored in
                    # the jobs file — only schedule commands from trusted
                    # sources; never let model output write this file directly.
                    subprocess.Popen(cmd, shell=True)
                except Exception as e:
                    print(f"Scheduler execution error: {e}")
                # One-time jobs are not re-added after execution. Recurring
                # jobs would need a next-run computation (not implemented).
            else:
                still_pending.append(job)

        # Write back only pending jobs (ones not run yet or recurring)
        try:
            with open(SCHEDULED_JOBS_FILE, 'w') as f:
                json.dump(still_pending, f, indent=2)
        except Exception as e:
            print(f"Scheduler error writing jobs: {e}")
486
+ def _parse_time_spec(self, time_spec, created):
487
+ """
488
+ Convert a time specification like "in 5 minutes" to a timestamp.
489
+ Returns None if parsing fails or if it's a recurring spec.
490
+ For now, we only handle simple relative times using dateparser.
491
+ """
492
+ if not time_spec:
493
+ return None
494
+ # If it starts with "every", we ignore (recurring not implemented yet)
495
+ if time_spec.lower().startswith("every"):
496
+ return None
497
+ if DATEPARSER_AVAILABLE:
498
+ # Use dateparser to parse relative times
499
+ parsed = dateparser.parse(time_spec, settings={'RELATIVE_BASE': created})
500
+ if parsed:
501
+ return parsed.timestamp()
502
+ else:
503
+ # Fallback: try to parse "in X minutes" manually (very basic)
504
+ parts = time_spec.lower().split()
505
+ if len(parts) >= 3 and parts[0] == "in" and parts[2] in ("minute", "minutes"):
506
+ try:
507
+ minutes = int(parts[1])
508
+ return created + minutes * 60
509
+ except ValueError:
510
+ pass
511
+ return None
512
+
513
+ def start_scheduler(self):
514
+ global SCHEDULER_THREAD, SCHEDULER_EVENT
515
+ if SCHEDULER_THREAD and SCHEDULER_THREAD.is_alive():
516
+ return
517
+ SCHEDULER_EVENT.set()
518
+ SCHEDULER_THREAD = threading.Thread(target=self._scheduler_loop, daemon=True)
519
+ SCHEDULER_THREAD.start()
520
+
521
+ def stop_scheduler(self):
522
+ SCHEDULER_EVENT.clear()
523
+ if SCHEDULER_THREAD:
524
+ SCHEDULER_THREAD.join(timeout=2)
525
+
526
+ # ------------------------------------------------------------------
527
+ # Core methods for interacting with Python tools
528
    def call_multitool(self, action, **kwargs):
        """Invoke the companion multitool script with a JSON payload on stdin.

        Returns the tool's parsed JSON output, or an {"error": ...} dict on
        non-zero exit, timeout, or malformed output. Never raises.
        """
        payload = {"action": action, **kwargs}
        if DEBUG:
            print(f"[DEBUG] Calling Python tool: {action}", file=sys.stderr)
        try:
            proc = subprocess.run(
                [sys.executable, str(self.multitool)],
                input=json.dumps(payload),
                capture_output=True,
                text=True,
                timeout=30
            )
            if proc.returncode != 0:
                return {"error": f"Python tool exited with code {proc.returncode}: {proc.stderr}"}
            return json.loads(proc.stdout)
        except subprocess.TimeoutExpired:
            return {"error": "Python tool timeout"}
        except json.JSONDecodeError:
            # proc is always bound here: decoding only happens after run() returned.
            self.log_error("JSON decode error from Python tool", proc.stdout)
            return {"error": "Invalid JSON from Python tool"}
        except Exception as e:
            self.log_error(str(e))
            return {"error": str(e)}

    def log_error(self, msg, trace=""):
        """Record an error via the multitool's log_error action (best-effort)."""
        self.call_multitool("log_error", msg=msg, trace=trace)
555
+ def build_system_prompt(self):
556
+ tools_desc = ""
557
+ for t in self.python_tools:
558
+ params = t.get("parameters", {})
559
+ tools_desc += f"- {t['name']}: {t['description']} (parameters: {json.dumps(params)})\n"
560
+ return f"""You are PygmyClaw, a compact AI assistant with access to these tools:
561
+ {tools_desc}
562
+
563
+ To use a tool, respond with a JSON object:
564
+ {{"tool": "tool_name", "parameters": {{"param1": "value", ...}}}}
565
+
566
+ Otherwise, respond normally with plain text.
567
+ """
568
+
569
    def ask_ollama(self, user_input, system=None):
        """Send one non-streaming generate request to Ollama.

        Returns the model's text, or a human-readable error string on
        failure — callers never see an exception. *system* overrides the
        default system prompt when given.
        """
        payload = {
            "model": self.model,
            "prompt": user_input,
            "system": system if system else self.system_prompt,
            "stream": False,
            "options": {"temperature": 0.7, "num_predict": 512}
        }
        data = json.dumps(payload).encode('utf-8')
        req = urllib.request.Request(
            self.endpoint,
            data=data,
            headers={'Content-Type': 'application/json'},
            method='POST'
        )
        if DEBUG:
            print(f"[DEBUG] Ollama request: {payload}", file=sys.stderr)
        try:
            with urllib.request.urlopen(req, timeout=300) as resp:
                response_data = json.loads(resp.read())
                return response_data.get("response", "").strip()
        except urllib.error.HTTPError as e:
            error_body = e.read().decode()
            self.log_error(f"Ollama HTTP {e.code}", error_body)
            return f"Ollama error: {e.code} - {error_body}"
        except urllib.error.URLError as e:
            reason = str(e.reason) if hasattr(e, 'reason') else str(e)
            self.log_error(f"Ollama URL error: {reason}")
            return f"Error contacting Ollama (URL error): {reason}"
        except socket.timeout:
            # NOTE: urllib may also surface timeouts wrapped in URLError; this
            # branch catches raw socket timeouts during the body read.
            self.log_error("Ollama timeout")
            return "Error: Ollama request timed out after 300 seconds."
        except Exception as e:
            self.log_error(str(e))
            return f"Error contacting Ollama: {e}"
605
+ def extract_json(self, text):
606
+ text = text.strip()
607
+ start = text.find('{')
608
+ if start == -1:
609
+ return None
610
+ brace_count = 0
611
+ for i in range(start, len(text)):
612
+ if text[i] == '{':
613
+ brace_count += 1
614
+ elif text[i] == '}':
615
+ brace_count -= 1
616
+ if brace_count == 0:
617
+ candidate = text[start:i+1]
618
+ try:
619
+ return json.loads(candidate)
620
+ except json.JSONDecodeError:
621
+ return None
622
+ return None
623
+
624
    def run_tool_if_needed(self, user_input):
        """One chat turn: ask the model, execute a tool call if it emits one,
        then ask the model again to phrase a final answer from the result.

        Returns the model's plain-text reply when no tool call is detected.
        """
        print("PygmyClaw> Thinking...", end='', flush=True)
        response = self.ask_ollama(user_input)
        print("\r" + " "*30 + "\r", end='', flush=True)  # erase the status line

        cmd = self.extract_json(response)
        if cmd and isinstance(cmd, dict):
            tool = cmd.get("tool")
            params = cmd.get("parameters", {})
            if tool and isinstance(params, dict):
                python_names = {t['name'] for t in self.python_tools}
                if tool in python_names:
                    result = self.call_multitool(tool, **params)
                else:
                    result = {"error": f"Unknown tool '{tool}'"}
                # Second round-trip: let the model summarize the tool output
                # (including error dicts, so the user hears about failures).
                followup = (
                    f"User asked: {user_input}\n"
                    f"You used tool '{tool}' with result:\n{json.dumps(result, indent=2)}\n"
                    f"Now provide a helpful response to the user based on that result."
                )
                final = self.ask_ollama(followup, system=self.system_prompt)
                return final

        return response
649
+ def repl(self):
650
+ print(f"🐍 PygmyClaw (model: {self.model}, workspace: {self.workspace}) – Type /help for commands.")
651
+ while True:
652
+ try:
653
+ user_input = input(">> ").strip()
654
+ if not user_input:
655
+ continue
656
+ if user_input.lower() in ("/exit", "/q"):
657
+ break
658
+ if user_input == "/help":
659
+ self.show_help()
660
+ continue
661
+ response = self.run_tool_if_needed(user_input)
662
+ print(f"Claw: {response}")
663
+ except KeyboardInterrupt:
664
+ break
665
+ except Exception as e:
666
+ self.log_error(str(e))
667
+ print(f"Error: {e}")
668
+
669
+ def show_help(self):
670
+ all_tools = [t['name'] for t in self.python_tools]
671
+ try:
672
+ sysinfo = self.call_multitool("sys_info")
673
+ os_name = sysinfo.get("os", "unknown")
674
+ except Exception:
675
+ os_name = "unknown"
676
+ print("\n" + "="*50)
677
+ print(f"🐍 PygmyClaw – {os_name}")
678
+ print("="*50)
679
+ print("Built‑in commands:")
680
+ print(" /help Show this menu")
681
+ print(" /exit, /q Quit")
682
+ print("\nAvailable tools:")
683
+ for t in sorted(all_tools):
684
+ print(f" - {t}")
685
+ print("="*50)
686
+
687
+ # ----------------------------------------------------------------------
688
+ # CLI entry point with subcommands (updated with scheduler commands)
689
def main():
    """Command-line entry point.

    Subcommands: start (launch Ollama instances), stop, generate (SSD text
    generation), queue (add/process/status), scheduler (start/stop/status).
    With no subcommand, drops into the interactive REPL.
    """
    import argparse
    parser = argparse.ArgumentParser(description="PygmyClaw with speculative decoding and scheduler")
    subparsers = parser.add_subparsers(dest="command", help="Subcommands")

    sp = subparsers.add_parser("start", help="Start 4 Ollama instances")
    sp.add_argument("--background", action="store_true", help="Run queue processor in background")

    subparsers.add_parser("stop", help="Stop all instances")

    sp = subparsers.add_parser("generate", help="Generate text using speculative decoding")
    sp.add_argument("prompt", type=str, help="Input prompt")
    sp.add_argument("--max-tokens", type=int, default=100, help="Max tokens to generate")

    sp = subparsers.add_parser("queue", help="Queue operations")
    sp.add_argument("action", choices=["add", "process", "status"], help="add a task, start processor, or show status")
    sp.add_argument("prompt", nargs="?", help="Prompt to add (for 'add')")

    sp = subparsers.add_parser("scheduler", help="Scheduler operations")
    sp.add_argument("action", choices=["start", "stop", "status"], help="Start, stop, or show status of scheduler")

    subparsers.add_parser("repl", help="Start interactive REPL (default)")

    args = parser.parse_args()

    # Constructing the agent loads config, verifies Ollama, and may start
    # background workers as a side effect (see PygmyClaw.__init__).
    agent = PygmyClaw()

    if args.command == "start":
        agent.start_instances()
        if args.background:
            print("Queue processor is running (background).")
        else:
            # Foreground mode: block until Ctrl+C, then clean up instances.
            print("Instances started. Use Ctrl+C to stop.")
            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                agent.stop_instances()

    elif args.command == "stop":
        agent.stop_instances()
        agent.stop_queue_processor()
        agent.stop_scheduler()

    elif args.command == "generate":
        result = agent.generate_with_ssd(args.prompt, max_tokens=args.max_tokens)
        print(result)

    elif args.command == "queue":
        if args.action == "add":
            if not args.prompt:
                print("Error: prompt required for 'add'")
                sys.exit(1)
            agent.add_task(args.prompt)
        elif args.action == "process":
            # Runs the worker loop in the foreground (blocking).
            agent.process_queue()
        elif args.action == "status":
            if USE_REDIS:
                length = REDIS_CLIENT.llen(QUEUE_NAME)
                print(f"Redis queue '{QUEUE_NAME}' has {length} tasks.")
            else:
                length = TASK_QUEUE.qsize()
                print(f"In‑memory queue has {length} tasks.")
        else:
            print("Unknown queue action")

    elif args.command == "scheduler":
        if args.action == "start":
            agent.start_scheduler()
            print("Scheduler started.")
        elif args.action == "stop":
            agent.stop_scheduler()
            print("Scheduler stopped.")
        elif args.action == "status":
            if SCHEDULER_THREAD and SCHEDULER_THREAD.is_alive():
                print("Scheduler is running.")
            else:
                print("Scheduler is not running.")
        else:
            print("Unknown scheduler action")

    else:
        agent.repl()

if __name__ == "__main__":
    main()
pygmyclaw_multitool.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ PygmyClaw Multitool – Contains the actual tool implementations.
4
+ Now with generic dispatcher, heartbeat, file I/O, and scheduler tools.
5
+ """
6
+ import json
7
+ import sys
8
+ import os
9
+ import time
10
+ import inspect
11
+ import platform
12
+ from pathlib import Path
13
+
14
+ # Optional dependencies
15
+ try:
16
+ import psutil
17
+ PSUTIL_AVAILABLE = True
18
+ except ImportError:
19
+ PSUTIL_AVAILABLE = False
20
+
21
+ SCRIPT_DIR = Path(__file__).parent.resolve()
22
+ ERROR_LOG = SCRIPT_DIR / "error_log.json"
23
+ MAX_LOG_ENTRIES = 1000
24
+ SCHEDULED_JOBS_FILE = SCRIPT_DIR / "scheduled_jobs.json"
25
+
26
+ # ----------------------------------------------------------------------
27
+ # Tool definitions
28
+ TOOLS = {
29
+ "list_tools_detailed": {
30
+ "name": "list_tools_detailed",
31
+ "description": "List all available tools with their descriptions and parameters.",
32
+ "parameters": {},
33
+ "func": "do_list_tools"
34
+ },
35
+ "sys_info": {
36
+ "name": "sys_info",
37
+ "description": "Get system information (OS, Python version, etc.).",
38
+ "parameters": {},
39
+ "func": "do_sys_info"
40
+ },
41
+ "log_error": {
42
+ "name": "log_error",
43
+ "description": "Log an error message to the error log.",
44
+ "parameters": {
45
+ "msg": "string",
46
+ "trace": "string (optional)"
47
+ },
48
+ "func": "do_log_error"
49
+ },
50
+ "echo": {
51
+ "name": "echo",
52
+ "description": "Echo the input text (for testing).",
53
+ "parameters": {"text": "string"},
54
+ "func": "do_echo"
55
+ },
56
+ "heartbeat": {
57
+ "name": "heartbeat",
58
+ "description": "Get system health info: CPU, memory, disk, uptime.",
59
+ "parameters": {},
60
+ "func": "do_heartbeat"
61
+ },
62
+ "file_read": {
63
+ "name": "file_read",
64
+ "description": "Read a file from the workspace.",
65
+ "parameters": {"path": "string"},
66
+ "func": "do_file_read"
67
+ },
68
+ "file_write": {
69
+ "name": "file_write",
70
+ "description": "Write content to a file (mode: 'w' overwrite, 'a' append).",
71
+ "parameters": {"path": "string", "content": "string", "mode": "string (optional)"},
72
+ "func": "do_file_write"
73
+ },
74
+ "schedule_task": {
75
+ "name": "schedule_task",
76
+ "description": "Schedule a command to run at a specific time or interval. Time format: 'in 5 minutes', 'every day at 10:00', etc. (uses dateparser if installed, otherwise simple timestamps).",
77
+ "parameters": {
78
+ "command": "string",
79
+ "time_spec": "string",
80
+ "job_id": "string (optional)"
81
+ },
82
+ "func": "do_schedule_task"
83
+ },
84
+ "list_scheduled": {
85
+ "name": "list_scheduled",
86
+ "description": "List all scheduled jobs.",
87
+ "parameters": {},
88
+ "func": "do_list_scheduled"
89
+ },
90
+ "remove_scheduled": {
91
+ "name": "remove_scheduled",
92
+ "description": "Remove a scheduled job by its ID.",
93
+ "parameters": {"job_id": "string"},
94
+ "func": "do_remove_scheduled"
95
+ }
96
+ }
97
+
98
+ # ----------------------------------------------------------------------
99
+ # Tool implementations
100
+
101
def do_list_tools():
    """Return the list of available tools with their metadata.

    Returns:
        dict: ``{"tools": [...]}`` where each item carries the tool's
        ``name``, ``description`` and ``parameters``.  The internal
        ``func`` mapping is deliberately not exposed to callers.
    """
    # Build the public view with a comprehension instead of a manual
    # append loop (same output, idiomatic form).
    return {
        "tools": [
            {
                "name": name,
                "description": info["description"],
                "parameters": info["parameters"],
            }
            for name, info in TOOLS.items()
        ]
    }
111
+
112
def do_sys_info():
    """Return basic information about the host system.

    Returns:
        dict: OS name and release, Python version, and hostname.
    """
    pairs = (
        ("os", platform.system()),
        ("os_release", platform.release()),
        ("python_version", platform.python_version()),
        ("hostname", platform.node()),
    )
    return dict(pairs)
120
+
121
+ def do_log_error(msg, trace=""):
122
+ """Append an error to the error log file."""
123
+ entry = {
124
+ "timestamp": time.time(),
125
+ "msg": msg,
126
+ "trace": trace
127
+ }
128
+ try:
129
+ if ERROR_LOG.exists():
130
+ with open(ERROR_LOG) as f:
131
+ log = json.load(f)
132
+ else:
133
+ log = []
134
+ log.append(entry)
135
+ if len(log) > MAX_LOG_ENTRIES:
136
+ log = log[-MAX_LOG_ENTRIES:]
137
+ with open(ERROR_LOG, 'w') as f:
138
+ json.dump(log, f, indent=2)
139
+ return {"status": "logged"}
140
+ except Exception as e:
141
+ return {"error": f"Failed to write log: {e}"}
142
+
143
def do_echo(text):
    """Echo *text* back to the caller (round-trip test helper)."""
    response = {"echo": text}
    return response
146
+
147
def do_heartbeat():
    """Report system health: CPU load, memory, disk usage and uptime.

    Detailed statistics require psutil; without it only the platform
    string and an explanatory error message are returned.
    """
    info = {}
    if not PSUTIL_AVAILABLE:
        info["error"] = "psutil not installed – install for detailed stats"
    else:
        try:
            # NOTE: interval=1 blocks for one second to sample CPU load.
            info["cpu_percent"] = psutil.cpu_percent(interval=1)
            vm = psutil.virtual_memory()
            info["memory"] = {
                "total": vm.total,
                "available": vm.available,
                "percent": vm.percent,
            }
            du = psutil.disk_usage('/')
            info["disk"] = {
                "total": du.total,
                "used": du.used,
                "free": du.free,
                "percent": du.percent,
            }
            info["uptime_seconds"] = time.time() - psutil.boot_time()
        except Exception as e:
            info["error"] = f"psutil error: {e}"
    info["platform"] = platform.platform()
    return info
173
+
174
def _safe_path(path):
    """Resolve *path* against the workspace root and reject escapes.

    Returns the resolved ``Path`` when it lies inside SCRIPT_DIR,
    otherwise ``None`` (e.g. ``..`` traversal or an absolute path
    outside the workspace).
    """
    candidate = (SCRIPT_DIR / path).resolve()
    try:
        candidate.relative_to(SCRIPT_DIR)
    except ValueError:
        return None
    return candidate
182
+
183
def do_file_read(path):
    """Read a workspace file and return its contents.

    Args:
        path: Path relative to the workspace root.

    Returns:
        dict: ``{"content", "path"}`` on success, or ``{"error": ...}``
        when the path escapes the workspace or cannot be read.
    """
    target = _safe_path(path)
    if not target:
        return {"error": "Path not allowed (outside workspace)"}
    try:
        text = target.read_text(encoding='utf-8')
    except Exception as e:
        return {"error": str(e)}
    return {"content": text, "path": str(target)}
194
+
195
def do_file_write(path, content, mode="w"):
    """Write *content* to a file inside the workspace.

    Args:
        path: Path relative to the workspace root.
        content: Text to write.
        mode: ``"w"`` to overwrite (default) or ``"a"`` to append.

    Returns:
        dict: status payload on success, ``{"error": ...}`` otherwise.
    """
    safe = _safe_path(path)
    if not safe:
        return {"error": "Path not allowed (outside workspace)"}
    if mode not in ("w", "a"):
        return {"error": f"Invalid mode '{mode}'; use 'w' or 'a'"}
    try:
        # Create missing parent directories (still inside the workspace,
        # guaranteed by _safe_path) so writes into new subfolders succeed
        # instead of failing with FileNotFoundError.
        safe.parent.mkdir(parents=True, exist_ok=True)
        with open(safe, mode, encoding='utf-8') as f:
            f.write(content)
        return {"status": "written", "path": str(safe), "mode": mode}
    except Exception as e:
        return {"error": str(e)}
208
+
209
def do_schedule_task(command, time_spec, job_id=None):
    """Persist a scheduled job to the shared jobs file.

    The job is stored verbatim; the agent's scheduler loop interprets
    *time_spec* (e.g. "in 5 minutes", "every day at 10:00") and runs
    *command* when due.

    Args:
        command: Command string for the agent to execute.
        time_spec: Free-form schedule specification (stored raw).
        job_id: Optional explicit ID; auto-generated when omitted.

    Returns:
        dict: ``{"status": "scheduled", "job_id": ...}`` or an error.
    """
    jobs = []
    if SCHEDULED_JOBS_FILE.exists():
        try:
            with open(SCHEDULED_JOBS_FILE) as f:
                jobs = json.load(f)
        except Exception:
            # Corrupt jobs file: start over rather than fail the call.
            jobs = []

    if job_id is None:
        job_id = f"job_{int(time.time())}_{len(jobs)}"
    elif any(j.get("id") == job_id for j in jobs):
        # Duplicate IDs would make jobs indistinguishable and cause
        # remove_scheduled to delete every matching entry at once.
        return {"error": f"Job ID '{job_id}' already exists"}

    job = {
        "id": job_id,
        "command": command,
        "time_spec": time_spec,
        "created": time.time()
    }
    jobs.append(job)
    try:
        with open(SCHEDULED_JOBS_FILE, 'w') as f:
            json.dump(jobs, f, indent=2)
        return {"status": "scheduled", "job_id": job_id}
    except Exception as e:
        return {"error": f"Failed to write jobs file: {e}"}
243
+
244
def do_list_scheduled():
    """Return every scheduled job stored in the jobs file."""
    if not SCHEDULED_JOBS_FILE.exists():
        return {"jobs": []}
    try:
        with open(SCHEDULED_JOBS_FILE) as f:
            return {"jobs": json.load(f)}
    except Exception as e:
        return {"error": f"Failed to read jobs: {e}"}
254
+
255
def do_remove_scheduled(job_id):
    """Delete the scheduled job whose ID matches *job_id*.

    Returns:
        dict: removal status, or an error when the jobs file is
        missing, unreadable, or contains no job with that ID.
    """
    if not SCHEDULED_JOBS_FILE.exists():
        return {"error": "No jobs file"}
    try:
        with open(SCHEDULED_JOBS_FILE) as f:
            jobs = json.load(f)
        remaining = [job for job in jobs if job.get("id") != job_id]
        if len(remaining) == len(jobs):
            return {"error": f"Job ID '{job_id}' not found"}
        with open(SCHEDULED_JOBS_FILE, 'w') as f:
            json.dump(remaining, f, indent=2)
        return {"status": "removed", "job_id": job_id}
    except Exception as e:
        return {"error": f"Failed to remove job: {e}"}
270
+
271
# ----------------------------------------------------------------------
# Map function names (the "func" field in TOOLS) to the actual callables.
# The dispatcher in main() resolves tools through this table so TOOLS
# itself stays JSON-serializable.
FUNC_MAP = {
    "do_list_tools": do_list_tools,
    "do_sys_info": do_sys_info,
    "do_log_error": do_log_error,
    "do_echo": do_echo,
    "do_heartbeat": do_heartbeat,
    "do_file_read": do_file_read,
    "do_file_write": do_file_write,
    "do_schedule_task": do_schedule_task,
    "do_list_scheduled": do_list_scheduled,
    "do_remove_scheduled": do_remove_scheduled,
}
285
+
286
+ # ----------------------------------------------------------------------
287
+ # Generic dispatcher using inspect
288
def main():
    """Dispatch a single tool invocation read as JSON from stdin.

    Expected input: ``{"action": <tool name>, ...parameters...}``.
    The selected implementation's signature is inspected so that only
    the parameters it declares are forwarded; a missing required
    parameter produces an error payload.  The result (or error) is
    printed to stdout as JSON.
    """
    try:
        request = json.loads(sys.stdin.read())
        action = request.get("action")
        if not action:
            print(json.dumps({"error": "No action specified"}))
            return

        tool_info = TOOLS.get(action)
        if not tool_info:
            print(json.dumps({"error": f"Unknown action '{action}'"}))
            return

        func_name = tool_info["func"]
        func = FUNC_MAP.get(func_name)
        if not func:
            print(json.dumps({"error": f"Internal error: unknown function {func_name}"}))
            return

        # Forward only the arguments the implementation declares.
        call_args = {}
        for param in inspect.signature(func).parameters.values():
            if param.name in request:
                call_args[param.name] = request[param.name]
            elif param.default is param.empty:
                # Required parameter absent from the request payload.
                print(json.dumps({"error": f"Missing required parameter '{param.name}'"}))
                return

        print(json.dumps(func(**call_args)))
    except Exception as e:
        print(json.dumps({"error": f"Multitool exception: {e}"}))

if __name__ == "__main__":
    main()