#!/usr/bin/env python3
"""
๐Ÿ–ฅ๏ธ Quillan-Ronin Desktop Agent (Vision + Control Loop)
Version: 2.0 (Hardened Architecture)

Features:
- Object-Oriented State Management
- Token-Optimized Vision Capture (Dynamic Downscaling)
- VLM-Optimized Relative Coordinate Mapping (0.0 to 1.0)
- Failsafe Triggers & Exception Handling
- Enforced JSON Schema Prompting
"""

import subprocess
import time
import json
import base64
import re
from io import BytesIO
from typing import Dict, Any, List

import pyautogui
from PIL import Image
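
# Third-party dependencies (assumed): pyautogui and Pillow >= 9.1
# (Image.Resampling, used below, requires Pillow 9.1+).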

# ๐Ÿ›ก๏ธ WARDEN PROTOCOL: Safety First
# Slam the mouse into any of the four screen corners to raise
# pyautogui.FailSafeException and kill the agent mid-action.
pyautogui.FAILSAFE = True

# =========================
# โš™๏ธ SYSTEM PROMPT
# =========================
SYSTEM_PROMPT = """
You are an autonomous desktop control agent. You receive a screenshot of the user's desktop and a goal.
Your objective is to determine the next immediate action to achieve the goal.

CRITICAL RULES:
1. Coordinate Mapping: Use RELATIVE coordinates from 0.0 to 1.0. 
   (e.g., x: 0.5, y: 0.5 is the exact center of the screen. x: 0.0, y: 0.0 is top-left).
2. You must output ONLY valid, parsable JSON. No markdown wrappers, no explanations outside the JSON.

SCHEMA:
{
  "thought": "Briefly explain your visual analysis and reasoning for the next step.",
  "action": "click" | "type" | "press" | "hotkey" | "exec" | "done",
  "args": {
     // For 'click': "x": float (0.0-1.0), "y": float (0.0-1.0)
     // For 'type': "text": string
     // For 'press': "key": string (e.g., "enter", "tab", "win")
     // For 'hotkey': "keys": ["ctrl", "c"]
     // For 'exec': "command": string
  }
}
"""

# =========================
# ๐Ÿง  AGENT ARCHITECTURE
# =========================

class QuillanDesktopAgent:
    def __init__(self, step_delay: float = 1.5, max_steps: int = 20):
        self.step_delay = step_delay
        self.max_steps = max_steps
        self.history: List[Dict[str, Any]] = []
        
        # Capture environment bounds for relative mapping
        self.screen_width, self.screen_height = pyautogui.size()
        print(f"[*] Agent Initialized. Display bounds mapped: {self.screen_width}x{self.screen_height}")

    # -------------------------
    # ๐Ÿ“ธ VISION LAYER
    # -------------------------
    def capture_vision(self, max_dimension: int = 1024) -> str:
        """
        Captures the screen and downscales it to limit VLM token usage.
        Preserves aspect ratio; the longest side is capped at max_dimension.
        Returns the screenshot as a base64-encoded PNG string.
        """
        img = pyautogui.screenshot()
        
        # Optimization: Downscale for token efficiency
        img.thumbnail((max_dimension, max_dimension), Image.Resampling.LANCZOS)
        
        buffered = BytesIO()
        img.save(buffered, format="PNG", optimize=True)
        encoded = base64.b64encode(buffered.getvalue()).decode('utf-8')
        return encoded

    # -------------------------
    # ๐Ÿ–ฑ๏ธ ACTION SPACE
    # -------------------------
    def execute_action(self, action: str, args: Dict[str, Any]) -> str:
        """Routes and executes physical actions with safety bounds."""
        try:
            if action == "click":
                # Translate relative VLM coordinates (0.0-1.0) to absolute pixels
                rel_x = float(args.get("x", 0.5))
                rel_y = float(args.get("y", 0.5))
                
                # Clamp between 0.0 and 1.0 to prevent out-of-bounds
                rel_x = max(0.0, min(1.0, rel_x))
                rel_y = max(0.0, min(1.0, rel_y))
                
                abs_x = int(rel_x * self.screen_width)
                abs_y = int(rel_y * self.screen_height)
                
                pyautogui.click(abs_x, abs_y)
                return f"Success: Clicked relative ({rel_x:.2f}, {rel_y:.2f}) -> absolute [{abs_x}, {abs_y}]"

            elif action == "type":
                text = str(args.get("text", ""))
                pyautogui.write(text, interval=0.02)
                return f"Success: Typed '{text}'"

            elif action == "press":
                key = str(args.get("key", ""))
                pyautogui.press(key)
                return f"Success: Pressed '{key}'"

            elif action == "hotkey":
                keys = args.get("keys", [])
                pyautogui.hotkey(*keys)
                return f"Success: Triggered hotkey {keys}"

            elif action == "exec":
                # โš ๏ธ WARDEN WARNING: Ensure execution environment is sandboxed
                command = str(args.get("command", ""))
                print(f"โš ๏ธ SECURITY ALERT: Executing shell command: {command}")
                result = subprocess.run(command, shell=True, capture_output=True, text=True, timeout=10)
                output = result.stdout[:1000] + ("..." if len(result.stdout) > 1000 else "")
                err = result.stderr[:500]
                return f"Success: Executed. Out: {output} | Err: {err}"

            elif action == "done":
                return "Agent declared task complete."

            else:
                return f"Error: Unknown action '{action}'"
                
        except Exception as e:
            return f"Error during execution of {action}: {str(e)}"

    # -------------------------
    # ๐Ÿง  MODEL INTERFACE
    # -------------------------
    def _call_vlm(self, goal: str, image_b64: str) -> Dict[str, Any]:
        """
        Stub for your actual Vision-Language Model API call (OpenAI, Anthropic, Gemini, etc.).
        """
        # Construct the payload structure you would send to the API:
        # messages = [
        #    {"role": "system", "content": SYSTEM_PROMPT},
        #    {"role": "user", "content": [
        #        {"type": "text", "text": f"Goal: {goal}\nHistory: {json.dumps(self.history[-3:])}"},
        #        {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_b64}"}}
        #    ]}
        # ]
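        #
        # A minimal sketch of the real call, assuming the OpenAI Python SDK (v1+);
        # "gpt-4o" is a placeholder -- any vision-capable chat model works:
        #   from openai import OpenAI
        #   client = OpenAI()  # reads OPENAI_API_KEY from the environment
        #   response = client.chat.completions.create(model="gpt-4o", messages=messages)
        #   raw_response = response.choices[0].message.content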
        
        # โš ๏ธ MOCK RESPONSE FOR DEMONSTRATION
        if not self.history:
            raw_response = '{"thought": "Opening start menu.", "action": "press", "args": {"key": "win"}}'
        elif len(self.history) == 1:
            raw_response = '{"thought": "Searching for browser.", "action": "type", "args": {"text": "chrome"}}'
        elif len(self.history) == 2:
            raw_response = '{"thought": "Launching application.", "action": "press", "args": {"key": "enter"}}'
        else:
            raw_response = '{"thought": "Task complete.", "action": "done", "args": {}}'
            
        # Robust JSON parsing
        return self._parse_json_response(raw_response)

    def _parse_json_response(self, text: str) -> Dict[str, Any]:
        """Extracts and parses JSON, stripping markdown block wrappers if the LLM hallucinated them."""
        try:
            # Look for JSON block
            match = re.search(r'\{.*\}', text.strip(), re.DOTALL)
            if match:
                return json.loads(match.group(0))
            return json.loads(text)
        except json.JSONDecodeError:
            print(f"โŒ Failed to parse LLM response as JSON: {text}")
            # Return a safe fallback action to trigger a retry
            return {"action": "error", "args": {}, "thought": "Failed to parse JSON."}

    # -------------------------
    # ๐Ÿ” CORE LOOP
    # -------------------------
    def run(self, goal: str):
        print(f"\n๐ŸŽฏ ENGAGING AGENT GOAL: {goal}\n" + "="*40)

        for step in range(self.max_steps):
            print(f"\n--- ๐Ÿ”„ STEP {step+1}/{self.max_steps} ---")

            # ๐Ÿ‘๏ธ Observe
            print("[*] Capturing spatial data...")
            screen_b64 = self.capture_vision()

            # ๐Ÿง  Decide
            print("[*] Awaiting VLM decision...")
            decision = self._call_vlm(goal, screen_b64)

            action = decision.get("action", "error")
            args = decision.get("args", {})
            thought = decision.get("thought", "No thought provided.")

            print(f"๐Ÿง  Thought: {thought}")
            print(f"โš™๏ธ Action : {action} | Args: {args}")

            # ๐Ÿ–ฑ๏ธ Act
            if action == "done":
                print("\nโœ… GOAL ACHIEVED. Disengaging agent loop.")
                break
            
            if action == "error":
                print("โš ๏ธ Skipping execution due to malformed LLM output.")
                result = "Failed to parse instruction."
            else:
                result = self.execute_action(action, args)
                print(f"๐Ÿ“ค Result : {result}")

            # ๐Ÿ“ Record
            self.history.append({
                "step": step + 1,
                "action": action,
                "result": result
            })

            # Delay to allow UI animations/rendering to complete before next screenshot
            time.sleep(self.step_delay)
            
        else:
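            # for/else: runs only if the loop exhausted max_steps without a 'break'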
            print("\nโš ๏ธ MAX STEPS REACHED. Terminating to prevent infinite loop.")

# =========================
# ๐Ÿš€ ENTRY POINT
# =========================
if __name__ == "__main__":
    agent = QuillanDesktopAgent(step_delay=2.0, max_steps=15)
    
    TARGET_GOAL = "Open a browser and search for 'open source ai agents'"
    
    try:
        agent.run(TARGET_GOAL)
    except pyautogui.FailSafeException:
        print("\n๐Ÿšจ FAILSAFE TRIGGERED! Mouse moved to corner. Agent terminated.")
    except KeyboardInterrupt:
        print("\n๐Ÿ›‘ Manual interrupt received. Agent terminated.")