files
Browse files
app.py
CHANGED
|
@@ -2,93 +2,44 @@ import gradio as gr
|
|
| 2 |
import importlib.util
|
| 3 |
import time
|
| 4 |
import random
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
|
|
|
| 18 |
class MegaWorldEnv:
|
| 19 |
def __init__(self):
|
| 20 |
-
self.start = (1, 1)
|
| 21 |
-
self.goal = (18, 18)
|
| 22 |
-
|
| 23 |
-
# 1. NEW MAZE STRUCTURE
|
| 24 |
self.walls = self._generate_walls()
|
| 25 |
-
|
| 26 |
-
# 2. Hazards
|
| 27 |
self.ice = [(5,y) for y in range(5,15)] + [(15,y) for y in range(5,15)]
|
| 28 |
self.mud = [(x,10) for x in range(2,18)]
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
self.
|
| 32 |
-
random.shuffle(self.traps)
|
| 33 |
-
|
| 34 |
-
# Chargers
|
| 35 |
-
self.chargers = [(18,2),(10,10)]
|
| 36 |
-
|
| 37 |
-
# Enemies
|
| 38 |
-
# We ensure they spawn in valid locations for the new map
|
| 39 |
-
self.enemies = [
|
| 40 |
-
{"pos":[5,5],"type":"patrol","axis":"x","range":(5,10),"dir":1},
|
| 41 |
-
{"pos":[15,5],"type":"patrol","axis":"x","range":(12,17),"dir":1},
|
| 42 |
-
{"pos":[12,12],"type":"hunter", "step": 0},
|
| 43 |
-
{"pos":[16,16],"type":"hunter", "step": 0}
|
| 44 |
-
]
|
| 45 |
random.shuffle(self.enemies)
|
| 46 |
-
|
| 47 |
def _generate_walls(self):
|
| 48 |
-
"""
|
| 49 |
-
New Layout: 'The Fortress'
|
| 50 |
-
A 4-chamber maze with central walls and narrow choke points.
|
| 51 |
-
"""
|
| 52 |
walls = []
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
# Horizontal central wall (y=9)
|
| 61 |
-
for x in range(20):
|
| 62 |
-
if x not in [3, 16]: # Gaps at x=3 and x=16
|
| 63 |
-
walls.append((x, 9))
|
| 64 |
-
|
| 65 |
-
# --- 2. Quadrant Obstacles (Clutter) ---
|
| 66 |
-
# Top-Left Room (Start Zone)
|
| 67 |
-
walls.extend([(4,4), (4,5), (4,6), (5,4), (6,4)])
|
| 68 |
-
|
| 69 |
-
# Top-Right Room (Patrol Zone)
|
| 70 |
-
walls.extend([(14,4), (14,5), (14,6), (15,4), (16,4)])
|
| 71 |
-
|
| 72 |
-
# Bottom-Left Room (Mud Zone)
|
| 73 |
-
walls.extend([(4,14), (4,15), (4,16), (5,14), (6,14)])
|
| 74 |
-
|
| 75 |
-
# Bottom-Right Room (Goal Zone) - The 'Bunker'
|
| 76 |
-
# Protective casing around the goal area to force specific entry
|
| 77 |
-
for i in range(15, 19):
|
| 78 |
-
walls.append((i, 15)) # Bar above goal area
|
| 79 |
-
|
| 80 |
-
walls.extend([(14,14), (13,13)]) # Extra clutter
|
| 81 |
-
|
| 82 |
return walls
|
| 83 |
-
|
| 84 |
def shaped_reward(self, old_pos, new_pos):
|
| 85 |
-
|
| 86 |
-
new_d = abs(new_pos[0]-self.goal[0]) + abs(new_pos[1]-self.goal[1])
|
| 87 |
-
return 3.0 * (old_d - new_d)
|
| 88 |
-
|
| 89 |
def get_radar(self, pos):
|
| 90 |
-
x,y=pos
|
| 91 |
-
radar={}
|
| 92 |
dirs={"up":(x,y+1),"down":(x,y-1),"left":(x-1,y),"right":(x+1,y)}
|
| 93 |
for d,(nx,ny) in dirs.items():
|
| 94 |
info="EMPTY"
|
|
@@ -103,116 +54,150 @@ class MegaWorldEnv:
|
|
| 103 |
if tuple(e["pos"])==(nx,ny): info="ENEMY"
|
| 104 |
radar[d]=RADAR_ENCODING[info]
|
| 105 |
return radar
|
| 106 |
-
|
| 107 |
def update_enemies(self, player_pos):
|
| 108 |
for e in self.enemies:
|
| 109 |
if e["type"]=="patrol":
|
| 110 |
-
e["pos"][0]+=e["dir"]
|
| 111 |
-
# Basic bounce check against walls/map edge
|
| 112 |
-
nx = e["pos"][0]
|
| 113 |
if nx>=e["range"][1] or nx<=e["range"][0] or (nx, e["pos"][1]) in self.walls:
|
| 114 |
-
e["dir"]*=-1
|
| 115 |
-
e["pos"][0]+=e["dir"] # Step back
|
| 116 |
else:
|
| 117 |
-
# DESIGNATED MOVEMENT: Square Patrol
|
| 118 |
path = [(1,0), (1,0), (0,1), (0,1), (-1,0), (-1,0), (0,-1), (0,-1)]
|
| 119 |
move = path[e["step"] % len(path)]
|
| 120 |
-
|
| 121 |
nx, ny = e["pos"][0] + move[0], e["pos"][1] + move[1]
|
| 122 |
-
|
| 123 |
-
# Only move if not hitting a wall (simple collision check)
|
| 124 |
-
if (nx, ny) not in self.walls and 0<=nx<20 and 0<=ny<20:
|
| 125 |
-
e["pos"][0] = nx
|
| 126 |
-
e["pos"][1] = ny
|
| 127 |
-
|
| 128 |
e["step"] += 1
|
| 129 |
-
|
| 130 |
def render(self, player_pos, history, battery, score):
|
| 131 |
-
html="<div style='background:#000;padding:10px;border-radius:12px'>"
|
| 132 |
-
html+=f"<div style='color:white'>π {battery} | π {score
|
| 133 |
-
html+="<div style='display:grid;grid-template-columns:repeat(20,
|
| 134 |
enemy_pos=[tuple(e["pos"]) for e in self.enemies]
|
| 135 |
for y in range(19,-1,-1):
|
| 136 |
for x in range(20):
|
| 137 |
-
pos=(x,y)
|
| 138 |
-
color="#
|
| 139 |
-
if pos in self.walls: color="#555" # Grey Walls
|
| 140 |
elif pos in self.ice: color="#29b6f6"
|
| 141 |
elif pos in self.mud: color="#4e342e"
|
| 142 |
elif pos in history: color="#263238"
|
| 143 |
if pos==self.goal: char="π"; color="#4caf50"
|
| 144 |
if pos in self.chargers: char="β‘"; color="#fdd835"
|
| 145 |
if pos in enemy_pos: char="πΉ"; color="#d500f9"
|
| 146 |
-
if pos==player_pos:
|
| 147 |
-
|
| 148 |
-
color="#2196f3" if battery>20 else "#ff6f00"
|
| 149 |
-
html+=f"<div style='width:22px;height:22px;background:{color};display:flex;align-items:center;justify-content:center'>{char}</div>"
|
| 150 |
html+="</div></div>"
|
| 151 |
return html
|
| 152 |
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
return
|
| 158 |
-
|
| 159 |
-
spec=importlib.util.spec_from_file_location("agent",file.name)
|
| 160 |
-
agent=importlib.util.module_from_spec(spec)
|
| 161 |
-
spec.loader.exec_module(agent)
|
| 162 |
-
|
| 163 |
-
pos=list(env.start)
|
| 164 |
-
battery=2000
|
| 165 |
-
score=0
|
| 166 |
-
history=[]
|
| 167 |
-
|
| 168 |
-
for step in range(3000):
|
| 169 |
-
radar=env.get_radar(pos)
|
| 170 |
-
action=agent.get_action(pos[:],radar,battery)
|
| 171 |
-
dx,dy=[(0,1),(0,-1),(-1,0),(1,0)][action]
|
| 172 |
-
prev_pos=pos[:]
|
| 173 |
-
|
| 174 |
-
nx,ny=pos[0]+dx,pos[1]+dy
|
| 175 |
-
if not (0<=nx<20 and 0<=ny<20) or (nx,ny) in env.walls:
|
| 176 |
-
nx,ny=pos
|
| 177 |
-
pos=[nx,ny]
|
| 178 |
-
|
| 179 |
-
env.update_enemies(pos)
|
| 180 |
-
history.append(tuple(pos))
|
| 181 |
-
|
| 182 |
-
battery-=1
|
| 183 |
-
if tuple(pos) in env.mud: battery-=5
|
| 184 |
-
|
| 185 |
-
reward=env.shaped_reward(tuple(prev_pos),tuple(pos))
|
| 186 |
-
|
| 187 |
-
if prev_pos==pos: reward-=5
|
| 188 |
-
if tuple(pos) in env.traps:
|
| 189 |
-
reward-=10; battery-=10
|
| 190 |
-
|
| 191 |
-
done=False
|
| 192 |
-
if battery<=0 or tuple(pos) in [tuple(e["pos"]) for e in env.enemies]:
|
| 193 |
-
reward-=20; done=True
|
| 194 |
-
if tuple(pos)==env.goal:
|
| 195 |
-
reward+=1000; done=True
|
| 196 |
-
|
| 197 |
-
reward=max(reward,-10)
|
| 198 |
-
score+=reward
|
| 199 |
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
time.sleep(0.05)
|
| 207 |
|
| 208 |
-
|
| 209 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 210 |
with gr.Row():
|
| 211 |
-
game=gr.HTML(MegaWorldEnv().render((1,1),[],100,0))
|
| 212 |
with gr.Column():
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
|
|
|
| 217 |
|
| 218 |
-
|
|
|
|
|
|
| 2 |
import importlib.util
|
| 3 |
import time
|
| 4 |
import random
|
| 5 |
+
import os
|
| 6 |
+
import sys
|
| 7 |
+
import shutil
|
| 8 |
+
import zipfile
|
| 9 |
+
import tempfile
|
| 10 |
+
import uuid
|
| 11 |
+
|
| 12 |
+
# --- PASTE THE FULL MegaWorldEnv CLASS HERE ---
|
| 13 |
+
# (Reuse the class code from previous turns or the game_env.py above)
|
| 14 |
+
# Ensure the .render() method is the full HTML version from your FIRST prompt.
|
| 15 |
+
# ----------------------------------------------
|
| 16 |
+
|
| 17 |
+
# --- COPY OF MegaWorldEnv CLASS FOR CONTEXT ---
# Integer codes returned by MegaWorldEnv.get_radar() for each adjacent tile;
# the uploaded agent receives these values, so the mapping is part of the API.
RADAR_ENCODING = {"EMPTY": 0,"WALL": 1,"GOAL": 2,"ICE": 3,"MUD": 4,"DANGER": 5,"CHARGER": 6,"ENEMY": 7}
|
| 19 |
class MegaWorldEnv:
|
| 20 |
def __init__(self):
    """Build the 20x20 'Fortress' world: walls, hazards, chargers, enemies.

    Trap and enemy list orders are shuffled so their iteration order differs
    between runs (positions themselves are fixed).
    """
    self.start = (1, 1)
    self.goal = (18, 18)
    self.walls = self._generate_walls()

    # Hazards: two vertical ice lanes and one horizontal mud corridor.
    self.ice = [(5, y) for y in range(5, 15)] + [(15, y) for y in range(5, 15)]
    self.mud = [(x, 10) for x in range(2, 18)]

    self.traps = [(3, 3), (8, 8), (12, 12), (17, 17), (9, 10), (11, 10)]
    random.shuffle(self.traps)

    self.chargers = [(18, 2), (10, 10)]

    # Two x-axis patrollers plus two square-walking hunters.
    patrols = [
        {"pos": [5, 5], "type": "patrol", "axis": "x", "range": (5, 10), "dir": 1},
        {"pos": [15, 5], "type": "patrol", "axis": "x", "range": (12, 17), "dir": 1},
    ]
    hunters = [
        {"pos": [12, 12], "type": "hunter", "step": 0},
        {"pos": [16, 16], "type": "hunter", "step": 0},
    ]
    self.enemies = patrols + hunters
    random.shuffle(self.enemies)
|
|
|
|
| 29 |
def _generate_walls(self):
    """Return the 'Fortress' wall list.

    Layout: a vertical wall at x=9 and a horizontal wall at y=9 (each with
    gaps at coordinate 3 and 16) split the map into four rooms; each room
    gets clutter, and a 'bunker' bar shields the goal corner.
    """
    gaps = (3, 16)
    # Central cross — vertical wall first, then horizontal (order preserved).
    walls = [(9, y) for y in range(20) if y not in gaps]
    walls += [(x, 9) for x in range(20) if x not in gaps]
    # Room clutter: top-left, top-right and bottom-left obstacles.
    walls += [
        (4, 4), (4, 5), (4, 6), (5, 4), (6, 4),
        (14, 4), (14, 5), (14, 6), (15, 4), (16, 4),
        (4, 14), (4, 15), (4, 16), (5, 14), (6, 14),
    ]
    # Bunker bar above the goal area, plus extra scattered blocks.
    walls += [(i, 15) for i in range(15, 19)]
    walls += [(14, 14), (13, 13)]
    return walls
|
|
|
|
| 39 |
def shaped_reward(self, old_pos, new_pos):
    """Reward-shaping hook kept for API compatibility with training code.

    The CTF evaluation server deliberately awards no shaping signal, so this
    always returns 0 regardless of the (old_pos, new_pos) transition.
    """
    return 0 # Not needed for evaluation
|
|
|
|
|
|
|
|
|
|
| 41 |
def get_radar(self, pos):
|
| 42 |
+
x,y=pos; radar={}
|
|
|
|
| 43 |
dirs={"up":(x,y+1),"down":(x,y-1),"left":(x-1,y),"right":(x+1,y)}
|
| 44 |
for d,(nx,ny) in dirs.items():
|
| 45 |
info="EMPTY"
|
|
|
|
| 54 |
if tuple(e["pos"])==(nx,ny): info="ENEMY"
|
| 55 |
radar[d]=RADAR_ENCODING[info]
|
| 56 |
return radar
|
|
|
|
| 57 |
def update_enemies(self, player_pos):
    """Advance every enemy by one tick.

    'patrol' enemies sweep along x inside their range and reverse when they
    hit a range limit or a wall; all other enemies ('hunter') walk a fixed
    2-step-per-side square loop, skipping moves blocked by walls or the map
    edge. player_pos is accepted for interface compatibility but unused here.
    """
    square_loop = [(1, 0), (1, 0), (0, 1), (0, 1), (-1, 0), (-1, 0), (0, -1), (0, -1)]
    for foe in self.enemies:
        if foe["type"] == "patrol":
            foe["pos"][0] += foe["dir"]
            cx = foe["pos"][0]
            lo, hi = foe["range"]
            if cx >= hi or cx <= lo or (cx, foe["pos"][1]) in self.walls:
                # Bounce: flip direction and undo the step just taken.
                foe["dir"] *= -1
                foe["pos"][0] += foe["dir"]
        else:
            dx, dy = square_loop[foe["step"] % len(square_loop)]
            tx, ty = foe["pos"][0] + dx, foe["pos"][1] + dy
            if (tx, ty) not in self.walls and 0 <= tx < 20 and 0 <= ty < 20:
                foe["pos"] = [tx, ty]
            foe["step"] += 1
|
|
|
|
| 69 |
def render(self, player_pos, history, battery, score):
    """Return an HTML snapshot of the 20x20 grid with a battery/score HUD.

    Terrain colours (walls/ice/mud/visited) are applied first; entity
    overlays (goal, chargers, enemies, player) are checked afterwards, so a
    later entity check wins both the glyph and the cell colour.
    """
    html = "<div style='background:#000;padding:10px;border-radius:12px;font-family:monospace'>"
    html += f"<div style='color:white;margin-bottom:5px'>π {battery} | π {score}</div>"
    html += "<div style='display:grid;grid-template-columns:repeat(20,20px);gap:1px;width:fit-content;margin:auto'>"
    enemy_pos = [tuple(e["pos"]) for e in self.enemies]
    # Rows are emitted top-down so that y increases upward on screen.
    for row in range(19, -1, -1):
        for col in range(20):
            cell = (col, row)
            color = "#111"
            char = ""
            if cell in self.walls:
                color = "#555"
            elif cell in self.ice:
                color = "#29b6f6"
            elif cell in self.mud:
                color = "#4e342e"
            elif cell in history:
                color = "#263238"
            if cell == self.goal:
                char = "π"
                color = "#4caf50"
            if cell in self.chargers:
                char = "β‘"
                color = "#fdd835"
            if cell in enemy_pos:
                char = "πΉ"
                color = "#d500f9"
            if cell == player_pos:
                char = "π€"
                # Player turns orange as a low-battery warning.
                color = "#2196f3" if battery > 20 else "#ff6f00"
            html += f"<div style='width:20px;height:20px;background:{color};display:flex;align-items:center;justify-content:center;font-size:12px'>{char}</div>"
    html += "</div></div>"
    return html
|
| 88 |
|
| 89 |
+
# ---------------------------------------------------------
# SERVER CONFIG
# ---------------------------------------------------------
# CTF secret: revealed to the player only when their agent reaches env.goal
# (see the VICTORY branch of run_mega_simulation).
FLAG = "CTF{y0u_h4v3_m4st3r3d_th3_m4z3}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
|
| 94 |
+
def run_mega_simulation(zip_file):
    """Run an uploaded agent in the maze and stream (html, status) frames.

    Args:
        zip_file: Gradio file wrapper for the uploaded .zip. The archive must
            contain ``agent.py`` at its root exposing
            ``get_action(pos, radar, battery) -> int`` in {0, 1, 2, 3}
            (up, down, left, right).

    Yields:
        (rendered HTML, status dict) after every step. On victory the status
        contains FLAG; on death/timeout a "RESULT" verdict.

    NOTE(review): executing uploaded code is intentional for this CTF, but it
    runs with full server privileges — sandbox before any public exposure.
    """
    env = MegaWorldEnv()

    if zip_file is None:
        yield env.render(env.start, [], 100, 0), {"status": "Waiting for upload..."}
        return

    # 1. Create a unique temp directory for this run
    run_id = str(uuid.uuid4())
    temp_dir = os.path.join(tempfile.gettempdir(), "ctf_run_" + run_id)
    os.makedirs(temp_dir, exist_ok=True)

    try:
        # 2. Extract Zip
        try:
            with zipfile.ZipFile(zip_file.name, 'r') as zip_ref:
                zip_ref.extractall(temp_dir)
        except Exception as e:
            yield env.render(env.start, [], 0, 0), {"error": f"Invalid Zip File: {e}"}
            return

        # 3. Locate agent.py
        agent_path = os.path.join(temp_dir, "agent.py")
        if not os.path.exists(agent_path):
            yield env.render(env.start, [], 0, 0), {"error": "agent.py not found in zip root!"}
            return

        # 4. Dynamic import. temp_dir goes on sys.path so the agent can load
        # sibling files (e.g. a pickled model) from its own folder.
        sys.path.append(temp_dir)

        try:
            spec = importlib.util.spec_from_file_location("agent_module", agent_path)
            agent = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(agent)
        except Exception as e:
            yield env.render(env.start, [], 0, 0), {"error": f"Code Error in agent.py: {e}"}
            return

        # 5. Run Simulation
        pos = list(env.start)
        battery = 2500
        score = 0
        history = []

        for step in range(1000):
            radar = env.get_radar(pos)

            try:
                action = agent.get_action(tuple(pos), radar, battery)
                # Clamp malformed actions instead of crashing the run.
                if action not in [0, 1, 2, 3]: action = 0
            except Exception as e:
                yield env.render(tuple(pos), history, 0, score), {"error": f"Runtime Error: {e}"}
                break

            dx, dy = [(0,1), (0,-1), (-1,0), (1,0)][action]
            nx, ny = pos[0]+dx, pos[1]+dy

            # Blocked moves (map edge or wall) leave the agent in place.
            if not (0 <= nx < 20 and 0 <= ny < 20) or (nx, ny) in env.walls:
                nx, ny = pos
            pos = [nx, ny]

            env.update_enemies(pos)
            history.append(tuple(pos))
            battery -= 1
            if tuple(pos) in env.mud: battery -= 5

            # Terminal checks
            if tuple(pos) == env.goal:
                yield env.render(tuple(pos), history, battery, score+1000), {"RESULT": f"VICTORY! {FLAG}"}
                break

            enemy_pos = [tuple(e["pos"]) for e in env.enemies]
            if battery <= 0:
                yield env.render(tuple(pos), history, 0, score), {"RESULT": "DIED: Battery Empty"}
                break
            if tuple(pos) in enemy_pos:
                yield env.render(tuple(pos), history, 0, score), {"RESULT": "DIED: Caught by Enemy"}
                break
            if tuple(pos) in env.traps:
                battery -= 10  # Trap penalty (death from this drain is caught next step)

            yield env.render(tuple(pos), history, battery, score), {"step": step}
            time.sleep(0.01)  # Fast playback
        else:
            # FIX: previously an agent that survived all 1000 steps without
            # reaching the goal got no final verdict — the stream just ended.
            yield env.render(tuple(pos), history, battery, score), {"RESULT": "TIMEOUT: Step limit reached"}

    finally:
        # 6. Cleanup: remove temp dir from sys.path and from the filesystem.
        if temp_dir in sys.path:
            sys.path.remove(temp_dir)
        shutil.rmtree(temp_dir, ignore_errors=True)
|
| 187 |
+
|
| 188 |
+
# --- GRADIO INTERFACE ---
# Single-page UI: live maze view on the left, upload/controls on the right.
with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
    gr.Markdown("# π© CTF: The Maze Runner")
    gr.Markdown("Upload a **ZIP file** containing your `agent.py` (and `brain.pkl` if needed).")

    with gr.Row():
        # Render a fresh maze up front so the page isn't blank before the first run.
        game = gr.HTML(MegaWorldEnv().render((1,1), [], 100, 0))
        with gr.Column():
            file_input = gr.File(label="Upload Submission (.zip)", file_types=[".zip"])
            run_btn = gr.Button("Deploy Agent", variant="primary")
            logs = gr.JSON(label="System Status")

    # run_mega_simulation is a generator, so Gradio streams each yielded frame
    # into the HTML view and the status JSON panel.
    run_btn.click(run_mega_simulation, file_input, [game, logs])

if __name__ == "__main__":
    demo.launch()
|