AEUPH committed on
Commit
09409d5
·
verified ·
1 Parent(s): f7e3927

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +35 -61
Dockerfile CHANGED
@@ -101,7 +101,7 @@ HTML_TEMPLATE = r"""
101
  const { useState, useEffect, useRef } = React;
102
  function App() {
103
  const [desktopImage, setDesktopImage] = useState(null);
104
- const [logs, setLogs] = useState(["Neural Bios v9.4", "Booting Kernel..."]);
105
  const socketRef = useRef(null);
106
  const canvasRef = useRef(null);
107
 
@@ -177,23 +177,20 @@ PROGRAMS = {
177
  "explorer": Application("Explorer", "ICON_FOLDER", "file explorer, icons grid", (56, 40))
178
  }
179
 
180
- # Pre-defined Tensor "DNA" for UI consistency
181
  DRIVERS = {}
182
 
183
  def initialize_drivers():
184
  # 1. Desktop Background Latent (Blue-ish)
185
- bg = torch.zeros((1, 4, 128, 128), dtype=torch.float16)
186
- bg[:, 0, :, :] = 0.5 # Channel 0
187
- bg[:, 1, :, :] = 0.8 # Channel 1 (Blueish)
188
- bg[:, 2, :, :] = 0.2 # Channel 2
189
  DRIVERS["DESKTOP_BG"] = bg
190
 
191
- # 2. Icon Placeholders (Simple blocks)
192
- # Creating a simple unique latent signature for icons
193
- icon = torch.zeros((1, 4, 8, 8), dtype=torch.float16)
194
- icon[:, 0, 2:6, 2:6] = 2.0 # Bright spot
195
  DRIVERS["ICON_GENERIC"] = icon
196
-
197
  print("[*] Drivers Initialized.")
198
 
199
  class OSKernel:
@@ -215,7 +212,7 @@ class OSKernel:
215
 
216
  # Initialize empty window latent
217
  w, h = app.default_size
218
- latent = torch.zeros((1, 4, h, w), dtype=torch.float16)
219
 
220
  proc = Process(
221
  pid=pid, name=app.name, app_type=app_type,
@@ -234,31 +231,25 @@ class OSKernel:
234
  def focus_process(self, pid: int):
235
  if pid in self.processes:
236
  self.focused_pid = pid
237
- # Move to top Z-layer
238
  max_z = max((p.z_order for p in self.processes.values()), default=0)
239
  self.processes[pid].z_order = max_z + 1
240
 
241
  def handle_click(self, x: int, y: int) -> Dict:
242
- # 1. Check Windows (Reverse Z-Order)
243
  sorted_procs = sorted(self.processes.values(), key=lambda p: p.z_order, reverse=True)
244
  for proc in sorted_procs:
245
  px, py = proc.position
246
  pw, ph = proc.size
247
  if px <= x < px+pw and py <= y < py+ph:
248
  self.focus_process(proc.pid)
249
-
250
- # Check Close Button (Top Right Corner)
251
  if py <= y < py+4 and px+pw-4 <= x < px+pw:
252
  self.kill_process(proc.pid)
253
  return {"action": "close", "pid": proc.pid, "name": proc.name}
254
-
255
  return {"action": "focus", "pid": proc.pid, "name": proc.name}
256
 
257
- # 2. Check Desktop Icons
258
  for icon in self.desktop_icons:
259
  ix, iy = icon['x'], icon['y']
260
  if ix <= x < ix+8 and iy <= y < iy+8:
261
- pid = self.spawn_process(icon['app'], x=32, y=24) # Spawn offset
262
  return {"action": "launch", "pid": pid, "app": icon['app']}
263
 
264
  return {"action": "desktop_click"}
@@ -270,40 +261,45 @@ class OSKernel:
270
  class NeuralSystem:
271
  def __init__(self):
272
  self.device = "cuda" if torch.cuda.is_available() else "cpu"
 
273
  self.dt = torch.float16 if self.device == "cuda" else torch.float32
274
- print(f"[*] System Device: {self.device}")
275
 
276
  # A. LOAD DIFFUSION
 
 
277
  print("[*] Loading Neural GPU...")
278
  self.pipe = StableDiffusionPipeline.from_pretrained(
279
  "runwayml/stable-diffusion-v1-5",
280
  torch_dtype=self.dt,
281
  safety_checker=None,
282
  requires_safety_checker=False
283
- ).to(self.device)
 
 
 
 
284
  self.pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
285
  self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
286
  self.pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=self.dt).to(self.device)
287
 
288
- # B. LOAD QWEN (With Padding Fix)
289
  print("[*] Loading Qwen 2.5...")
290
  self.model_id = "Qwen/Qwen2.5-Coder-0.5B-Instruct"
291
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
292
-
293
- # [FIX] Explicitly set padding token if missing
294
  if self.tokenizer.pad_token_id is None:
295
  self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
296
-
 
297
  self.llm = AutoModelForCausalLM.from_pretrained(
298
  self.model_id,
299
- dtype=self.dt,
300
- low_cpu_mem_usage=True
301
- ).to(self.device)
 
302
  print("[*] Systems Online.")
303
 
304
  def think(self, prompt_text):
305
- """ Runs Qwen with corrected attention masks """
306
- # [FIX] Explicit attention mask generation
307
  inputs = self.tokenizer(
308
  prompt_text,
309
  return_tensors="pt",
@@ -323,10 +319,8 @@ class NeuralSystem:
323
  return response
324
 
325
  def render_frame(self, kernel: OSKernel):
326
- """ Composites the desktop and runs one fast diffusion pass """
327
- # 1. Base Layer (Desktop)
328
- canvas = DRIVERS["DESKTOP_BG"].clone()
329
- if self.device == "cuda": canvas = canvas.to("cuda")
330
 
331
  # 2. Draw Icons
332
  icon_dna = DRIVERS["ICON_GENERIC"].to(self.device)
@@ -334,23 +328,18 @@ class NeuralSystem:
334
  x, y = icon['x'], icon['y']
335
  canvas[:, :, y:y+8, x:x+8] = icon_dna
336
 
337
- # 3. Draw Windows (Sorted by Z)
338
  sorted_procs = sorted(kernel.processes.values(), key=lambda p: p.z_order)
339
  for proc in sorted_procs:
340
  x, y = proc.position
341
  w, h = proc.size
342
-
343
- # Blit latent state
344
- # Ensure proc latent is on correct device
345
- proc_latent = proc.latent_state.to(self.device)
346
-
347
- # Simple bounds check
348
  if x+w <= 128 and y+h <= 128:
 
 
349
  canvas[:, :, y:y+h, x:x+w] = proc_latent
350
 
351
- # 4. Decode (No Diffusion for pure composition speed, or 1-step for 'dreaming')
352
  with torch.no_grad():
353
- # Fast decode using TAESD
354
  img = self.pipe.vae.decode(canvas / 0.18215).sample
355
  img = (img / 2 + 0.5).clamp(0, 1).cpu().permute(0, 2, 3, 1).numpy()
356
  img = self.pipe.numpy_to_pil(img)[0]
@@ -358,30 +347,25 @@ class NeuralSystem:
358
  return img
359
 
360
  def generate_window_content(self, proc: Process):
361
- """ Generates content for a newly opened window """
362
  app_def = PROGRAMS[proc.app_type]
363
  prompt = f"pixel art windows xp {app_def.name} window content, {app_def.content_prompt}, crisp UI"
364
 
365
- # 1-Step LCM Generation
366
  with torch.no_grad():
367
- # Generate pure noise of correct size
368
  latents = torch.randn(
369
  (1, 4, proc.size[1], proc.size[0]),
370
  device=self.device,
371
  dtype=self.dt
372
  )
373
- # Run 1 step
374
  img_latents = self.pipe(
375
  prompt,
376
  latents=latents,
377
  num_inference_steps=1,
378
  output_type="latent"
379
- ).images # Returns latents because output_type="latent"
380
 
381
- # Manually inject "Title Bar" DNA (Blue strip at top)
382
- # Channel 1 is roughly Blue/Cyan in SD Latent space
383
  img_latents[:, 1, 0:4, :] = 1.5
384
- img_latents[:, 0, 0:4, :] = -0.5 # Darken
385
 
386
  proc.latent_state = img_latents
387
 
@@ -404,7 +388,6 @@ def socket_handler(ws):
404
 
405
  ws.send(json.dumps({"type": "log", "data": "Kernel Attached."}))
406
 
407
- # Render Initial Frame
408
  img = sys_engine.render_frame(kernel_instance)
409
  buf = io.BytesIO()
410
  img.save(buf, format="PNG")
@@ -419,30 +402,21 @@ def socket_handler(ws):
419
  msg = json.loads(data)
420
 
421
  if msg['type'] == 'click':
422
- # 1. Handle OS Logic
423
  res = kernel_instance.handle_click(msg['x'], msg['y'])
424
 
425
  if res['action'] == 'launch':
426
  ws.send(json.dumps({"type": "log", "data": f"Launching {res['app']}..."}))
427
-
428
- # Get the process object
429
  proc = kernel_instance.processes[res['pid']]
430
-
431
- # Use AI to generate its content
432
  sys_engine.generate_window_content(proc)
433
 
434
  elif res['action'] == 'close':
435
  ws.send(json.dumps({"type": "log", "data": f"Closed {res['name']}"}))
436
 
437
  elif res['action'] == 'desktop_click':
438
- # Ask Qwen for flavor text
439
  thought = sys_engine.think(f"User clicked background at {msg['x']},{msg['y']}. Short witty system log:")
440
  ws.send(json.dumps({"type": "log", "data": f"SYS: {thought}"}))
441
 
442
- # 2. Re-Render Desktop
443
  img = sys_engine.render_frame(kernel_instance)
444
-
445
- # 3. Send Frame
446
  buf = io.BytesIO()
447
  img.save(buf, format="PNG")
448
  ws.send(json.dumps({
 
101
  const { useState, useEffect, useRef } = React;
102
  function App() {
103
  const [desktopImage, setDesktopImage] = useState(null);
104
+ const [logs, setLogs] = useState(["Neural Bios v9.5", "Booting Kernel..."]);
105
  const socketRef = useRef(null);
106
  const canvasRef = useRef(null);
107
 
 
177
  "explorer": Application("Explorer", "ICON_FOLDER", "file explorer, icons grid", (56, 40))
178
  }
179
 
 
180
  DRIVERS = {}
181
 
182
  def initialize_drivers():
183
  # 1. Desktop Background Latent (Blue-ish)
184
+ bg = torch.zeros((1, 4, 128, 128), dtype=torch.float32) # Using float32 for safety
185
+ bg[:, 0, :, :] = 0.5
186
+ bg[:, 1, :, :] = 0.8
187
+ bg[:, 2, :, :] = 0.2
188
  DRIVERS["DESKTOP_BG"] = bg
189
 
190
+ # 2. Icon Placeholders
191
+ icon = torch.zeros((1, 4, 8, 8), dtype=torch.float32)
192
+ icon[:, 0, 2:6, 2:6] = 2.0
 
193
  DRIVERS["ICON_GENERIC"] = icon
 
194
  print("[*] Drivers Initialized.")
195
 
196
  class OSKernel:
 
212
 
213
  # Initialize empty window latent
214
  w, h = app.default_size
215
+ latent = torch.zeros((1, 4, h, w), dtype=torch.float32)
216
 
217
  proc = Process(
218
  pid=pid, name=app.name, app_type=app_type,
 
231
  def focus_process(self, pid: int):
232
  if pid in self.processes:
233
  self.focused_pid = pid
 
234
  max_z = max((p.z_order for p in self.processes.values()), default=0)
235
  self.processes[pid].z_order = max_z + 1
236
 
237
  def handle_click(self, x: int, y: int) -> Dict:
 
238
  sorted_procs = sorted(self.processes.values(), key=lambda p: p.z_order, reverse=True)
239
  for proc in sorted_procs:
240
  px, py = proc.position
241
  pw, ph = proc.size
242
  if px <= x < px+pw and py <= y < py+ph:
243
  self.focus_process(proc.pid)
 
 
244
  if py <= y < py+4 and px+pw-4 <= x < px+pw:
245
  self.kill_process(proc.pid)
246
  return {"action": "close", "pid": proc.pid, "name": proc.name}
 
247
  return {"action": "focus", "pid": proc.pid, "name": proc.name}
248
 
 
249
  for icon in self.desktop_icons:
250
  ix, iy = icon['x'], icon['y']
251
  if ix <= x < ix+8 and iy <= y < iy+8:
252
+ pid = self.spawn_process(icon['app'], x=32, y=24)
253
  return {"action": "launch", "pid": pid, "app": icon['app']}
254
 
255
  return {"action": "desktop_click"}
 
261
  class NeuralSystem:
262
  def __init__(self):
263
  self.device = "cuda" if torch.cuda.is_available() else "cpu"
264
+ # CPU must use float32 to avoid "Half not implemented" errors
265
  self.dt = torch.float16 if self.device == "cuda" else torch.float32
266
+ print(f"[*] System Device: {self.device} | Type: {self.dt}")
267
 
268
  # A. LOAD DIFFUSION
269
+ # We do NOT call .to(device) immediately for the whole pipeline if on CPU
270
+ # to prevent meta-tensor conversion issues with accelerate.
271
  print("[*] Loading Neural GPU...")
272
  self.pipe = StableDiffusionPipeline.from_pretrained(
273
  "runwayml/stable-diffusion-v1-5",
274
  torch_dtype=self.dt,
275
  safety_checker=None,
276
  requires_safety_checker=False
277
+ )
278
+ if self.device == "cuda":
279
+ self.pipe = self.pipe.to("cuda")
280
+
281
+ # Load LCM and VAE
282
  self.pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
283
  self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
284
  self.pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=self.dt).to(self.device)
285
 
286
+ # B. LOAD QWEN
287
  print("[*] Loading Qwen 2.5...")
288
  self.model_id = "Qwen/Qwen2.5-Coder-0.5B-Instruct"
289
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
 
 
290
  if self.tokenizer.pad_token_id is None:
291
  self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
292
+
293
+ # FIX: Use device_map instead of .to() to prevent "Cannot copy out of meta tensor"
294
  self.llm = AutoModelForCausalLM.from_pretrained(
295
  self.model_id,
296
+ torch_dtype=self.dt,
297
+ low_cpu_mem_usage=True,
298
+ device_map=self.device # Let accelerate handle the placement
299
+ )
300
  print("[*] Systems Online.")
301
 
302
  def think(self, prompt_text):
 
 
303
  inputs = self.tokenizer(
304
  prompt_text,
305
  return_tensors="pt",
 
319
  return response
320
 
321
  def render_frame(self, kernel: OSKernel):
322
+ # 1. Base Layer
323
+ canvas = DRIVERS["DESKTOP_BG"].clone().to(self.device)
 
 
324
 
325
  # 2. Draw Icons
326
  icon_dna = DRIVERS["ICON_GENERIC"].to(self.device)
 
328
  x, y = icon['x'], icon['y']
329
  canvas[:, :, y:y+8, x:x+8] = icon_dna
330
 
331
+ # 3. Draw Windows
332
  sorted_procs = sorted(kernel.processes.values(), key=lambda p: p.z_order)
333
  for proc in sorted_procs:
334
  x, y = proc.position
335
  w, h = proc.size
 
 
 
 
 
 
336
  if x+w <= 128 and y+h <= 128:
337
+ # Ensure latent is on correct device/dtype before blitting
338
+ proc_latent = proc.latent_state.to(self.device, dtype=self.dt)
339
  canvas[:, :, y:y+h, x:x+w] = proc_latent
340
 
341
+ # 4. Decode
342
  with torch.no_grad():
 
343
  img = self.pipe.vae.decode(canvas / 0.18215).sample
344
  img = (img / 2 + 0.5).clamp(0, 1).cpu().permute(0, 2, 3, 1).numpy()
345
  img = self.pipe.numpy_to_pil(img)[0]
 
347
  return img
348
 
349
  def generate_window_content(self, proc: Process):
 
350
  app_def = PROGRAMS[proc.app_type]
351
  prompt = f"pixel art windows xp {app_def.name} window content, {app_def.content_prompt}, crisp UI"
352
 
 
353
  with torch.no_grad():
 
354
  latents = torch.randn(
355
  (1, 4, proc.size[1], proc.size[0]),
356
  device=self.device,
357
  dtype=self.dt
358
  )
 
359
  img_latents = self.pipe(
360
  prompt,
361
  latents=latents,
362
  num_inference_steps=1,
363
  output_type="latent"
364
+ ).images
365
 
366
+ # Simple Title Bar Injection
 
367
  img_latents[:, 1, 0:4, :] = 1.5
368
+ img_latents[:, 0, 0:4, :] = -0.5
369
 
370
  proc.latent_state = img_latents
371
 
 
388
 
389
  ws.send(json.dumps({"type": "log", "data": "Kernel Attached."}))
390
 
 
391
  img = sys_engine.render_frame(kernel_instance)
392
  buf = io.BytesIO()
393
  img.save(buf, format="PNG")
 
402
  msg = json.loads(data)
403
 
404
  if msg['type'] == 'click':
 
405
  res = kernel_instance.handle_click(msg['x'], msg['y'])
406
 
407
  if res['action'] == 'launch':
408
  ws.send(json.dumps({"type": "log", "data": f"Launching {res['app']}..."}))
 
 
409
  proc = kernel_instance.processes[res['pid']]
 
 
410
  sys_engine.generate_window_content(proc)
411
 
412
  elif res['action'] == 'close':
413
  ws.send(json.dumps({"type": "log", "data": f"Closed {res['name']}"}))
414
 
415
  elif res['action'] == 'desktop_click':
 
416
  thought = sys_engine.think(f"User clicked background at {msg['x']},{msg['y']}. Short witty system log:")
417
  ws.send(json.dumps({"type": "log", "data": f"SYS: {thought}"}))
418
 
 
419
  img = sys_engine.render_frame(kernel_instance)
 
 
420
  buf = io.BytesIO()
421
  img.save(buf, format="PNG")
422
  ws.send(json.dumps({