AEUPH committed on
Commit
80a3acb
·
verified ·
1 Parent(s): 09409d5

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +8 -24
Dockerfile CHANGED
@@ -101,7 +101,7 @@ HTML_TEMPLATE = r"""
101
  const { useState, useEffect, useRef } = React;
102
  function App() {
103
  const [desktopImage, setDesktopImage] = useState(null);
104
- const [logs, setLogs] = useState(["Neural Bios v9.5", "Booting Kernel..."]);
105
  const socketRef = useRef(null);
106
  const canvasRef = useRef(null);
107
 
@@ -180,14 +180,12 @@ PROGRAMS = {
180
  DRIVERS = {}
181
 
182
  def initialize_drivers():
183
- # 1. Desktop Background Latent (Blue-ish)
184
- bg = torch.zeros((1, 4, 128, 128), dtype=torch.float32) # Using float32 for safety
185
  bg[:, 0, :, :] = 0.5
186
  bg[:, 1, :, :] = 0.8
187
  bg[:, 2, :, :] = 0.2
188
  DRIVERS["DESKTOP_BG"] = bg
189
 
190
- # 2. Icon Placeholders
191
  icon = torch.zeros((1, 4, 8, 8), dtype=torch.float32)
192
  icon[:, 0, 2:6, 2:6] = 2.0
193
  DRIVERS["ICON_GENERIC"] = icon
@@ -210,7 +208,6 @@ class OSKernel:
210
  pid = self.next_pid
211
  self.next_pid += 1
212
 
213
- # Initialize empty window latent
214
  w, h = app.default_size
215
  latent = torch.zeros((1, 4, h, w), dtype=torch.float32)
216
 
@@ -261,13 +258,9 @@ class OSKernel:
261
  class NeuralSystem:
262
  def __init__(self):
263
  self.device = "cuda" if torch.cuda.is_available() else "cpu"
264
- # CPU must use float32 to avoid "Half not implemented" errors
265
  self.dt = torch.float16 if self.device == "cuda" else torch.float32
266
  print(f"[*] System Device: {self.device} | Type: {self.dt}")
267
 
268
- # A. LOAD DIFFUSION
269
- # We do NOT call .to(device) immediately for the whole pipeline if on CPU
270
- # to prevent meta-tensor conversion issues with accelerate.
271
  print("[*] Loading Neural GPU...")
272
  self.pipe = StableDiffusionPipeline.from_pretrained(
273
  "runwayml/stable-diffusion-v1-5",
@@ -278,25 +271,23 @@ class NeuralSystem:
278
  if self.device == "cuda":
279
  self.pipe = self.pipe.to("cuda")
280
 
281
- # Load LCM and VAE
282
  self.pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
283
  self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
284
  self.pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=self.dt).to(self.device)
285
 
286
- # B. LOAD QWEN
287
  print("[*] Loading Qwen 2.5...")
288
  self.model_id = "Qwen/Qwen2.5-Coder-0.5B-Instruct"
289
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
290
  if self.tokenizer.pad_token_id is None:
291
  self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
292
 
293
- # FIX: Use device_map instead of .to() to prevent "Cannot copy out of meta tensor"
 
294
  self.llm = AutoModelForCausalLM.from_pretrained(
295
  self.model_id,
296
- torch_dtype=self.dt,
297
- low_cpu_mem_usage=True,
298
- device_map=self.device # Let accelerate handle the placement
299
- )
300
  print("[*] Systems Online.")
301
 
302
  def think(self, prompt_text):
@@ -319,26 +310,21 @@ class NeuralSystem:
319
  return response
320
 
321
  def render_frame(self, kernel: OSKernel):
322
- # 1. Base Layer
323
  canvas = DRIVERS["DESKTOP_BG"].clone().to(self.device)
324
-
325
- # 2. Draw Icons
326
  icon_dna = DRIVERS["ICON_GENERIC"].to(self.device)
 
327
  for icon in kernel.desktop_icons:
328
  x, y = icon['x'], icon['y']
329
  canvas[:, :, y:y+8, x:x+8] = icon_dna
330
 
331
- # 3. Draw Windows
332
  sorted_procs = sorted(kernel.processes.values(), key=lambda p: p.z_order)
333
  for proc in sorted_procs:
334
  x, y = proc.position
335
  w, h = proc.size
336
  if x+w <= 128 and y+h <= 128:
337
- # Ensure latent is on correct device/dtype before blitting
338
  proc_latent = proc.latent_state.to(self.device, dtype=self.dt)
339
  canvas[:, :, y:y+h, x:x+w] = proc_latent
340
 
341
- # 4. Decode
342
  with torch.no_grad():
343
  img = self.pipe.vae.decode(canvas / 0.18215).sample
344
  img = (img / 2 + 0.5).clamp(0, 1).cpu().permute(0, 2, 3, 1).numpy()
@@ -363,10 +349,8 @@ class NeuralSystem:
363
  output_type="latent"
364
  ).images
365
 
366
- # Simple Title Bar Injection
367
  img_latents[:, 1, 0:4, :] = 1.5
368
  img_latents[:, 0, 0:4, :] = -0.5
369
-
370
  proc.latent_state = img_latents
371
 
372
  # ============================================================================
 
101
  const { useState, useEffect, useRef } = React;
102
  function App() {
103
  const [desktopImage, setDesktopImage] = useState(null);
104
+ const [logs, setLogs] = useState(["Neural Bios v9.6", "Booting Kernel..."]);
105
  const socketRef = useRef(null);
106
  const canvasRef = useRef(null);
107
 
 
180
  DRIVERS = {}
181
 
182
  def initialize_drivers():
183
+ bg = torch.zeros((1, 4, 128, 128), dtype=torch.float32)
 
184
  bg[:, 0, :, :] = 0.5
185
  bg[:, 1, :, :] = 0.8
186
  bg[:, 2, :, :] = 0.2
187
  DRIVERS["DESKTOP_BG"] = bg
188
 
 
189
  icon = torch.zeros((1, 4, 8, 8), dtype=torch.float32)
190
  icon[:, 0, 2:6, 2:6] = 2.0
191
  DRIVERS["ICON_GENERIC"] = icon
 
208
  pid = self.next_pid
209
  self.next_pid += 1
210
 
 
211
  w, h = app.default_size
212
  latent = torch.zeros((1, 4, h, w), dtype=torch.float32)
213
 
 
258
  class NeuralSystem:
259
  def __init__(self):
260
  self.device = "cuda" if torch.cuda.is_available() else "cpu"
 
261
  self.dt = torch.float16 if self.device == "cuda" else torch.float32
262
  print(f"[*] System Device: {self.device} | Type: {self.dt}")
263
 
 
 
 
264
  print("[*] Loading Neural GPU...")
265
  self.pipe = StableDiffusionPipeline.from_pretrained(
266
  "runwayml/stable-diffusion-v1-5",
 
271
  if self.device == "cuda":
272
  self.pipe = self.pipe.to("cuda")
273
 
 
274
  self.pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
275
  self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
276
  self.pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=self.dt).to(self.device)
277
 
 
278
  print("[*] Loading Qwen 2.5...")
279
  self.model_id = "Qwen/Qwen2.5-Coder-0.5B-Instruct"
280
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
281
  if self.tokenizer.pad_token_id is None:
282
  self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
283
 
284
+ # [FIXED] Removed device_map and low_cpu_mem_usage to prevent meta-tensor crash
285
+ # This forces a standard, synchronous load into RAM.
286
  self.llm = AutoModelForCausalLM.from_pretrained(
287
  self.model_id,
288
+ torch_dtype=self.dt
289
+ ).to(self.device)
290
+
 
291
  print("[*] Systems Online.")
292
 
293
  def think(self, prompt_text):
 
310
  return response
311
 
312
  def render_frame(self, kernel: OSKernel):
 
313
  canvas = DRIVERS["DESKTOP_BG"].clone().to(self.device)
 
 
314
  icon_dna = DRIVERS["ICON_GENERIC"].to(self.device)
315
+
316
  for icon in kernel.desktop_icons:
317
  x, y = icon['x'], icon['y']
318
  canvas[:, :, y:y+8, x:x+8] = icon_dna
319
 
 
320
  sorted_procs = sorted(kernel.processes.values(), key=lambda p: p.z_order)
321
  for proc in sorted_procs:
322
  x, y = proc.position
323
  w, h = proc.size
324
  if x+w <= 128 and y+h <= 128:
 
325
  proc_latent = proc.latent_state.to(self.device, dtype=self.dt)
326
  canvas[:, :, y:y+h, x:x+w] = proc_latent
327
 
 
328
  with torch.no_grad():
329
  img = self.pipe.vae.decode(canvas / 0.18215).sample
330
  img = (img / 2 + 0.5).clamp(0, 1).cpu().permute(0, 2, 3, 1).numpy()
 
349
  output_type="latent"
350
  ).images
351
 
 
352
  img_latents[:, 1, 0:4, :] = 1.5
353
  img_latents[:, 0, 0:4, :] = -0.5
 
354
  proc.latent_state = img_latents
355
 
356
  # ============================================================================