AEUPH commited on
Commit
f2abc27
·
verified ·
1 Parent(s): 79c30ac

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +366 -428
Dockerfile CHANGED
@@ -4,281 +4,239 @@ FROM python:3.10-slim
4
  # Set working directory
5
  WORKDIR /app
6
 
7
- # 1. Install System Dependencies & Fonts
8
- # We add 'curl' to download the font and 'git' for diffusers compatibility.
 
9
  RUN apt-get update && apt-get install -y \
10
  git \
11
  curl \
 
12
  && rm -rf /var/lib/apt/lists/*
13
 
14
- # 2. Download Retro Font (VT323) for Python PIL
15
- # This fixes the "text too small" issue by allowing us to load a specific large font size.
16
  RUN curl -L -o /app/VT323.ttf https://github.com/google/fonts/raw/main/ofl/vt323/VT323-Regular.ttf
17
 
18
  # 3. Install Python Dependencies
 
 
19
  RUN pip install --no-cache-dir \
20
  torch \
 
 
21
  flask \
22
  flask-sock \
23
  diffusers \
24
  transformers \
25
  accelerate \
 
 
 
 
26
  safetensors \
27
  scipy
28
 
29
- # 4. Create a non-root user (Required for Hugging Face Spaces)
30
  RUN useradd -m -u 1000 user
31
  USER user
32
  ENV HOME=/home/user \
33
  PATH=/home/user/.local/bin:$PATH
34
 
35
  # 5. Write the Monolith Application to disk
 
36
  COPY --chown=user <<'EOF' app.py
37
- import sys
38
- import os
39
- import time
40
- import io
41
- import json
42
- import base64
43
- from dataclasses import dataclass, field
44
- from typing import Dict, List
 
 
 
45
 
46
  # ============================================================================
47
- # 1. DEPENDENCY CHECK & IMPORTS
48
  # ============================================================================
49
- print(f"[*] NeuralOS Monolith v9.3 OPTIMIZED (Running in: {sys.executable})")
50
-
51
- try:
52
- import torch
53
- from flask import Flask
54
- from flask_sock import Sock
55
- from diffusers import StableDiffusionPipeline, LCMScheduler, AutoencoderTiny
56
- from PIL import Image, ImageDraw, ImageFont
57
- except ImportError as e:
58
- print(f"\n[!] CRITICAL: Missing dependency: {e.name}")
59
- sys.exit(1)
60
 
61
- # ============================================================================
62
- # 2. OPTIMIZED EMBEDDED GUI (Responsive & High-DPI Ready)
63
- # ============================================================================
64
-
65
- HTML_TEMPLATE = """
66
  <!DOCTYPE html>
67
  <html lang="en">
68
  <head>
69
  <meta charset="UTF-8">
70
- <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
71
- <title>NeuralOS | Monolith</title>
 
 
 
 
72
  <style>
73
- @import url('https://fonts.googleapis.com/css2?family=VT323&display=swap');
74
-
75
- body {
76
- background: #050505;
77
- margin: 0;
78
- padding: 0;
79
- width: 100vw;
80
- height: 100vh;
81
- overflow: hidden;
82
- display: flex;
83
- justify-content: center;
84
- align-items: center;
85
- font-family: 'VT323', monospace;
86
  }
87
-
88
- /* Responsive Monitor Case */
89
- .monitor-case {
90
- background: #1a1a1a;
91
- padding: 1.5vmin; /* Responsive padding */
92
- border-radius: 1vmin;
93
- box-shadow: 0 0 0 0.4vmin #222, 0 0 4vmin rgba(0,0,0,0.8), inset 0 0 2vmin rgba(0,0,0,0.5);
94
  position: relative;
95
-
96
- /* Responsive Sizing Logic */
97
- width: auto;
98
- height: auto;
99
- max-width: 95vw;
100
- max-height: 90vh;
101
- aspect-ratio: 1 / 1; /* Force Square Aspect Ratio */
102
-
103
- display: flex;
104
- flex-direction: column;
105
  }
106
-
107
- .screen-container {
108
- width: 100%;
109
- height: 100%;
110
- position: relative;
 
 
111
  background: #000;
112
- overflow: hidden;
113
- border-radius: 0.5vmin;
114
- flex-grow: 1;
115
  }
116
-
117
- .crt-overlay {
118
- position: absolute;
119
- inset: 0;
120
- background: linear-gradient(rgba(18, 16, 16, 0) 50%, rgba(0, 0, 0, 0.25) 50%);
121
- background-size: 100% 4px;
122
- pointer-events: none;
123
- z-index: 10;
124
  }
125
-
126
- .scanline {
127
- width: 100%;
128
- height: 100px;
129
- z-index: 10;
130
- background: linear-gradient(0deg, rgba(0,0,0,0) 0%, rgba(255, 255, 255, 0.04) 50%, rgba(0,0,0,0) 100%);
131
- opacity: 0.1;
132
- position: absolute;
133
- bottom: 100%;
134
- animation: scanline 10s linear infinite;
135
- pointer-events: none;
136
  }
137
-
138
- @keyframes scanline { 0% { bottom: 100%; } 100% { bottom: -100px; } }
139
-
140
- /* KEY FIX: Ensure pixel art stays sharp when scaled down */
141
- #display {
142
- width: 100%;
143
- height: 100%;
144
- object-fit: contain;
145
- image-rendering: pixelated;
146
- image-rendering: crisp-edges;
147
- display: block;
148
  }
149
-
150
- .controls {
151
- display: flex;
152
- justify-content: flex-end;
153
- align-items: center;
154
- gap: 1vmin;
155
- padding-top: 1vmin;
156
- height: 4vmin;
157
  }
158
-
159
- .pwr-btn {
160
- background: #333;
161
- color: #0f0;
162
- border: 0.2vmin solid #222;
163
- padding: 0.5vmin 1.5vmin;
164
- font-family: 'VT323';
165
- font-size: 2vmin;
166
- cursor: pointer;
167
- text-transform: uppercase;
168
- box-shadow: 0 0.4vmin 0 #111;
169
  }
170
- .pwr-btn:active { transform: translateY(0.4vmin); box-shadow: none; }
171
-
172
- .led {
173
- width: 1.5vmin;
174
- height: 1.5vmin;
175
- border-radius: 50%;
176
- background: #111;
177
- box-shadow: 0 0 0.2vmin #000;
178
- }
179
- .led.on { background: #0f0; box-shadow: 0 0 0.5vmin #0f0; }
180
- .led.busy { background: #f00; box-shadow: 0 0 0.5vmin #f00; animation: blink 0.1s infinite; }
181
- @keyframes blink { 0% { opacity: 0.5; } 100% { opacity: 1; } }
182
  </style>
183
  </head>
184
  <body>
185
- <div class="monitor-case">
186
- <div class="screen-container">
187
- <div class="crt-overlay"></div>
188
- <div class="scanline"></div>
189
- <img id="display" src="" alt="AWAITING SIGNAL..." />
190
- </div>
191
- <div class="controls">
192
- <div class="led on"></div>
193
- <div id="hdd-led" class="led"></div>
194
- <button class="pwr-btn" onclick="location.reload()">RESET</button>
195
- </div>
196
- </div>
197
- <script>
198
- const display = document.getElementById('display');
199
- const hddLed = document.getElementById('hdd-led');
200
- let ws = null;
201
-
202
- function connect() {
203
- const proto = window.location.protocol === 'https:' ? 'wss' : 'ws';
204
- ws = new WebSocket(`${proto}://${window.location.host}/kernel`);
205
-
206
- ws.onmessage = (e) => {
207
- const msg = JSON.parse(e.data);
208
- if (msg.type === 'frame_update') {
209
- display.src = `data:image/jpeg;base64,${msg.data}`;
210
- hddLed.classList.add('busy');
211
- setTimeout(() => hddLed.classList.remove('busy'), 50);
212
- }
 
 
 
 
 
 
 
 
213
  };
214
 
215
- ws.onclose = () => { setTimeout(connect, 3000); };
216
- }
 
 
217
 
218
- display.addEventListener('mousedown', (e) => {
219
- if (!ws) return;
220
- // Responsive coordinate calculation
221
- const rect = display.getBoundingClientRect();
222
- // Map the clicked position (0 to rect.width) to internal resolution (0 to 128)
223
- const x = Math.floor(((e.clientX - rect.left) / rect.width) * 128);
224
- const y = Math.floor(((e.clientY - rect.top) / rect.height) * 128);
225
- ws.send(JSON.stringify({ type: 'click', x: x, y: y }));
226
- });
227
-
228
- document.addEventListener('keydown', (e) => {
229
- if (!ws) return;
230
- if (['Backspace', 'Tab', 'ArrowUp', 'ArrowDown', 'ArrowLeft', 'ArrowRight'].includes(e.key)) {
231
- e.preventDefault();
232
- }
233
- ws.send(JSON.stringify({ type: 'keydown', key: e.key }));
234
- });
235
-
236
- connect();
 
 
 
 
 
 
 
 
237
  </script>
238
  </body>
239
  </html>
240
  """
241
 
242
  # ============================================================================
243
- # 3. KERNEL & HARDWARE
244
  # ============================================================================
245
 
246
  DRIVERS = {
247
  "TITLE_BAR": torch.zeros((1, 4, 4, 32), dtype=torch.float16),
 
248
  "CLOSE_BTN": torch.zeros((1, 4, 4, 4), dtype=torch.float16),
249
  "TASKBAR": torch.zeros((1, 4, 6, 128), dtype=torch.float16),
250
  "START_BTN": torch.zeros((1, 4, 6, 24), dtype=torch.float16),
251
  "DESKTOP_BG": torch.zeros((1, 4, 128, 128), dtype=torch.float16),
252
  "ICON_NOTEPAD": torch.zeros((1, 4, 8, 8), dtype=torch.float16),
 
253
  "ICON_CMD": torch.zeros((1, 4, 8, 8), dtype=torch.float16),
254
  "ICON_FOLDER": torch.zeros((1, 4, 8, 8), dtype=torch.float16),
255
- "ICON_GAME": torch.zeros((1, 4, 8, 8), dtype=torch.float16),
256
  }
257
 
258
- def init_drivers():
259
- DRIVERS["TITLE_BAR"][:, 0, 0:1, :] = 2.0; DRIVERS["TITLE_BAR"][:, 1, :, :] = -1.0
 
 
260
  DRIVERS["CLOSE_BTN"][:, 2, :, :] = 2.5
261
  DRIVERS["TASKBAR"][:, 0, 0, :] = 1.2
262
  DRIVERS["START_BTN"][:, 1, 1:5, 2:22] = 1.8
263
- DRIVERS["DESKTOP_BG"][:, 1, 0:80, :] = 1.2; DRIVERS["DESKTOP_BG"][:, 2, 0:80, :] = 1.5
 
264
  DRIVERS["DESKTOP_BG"][:, 1, 80:128, :] = 0.8
265
  DRIVERS["ICON_NOTEPAD"][:, 0, :, :] = 1.5
266
- DRIVERS["ICON_CMD"][:, 1, :, :] = -1.0
267
- DRIVERS["ICON_GAME"][:, 0, 2:6, 2:6] = 2.0
268
-
269
- @dataclass
270
- class Application:
271
- name: str
272
- icon_dna: str
273
- default_size: tuple
274
- is_game: bool = False
275
-
276
- PROGRAMS = {
277
- "notepad": Application("Notepad", "ICON_NOTEPAD", (48, 40)),
278
- "cmd": Application("Terminal", "ICON_CMD", (56, 40)),
279
- "explorer": Application("My Computer", "ICON_FOLDER", (72, 56)),
280
- "doom": Application("NeuroDoom", "ICON_GAME", (64, 48), True)
281
- }
282
 
283
  @dataclass
284
  class Process:
@@ -288,274 +246,254 @@ class Process:
288
  position: tuple
289
  size: tuple
290
  latent_state: torch.Tensor
291
- text_buffer: List[str] = field(default_factory=list)
292
- input_focus: bool = False
293
  status: str = "running"
294
  z_order: int = 0
295
- meta: Dict = field(default_factory=dict)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
296
 
297
  class OSKernel:
298
  def __init__(self):
299
- self.processes = {}
300
- self.next_pid = 1000
301
- self.focused_pid = None
302
- self.system_state = "BOOT"
303
- self.boot_log = []
304
- self.desktop_latent = None
305
- self.desktop_icons = []
306
- self.start_menu_open = False
307
- self.current_dir = "C:\\Users\\Admin"
308
-
309
- def init_desktop(self):
310
  self.desktop_latent = DRIVERS["DESKTOP_BG"].clone()
311
  self.desktop_icons = [
312
- {"app": "notepad", "x": 4, "y": 4},
313
- {"app": "cmd", "x": 4, "y": 16},
314
- {"app": "explorer", "x": 4, "y": 28},
315
- {"app": "doom", "x": 4, "y": 40},
316
  ]
317
- self.system_state = "DESKTOP"
318
-
319
- def spawn(self, app_type, x, y):
320
- if app_type not in PROGRAMS: return
321
  app = PROGRAMS[app_type]
322
  pid = self.next_pid
323
  self.next_pid += 1
324
-
325
- buf = []
326
- if app_type == "cmd":
327
- buf = ["NEURAL OS [v9.3]", "(C) Monolith", "", f"{self.current_dir}>"]
328
- elif app_type == "notepad":
329
- buf = ["_"]
330
- elif app_type == "doom":
331
- buf = ["[INIT] LOADING...", "[INIT] CONNECTING..."]
332
-
333
- self.processes[pid] = Process(
334
- pid, app.name, app_type, (x, y), app.default_size,
335
- torch.zeros((1, 4, app.default_size[1], app.default_size[0]), dtype=torch.float16),
336
- buf, False, "running", pid, {"state": "menu"} if app.is_game else {}
337
- )
338
- self.focus(pid)
339
  return pid
340
-
341
- def focus(self, pid):
 
 
 
 
 
342
  if pid in self.processes:
343
- for p in self.processes.values(): p.input_focus = False
344
- self.processes[pid].input_focus = True
345
- self.processes[pid].z_order = max([p.z_order for p in self.processes.values()], default=0) + 1
346
  self.focused_pid = pid
347
-
348
- def kill(self, pid):
349
- if pid in self.processes: del self.processes[pid]
350
-
351
- def input(self, key):
352
- if not self.focused_pid: return
353
- proc = self.processes[self.focused_pid]
354
-
355
- if proc.app_type == "cmd":
356
- if key == "Enter":
357
- cmd = proc.text_buffer[-1].split(">")[-1].strip()
358
- proc.text_buffer.append("")
359
- if cmd == "dir": proc.text_buffer.append(" <DIR> Documents")
360
- elif cmd == "cls": proc.text_buffer = []
361
- elif cmd == "exit": self.kill(proc.pid)
362
- elif cmd.startswith("start "):
363
- target = cmd.split(" ")[1]
364
- if target in PROGRAMS:
365
- proc.meta["spawn_req"] = target
366
- proc.text_buffer.append(f"Starting {target}...")
367
- else: proc.text_buffer.append("Unknown command")
368
- proc.text_buffer.append(f"{self.current_dir}>")
369
- elif key == "Backspace":
370
- if ">" in proc.text_buffer[-1] and len(proc.text_buffer[-1].split(">")[-1]) > 0:
371
- proc.text_buffer[-1] = proc.text_buffer[-1][:-1]
372
- elif len(key) == 1: proc.text_buffer[-1] += key
373
-
374
- elif proc.app_type == "notepad":
375
- if key == "Enter": proc.text_buffer.append("")
376
- elif key == "Backspace":
377
- if len(proc.text_buffer[-1]) > 0: proc.text_buffer[-1] = proc.text_buffer[-1][:-1]
378
- elif len(proc.text_buffer) > 1: proc.text_buffer.pop()
379
- elif len(key) == 1: proc.text_buffer[-1] += key
380
-
381
- elif proc.app_type == "doom":
382
- proc.meta["last_key"] = key
383
- proc.meta["needs_update"] = True
384
-
385
- def composite(self):
386
- if self.system_state == "BOOT": return None
387
- out = self.desktop_latent.clone()
388
- for i in self.desktop_icons:
389
- if PROGRAMS[i['app']].icon_dna in DRIVERS:
390
- dna = DRIVERS[PROGRAMS[i['app']].icon_dna]
391
- out[:, :, i['y']:i['y']+8, i['x']:i['x']+8] = dna
392
- for p in sorted(self.processes.values(), key=lambda x: x.z_order):
393
- x, y = p.position
394
- w, h = p.size
395
- if x+w <= 128 and y+h <= 128:
396
- out[:, :, y:y+h, x:x+w] = p.latent_state
397
- tb = DRIVERS["TASKBAR"].clone()
398
- tb[:, :, :, 0:24] = DRIVERS["START_BTN"]
399
- out[:, :, 122:128, :] = tb
400
- return out
401
 
402
  # ============================================================================
403
- # 4. SERVER & RENDERER
404
  # ============================================================================
405
 
406
  app = Flask(__name__)
407
  sock = Sock(app)
408
  pipe = None
 
409
  kernel = OSKernel()
410
- # Load the font downloaded in Dockerfile. Size 32 is large enough for 1024px render.
411
- try:
412
- SYSTEM_FONT = ImageFont.truetype("/app/VT323.ttf", 32)
413
- except Exception:
414
- print("[!] Font load failed, using default")
415
- SYSTEM_FONT = ImageFont.load_default()
416
-
417
- def init_ai():
418
- global pipe
419
- print("[*] Loading Neural Graphics Pipeline...")
420
-
421
- if torch.cuda.is_available():
422
- device = "cuda"
423
- dtype = torch.float16
424
- print(f"[✓] CUDA Detected: {torch.cuda.get_device_name(0)}")
425
- else:
426
- device = "cpu"
427
- dtype = torch.float32
428
- print("[!] WARNING: CUDA Not Found. Running CPU Mode.")
429
-
430
- try:
431
- pipe = StableDiffusionPipeline.from_pretrained(
432
- "runwayml/stable-diffusion-v1-5",
433
- torch_dtype=dtype,
434
- variant="fp16" if device=="cuda" else None
435
- ).to(device)
436
-
437
- if device == "cuda":
438
- try:
439
  pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
440
  pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
441
- pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=dtype).to(device)
442
- print("[✓] Optimizations (LCM+TAE) Enabled")
443
- except:
444
- print("[!] Using Standard Scheduler")
445
- except Exception as e:
446
- print(f"[!] AI Init Error: {e}")
447
-
448
- def render_frame(k_obj):
449
- latent_tensor = k_obj.composite()
450
- if latent_tensor is not None:
451
- with torch.no_grad():
452
- latents = (1 / 0.18215) * latent_tensor.to(pipe.device)
453
- if pipe.device.type == 'cpu': latents = latents.float()
454
-
455
- img = pipe.vae.decode(latents).sample
456
- img = (img / 2 + 0.5).clamp(0, 1).nan_to_num()
457
- img = img.cpu().permute(0, 2, 3, 1).numpy()
458
- pil_img = pipe.numpy_to_pil(img)[0]
459
- else:
460
- pil_img = Image.new('RGB', (128, 128), (0,0,0))
461
-
462
- # Keep 1024px render for high detail, but Font Size 32 makes it readable
463
- pil_img = pil_img.resize((1024, 1024), resample=Image.NEAREST)
464
- draw = ImageDraw.Draw(pil_img)
 
 
 
 
465
 
466
- if k_obj.system_state == "BOOT":
467
- y = 50
468
- for log in k_obj.boot_log:
469
- draw.text((50, y), log, fill=(0, 255, 0), font=SYSTEM_FONT)
470
- y += 35 # Increased spacing for larger font
471
-
472
- scale = 8
473
- for p in k_obj.processes.values():
474
- wx, wy = p.position
475
- cx = (wx * scale) + 16 # Padding adjusted for scale
476
- cy = (wy * scale) + 40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
477
 
478
- # Draw Title
479
- draw.text(((wx*scale)+10, (wy*scale)+5), p.name, fill=(255,255,255), font=SYSTEM_FONT)
 
 
480
 
481
- if p.text_buffer:
482
- # Show fewer lines because font is larger, ensuring it fits in window
483
- lines_to_show = p.text_buffer[-12:]
484
- for i, line in enumerate(lines_to_show):
485
- col = (0, 255, 0) if p.app_type == "cmd" else (0,0,0)
486
- if p.app_type == "doom": col = (255, 50, 50)
487
- draw.text((cx, cy + (i*30)), line, fill=col, font=SYSTEM_FONT)
488
-
489
- buf = io.BytesIO()
490
- pil_img.save(buf, format="JPEG", quality=80)
491
- return base64.b64encode(buf.getvalue()).decode()
492
-
493
- @app.route('/')
494
- def index():
495
- return HTML_TEMPLATE
496
 
497
  @sock.route('/kernel')
498
- def websocket_kernel(ws):
499
- global kernel
500
- if not pipe: init_ai()
501
- init_drivers()
502
- kernel = OSKernel()
503
 
504
- boot_msgs = ["BIOS DATE 01/23/2026", "DETECTING NEURAL NET...", "LOADING VFS...", "BOOTING..."]
505
- for msg in boot_msgs:
506
- kernel.boot_log.append(msg)
507
- ws.send(json.dumps({"type": "frame_update", "data": render_frame(kernel)}))
508
- time.sleep(0.5)
509
-
510
- kernel.init_desktop()
511
- ws.send(json.dumps({"type": "frame_update", "data": render_frame(kernel)}))
512
 
513
  while True:
514
  try:
515
  data = ws.receive()
516
  if not data: break
517
  msg = json.loads(data)
518
- needs_update = False
519
 
520
  if msg['type'] == 'click':
521
- x, y = msg['x'], msg['y']
522
- if y >= 122 and x < 24:
523
- kernel.start_menu_open = not kernel.start_menu_open
524
- needs_update = True
525
- elif kernel.system_state == "DESKTOP":
526
- for i in kernel.desktop_icons:
527
- if i['x'] <= x < i['x']+8 and i['y'] <= y < i['y']+8:
528
- kernel.spawn(i['app'], 30, 20)
529
- needs_update = True
530
- for p in sorted(kernel.processes.values(), key=lambda z: z.z_order, reverse=True):
531
- px, py = p.position
532
- pw, ph = p.size
533
- if px <= x < px+pw and py <= y < py+ph:
534
- kernel.focus(p.pid)
535
- if py <= y < py+4 and px+pw-4 <= x < px+pw: kernel.kill(p.pid)
536
- needs_update = True
537
- break
538
-
539
- elif msg['type'] == 'keydown':
540
- kernel.input(msg['key'])
541
- if kernel.focused_pid:
542
- p = kernel.processes[kernel.focused_pid]
543
- if "spawn_req" in p.meta:
544
- kernel.spawn(p.meta.pop("spawn_req"), 40, 40)
545
- if p.app_type == "doom" and p.meta.get("needs_update"):
546
- p.text_buffer.append(f"ACTION: {p.meta.get('last_key')}")
547
- p.meta["needs_update"] = False
548
- needs_update = True
549
-
550
- if needs_update:
551
- ws.send(json.dumps({"type": "frame_update", "data": render_frame(kernel)}))
552
-
553
  except Exception as e:
554
- print(f"WS Error: {e}")
555
  break
556
 
557
- if __name__ == "__main__":
558
- app.run(host="0.0.0.0", port=7860, threaded=True)
 
 
 
 
 
 
 
559
  EOF
560
 
561
  # 6. Launch the Monolith
 
4
  # Set working directory
5
  WORKDIR /app
6
 
7
+ # 1. Install System Dependencies
8
+ # 'build-essential' is added to compile llama-cpp-python wheels.
9
+ # 'git' and 'curl' are retained for asset downloads and diffusers compatibility.
10
  RUN apt-get update && apt-get install -y \
11
  git \
12
  curl \
13
+ build-essential \
14
  && rm -rf /var/lib/apt/lists/*
15
 
16
+ # 2. Download Retro Font (VT323)
17
+ # Keeps the aesthetic consistent with your "NeuralOS" theme.
18
  RUN curl -L -o /app/VT323.ttf https://github.com/google/fonts/raw/main/ofl/vt323/VT323-Regular.ttf
19
 
20
  # 3. Install Python Dependencies
21
+ # Merged from your requirements.txt.
22
+ # Note: llama-cpp-python is installed with default settings (CPU only) for broad compatibility.
23
  RUN pip install --no-cache-dir \
24
  torch \
25
+ torchvision \
26
+ numpy \
27
  flask \
28
  flask-sock \
29
  diffusers \
30
  transformers \
31
  accelerate \
32
+ peft \
33
+ llama-cpp-python \
34
+ pillow \
35
+ diskcache \
36
  safetensors \
37
  scipy
38
 
39
+ # 4. Create a non-root user (Best practice for security)
40
  RUN useradd -m -u 1000 user
41
  USER user
42
  ENV HOME=/home/user \
43
  PATH=/home/user/.local/bin:$PATH
44
 
45
  # 5. Write the Monolith Application to disk
46
+ # This merges drivers.py, index.html, and server.py into one file.
47
  COPY --chown=user <<'EOF' app.py
48
+ import sys, os, io, base64, json, pickle, time
49
+ import numpy as np
50
+ import torch
51
+ from pathlib import Path
52
+ from dataclasses import dataclass
53
+ from typing import Dict, List, Optional
54
+ from flask import Flask, request, send_file, render_template_string
55
+ from flask_sock import Sock
56
+ from diffusers import StableDiffusionPipeline, AutoencoderTiny, LCMScheduler
57
+ from PIL import Image, ImageDraw
58
+ from llama_cpp import Llama
59
 
60
  # ============================================================================
61
+ # 1. FRONTEND ASSET (index.html embedded)
62
  # ============================================================================
 
 
 
 
 
 
 
 
 
 
 
63
 
64
+ HTML_TEMPLATE = r"""
 
 
 
 
65
  <!DOCTYPE html>
66
  <html lang="en">
67
  <head>
68
  <meta charset="UTF-8">
69
+ <title>LiteWin XP - Neural OS Desktop</title>
70
+ <script src="https://cdn.tailwindcss.com"></script>
71
+ <script src="https://unpkg.com/react@18/umd/react.production.min.js"></script>
72
+ <script src="https://unpkg.com/react-dom@18/umd/react-dom.production.min.js"></script>
73
+ <script src="https://unpkg.com/@babel/standalone/babel.min.js"></script>
74
+ <link href="https://fonts.googleapis.com/css2?family=Tahoma:wght@400;700&family=Fira+Code:wght@300;500&display=swap" rel="stylesheet">
75
  <style>
76
+ * { box-sizing: border-box; }
77
+ body {
78
+ background: #010102;
79
+ color: #e2e2e2;
80
+ font-family: 'Tahoma', sans-serif;
81
+ margin: 0;
82
+ overflow: hidden;
83
+ cursor: auto;
 
 
 
 
 
84
  }
85
+ .desktop-area {
 
 
 
 
 
 
86
  position: relative;
87
+ width: 100vw;
88
+ height: 100vh;
89
+ background: #3A6EA5;
90
+ background-image: linear-gradient(to bottom, #5A9FD4 0%, #306088 100%);
 
 
 
 
 
 
91
  }
92
+ .canvas-viewport {
93
+ position: absolute;
94
+ top: 50%;
95
+ left: 50%;
96
+ transform: translate(-50%, -50%);
97
+ width: 1024px;
98
+ height: 1024px;
99
  background: #000;
100
+ box-shadow: 0 0 100px rgba(0,0,0,0.9);
101
+ border: 2px solid #1a1a1e;
102
+ image-rendering: pixelated;
103
  }
104
+ .canvas-viewport img { width: 100%; height: 100%; image-rendering: pixelated; }
105
+ .taskbar {
106
+ position: absolute; bottom: 0; left: 0; right: 0; height: 48px;
107
+ background: linear-gradient(to bottom, #1F4788 0%, #1A3E6F 50%, #0E2950 100%);
108
+ border-top: 2px solid #4D7DB5; display: flex; align-items: center; padding: 0 4px; gap: 4px;
 
 
 
109
  }
110
+ .start-button {
111
+ background: linear-gradient(to bottom, #3F8B3F 0%, #2F6B2F 100%);
112
+ border: 2px outset #5FAF5F; color: white; font-weight: bold; padding: 4px 12px;
113
+ border-radius: 3px; cursor: pointer; font-size: 13px; display: flex; align-items: center; gap: 6px;
 
 
 
 
 
 
 
114
  }
115
+ .taskbar-window {
116
+ background: linear-gradient(to bottom, #B5D3E7 0%, #7BA7C7 100%);
117
+ border: 2px outset #D0E5F5; padding: 4px 10px; border-radius: 3px; cursor: pointer;
118
+ font-size: 11px; max-width: 160px; color: #000;
 
 
 
 
 
 
 
119
  }
120
+ .taskbar-window.active { background: linear-gradient(to bottom, #7BA7C7 0%, #5A86A7 100%); border-style: inset; color: white; }
121
+ .sidebar {
122
+ position: fixed; left: 0; top: 0; bottom: 48px; width: 320px;
123
+ background: rgba(10, 10, 12, 0.95); border-right: 1px solid #1a1a1e;
124
+ backdrop-filter: blur(10px); z-index: 1000; overflow-y: auto; padding: 20px;
125
+ font-family: 'Fira Code', monospace;
 
 
126
  }
127
+ .inspector {
128
+ position: fixed; right: 0; top: 0; bottom: 48px; width: 340px;
129
+ background: rgba(10, 10, 12, 0.95); border-left: 1px solid #1a1a1e;
130
+ backdrop-filter: blur(10px); padding: 20px; font-family: 'Fira Code', monospace; overflow-y: auto;
 
 
 
 
 
 
 
131
  }
132
+ .code-block { background: #0a0a0c; border: 1px solid #1a1a1e; padding: 12px; font-size: 10px; color: #34d399; }
 
 
 
 
 
 
 
 
 
 
 
133
  </style>
134
  </head>
135
  <body>
136
+ <div id="root"></div>
137
+ <script type="text/babel">
138
+ const { useState, useEffect, useRef } = React;
139
+ const APPS = [
140
+ { id: 'notepad', name: 'Notepad', icon: '📝' },
141
+ { id: 'paint', name: 'Paint', icon: '🎨' },
142
+ { id: 'cmd', name: 'Command Prompt', icon: '⌨️' },
143
+ { id: 'explorer', name: 'Explorer', icon: '📁' },
144
+ ];
145
+ function App() {
146
+ const [desktopImage, setDesktopImage] = useState(null);
147
+ const [processes, setProcesses] = useState([]);
148
+ const [startMenuOpen, setStartMenuOpen] = useState(false);
149
+ const socketRef = useRef(null);
150
+ const canvasRef = useRef(null);
151
+
152
+ useEffect(() => {
153
+ const proto = window.location.protocol === 'https:' ? 'wss' : 'ws';
154
+ const ws = new WebSocket(`${proto}://${window.location.host}/kernel`);
155
+ socketRef.current = ws;
156
+ ws.onmessage = (e) => {
157
+ const msg = JSON.parse(e.data);
158
+ if (msg.type === 'desktop_ready' || msg.type === 'frame_update') {
159
+ setDesktopImage(msg.data);
160
+ if (msg.processes) setProcesses(msg.processes);
161
+ }
162
+ };
163
+ return () => ws.close();
164
+ }, []);
165
+
166
+ const handleCanvasClick = (e) => {
167
+ if (!canvasRef.current) return;
168
+ const rect = canvasRef.current.getBoundingClientRect();
169
+ const x = Math.floor(((e.clientX - rect.left) / rect.width) * 128);
170
+ const y = Math.floor(((e.clientY - rect.top) / rect.height) * 128);
171
+ socketRef.current?.send(JSON.stringify({ type: 'click', x, y }));
172
  };
173
 
174
+ const launchApp = (appId) => {
175
+ socketRef.current?.send(JSON.stringify({ type: 'launch_app', app: appId }));
176
+ setStartMenuOpen(false);
177
+ };
178
 
179
+ return (
180
+ <div className="desktop-area">
181
+ <div ref={canvasRef} className="canvas-viewport" onClick={handleCanvasClick}>
182
+ {desktopImage && <img src={`data:image/png;base64,${desktopImage}`} />}
183
+ </div>
184
+ <div className="taskbar">
185
+ <div className="start-button" onClick={() => setStartMenuOpen(!startMenuOpen)}>start</div>
186
+ {processes.map(p => <div key={p.pid} className="taskbar-window">{p.name}</div>)}
187
+ </div>
188
+ {startMenuOpen && (
189
+ <div style={{ position: 'absolute', bottom: '50px', left: '4px', width: '220px', background: '#f0f0f0', border: '2px outset #ccc' }}>
190
+ {APPS.map(app => (
191
+ <div key={app.id} onClick={() => launchApp(app.id)} style={{ padding: '8px', cursor: 'pointer', color: 'black' }}>
192
+ {app.icon} {app.name}
193
+ </div>
194
+ ))}
195
+ </div>
196
+ )}
197
+ <div className="sidebar">
198
+ <h1>🔧 Neural_IDE</h1>
199
+ <p>Monolith Build v9.3</p>
200
+ </div>
201
+ </div>
202
+ );
203
+ }
204
+ const root = ReactDOM.createRoot(document.getElementById('root'));
205
+ root.render(<App />);
206
  </script>
207
  </body>
208
  </html>
209
  """
210
 
211
  # ============================================================================
212
+ # 2. DRIVERS & KERNEL LOGIC
213
  # ============================================================================
214
 
215
  DRIVERS = {
216
  "TITLE_BAR": torch.zeros((1, 4, 4, 32), dtype=torch.float16),
217
+ "TITLE_BAR_INACTIVE": torch.zeros((1, 4, 4, 32), dtype=torch.float16),
218
  "CLOSE_BTN": torch.zeros((1, 4, 4, 4), dtype=torch.float16),
219
  "TASKBAR": torch.zeros((1, 4, 6, 128), dtype=torch.float16),
220
  "START_BTN": torch.zeros((1, 4, 6, 24), dtype=torch.float16),
221
  "DESKTOP_BG": torch.zeros((1, 4, 128, 128), dtype=torch.float16),
222
  "ICON_NOTEPAD": torch.zeros((1, 4, 8, 8), dtype=torch.float16),
223
+ "ICON_PAINT": torch.zeros((1, 4, 8, 8), dtype=torch.float16),
224
  "ICON_CMD": torch.zeros((1, 4, 8, 8), dtype=torch.float16),
225
  "ICON_FOLDER": torch.zeros((1, 4, 8, 8), dtype=torch.float16),
 
226
  }
227
 
228
+ def initialize_drivers():
229
+ DRIVERS["TITLE_BAR"][:, 0, 0:1, :] = 2.0
230
+ DRIVERS["TITLE_BAR"][:, 0, 1:3, :] = 1.2
231
+ DRIVERS["TITLE_BAR"][:, 1, :, :] = -1.0
232
  DRIVERS["CLOSE_BTN"][:, 2, :, :] = 2.5
233
  DRIVERS["TASKBAR"][:, 0, 0, :] = 1.2
234
  DRIVERS["START_BTN"][:, 1, 1:5, 2:22] = 1.8
235
+ DRIVERS["DESKTOP_BG"][:, 1, 0:80, :] = 1.2
236
+ DRIVERS["DESKTOP_BG"][:, 2, 0:80, :] = 1.5
237
  DRIVERS["DESKTOP_BG"][:, 1, 80:128, :] = 0.8
238
  DRIVERS["ICON_NOTEPAD"][:, 0, :, :] = 1.5
239
+ print("[*] LiteWin High-Fidelity DNA v4 initialized")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
240
 
241
  @dataclass
242
  class Process:
 
246
  position: tuple
247
  size: tuple
248
  latent_state: torch.Tensor
 
 
249
  status: str = "running"
250
  z_order: int = 0
251
+ def to_dict(self):
252
+ return {"pid": self.pid, "name": self.name, "app_type": self.app_type, "position": self.position, "size": self.size}
253
+
254
+ @dataclass
255
+ class Application:
256
+ name: str
257
+ icon_dna: str
258
+ window_prompt: str
259
+ content_prompt: str
260
+ default_size: tuple
261
+
262
+ PROGRAMS = {
263
+ "notepad": Application("Notepad", "ICON_NOTEPAD", "high quality windows_xp notepad", "white text editor", (48, 40)),
264
+ "paint": Application("Paint", "ICON_PAINT", "official MS Paint", "white canvas", (64, 48)),
265
+ "cmd": Application("Command Prompt", "ICON_CMD", "windows terminal", "black console", (56, 40)),
266
+ "explorer": Application("Explorer", "ICON_FOLDER", "windows explorer", "file browser", (72, 56))
267
+ }
268
 
269
  class OSKernel:
270
  def __init__(self):
271
+ self.processes: Dict[int, Process] = {}
272
+ self.next_pid = 1
273
+ self.focused_pid: Optional[int] = None
 
 
 
 
 
 
 
 
274
  self.desktop_latent = DRIVERS["DESKTOP_BG"].clone()
275
  self.desktop_icons = [
276
+ {"app": "notepad", "x": 4, "y": 4, "label": "Notepad"},
277
+ {"app": "paint", "x": 4, "y": 16, "label": "Paint"},
278
+ {"app": "cmd", "x": 4, "y": 28, "label": "Command Prompt"},
279
+ {"app": "explorer", "x": 4, "y": 40, "label": "My Computer"},
280
  ]
281
+ self.start_menu_open = False
282
+
283
+ def spawn_process(self, app_type: str, x: int, y: int) -> int:
284
+ if app_type not in PROGRAMS: return -1
285
  app = PROGRAMS[app_type]
286
  pid = self.next_pid
287
  self.next_pid += 1
288
+ w, h = app.default_size
289
+ proc = Process(pid, app.name, app_type, (x, y), (w, h), torch.zeros((1, 4, h, w), dtype=torch.float16), "running", pid)
290
+ self.processes[pid] = proc
291
+ self.focus_process(pid)
 
 
 
 
 
 
 
 
 
 
 
292
  return pid
293
+
294
+ def kill_process(self, pid: int):
295
+ if pid in self.processes:
296
+ del self.processes[pid]
297
+ if self.focused_pid == pid: self.focused_pid = None
298
+
299
+ def focus_process(self, pid: int):
300
  if pid in self.processes:
 
 
 
301
  self.focused_pid = pid
302
+ max_z = max((p.z_order for p in self.processes.values()), default=0)
303
+ self.processes[pid].z_order = max_z + 1
304
+
305
def composite_frame(self) -> torch.Tensor:
    """Composite the full screen into one latent tensor.

    Paint order: desktop background, desktop icons (8x8 latent stamps),
    running windows from bottom-most to top-most z-order, then the
    taskbar strip (rows 122..128) with the start button on its left.
    """
    canvas = self.desktop_latent.clone()
    # Desktop icons: each stamps an 8x8 latent tile from the driver table.
    for entry in self.desktop_icons:
        program = PROGRAMS[entry['app']]
        if program.icon_dna in DRIVERS:
            stamp = DRIVERS[program.icon_dna]
            ix, iy = entry['x'], entry['y']
            canvas[:, :, iy:iy + 8, ix:ix + 8] = stamp
    # Windows, bottom-most first so later blits end up on top.
    visible = [proc for proc in self.processes.values() if proc.status == "running"]
    visible.sort(key=lambda proc: proc.z_order)
    for win in visible:
        wx, wy = win.position
        ww, wh = win.size
        if wx + ww > 128 or wy + wh > 128:
            continue  # window would fall off the 128x128 canvas
        canvas[:, :, wy:wy + wh, wx:wx + ww] = win.latent_state
    # Taskbar strip with the start button occupying columns 0..24.
    bar = DRIVERS["TASKBAR"].clone()
    bar[:, :, :, 0:24] = DRIVERS["START_BTN"]
    canvas[:, :, 122:128, :] = bar
    return canvas
323
+
324
def handle_click(self, x: int, y: int) -> Dict:
    """Route a click at screen coordinate (x, y) and return an action dict.

    Hit-test order: taskbar (y >= 122, start button in x < 24), then
    desktop icons (8x8 hotspots), then windows from top-most z-order
    down (close button = top 4 rows, rightmost 4 columns).
    """
    # Taskbar strip.
    if y >= 122:
        if x < 24:  # start button
            self.start_menu_open = not self.start_menu_open
            return {"action": "toggle_start_menu", "open": self.start_menu_open}
        return {"action": "none"}
    # Desktop icons.
    for entry in self.desktop_icons:
        left, top = entry['x'], entry['y']
        if left <= x < left + 8 and top <= y < top + 8:
            new_pid = self.spawn_process(entry['app'], x=32, y=24)
            return {"action": "launch", "app": entry['app'], "pid": new_pid}
    # Windows, top-most first so the visually front window wins the hit.
    for win in sorted(self.processes.values(), key=lambda p: p.z_order, reverse=True):
        if win.status != "running":
            continue
        wx, wy = win.position
        ww, wh = win.size
        if wx <= x < wx + ww and wy <= y < wy + wh:
            self.focus_process(win.pid)
            # Close button: title-bar rows, rightmost 4 columns.
            if wy <= y < wy + 4 and wx + ww - 4 <= x < wx + ww:
                self.kill_process(win.pid)
                return {"action": "close", "pid": win.pid}
            return {"action": "focus", "pid": win.pid}
    return {"action": "none"}
346
+
347
class LatentFileSystem:
    """Thin wrapper around an on-disk directory used as the OS 'disk'.

    Currently only ensures the root directory exists; file operations
    are not implemented yet.
    """

    def __init__(self, root_path="./litewin_disk"):
        # NOTE(review): relies on pathlib.Path being imported at module
        # level — confirm the file header includes it.
        self.root = Path(root_path)
        self.root.mkdir(exist_ok=True)
351
+
352
class LatentVM:
    """Placeholder virtual machine for latent-space 'bytecode'."""

    def execute(self, bytecode: str, target_latent: torch.Tensor) -> torch.Tensor:
        # No-op until a real latent VM is implemented: the input latent
        # is returned unchanged.
        return target_latent
 
355
 
356
# ============================================================================
# 3. SERVER & ML PIPELINE
# ============================================================================

# Flask app + WebSocket endpoint; all ML state lives in module globals.
app = Flask(__name__)
sock = Sock(app)
pipe = None   # lazily initialised StableDiffusionPipeline (see get_pipe)
llm = None    # reserved for the GGUF language model (not loaded here)
kernel = OSKernel()
GGUF_MODEL_PATH = "models/qwen2.5-coder-0.5b-instruct-q8_0.gguf"
STEPS = 1     # inference step count; get_pipe() adjusts it per scheduler
367
+
368
def get_pipe():
    """Lazily build and cache the global Stable Diffusion pipeline.

    Loads SD 1.5 once, then tries to enable the fast path on CUDA
    (LCM-LoRA + LCMScheduler + TAESD tiny VAE, 1 step). On CPU, or when
    the optimization fails, falls back to a multi-step configuration.
    Mutates the module globals `pipe` and `STEPS`; returns the pipeline.
    """
    global pipe, STEPS
    if pipe is None:
        print("[*] Booting Neural Kernel...")
        device = "cuda" if torch.cuda.is_available() else "cpu"
        dt = torch.float16 if device == "cuda" else torch.float32
        pipe = StableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", torch_dtype=dt
        ).to(device)
        try:
            if device == "cuda":
                pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
                pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
                pipe.vae = AutoencoderTiny.from_pretrained(
                    "madebyollin/taesd", torch_dtype=dt
                ).to(device)
                STEPS = 1
                print("[✓] LCM + TAE Enabled")
            else:
                STEPS = 4
        except Exception as e:
            # BUGFIX: if the LCM/TAESD setup fails we are still on the
            # default (non-LCM) scheduler, so leaving STEPS at 1 would
            # produce garbage frames. Fall back to multi-step inference.
            STEPS = 4
            print(f"[!] Optimization failed: {e}")
    return pipe
387
+
388
def decode_layer(latents, p):
    """Decode a latent tensor through the pipeline VAE into a base64 PNG.

    `latents` is assumed to be in SD-scaled latent space (multiplied by
    0.18215 at encode time); the scaling is undone before decoding.
    Returns the PNG bytes as a base64 ASCII string.
    """
    with torch.no_grad():
        scaled = 1 / 0.18215 * latents  # undo SD latent scaling
        scaled = scaled.to(device=p.device, dtype=p.vae.dtype)
        frame = p.vae.decode(scaled).sample
        # Map [-1, 1] -> [0, 1], scrub NaNs, move to CPU channel-last.
        frame = (frame / 2 + 0.5).clamp(0, 1).nan_to_num()
        frame = frame.cpu().permute(0, 2, 3, 1).numpy()
        pil_img = p.numpy_to_pil(frame)[0]
        buf = io.BytesIO()
        pil_img.save(buf, format="PNG")
        return base64.b64encode(buf.getvalue()).decode()
399
+
400
def render_perfect_window_latent(p, w_blocks, h_blocks, title="Window"):
    """Rasterize blank XP-style window chrome and encode it to latent space.

    Sizes are in 8-pixel latent blocks (`w_blocks` x `h_blocks`). Draws a
    black border, a blue title bar (32 px tall, matching the 4 latent rows
    that generate_window_fast locks), and a white content area, then
    encodes through the pipeline VAE scaled by 0.18215. Returns a CPU
    latent tensor.

    NOTE(review): `title` is currently unused — no text is drawn in the
    title bar; confirm whether title rendering was intended.
    """
    # Local import: the visible module header does not import numpy, and
    # torch.from_numpy needs an ndarray. Harmless if np is also global.
    import numpy as np
    width, height = w_blocks * 8, h_blocks * 8
    img = Image.new('RGB', (width, height), color=(236, 233, 216))
    draw = ImageDraw.Draw(img)
    draw.rectangle([0, 0, width-1, height-1], outline=(0, 0, 0))
    draw.rectangle([1, 1, width-2, 31], fill=(0, 84, 227))  # Blue Title
    draw.rectangle([4, 32, width-5, height-5], fill=(255, 255, 255))  # Content

    img_t = torch.from_numpy(np.array(img)).permute(2, 0, 1).float() / 255.0
    img_t = (img_t * 2.0 - 1.0).unsqueeze(0).to(device=p.device, dtype=p.vae.dtype)
    with torch.no_grad():
        enc = p.vae.encode(img_t)
        # BUGFIX: AutoencoderKL returns .latent_dist, but AutoencoderTiny
        # (installed by get_pipe on CUDA) returns .latents — the original
        # .latent_dist.sample() would AttributeError on the fast path.
        if hasattr(enc, "latent_dist"):
            raw = enc.latent_dist.sample()
        else:
            raw = enc.latents
        # NOTE(review): 0.18215 scaling is symmetric with decode_layer;
        # verify it is appropriate for the TAESD latent range too.
        latent = raw * 0.18215
    return latent.cpu()
413
+
414
def generate_window_fast(p, kernel, pid):
    """Diffuse window content for process `pid` and store it on the process.

    Starts from a pixel-perfect chrome latent, runs STEPS denoising steps
    with classifier-free guidance on the app's content prompt, and keeps
    the top 4 latent rows (the title bar) pinned to the chrome latent
    throughout. Writes the final latent to `proc.latent_state` (CPU).
    """
    device = p.device
    proc = kernel.processes[pid]
    app = PROGRAMS[proc.app_type]
    w, h = proc.size

    # Starting point: the rendered window frame, encoded to latent space.
    base_latent = render_perfect_window_latent(p, w, h, title=app.name)

    # Build CFG embeddings: negative ("blurry") first, then the prompt.
    prompt = f"{app.content_prompt}, pixel perfect"

    def _encode(text):
        # One-liner tokenizer + text-encoder pass for a single string.
        tokens = p.tokenizer(
            [text], padding="max_length",
            max_length=p.tokenizer.model_max_length,
            truncation=True, return_tensors="pt",
        )
        return p.text_encoder(tokens.input_ids.to(device))[0]

    embeds = torch.cat([_encode("blurry"), _encode(prompt)])

    latents = base_latent.to(device)
    p.scheduler.set_timesteps(STEPS, device=device)

    # Guidance 1.0 for the single-step LCM path, 7.5 for multi-step.
    guidance = 1.0 if STEPS == 1 else 7.5
    # Mask: 0 over the title-bar rows keeps them locked to the chrome.
    freeze = torch.ones_like(latents)
    freeze[:, :, 0:4, :] = 0.0

    for t in p.scheduler.timesteps:
        model_in = p.scheduler.scale_model_input(torch.cat([latents] * 2), t)
        with torch.no_grad():
            pred = p.unet(model_in, t, encoder_hidden_states=embeds, return_dict=False)[0]
        uncond, text = pred.chunk(2)
        guided = uncond + guidance * (text - uncond)
        stepped = p.scheduler.step(guided, t, latents).prev_sample
        # Blend: denoised content everywhere except the locked title bar.
        latents = (freeze * stepped) + ((1.0 - freeze) * latents)

    proc.latent_state = latents.cpu()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
449
 
450
@sock.route('/kernel')
def kernel_ws(ws):
    """WebSocket event loop for one client.

    On connect: boots the pipeline/drivers and sends a `desktop_ready`
    frame. Then for each incoming JSON message (`click` or `launch_app`)
    it mutates the kernel, renders any newly launched window, and replies
    with a `frame_update` carrying the composited frame and process list.
    Any exception (including a closed socket) ends the loop.
    """
    p = get_pipe()
    initialize_drivers()
    print("[*] Client connected to Monolith Kernel")

    frame = kernel.composite_frame()
    ws.send(json.dumps({
        "type": "desktop_ready",
        "data": decode_layer(frame, p),
        "processes": [proc.to_dict() for proc in kernel.processes.values()]
    }))

    while True:
        try:
            data = ws.receive()
            if not data: break
            msg = json.loads(data)

            if msg['type'] == 'click':
                res = kernel.handle_click(msg['x'], msg['y'])
                # BUGFIX: guard against a failed spawn (pid == -1) before
                # rendering — generate_window_fast would KeyError and
                # tear down the whole socket.
                if res['action'] == 'launch' and res.get('pid', -1) > 0:
                    generate_window_fast(p, kernel, res['pid'])

            elif msg['type'] == 'launch_app':
                pid = kernel.spawn_process(msg['app'], 12, 12)
                # BUGFIX: spawn_process returns -1 for unknown app types.
                if pid > 0:
                    generate_window_fast(p, kernel, pid)

            # Every handled message gets a fresh composited frame.
            ws.send(json.dumps({
                "type": "frame_update",
                "data": decode_layer(kernel.composite_frame(), p),
                "processes": [proc.to_dict() for proc in kernel.processes.values()]
            }))

        except Exception as e:
            print(f"[ERR] {e}")
            break
487
 
488
@app.route('/')
def index():
    """Serve the single-page client UI."""
    # BUGFIX(defensive): the visible module header only does
    # `from flask import Flask`, so bind render_template_string locally
    # to guarantee it exists at request time. Harmless if it is also
    # imported elsewhere in the file.
    from flask import render_template_string
    return render_template_string(HTML_TEMPLATE)
491
+
492
if __name__ == '__main__':
    # Startup banner, then serve on the Hugging Face Spaces default port.
    banner = "=" * 40
    print(banner)
    print(" NEURAL OS MONOLITH v1.0 RUNNING")
    print(banner)
    app.run(host='0.0.0.0', port=7860, threaded=True)
497
  EOF
498
 
499
  # 6. Launch the Monolith