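# syntax=docker/dockerfile:1
# (parser directive above: a BuildKit frontend is needed for the heredoc COPY of app.py below)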
# NEURAL OS HYPER-CORE v2.0 - 100% Performance Boost
FROM python:3.10-slim
WORKDIR /app
RUN apt-get update && apt-get install -y curl git libgomp1 && rm -rf /var/lib/apt/lists/*
RUN pip install --upgrade pip
RUN pip install --no-cache-dir torch torchvision numpy flask flask-sock \
diffusers transformers accelerate peft pillow diskcache safetensors scipy sentencepiece
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user PATH=/home/user/.local/bin:$PATH
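# app.py is written into the image inline via a BuildKit heredoc; no source files are copied from the build context.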
COPY --chown=user <<'HYPER_EOF' app.py
import sys,os,io,base64,json,warnings,time,threading
from queue import Queue
import torch
import torch.nn.functional as F
import numpy as np
from dataclasses import dataclass
from typing import Dict,List,Optional,Tuple
from flask import Flask
from flask_sock import Sock
from PIL import Image,ImageDraw,ImageFont
from transformers import AutoModelForCausalLM,AutoTokenizer
from diffusers import StableDiffusionPipeline,AutoencoderTiny,LCMScheduler
import diskcache
warnings.filterwarnings("ignore")
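# Embedded single-page frontend: React (via CDN) shows the rendered desktop image, opens a WebSocket to /kernel,
# and maps clicks from the 1024px viewport down to the 128x128 latent grid used by the backend.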
HTML=r"""<!DOCTYPE html><html><head><meta charset="UTF-8"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"><title>NeuralOS HyperCore v2</title><script src="https://cdn.tailwindcss.com"></script><script src="https://unpkg.com/react@18/umd/react.production.min.js"></script><script src="https://unpkg.com/react-dom@18/umd/react-dom.production.min.js"></script><script src="https://unpkg.com/@babel/standalone/babel.min.js"></script><style>*{box-sizing:border-box}body{background:#000;color:#e2e2e2;margin:0;overflow:hidden;font-family:Tahoma,sans-serif;display:flex;align-items:center;justify-content:center;height:100vh;width:100vw}#desktop-container{position:relative;width:1024px;height:1024px;transform-origin:center;box-shadow:0 0 80px rgba(0,150,255,.4);background:#3A6EA5;border:2px solid #2a5c8f}.canvas-viewport{position:absolute;top:0;left:0;width:100%;height:100%;image-rendering:pixelated;cursor:pointer}.canvas-viewport img{width:100%;height:100%;display:block;image-rendering:pixelated}.taskbar{position:absolute;bottom:0;left:0;right:0;height:64px;background:linear-gradient(to bottom,#245EDB 0%,#1F4FB5 50%,#1941A5 100%);border-top:2px solid #5C9CFF;display:flex;align-items:center;padding:0 10px;gap:10px;z-index:50;box-shadow:0 -2px 10px rgba(0,0,0,.3)}.start-btn{background:linear-gradient(to bottom,#3FA23F,#2E862E);border:2px outset #5FCF5F;color:#fff;font-weight:700;padding:8px 20px;border-radius:6px;cursor:pointer;font-size:18px;font-style:italic;text-shadow:1px 1px 2px #000;display:flex;align-items:center;gap:8px;transition:all .15s;user-select:none}.start-btn:hover{background:linear-gradient(to bottom,#4CB24C,#359535);transform:translateY(-1px)}.start-btn:active{border-style:inset;transform:translateY(0)}.start-btn::before{content:"β";font-style:normal;font-size:22px}.start-menu{position:absolute;bottom:70px;left:10px;width:320px;background:linear-gradient(to right,#245EDB 0,#245EDB 60px,#D6DFF7 60px);border:3px outset #8BB8FF;border-radius:8px 8px 0 0;box-shadow:2px 2px 10px rgba(0,0,0,.5);z-index:100;overflow:hidden}.start-menu-header{background:linear-gradient(to bottom,#5C9CFF,#245EDB);color:#fff;font-weight:700;padding:8px 12px;border-bottom:2px solid #8BB8FF;font-size:16px;text-shadow:1px 1px 1px #000}.start-menu-content{display:flex}.start-menu-sidebar{width:60px;background:#245EDB;padding:10px 5px;color:#fff;font-size:11px;writing-mode:vertical-rl;text-orientation:mixed;font-weight:700;text-shadow:1px 1px 1px #000}.start-menu-items{flex:1;background:#D6DFF7;padding:8px 0}.start-menu-item{padding:10px 15px;cursor:pointer;display:flex;align-items:center;gap:12px;color:#000;font-size:14px;transition:background .1s}.start-menu-item:hover{background:#4A7FD5;color:#fff}.start-menu-icon{width:32px;height:32px;background:#999;border:1px solid #666;border-radius:3px;display:flex;align-items:center;justify-content:center;font-size:18px}.console-log{position:absolute;top:20px;right:20px;width:400px;max-height:250px;background:rgba(0,0,0,.92);color:#0f0;font-family:'Fira Code',monospace;font-size:13px;line-height:1.5;padding:15px;border:2px solid #0a0;border-radius:6px;overflow-y:auto;z-index:1000;pointer-events:none;box-shadow:0 4px 20px rgba(0,255,0,.2);backdrop-filter:blur(5px)}.log-entry{margin-bottom:4px;animation:fadeIn .3s}@keyframes fadeIn{from{opacity:0;transform:translateX(-10px)}to{opacity:1;transform:translateX(0)}}.fps-counter{position:absolute;top:20px;left:20px;background:rgba(0,0,0,.7);color:#0ff;padding:8px 
12px;border-radius:4px;font-family:'Fira Code',monospace;font-size:12px;z-index:1000;pointer-events:none}</style></head><body><div id="root"></div><script type="text/babel">const{useState,useEffect,useRef}=React;function App(){const[desktopImage,setDesktopImage]=useState(null);const[logs,setLogs]=useState(["β‘ NeuralOS HyperCore v2.0","βοΈ Initializing...","π§ AI Engines: STANDBY"]);const[scale,setScale]=useState(1);const[startMenuOpen,setStartMenuOpen]=useState(false);const[fps,setFps]=useState(0);const socketRef=useRef(null);const containerRef=useRef(null);const frameCountRef=useRef(0);const lastTimeRef=useRef(Date.now());const addLog=msg=>setLogs(p=>[...p.slice(-12),msg]);useEffect(()=>{const i=setInterval(()=>{const now=Date.now();const delta=(now-lastTimeRef.current)/1e3;const f=Math.round(frameCountRef.current/delta);setFps(f);frameCountRef.current=0;lastTimeRef.current=now},1e3);return()=>clearInterval(i)},[]);useEffect(()=>{const h=()=>{const p=20;const w=window.innerWidth-p;const ht=window.innerHeight-p;const sw=w/1024;const sh=ht/1024;const s=Math.min(sw,sh,1);setScale(s)};window.addEventListener('resize',h);h();return()=>window.removeEventListener('resize',h)},[]);useEffect(()=>{const proto=window.location.protocol==='https:'?'wss':'ws';const ws=new WebSocket(`${proto}://${window.location.host}/kernel`);socketRef.current=ws;ws.onopen=()=>addLog("π Kernel Connected");ws.onmessage=e=>{const msg=JSON.parse(e.data);if(msg.type==='frame_update'||msg.type==='desktop_ready'){setDesktopImage(msg.data);frameCountRef.current++}if(msg.type==='log')addLog(msg.data)};ws.onerror=()=>addLog("β Error");ws.onclose=()=>addLog("π Disconnected");return()=>ws.close()},[]);const handleClick=e=>{if(!containerRef.current)return;const rect=containerRef.current.getBoundingClientRect();const cx=(e.clientX-rect.left)/scale;const cy=(e.clientY-rect.top)/scale;const gx=Math.floor((cx/1024)*128);const gy=Math.floor((cy/1024)*128);if(gx>=0&&gx<=128&&gy>=0&&gy<=128){socketRef.current?.send(JSON.stringify({type:'click',x:gx,y:gy}));setStartMenuOpen(false)}};const toggleStartMenu=e=>{e.stopPropagation();setStartMenuOpen(!startMenuOpen)};const launchApp=app=>{socketRef.current?.send(JSON.stringify({type:'launch_app',app:app}));setStartMenuOpen(false)};return(<div id="desktop-container" ref={containerRef} style={{transform:`scale(${scale})`}}><div className="canvas-viewport" onClick={handleClick}>{desktopImage&&<img src={`data:image/png;base64,${desktopImage}`} draggable="false"/>}</div><div className="fps-counter">FPS:{fps}|Neural Active</div><div className="taskbar"><div className="start-btn" onClick={toggleStartMenu}>start</div></div>{startMenuOpen&&(<div className="start-menu"><div className="start-menu-header">NeuralOS Programs</div><div className="start-menu-content"><div className="start-menu-sidebar">HyperCore v2</div><div className="start-menu-items"><div className="start-menu-item" onClick={()=>launchApp('notepad')}><div className="start-menu-icon">π</div><span>Notepad</span></div><div className="start-menu-item" onClick={()=>launchApp('paint')}><div className="start-menu-icon">π¨</div><span>Paint</span></div><div className="start-menu-item" onClick={()=>launchApp('cmd')}><div className="start-menu-icon">β¨οΈ</div><span>Command Prompt</span></div><div className="start-menu-item" onClick={()=>launchApp('explorer')}><div className="start-menu-icon">π</div><span>File Explorer</span></div><div className="start-menu-item" onClick={()=>launchApp('browser')}><div 
className="start-menu-icon">π</div><span>Browser</span></div></div></div></div>)}<div className="console-log">{logs.map((l,i)=><div key={i} className="log-entry">>{l}</div>)}</div></div>);}ReactDOM.createRoot(document.getElementById('root')).render(<App/>);</script></body></html>"""
@dataclass
class Application:
    name:str
    icon_prompt:str
    content_prompt:str
    default_size:Tuple[int,int]
    refinement_steps:int=2
@dataclass
class Process:
    pid:int
    name:str
    app_type:str
    position:Tuple[int,int]
    size:Tuple[int,int]
    latent_state:torch.Tensor
    z_order:int
    refinement_level:int=0
    last_refined:float=0
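# Registry of launchable apps: icon prompt, window-content prompt, window size (in latent cells), refinement passes.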
PROGRAMS={
"notepad":Application("Notepad","pixel art notepad icon yellow paper blue lines 32x32 crisp detailed","windows notepad white background courier font menu bar detailed UI",(48,38),3),
"paint":Application("Paint","pixel art paint icon colorful palette brush 32x32 crisp detailed","ms paint white canvas color palette toolbar brushes detailed",(56,44),3),
"cmd":Application("CMD","pixel art terminal icon black screen white prompt 32x32 crisp","command prompt black white monospace C:\\ detailed",(52,36),2),
"explorer":Application("Explorer","pixel art folder icon yellow folder 32x32 crisp detailed","windows explorer folder tree file icons toolbar detailed UI",(60,46),3),
"browser":Application("Browser","pixel art browser icon blue globe 32x32 crisp detailed","web browser address bar navigation buttons detailed UI",(64,48),3)
}
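# Simple in-memory cache so each app icon latent is generated only once per process lifetime.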
class IconCache:
    def __init__(self):self.cache={}
    def get(self,k):return self.cache.get(k)
    def set(self,k,v):self.cache[k]=v
ICON_CACHE=IconCache()
DRIVERS={}
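# Pre-compute the desktop background as a 1x4x128x128 latent with a vertical gradient across the latent channels.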
def initialize_drivers():
    bg=torch.zeros((1,4,128,128),dtype=torch.float32)
    for y in range(128):
        i=0.3+(y/128)*0.5
        bg[:,0,y,:]=i*0.4
        bg[:,1,y,:]=i*0.9
        bg[:,2,y,:]=i*0.2
    DRIVERS["DESKTOP_BG"]=bg
    print("[✓] Drivers Init - HQ Background")
class OSKernel:
    def __init__(self):
        self.processes:Dict[int,Process]={}
        self.next_pid=1
        self.focused_pid:Optional[int]=None
        self.refinement_queue=Queue()
        self.desktop_icons=[
            {"app":"notepad","x":6,"y":6,"label":"Notepad"},
            {"app":"paint","x":6,"y":20,"label":"Paint"},
            {"app":"cmd","x":6,"y":34,"label":"CMD"},
            {"app":"explorer","x":6,"y":48,"label":"Explorer"},
            {"app":"browser","x":6,"y":62,"label":"Browser"}
        ]
    def spawn_process(self,app_type:str,x:int=32,y:int=24)->int:
        if app_type not in PROGRAMS:return -1
        app=PROGRAMS[app_type]
        pid=self.next_pid
        self.next_pid+=1
        w,h=app.default_size
        latent=torch.zeros((1,4,h,w),dtype=torch.float32)
        proc=Process(pid,app.name,app_type,(x,y),(w,h),latent,pid,0,time.time())
        self.processes[pid]=proc
        self.focus_process(pid)
        self.refinement_queue.put(pid)
        return pid
    def kill_process(self,pid:int):
        if pid in self.processes:
            del self.processes[pid]
            if self.focused_pid==pid:self.focused_pid=None
    def focus_process(self,pid:int):
        if pid in self.processes:
            self.focused_pid=pid
            max_z=max((p.z_order for p in self.processes.values()),default=0)
            self.processes[pid].z_order=max_z+1
    def handle_click(self,x:int,y:int)->Dict:
        sorted_procs=sorted(self.processes.values(),key=lambda p:p.z_order,reverse=True)
        for proc in sorted_procs:
            px,py=proc.position
            pw,ph=proc.size
            if px<=x<px+pw and py<=y<py+ph:
                if py<=y<py+4 and px+pw-4<=x<px+pw:
                    self.kill_process(proc.pid)
                    return{"action":"close","pid":proc.pid,"name":proc.name}
                self.focus_process(proc.pid)
                return{"action":"focus","pid":proc.pid,"name":proc.name}
        for icon in self.desktop_icons:
            ix,iy=icon['x'],icon['y']
            if ix<=x<ix+10 and iy<=y<iy+10:
                pid=self.spawn_process(icon['app'],28+(len(self.processes)%3)*12,20+(len(self.processes)%3)*8)
                return{"action":"launch","pid":pid,"app":icon['app']}
        return{"action":"desktop_click","x":x,"y":y}
class NeuralSystem:
    def __init__(self):
        self.device="cuda" if torch.cuda.is_available() else "cpu"
        self.dt=torch.float16 if self.device=="cuda" else torch.float32
        print(f"[⚡] Device:{self.device}|Type:{self.dt}")
        print("[🧠] Loading Neural Renderer...")
        self.pipe=StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",torch_dtype=self.dt,safety_checker=None,requires_safety_checker=False)
        if self.device=="cuda":
            self.pipe=self.pipe.to("cuda")
            self.pipe.enable_attention_slicing()
        self.pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
        self.pipe.scheduler=LCMScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.vae=AutoencoderTiny.from_pretrained("madebyollin/taesd",torch_dtype=self.dt).to(self.device)
        print("[🧠] Loading AI...")
        self.model_id="Qwen/Qwen2.5-Coder-0.5B-Instruct"
        self.tokenizer=AutoTokenizer.from_pretrained(self.model_id)
        if self.tokenizer.pad_token_id is None:self.tokenizer.pad_token_id=self.tokenizer.eos_token_id
        self.llm=AutoModelForCausalLM.from_pretrained(self.model_id,torch_dtype=self.dt).to(self.device)
        self.content_cache=diskcache.Cache('/tmp/neural_cache')
        print("[✓] Systems Online")
    def think(self,prompt:str,max_tok:int=48)->str:
        cache_key=f"think_{hash(prompt)}"
        cached=self.content_cache.get(cache_key)
        if cached:return cached
        inputs=self.tokenizer(prompt,return_tensors="pt",padding=True,truncation=True).to(self.device)
        with torch.no_grad():
            outputs=self.llm.generate(inputs.input_ids,attention_mask=inputs.attention_mask,max_new_tokens=max_tok,do_sample=True,temperature=0.7,pad_token_id=self.tokenizer.eos_token_id)
        response=self.tokenizer.decode(outputs[0][len(inputs.input_ids[0]):],skip_special_tokens=True).strip()
        self.content_cache.set(cache_key,response,expire=3600)
        return response
    def generate_icon(self,app_type:str)->torch.Tensor:
        cache_key=f"icon_{app_type}"
        cached=ICON_CACHE.get(cache_key)
        if cached is not None:return cached
        app=PROGRAMS[app_type]
        with torch.no_grad():
            latents=torch.randn((1,4,10,10),device=self.device,dtype=self.dt)*0.8
            # pass height/width matching the 10x10 latent grid (x8 VAE scale) so the pipeline accepts the custom-sized latents
            result=self.pipe(app.icon_prompt,height=80,width=80,latents=latents,num_inference_steps=2,guidance_scale=1.0,output_type="latent").images
            result=result*1.3
        ICON_CACHE.set(cache_key,result)
        return result
    def generate_window_content(self,proc:Process,steps:int=1):
        app_def=PROGRAMS[proc.app_type]
        ref_desc=f" refinement {proc.refinement_level}" if proc.refinement_level>0 else ""
        prompt=f"windows xp {app_def.name}{ref_desc} {app_def.content_prompt} highly detailed sharp"
        with torch.no_grad():
            if proc.refinement_level==0:
                latents=torch.randn((1,4,proc.size[1],proc.size[0]),device=self.device,dtype=self.dt)*0.5
            else:
                latents=proc.latent_state.to(self.device,dtype=self.dt)
                noise=torch.randn_like(latents)*0.1
                latents=latents+noise
            # height/width must match the window's latent size (x8 VAE scale) for the pipeline to accept the latents
            img_latents=self.pipe(prompt,height=proc.size[1]*8,width=proc.size[0]*8,latents=latents,num_inference_steps=steps,guidance_scale=1.0,output_type="latent").images
            # stamp a crude title bar and close-button marker directly in latent space
            img_latents[:,1,0:4,:]=1.5
            img_latents[:,0,0:4,:]=0.5
            img_latents[:,2,1:3,-4:-1]=2.0
        proc.latent_state=img_latents
        proc.refinement_level+=1
        proc.last_refined=time.time()
    def render_frame(self,kernel:OSKernel):
        # compose the full 128x128 desktop in latent space, then decode once with the tiny VAE;
        # cast to the pipeline dtype so fp16 weights on CUDA get fp16 inputs
        canvas=DRIVERS["DESKTOP_BG"].clone().to(self.device,dtype=self.dt)
        for icon in kernel.desktop_icons:
            icon_latent=self.generate_icon(icon['app']).to(self.device,dtype=self.dt)
            x,y=icon['x'],icon['y']
            canvas[:,:,y:y+10,x:x+10]=icon_latent
        sorted_procs=sorted(kernel.processes.values(),key=lambda p:p.z_order)
        for proc in sorted_procs:
            x,y=proc.position
            w,h=proc.size
            if x+w<=128 and y+h<=128:
                proc_latent=proc.latent_state.to(self.device,dtype=self.dt)
                canvas[:,:,y:y+h,x:x+w]=proc_latent
        with torch.no_grad():
            img=self.pipe.vae.decode(canvas/0.18215).sample
        img=(img/2+0.5).clamp(0,1).cpu().permute(0,2,3,1).numpy()
        img=self.pipe.numpy_to_pil(img)[0]
        return img
sys_engine=None
kernel_instance=OSKernel()
initialize_drivers()
app=Flask(__name__)
sock=Sock(app)
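# Background thread: progressively re-renders newly opened windows up to their refinement_steps budget.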
def refinement_worker(sys_engine,kernel):
    while True:
        if not kernel.refinement_queue.empty():
            pid=kernel.refinement_queue.get()
            if pid in kernel.processes:
                proc=kernel.processes[pid]
                app=PROGRAMS[proc.app_type]
                if proc.refinement_level<app.refinement_steps:
                    sys_engine.generate_window_content(proc,steps=1)
                    kernel.refinement_queue.put(pid)
        time.sleep(0.5)
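# WebSocket endpoint: lazily builds the NeuralSystem on the first connection, then answers
# click and launch_app events from the frontend with log and frame_update messages.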
@sock.route('/kernel')
def socket_handler(ws):
    global sys_engine
    if sys_engine is None:
        sys_engine=NeuralSystem()
        ref_thread=threading.Thread(target=refinement_worker,args=(sys_engine,kernel_instance),daemon=True)
        ref_thread.start()
    ws.send(json.dumps({"type":"log","data":"Kernel Attached"}))
    img=sys_engine.render_frame(kernel_instance)
    buf=io.BytesIO()
    img.save(buf,format="PNG")
    ws.send(json.dumps({"type":"desktop_ready","data":base64.b64encode(buf.getvalue()).decode()}))
    while True:
        data=ws.receive()
        if not data:break
        msg=json.loads(data)
        if msg['type']=='click':
            res=kernel_instance.handle_click(msg['x'],msg['y'])
            if res['action']=='launch':
                ws.send(json.dumps({"type":"log","data":f"🚀 Launching {res['app']}..."}))
                proc=kernel_instance.processes[res['pid']]
                sys_engine.generate_window_content(proc)
            elif res['action']=='close':
                ws.send(json.dumps({"type":"log","data":f"❌ Closed {res['name']}"}))
            elif res['action']=='desktop_click':
                thought=sys_engine.think(f"User clicked desktop at {msg['x']},{msg['y']}. Witty system log:")
                ws.send(json.dumps({"type":"log","data":f"💭 {thought}"}))
            img=sys_engine.render_frame(kernel_instance)
            buf=io.BytesIO()
            img.save(buf,format="PNG")
            ws.send(json.dumps({"type":"frame_update","data":base64.b64encode(buf.getvalue()).decode()}))
        elif msg['type']=='launch_app':
            pid=kernel_instance.spawn_process(msg['app'])
            if pid!=-1:
                ws.send(json.dumps({"type":"log","data":f"📱 Started {msg['app']}"}))
                proc=kernel_instance.processes[pid]
                sys_engine.generate_window_content(proc)
                img=sys_engine.render_frame(kernel_instance)
                buf=io.BytesIO()
                img.save(buf,format="PNG")
                ws.send(json.dumps({"type":"frame_update","data":base64.b64encode(buf.getvalue()).decode()}))
@app.route('/')
def index():return HTML
if __name__=='__main__':app.run(host='0.0.0.0',port=7860)
HYPER_EOF
EXPOSE 7860
CMD ["python","app.py"] |