AEUPH committed on
Commit
6962bce
·
verified ·
1 Parent(s): c78d5c9

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +23 -27
Dockerfile CHANGED
@@ -4,25 +4,22 @@ FROM python:3.10-slim
4
  # Set working directory
5
  WORKDIR /app
6
 
7
- # 1. Install Build Tools
8
- # strictly required for the fallback compilation
9
  RUN apt-get update && apt-get install -y \
10
- git \
11
  curl \
12
- build-essential \
13
- cmake \
14
- libopenblas-dev \
15
  && rm -rf /var/lib/apt/lists/*
16
 
17
- # 2. CRITICAL: Upgrade pip
18
- # Old pip versions (default in 3.10-slim) often fail to find binary wheels,
19
- # forcing a slow source build. Upgrading fixes this.
20
  RUN pip install --upgrade pip setuptools wheel
21
 
22
  # 3. Download Retro Font (VT323)
23
  RUN curl -L -o /app/VT323.ttf https://github.com/google/fonts/raw/main/ofl/vt323/VT323-Regular.ttf
24
 
25
- # 4. Install Python Dependencies (Split for caching)
 
26
  RUN pip install --no-cache-dir \
27
  torch \
28
  torchvision \
@@ -38,11 +35,14 @@ RUN pip install --no-cache-dir \
38
  safetensors \
39
  scipy
40
 
41
- # 5. Install Llama-CPP-Python (The Fix)
42
- # - CMAKE_ARGS="-DLLAMA_NATIVE=OFF": Prevents hanging on architecture detection.
43
- # - -v: Verbose mode so you can see the build scrolling instead of freezing.
44
- ENV CMAKE_ARGS="-DLLAMA_NATIVE=OFF -DLLAMA_BLAS=ON -DGGML_NATIVE=OFF"
45
- RUN pip install --no-cache-dir -v llama-cpp-python
 
 
 
46
 
47
  # 6. Create a non-root user
48
  RUN useradd -m -u 1000 user
@@ -63,12 +63,17 @@ from flask_sock import Sock
63
  from diffusers import StableDiffusionPipeline, AutoencoderTiny, LCMScheduler
64
  from PIL import Image, ImageDraw
65
 
66
- # Graceful degradation if library fails (prevents crash loop)
 
 
67
  try:
68
  from llama_cpp import Llama
 
 
69
  except ImportError:
70
- Llama = None
71
- print("[!] CRITICAL: Llama-cpp-python failed to import. AI features will be limited.")
 
72
 
73
  # ============================================================================
74
  # 1. FRONTEND ASSET
@@ -357,15 +362,6 @@ class OSKernel:
357
  return {"action": "focus", "pid": proc.pid}
358
  return {"action": "none"}
359
 
360
- class LatentFileSystem:
361
- def __init__(self, root_path="./litewin_disk"):
362
- self.root = Path(root_path)
363
- self.root.mkdir(exist_ok=True)
364
-
365
- class LatentVM:
366
- def execute(self, bytecode: str, target_latent: torch.Tensor) -> torch.Tensor:
367
- return target_latent # Placeholder for VM execution
368
-
369
  # ============================================================================
370
  # 3. SERVER & ML PIPELINE
371
  # ============================================================================
 
4
  # Set working directory
5
  WORKDIR /app
6
 
7
+ # 1. Install System Dependencies
8
+ # Basic tools required for the OS simulation
9
  RUN apt-get update && apt-get install -y \
 
10
  curl \
11
+ git \
12
+ libgomp1 \
 
13
  && rm -rf /var/lib/apt/lists/*
14
 
15
+ # 2. Upgrade pip (Essential for finding modern wheels)
 
 
16
  RUN pip install --upgrade pip setuptools wheel
17
 
18
  # 3. Download Retro Font (VT323)
19
  RUN curl -L -o /app/VT323.ttf https://github.com/google/fonts/raw/main/ofl/vt323/VT323-Regular.ttf
20
 
21
+ # 4. Install Core Python Dependencies (Split for caching)
22
+ # These are safe and install quickly.
23
  RUN pip install --no-cache-dir \
24
  torch \
25
  torchvision \
 
35
  safetensors \
36
  scipy
37
 
38
+ # 5. ATTEMPT Fast Llama Install (Optional)
39
+ # --only-binary: Prevents the slow compilation loop.
40
+ # || echo: Allows the build to continue even if this fails.
41
+ # We will handle the missing library in app.py
42
+ RUN pip install llama-cpp-python \
43
+ --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu \
44
+ --only-binary=llama-cpp-python \
45
+ || echo "⚠️ Llama-CPP wheel not found. Skipping to avoid compile hang."
46
 
47
  # 6. Create a non-root user
48
  RUN useradd -m -u 1000 user
 
63
  from diffusers import StableDiffusionPipeline, AutoencoderTiny, LCMScheduler
64
  from PIL import Image, ImageDraw
65
 
66
+ # ============================================================================
67
+ # SAFE IMPORT LOGIC
68
+ # ============================================================================
69
  try:
70
  from llama_cpp import Llama
71
+ HAS_LLM = True
72
+ print("[*] Llama-CPP module loaded successfully.")
73
  except ImportError:
74
+ HAS_LLM = False
75
+ print("[!] NOTICE: Llama-CPP not found. Text generation features will be disabled.")
76
+ print("[!] The graphical OS and image generation will still work perfectly.")
77
 
78
  # ============================================================================
79
  # 1. FRONTEND ASSET
 
362
  return {"action": "focus", "pid": proc.pid}
363
  return {"action": "none"}
364
 
 
 
 
 
 
 
 
 
 
365
  # ============================================================================
366
  # 3. SERVER & ML PIPELINE
367
  # ============================================================================