File size: 2,830 Bytes
950bfa6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
# Use Node.js 22 (required by OpenClaw) as the base image.
# NOTE(review): consider pinning by digest for fully reproducible builds.
FROM node:22-bookworm-slim

# Install system tools, Ollama dependencies (zstd), and native build tools
# (python3/make/g++ for node-gyp), then install Ollama itself. apt cleanup
# happens in the same layer so the package lists never persist in the image.
# The installer is downloaded to a file before running: a plain
# `curl … | sh` pipe under /bin/sh would mask curl failures (no pipefail).
# NOTE(review): the Ollama install script is fetched unpinned over the
# network — consider pinning a release version and verifying a checksum.
RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
        g++ \
        git \
        jq \
        make \
        procps \
        python3 \
        zstd \
    && curl -fsSL -o /tmp/ollama-install.sh https://ollama.com/install.sh \
    && sh /tmp/ollama-install.sh \
    && rm -f /tmp/ollama-install.sh \
    && rm -rf /var/lib/apt/lists/*

# Create the data/config directories up front and hand them to the built-in
# unprivileged "node" user, so later steps (and the runtime) never need root.
# One logical step -> one layer (three separate RUNs added no cache benefit).
RUN mkdir -p /home/node/.ollama \
             /home/node/.npm-global \
             /home/node/.openclaw/workspace \
    && chown -R node:node /home/node/.ollama \
                          /home/node/.npm-global \
                          /home/node/.openclaw

# Drop root: every remaining build step and the runtime process run as the
# image's built-in unprivileged "node" account.
USER node
# HOME points at the node user's home; PATH picks up the user-local npm
# prefix so globally-installed CLIs (openclaw) resolve without root.
ENV HOME=/home/node \
    PATH=/home/node/.npm-global/bin:$PATH
WORKDIR /home/node

# Point npm's global prefix at the node user's home (no root required for
# -g installs), install OpenClaw, and clear the npm cache in the same layer
# so the cache never persists in the image (hadolint DL3016).
# NOTE(review): "@latest" is unpinned, so builds are not reproducible —
# pin an exact version once one is settled on.
RUN npm config set prefix '~/.npm-global' \
    && npm install -g openclaw@latest \
    && npm cache clean --force

# Expose the Hugging Face web port. EXPOSE is documentation only (ports are
# published at run time); 7860 is above 1024, so the non-root "node" user
# can bind it without extra capabilities.
EXPOSE 7860

# Generate the startup script. The '\n\' line endings rely on /bin/sh
# (dash) echo expanding \n into newlines — do not add '#' lines inside the
# continuation; the Dockerfile parser strips them.
#
# The script: writes a default OpenClaw config (overwriting any existing
# one on every boot), wipes stale device identities, launches Ollama in the
# background, waits for it, pulls the model, then exec's OpenClaw so it
# replaces the script as the container's main process and receives SIGTERM
# directly on `docker stop`.
#
# NOTE(review): the gateway auth token below is a hardcoded credential
# baked into the image — anyone who can pull the image can read it. It
# should be injected at run time (e.g. a Space secret env var) instead.
# NOTE(review): this container runs two processes (ollama + openclaw)
# without an init/supervisor — acceptable for a single-user Space, but the
# background ollama process will not be reaped/signalled cleanly.
RUN echo '#!/bin/bash\n\
set -e\n\
echo "Writing default OpenClaw configuration..."\n\
cat <<EOF > /home/node/.openclaw/openclaw.json\n\
{\n\
  "gateway": {\n\
    "mode": "local",\n\
    "bind": "lan",\n\
    "trustedProxies": ["10.0.0.0/8", "127.0.0.1"],\n\
    "auth": {\n\
      "token": "pelm-my-super-secret-password-123"\n\
    },\n\
    "controlUi": {\n\
      "allowInsecureAuth": true,\n\
      "dangerouslyDisableDeviceAuth": true\n\
    }\n\
  },\n\
  "models": {\n\
    "mode": "merge",\n\
    "providers": {\n\
      "ollama": {\n\
        "baseUrl": "http://127.0.0.1:11434/v1",\n\
        "apiKey": "ollama-local",\n\
        "api": "openai-responses",\n\
        "models": [\n\
          {\n\
            "id": "qwen2.5-coder:14b",\n\
            "name": "Qwen 2.5 Coder 14B",\n\
            "reasoning": false,\n\
            "input": ["text"],\n\
            "cost": { "input": 0, "output": 0, "cacheRead": 0, "cacheWrite": 0 },\n\
            "contextWindow": 32000,\n\
            "maxTokens": 8192\n\
          }\n\
        ]\n\
      }\n\
    }\n\
  },\n\
  "agents": {\n\
    "defaults": {\n\
      "model": {\n\
        "primary": "ollama/qwen2.5-coder:14b"\n\
      }\n\
    }\n\
  }\n\
}\n\
EOF\n\
\n\
echo "Wiping stale device identities to prevent 1008 mismatch errors..."\n\
rm -rf /home/node/.openclaw/devices /home/node/.openclaw/identity\n\
\n\
echo "Starting Ollama server..."\n\
ollama serve &\n\
\n\
echo "Waiting for Ollama server to be active..."\n\
while ! ollama list > /dev/null 2>&1; do\n\
  sleep 1\n\
done\n\
\n\
echo "Ollama is up! Pulling Qwen 2.5 Coder 14B..."\n\
ollama pull qwen2.5-coder:14b\n\
\n\
echo "Model ready! Starting OpenClaw..."\n\
exec openclaw gateway --port 7860 --allow-unconfigured\n\
' > start.sh && chmod +x start.sh

# Start the Space. Exec (JSON-array) form: the script itself becomes PID 1
# rather than running under a wrapping `sh -c`, so container signals reach it.
CMD ["./start.sh"]