Vishwas1 commited on
Commit
c3bf6f6
·
verified ·
1 Parent(s): 9981566

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +43 -28
Dockerfile CHANGED
@@ -1,53 +1,68 @@
1
  FROM python:3.10-slim
2
 
3
  ENV PYTHONUNBUFFERED=1 \
4
- NODE_VERSION=20.x
 
 
5
 
6
  # 1. SYSTEM DEPS
7
  RUN apt-get update && apt-get install -y \
8
- curl build-essential git git-lfs cmake ninja-build nginx \
9
  && curl -fsSL https://deb.nodesource.com/setup_$NODE_VERSION | bash - \
10
- && apt-get install -y nodejs
11
- ENV PATH="/root/.cargo/bin:${PATH}"
12
 
13
  # 2. SETUP
14
  WORKDIR /app
15
  RUN git clone https://github.com/open-jarvis/OpenJarvis.git .
16
 
17
- # 3. PYTHON
18
  RUN pip install --no-cache-dir uv huggingface_hub
19
  RUN uv sync --extra server
20
  ENV CMAKE_ARGS="-DGGML_CPU=ON -DGGML_NATIVE=OFF -DGGML_AVX512=OFF -GNinja"
21
  ENV CMAKE_BUILD_PARALLEL_LEVEL=8
22
  RUN uv pip install --python /app/.venv llama-cpp-python[server]
23
 
24
- # 4. FRONTEND
25
- RUN cd frontend && npm install --legacy-peer-deps && npm install react-is --save-dev && npx vite build --outDir dist --base ./
 
 
 
26
 
27
- # 5. MODEL
28
  RUN mkdir -p /app/models && \
29
  python -c "from huggingface_hub import hf_hub_download; hf_hub_download(repo_id='bartowski/SmolLM2-1.7B-Instruct-GGUF', filename='SmolLM2-1.7B-Instruct-Q4_K_M.gguf', local_dir='/app/models', local_dir_use_symlinks=False)"
30
 
31
- # 6. NGINX (Unprivileged)
32
- RUN echo 'pid /tmp/nginx.pid;\nevents { worker_connections 768; }\nhttp {\n include /etc/nginx/mime.types;\n server {\n listen 7860;\n location ~ ^/api/(.*)$ {\n proxy_pass http://127.0.0.1:8000/$1$is_args$args;\n }\n location / {\n root /app/frontend/dist;\n try_files $uri $uri/ /index.html;\n }\n }\n}' > /app/nginx.conf
33
-
34
- # 7. THE "NO-FAIL" STARTUP
35
- RUN echo '#!/bin/bash' > /app/start.sh && \
36
- echo 'MODEL_NAME=${OPENAI_API_MODEL:-"gpt-4o-mini"}' >> /app/start.sh && \
37
- # Write to BOTH possible config locations
38
- echo 'for dir in "/root/.openjarvis" "/app/.openjarvis"; do' >> /app/start.sh && \
39
- echo ' mkdir -p "$dir"' >> /app/start.sh && \
40
- echo ' echo "[model]\ndefault_model = \"$MODEL_NAME\"\n\n[[model.providers]]\nname = \"openai\"\n\n[[model.providers]]\nname = \"llamacpp\"\nbase_url = \"http://127.0.0.1:8080/v1\"" > "$dir/config.toml"' >> /app/start.sh && \
41
- echo 'done' >> /app/start.sh && \
42
- # Start Llama.cpp
43
- echo '/app/.venv/bin/python -m llama_cpp.server --model /app/models/SmolLM2-1.7B-Instruct-Q4_K_M.gguf --host 127.0.0.1 --port 8080 > /app/engine.log 2>&1 &' >> /app/start.sh && \
44
- # Start Brain
45
- echo 'export OPENAI_API_KEY=$OPENAI_API_KEY' >> /app/start.sh && \
46
- echo '/app/.venv/bin/jarvis serve --host 127.0.0.1 --port 8000 > /app/jarvis.log 2>&1 &' >> /app/start.sh && \
47
- echo 'sleep 25' >> /app/start.sh && \
48
- echo 'tail -n 15 /app/jarvis.log' >> /app/start.sh && \
49
- echo 'exec nginx -c /app/nginx.conf -g "daemon off;"' >> /app/start.sh && \
50
- chmod +x /app/start.sh
 
 
 
 
 
 
 
 
 
 
51
 
52
  EXPOSE 7860
53
  CMD ["/app/start.sh"]
 
1
FROM python:3.10-slim

# PYTHONUNBUFFERED: stream Python logs straight to the container console.
# HOME=/app: jarvis and the HF cache write under $HOME; /root is not reliably
# writable on platforms that run containers with a non-root/random UID.
ENV PYTHONUNBUFFERED=1 \
    HOME=/app

# Build-time only (consumed by the NodeSource setup script below);
# ARG keeps it out of the runtime environment, unlike the previous ENV.
ARG NODE_VERSION=20.x
7
 
8
# 1. SYSTEM DEPS
# nginx serves the static frontend and proxies the API; netcat-openbsd is a
# small ad-hoc port-probing aid. Packages sorted, recommends suppressed, and
# the apt lists removed in the same layer to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        curl \
        git \
        git-lfs \
        netcat-openbsd \
        nginx \
        ninja-build \
    # NOTE(review): piping an unpinned NodeSource script into bash is not
    # reproducible/verifiable — consider pinning a checksum or using a
    # distro nodejs package instead.
    && curl -fsSL https://deb.nodesource.com/setup_$NODE_VERSION | bash - \
    && apt-get install -y --no-install-recommends nodejs \
    && rm -rf /var/lib/apt/lists/*
14
 
15
# 2. SETUP
WORKDIR /app
# NOTE(review): an unpinned clone of the default branch is not reproducible;
# prefer `git clone --depth 1 --branch <tag>` once a release tag exists.
RUN git clone https://github.com/open-jarvis/OpenJarvis.git .

# 3. PYTHON SETUP
RUN pip install --no-cache-dir uv huggingface_hub
# Creates /app/.venv with the project's "server" extras.
RUN uv sync --extra server

# Build llama-cpp-python for a portable CPU target (no -march=native, no
# AVX-512) so the compiled wheel runs on any host this image lands on.
# These values are build-time only, so ARG (not ENV) keeps them out of the
# runtime environment.
ARG CMAKE_ARGS="-DGGML_CPU=ON -DGGML_NATIVE=OFF -DGGML_AVX512=OFF -GNinja"
ARG CMAKE_BUILD_PARALLEL_LEVEL=8
# Quote the extras spec so the shell can never glob-expand the brackets.
RUN uv pip install --python /app/.venv "llama-cpp-python[server]"
25
 
26
# 4. FRONTEND BUILD
# Use WORKDIR instead of `RUN cd …` (hadolint DL3003) so each step's working
# directory is explicit and cache-friendly.
WORKDIR /app/frontend
# NOTE(review): `npm ci` would be preferable if a package-lock.json is
# committed upstream; --legacy-peer-deps suggests the dependency tree
# currently needs loose peer resolution.
RUN npm install --legacy-peer-deps \
    && npm install react-is --save-dev \
    && npx vite build --outDir dist --base ./
WORKDIR /app
31
 
32
# 5. DOWNLOAD LOCAL MODEL
# Bake the quantized SmolLM2 weights into the image at build time so container
# startup needs no network access.
# NOTE(review): local_dir_use_symlinks is deprecated/ignored in newer
# huggingface_hub releases — confirm the installed version still accepts it.
RUN mkdir -p /app/models && \
    python -c "from huggingface_hub import hf_hub_download; hf_hub_download(repo_id='bartowski/SmolLM2-1.7B-Instruct-GGUF', filename='SmolLM2-1.7B-Instruct-Q4_K_M.gguf', local_dir='/app/models', local_dir_use_symlinks=False)"
35
 
36
# 6. NGINX CONFIG
# printf portably interprets \n (POSIX guarantees it); the previous
# `echo '…\n…'` only worked because Debian's /bin/sh is dash — bash's echo
# would have written literal backslash-n into the config. One argument per
# config line keeps the file reviewable.
# pid lives in /tmp so nginx can also run unprivileged.
RUN printf '%s\n' \
      'pid /tmp/nginx.pid;' \
      'events { worker_connections 768; }' \
      'http {' \
      '  include /etc/nginx/mime.types;' \
      '  server {' \
      '    listen 7860;' \
      '    location ~ ^/api/(.*)$ {' \
      '      proxy_pass http://127.0.0.1:8000/$1$is_args$args;' \
      '      proxy_set_header Host $host;' \
      '    }' \
      '    location / {' \
      '      root /app/frontend/dist;' \
      '      try_files $uri $uri/ /index.html;' \
      '    }' \
      '  }' \
      '}' > /app/nginx.conf
38
+
39
# 7. STARTUP SCRIPT
# A BuildKit heredoc (dockerfile frontend >= 1.4; standard on current Docker
# and HF Spaces builders) replaces the previous single giant
# `RUN echo '…\n…'`, which only produced a valid script because dash's echo
# interprets \n — bash's echo does not. The quoted 'EOF' delimiter prevents
# build-time variable expansion, so $VARS are resolved at container start.
COPY <<'EOF' /app/start.sh
#!/bin/bash
set -e

# --- Configuration: jarvis reads $HOME/.openjarvis/config.toml (HOME=/app) ---
MODEL_NAME=${OPENAI_API_MODEL:-"gpt-4o-mini"}
CONF_DIR="/app/.openjarvis"
mkdir -p "$CONF_DIR"

# A runtime heredoc writes real newlines — no echo/printf escape pitfalls.
cat > "$CONF_DIR/config.toml" <<TOML
[model]
default_model = "$MODEL_NAME"

[[model.providers]]
name = "openai"
api_key = "$OPENAI_API_KEY"

[[model.providers]]
name = "huggingface"
token = "$HF_TOKEN"

[[model.providers]]
name = "llamacpp"
base_url = "http://127.0.0.1:8080/v1"
model_id = "local-smollm2"
TOML

# --- Start llama.cpp (local inference) on 8080 ---
/app/.venv/bin/python -m llama_cpp.server --model /app/models/SmolLM2-1.7B-Instruct-Q4_K_M.gguf --host 127.0.0.1 --port 8080 > /app/engine.log 2>&1 &

# --- Start the Jarvis API on 8000 ---
/app/.venv/bin/jarvis serve --host 127.0.0.1 --port 8000 > /app/jarvis.log 2>&1 &

# --- Health gate: wait for the Jarvis API before fronting it with nginx ---
echo "Waiting for Jarvis API..."
MAX_RETRIES=30
COUNT=0
until curl -s http://127.0.0.1:8000/v1/models > /dev/null; do
    sleep 2
    COUNT=$((COUNT + 1))
    if [ "$COUNT" -ge "$MAX_RETRIES" ]; then
        echo "Jarvis failed to start; last log lines:"
        tail -n 20 /app/jarvis.log || true
        exit 1
    fi
done

echo "Backend ready. Starting Nginx..."
# exec replaces this shell so nginx becomes PID 1 and receives docker stop's
# SIGTERM directly.
exec nginx -c /app/nginx.conf -g "daemon off;"
EOF
RUN chmod +x /app/start.sh
66
 
67
# EXPOSE is documentation only; HF Spaces / operators publish 7860.
EXPOSE 7860

# Liveness probe against nginx (curl is installed in the deps layer).
# Generous start-period: the GGUF model load plus both backends can take
# a while on CPU-only hosts.
HEALTHCHECK --interval=30s --timeout=5s --start-period=120s --retries=3 \
  CMD curl -fsS http://127.0.0.1:7860/ || exit 1

# Exec-form CMD launches start.sh directly; start.sh execs nginx as PID 1.
CMD ["/app/start.sh"]