# NOTE: "Spaces: Sleeping" status header was page-scrape residue from the
# Hugging Face Spaces UI, not part of the Dockerfile; preserved as a comment.
# Base image: Debian-based slim Python 3.10.
FROM python:3.10-slim

# PYTHONUNBUFFERED=1  -> stream Python logs immediately (no stdout buffering).
# NODE_VERSION=20.x   -> consumed by the NodeSource setup script below.
# HOME=/app           -> avoid permission issues with /root in some
#                        environments (e.g. Spaces running as non-root).
# NOTE(review): the original had this comment *inside* the ENV line
# continuation, which is parser-dependent; it now lives outside the
# instruction so the continuation is unambiguous.
ENV PYTHONUNBUFFERED=1 \
    NODE_VERSION=20.x \
    HOME=/app
# 1. SYSTEM DEPS
# Build toolchain (gcc, cmake, ninja), git (+LFS), nginx for serving,
# netcat for debugging, then Node.js 20 via the NodeSource apt repo.
# The apt list cache is removed in the same layer to keep the image slim.
RUN apt-get update \
    && apt-get install -y \
        curl build-essential git git-lfs cmake ninja-build nginx netcat-openbsd \
    && curl -fsSL "https://deb.nodesource.com/setup_${NODE_VERSION}" | bash - \
    && apt-get install -y nodejs \
    && rm -rf /var/lib/apt/lists/*
# 2. SETUP
# Fetch the application source directly into the working directory.
# NOTE(review): this is a full-history clone; consider `--depth 1` once it
# is confirmed the build does not derive versions from git tags/history
# (e.g. setuptools-scm) — TODO confirm.
WORKDIR /app
RUN git clone https://github.com/open-jarvis/OpenJarvis.git .
# 3. PYTHON SETUP
# uv manages the project virtualenv (/app/.venv); huggingface_hub is needed
# at build time by the model-download step below.
RUN pip install --no-cache-dir uv huggingface_hub
RUN uv sync --extra server

# CPU-only llama.cpp build: disable native tuning and AVX512 so the wheel
# runs on generic hosts; build with Ninja; cap build parallelism at 8 to
# bound memory use during compilation.
ENV CMAKE_ARGS="-DGGML_CPU=ON -DGGML_NATIVE=OFF -DGGML_AVX512=OFF -GNinja"
ENV CMAKE_BUILD_PARALLEL_LEVEL=8
# FIX: quote the extras spec — an unquoted [server] is a shell glob pattern
# and can silently match a file in the build context or fail under strict
# shells (zsh-style nomatch).
RUN uv pip install --python /app/.venv "llama-cpp-python[server]"
# 4. FRONTEND BUILD
# Install JS dependencies (legacy peer-dep resolution required by the
# dependency tree), add react-is explicitly as a dev dependency needed by
# the build, then emit static assets into frontend/dist with relative
# asset paths (--base ./) so nginx can serve them from any mount point.
RUN cd frontend \
    && npm install --legacy-peer-deps \
    && npm install react-is --save-dev \
    && npx vite build --outDir dist --base ./
# 5. DOWNLOAD LOCAL MODEL
# Bake the quantized GGUF weights into the image so local inference works
# without network access at runtime.
# NOTE(review): local_dir_use_symlinks is deprecated in recent
# huggingface_hub releases (real files are the default with local_dir);
# kept here for compatibility with older pinned versions — TODO confirm
# the installed hub version before removing.
RUN mkdir -p /app/models && \
    python -c "from huggingface_hub import hf_hub_download; hf_hub_download(repo_id='bartowski/SmolLM2-1.7B-Instruct-GGUF', filename='SmolLM2-1.7B-Instruct-Q4_K_M.gguf', local_dir='/app/models', local_dir_use_symlinks=False)"
# 6. NGINX CONFIG
# Listen on 7860 (the Spaces ingress port); proxy /api/* to the backend on
# :8000 (stripping the /api prefix, preserving the query string), and serve
# the built frontend for everything else with an SPA fallback to index.html.
# PID lives in /tmp so nginx can run without root write access to /run.
# FIX: the original used `echo 'a\nb'`, whose backslash handling is
# implementation-defined (dash interprets \n, bash prints it literally);
# printf '%s\n' emits one config line per argument, portably. The $vars
# below are nginx variables, protected from the shell by single quotes.
RUN printf '%s\n' \
      'pid /tmp/nginx.pid;' \
      'events { worker_connections 768; }' \
      'http {' \
      '  include /etc/nginx/mime.types;' \
      '  server {' \
      '    listen 7860;' \
      '    location ~ ^/api/(.*)$ {' \
      '      proxy_pass http://127.0.0.1:8000/$1$is_args$args;' \
      '      proxy_set_header Host $host;' \
      '    }' \
      '    location / {' \
      '      root /app/frontend/dist;' \
      '      try_files $uri $uri/ /index.html;' \
      '    }' \
      '  }' \
      '}' \
      > /app/nginx.conf
# 7. STARTUP SCRIPT
# FIX: the original generated this bash script via `echo '…\n…'`, stacking
# two layers of backslash escaping whose result depends on the build
# shell's echo implementation (dash interprets \n at build time; the
# generated bash echo would NOT interpret \n at runtime). A quoted COPY
# heredoc (BuildKit, dockerfile syntax >= 1.4 — the default on modern
# Docker and on Spaces) writes the script byte-for-byte with no escaping,
# and the TOML config is rendered at runtime by an inner heredoc whose
# unquoted delimiter lets $MODEL_NAME / $OPENAI_API_KEY / $HF_TOKEN expand.
COPY <<'EOF' /app/start.sh
#!/bin/bash
# Entrypoint: write runtime config, start local inference + API backends,
# wait for the API to come up, then run nginx in the foreground.

# Configuration Setup
MODEL_NAME=${OPENAI_API_MODEL:-"gpt-4o-mini"}
CONF_DIR="/app/.openjarvis"
mkdir -p "$CONF_DIR"

# Render config.toml (unquoted delimiter -> env vars expand here, at runtime).
cat > "$CONF_DIR/config.toml" <<CONF
[model]
default_model = "$MODEL_NAME"

[[model.providers]]
name = "openai"
api_key = "$OPENAI_API_KEY"

[[model.providers]]
name = "huggingface"
token = "$HF_TOKEN"

[[model.providers]]
name = "llamacpp"
base_url = "http://127.0.0.1:8080/v1"
model_id = "local-smollm2"
CONF

# Start Llama.cpp (Local Inference)
/app/.venv/bin/python -m llama_cpp.server --model /app/models/SmolLM2-1.7B-Instruct-Q4_K_M.gguf --host 127.0.0.1 --port 8080 > /app/engine.log 2>&1 &

# Start Jarvis Brain
/app/.venv/bin/jarvis serve --host 127.0.0.1 --port 8000 > /app/jarvis.log 2>&1 &

# Health Check: Wait for Jarvis API to be ready (30 tries x 2s = 60s max).
echo "Waiting for Jarvis API..."
MAX_RETRIES=30
COUNT=0
while ! curl -s http://127.0.0.1:8000/v1/models > /dev/null; do
    sleep 2
    COUNT=$((COUNT+1))
    if [ "$COUNT" -ge "$MAX_RETRIES" ]; then echo "Jarvis failed to start"; exit 1; fi
done

echo "Backend ready. Starting Nginx..."
# exec replaces this shell so nginx becomes PID of record and receives signals.
exec nginx -c /app/nginx.conf -g "daemon off;"
EOF
RUN chmod +x /app/start.sh
# 7860 is the port Hugging Face Spaces routes external traffic to;
# nginx (started by start.sh) listens there.
EXPOSE 7860
CMD ["/app/start.sh"]