# llm-cpu-server / Dockerfile
# (restored from Hugging Face blob-page text: author tjwrld,
#  commit 0c96772 "Update Dockerfile", 885 bytes — these lines were
#  page chrome, not Dockerfile syntax, and would break `docker build`)
# Official Node.js 22 base image (Debian-based; includes build toolchain).
# NOTE(review): node:22 floats across patch releases — pin a minor version or
# digest (e.g. node:22.11-bookworm@sha256:...) for reproducible builds.
FROM node:22

# Use bash with pipefail so a failure on the upstream side of any pipe in a
# RUN step fails the build instead of being silently masked (hadolint DL4006).
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# OS-level dependencies, one per line and alphabetized for readable diffs:
#   ca-certificates, wget -> TLS download of the model below
#   clang, cmake, git     -> presumably for compiling native Node addons — TODO confirm
#   libgomp1              -> OpenMP runtime (CPU inference)
# The apt list cache is removed in the same layer so it never lands in the image.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        ca-certificates \
        clang \
        cmake \
        git \
        libgomp1 \
        wget \
    && rm -rf /var/lib/apt/lists/*
# All subsequent relative paths resolve under /opt/app (WORKDIR creates it).
WORKDIR /opt/app
# Pre-create the model directory so the wget step below has a target before
# any application source is copied in.
# NOTE(review): `public` is presumably also provided by the later `COPY . .`;
# this mkdir only covers a build context that lacks it — confirm it's needed.
RUN mkdir -p models public
# Bake the quantized Gemma model into the image at build time.
# The /resolve/ URL returns the raw file stream (the /blob/ URL is an HTML page).
# --tries/--timeout make this large download resilient to transient network
# hiccups; wget's non-zero exit still fails the build on a hard error.
# NOTE(review): no integrity check — pin the file's SHA256 and verify it here
# (or use BuildKit `ADD --checksum=sha256:...`) to guard against upstream
# tampering or a truncated download.
RUN wget -q --tries=3 --timeout=60 -O models/gemma-3-1b-it-UD-IQ1_S.gguf \
    "https://huggingface.co/unsloth/gemma-3-1b-it-GGUF/resolve/main/gemma-3-1b-it-UD-IQ1_S.gguf?download=true"
# Copy only the dependency manifests first so this layer stays cached until
# package*.json changes — editing application source won't re-run the install.
COPY package*.json ./
# Clean the npm cache in the SAME layer: files written here persist in the
# image even if "deleted" by a later RUN (hadolint DL3016).
# NOTE(review): prefer `npm ci --omit=dev` once a package-lock.json is
# committed — it is reproducible and skips devDependencies.
RUN npm install && npm cache clean --force
# Bring in the rest of the application (server.js, public/, ...), owned by the
# unprivileged `node` user shipped with the official base image. --chown at
# copy time avoids a follow-up `chown -R` layer, which would duplicate the
# multi-hundred-MB model file into a new layer.
# NOTE(review): add a .dockerignore (node_modules, .git, .env, *.log) so this
# COPY neither busts the cache needlessly nor leaks local files into the image.
COPY --chown=node:node . .
# Drop root for runtime; the root-owned model file remains world-readable
# (wget writes mode 644), so inference can still load it.
USER node
# Documentation only (does not publish): Hugging Face Spaces routes to 7860,
# which is >1024 and therefore bindable by the non-root user.
EXPOSE 7860
# Exec-form CMD so `docker stop` delivers SIGTERM to the process tree and the
# command can be overridden argument-by-argument at `docker run`.
CMD ["npm", "start"]