# Build-time args; override with --build-arg on `docker build`
ARG USE_CUDA=false
ARG USE_OLLAMA=false
ARG USE_SLIM=false
ARG USE_PERMISSION_HARDENING=false

ARG USE_CUDA_VER=cu128

# Embedding and reranking models used for RAG
ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
ARG USE_RERANKING_MODEL=""
ARG USE_AUXILIARY_EMBEDDING_MODEL=TaylorAI/bge-micro-v2

# Tiktoken encoding used for token counting
ARG USE_TIKTOKEN_ENCODING_NAME="cl100k_base"

ARG BUILD_HASH=dev-build

# UID/GID for the runtime user (0 keeps the container running as root)
ARG UID=0
ARG GID=0
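
# A minimal sketch of how these args are typically overridden at build time
# (tag and values below are illustrative, not part of this file):
#   docker build \
#     --build-arg USE_CUDA=true \
#     --build-arg UID=1000 --build-arg GID=1000 \
#     -t webui:local .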

# Frontend build stage
FROM --platform=$BUILDPLATFORM node:22-alpine3.20 AS build
ARG BUILD_HASH

WORKDIR /app

# git is needed during the frontend install/build step
RUN apk add --no-cache git

COPY package.json package-lock.json ./
RUN npm ci --force

COPY . .
ENV APP_BUILD_HASH=${BUILD_HASH}
RUN npm run build

# Backend base stage
FROM python:3.11.14-slim-bookworm AS base

# Re-declare build args so they are visible in this stage
ARG USE_CUDA
ARG USE_OLLAMA
ARG USE_CUDA_VER
ARG USE_SLIM
ARG USE_PERMISSION_HARDENING
ARG USE_EMBEDDING_MODEL
ARG USE_RERANKING_MODEL
ARG USE_AUXILIARY_EMBEDDING_MODEL
ARG UID
ARG GID

# Stream Python output straight to the container logs (no buffering)
ENV PYTHONUNBUFFERED=1

ENV ENV=prod \
    PORT=8080 \
    # pass build args to the build
    USE_OLLAMA_DOCKER=${USE_OLLAMA} \
    USE_CUDA_DOCKER=${USE_CUDA} \
    USE_SLIM_DOCKER=${USE_SLIM} \
    USE_CUDA_DOCKER_VER=${USE_CUDA_VER} \
    USE_EMBEDDING_MODEL_DOCKER=${USE_EMBEDDING_MODEL} \
    USE_RERANKING_MODEL_DOCKER=${USE_RERANKING_MODEL} \
    USE_AUXILIARY_EMBEDDING_MODEL_DOCKER=${USE_AUXILIARY_EMBEDDING_MODEL}

# Ollama and OpenAI API base URLs
ENV OLLAMA_BASE_URL="/ollama" \
    OPENAI_API_BASE_URL=""

# API key, secret key, and telemetry opt-outs
ENV OPENAI_API_KEY="" \
    WEBUI_SECRET_KEY="" \
    SCARF_NO_ANALYTICS=true \
    DO_NOT_TRACK=true \
    ANONYMIZED_TELEMETRY=false

# Whisper speech-to-text settings
ENV WHISPER_MODEL="base" \
    WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"

# RAG embedding / reranking model settings
ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
    RAG_RERANKING_MODEL="$USE_RERANKING_MODEL_DOCKER" \
    AUXILIARY_EMBEDDING_MODEL="$USE_AUXILIARY_EMBEDDING_MODEL_DOCKER" \
    SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"

# Tiktoken settings
ENV TIKTOKEN_ENCODING_NAME="cl100k_base" \
    TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken"

# Hugging Face cache directory
ENV HF_HOME="/app/backend/data/cache/embedding/models"

WORKDIR /app/backend

ENV HOME=/root

# Create the app user/group only when a non-root UID/GID is requested
RUN if [ $UID -ne 0 ]; then \
        if [ $GID -ne 0 ]; then \
            addgroup --gid $GID app; \
        fi; \
        adduser --uid $UID --gid $GID --home $HOME --disabled-password --no-create-home app; \
    fi

# Pre-seed the Chroma cache with a fixed telemetry user id
RUN mkdir -p $HOME/.cache/chroma
RUN echo -n 00000000-0000-0000-0000-000000000000 > $HOME/.cache/chroma/telemetry_user_id

# Give the runtime user ownership of the app and home directories
RUN chown -R $UID:$GID /app $HOME

# Install system dependencies
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        git build-essential pandoc gcc netcat-openbsd curl jq \
        python3-dev \
        ffmpeg libsm6 libxext6 zstd \
    && rm -rf /var/lib/apt/lists/*

# install python dependencies
COPY --chown=$UID:$GID ./backend/requirements.txt ./requirements.txt

RUN pip3 install --no-cache-dir uv && \
    if [ "$USE_CUDA" = "true" ]; then \
    # CUDA build: install GPU-enabled torch wheels; the model assets below are still pre-downloaded on CPU so they are cached in the image
    # fix: pin torch<=2.9.1 - torch 2.10.0 aarch64 wheels cause SIGILL on ARM devices (RPi 4 Cortex-A72) #21349
    pip3 install 'torch<=2.9.1' torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
    uv pip install --system -r requirements.txt --no-cache-dir && \
    python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
    python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ.get('AUXILIARY_EMBEDDING_MODEL', 'TaylorAI/bge-micro-v2'), device='cpu')" && \
    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
    python -c "import nltk; nltk.download('punkt_tab')"; \
    else \
    pip3 install 'torch<=2.9.1' torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
    uv pip install --system -r requirements.txt --no-cache-dir && \
    if [ "$USE_SLIM" != "true" ]; then \
    python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
    python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ.get('AUXILIARY_EMBEDDING_MODEL', 'TaylorAI/bge-micro-v2'), device='cpu')" && \
    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
    python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
    python -c "import nltk; nltk.download('punkt_tab')"; \
    fi; \
    fi; \
    mkdir -p /app/backend/data && chown -R $UID:$GID /app/backend/data/ && \
    rm -rf /var/lib/apt/lists/*;
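# Note (inferred from the branch above): with USE_SLIM=true the pre-download steps are
# skipped, so the embedding, whisper, tiktoken, and nltk assets are fetched on first use
# at runtime instead of being baked into the image.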

# Install Ollama if requested
RUN if [ "$USE_OLLAMA" = "true" ]; then \
    date +%s > /tmp/ollama_build_hash && \
    echo "Cache broken at timestamp: `cat /tmp/ollama_build_hash`" && \
    curl -fsSL https://ollama.com/install.sh | sh && \
    rm -rf /var/lib/apt/lists/*; \
    fi

# copy embedding weight from build
# RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
# COPY --from=build /app/onnx /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx

# copy built frontend files
COPY --chown=$UID:$GID --from=build /app/build /app/build
COPY --chown=$UID:$GID --from=build /app/CHANGELOG.md /app/CHANGELOG.md
COPY --chown=$UID:$GID --from=build /app/package.json /app/package.json

# copy backend files
COPY --chown=$UID:$GID ./backend .

EXPOSE 8080

HEALTHCHECK CMD curl --silent --fail http://localhost:${PORT:-8080}/health | jq -ne 'input.status == true' || exit 1
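# The check above assumes GET /health returns JSON with a boolean "status" field,
# e.g. {"status": true}; if the field is missing or false, jq exits non-zero and the
# container is marked unhealthy.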

# Minimal, atomic permission hardening for OpenShift (arbitrary UID):
# - Group 0 owns /app and /root
# - Directories are group-writable and have SGID so new files inherit GID 0
RUN if [ "$USE_PERMISSION_HARDENING" = "true" ]; then \
    set -eux; \
    chgrp -R 0 /app /root || true; \
    chmod -R g+rwX /app /root || true; \
    find /app -type d -exec chmod g+s {} + || true; \
    find /root -type d -exec chmod g+s {} + || true; \
    fi
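# Illustrative effect (values are assumptions): OpenShift-style platforms run the container
# as a random UID that belongs to group 0, roughly equivalent to
#   docker run --user 1000680000:0 -p 8080:8080 <image>
# The group-0 ownership and SGID bits above keep /app and /root writable for such a user.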

USER $UID:$GID

ARG BUILD_HASH
ENV WEBUI_BUILD_VERSION=${BUILD_HASH}
ENV DOCKER=true

CMD ["bash", "start.sh"]
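
# A minimal sketch of running the image (host port and volume name are illustrative):
#   docker run -d -p 3000:8080 -v webui-data:/app/backend/data <image>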