---
# Docker Compose definition for the book-detection API.
# NOTE: the top-level `version` key ('3' in the original) is obsolete under the
# Compose Specification and is ignored by Compose v2, so it is omitted here.
services:
  book-detection-api:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal parsing of `host:container` pairs.
      - "7860:7860"
    environment:
      # Set torch home to a writable location
      - TORCH_HOME=/tmp/.torch
    volumes:
      # Persist model cache between restarts
      - model_cache:/tmp/.torch
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:7860/"]
      interval: 30s
      timeout: 10s
      retries: 3
      # Give enough time for model download on first run
      start_period: 60s

volumes:
  # Named volume for model cache (bare key = default local driver)
  model_cache: