services:
  postgres:
    image: postgres:16-alpine
    environment:
      POSTGRES_USER: sentinel
      POSTGRES_PASSWORD: sentinel
      POSTGRES_DB: sentinelai
    ports:
      - "5432:5432"
    volumes:
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U sentinel -d sentinelai"]
      interval: 5s
      timeout: 5s
      retries: 10

  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"

  backend:
    build:
      context: ..
      dockerfile: docker/Dockerfile.backend
    environment:
      DATABASE_URL: postgresql+asyncpg://sentinel:sentinel@postgres:5432/sentinelai
      REDIS_URL: redis://redis:6379/0
      CORS_ORIGINS: http://localhost:3000
      COLLECTOR_WATCH_DIR: /app/demo_logs
      OLLAMA_HOST: http://ollama:11434
      ENABLE_MOCK_CLOUD_POLL: "1"
    ports:
      - "8000:8000"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_started
    volumes:
      - ../demo_logs:/app/demo_logs

  frontend:
    build:
      context: ..
      dockerfile: docker/Dockerfile.frontend
    environment:
      NEXT_PUBLIC_API_URL: http://localhost:8000
    ports:
      - "3000:3000"
    depends_on:
      - backend

  # Optional: attach AMD ROCm Ollama for local Llama 3 / Qwen / Mistral inference.
  # Use a ROCm-tagged Ollama image on Linux hosts with AMD GPUs.
  # ollama:
  #   image: ollama/ollama:rocm
  #   devices:
  #     - /dev/kfd
  #     - /dev/dri
  #   ports:
  #     - "11434:11434"
  #   volumes:
  #     - ollama:/root/.ollama

volumes:
  pgdata: {}
  # ollama: {}
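
# Usage sketch. This assumes the compose file lives in a docker/ subdirectory,
# which matches the "context: .." build paths above; adjust the -f path if not.
#
#   docker compose -f docker/docker-compose.yml up --build
#
# If the optional ollama service above is uncommented (along with its volume),
# pull a model into it once the container is running, e.g.:
#
#   docker compose -f docker/docker-compose.yml exec ollama ollama pull llama3
#
# The backend then reaches it at the OLLAMA_HOST value set in its environment
# (http://ollama:11434), resolved via the compose network's service name.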