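# Compose stack for the demo: Postgres, Redis, the backend API, the frontend,
# and an optional Ollama service for local LLM inference.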
services:
  postgres:
    image: postgres:16-alpine
    environment:
      POSTGRES_USER: sentinel
      POSTGRES_PASSWORD: sentinel
      POSTGRES_DB: sentinelai
    ports:
      - "5432:5432"
    volumes:
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U sentinel -d sentinelai"]
      interval: 5s
      timeout: 5s
      retries: 10

  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"

  backend:
    build:
      context: ..
      dockerfile: docker/Dockerfile.backend
    environment:
      DATABASE_URL: postgresql+asyncpg://sentinel:sentinel@postgres:5432/sentinelai
      REDIS_URL: redis://redis:6379/0
      CORS_ORIGINS: http://localhost:3000
      COLLECTOR_WATCH_DIR: /app/demo_logs
      OLLAMA_HOST: http://ollama:11434
      ENABLE_MOCK_CLOUD_POLL: "1"
    ports:
      - "8000:8000"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_started
    volumes:
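      # Bind-mount the sample logs for the collector (matches COLLECTOR_WATCH_DIR above).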
      - ../demo_logs:/app/demo_logs

  frontend:
    build:
      context: ..
      dockerfile: docker/Dockerfile.frontend
    environment:
      NEXT_PUBLIC_API_URL: http://localhost:8000
    ports:
      - "3000:3000"
    depends_on:
      - backend

  # Optional: run Ollama with AMD ROCm for local Llama 3 / Qwen / Mistral inference.
  # Requires a Linux host with an AMD GPU and the ROCm-tagged Ollama image.
  # ollama:
  #   image: ollama/ollama:rocm
  #   devices:
  #     - /dev/kfd
  #     - /dev/dri
  #   ports:
  #     - "11434:11434"
  #   volumes:
  #     - ollama:/root/.ollama
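  #
  # To enable it, uncomment this service and the "ollama" volume below, then pull a
  # model once the container is up, e.g.:
  #   docker compose exec ollama ollama pull llama3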

volumes:
  pgdata: {}
  # ollama: {}