File size: 2,146 Bytes
788fd6a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
# docker-compose.yml
#
# Local development stack: PostgreSQL, Redis, and the agent worker service.
# NOTE(review): `version` is obsolete under the Compose Specification (Docker
# Compose v2 ignores it with a warning); kept only for legacy v1 tooling.

version: '3.8'

services:
  postgres:
    # If the pgvector extension is needed, use the pgvector/pgvector:pg15
    # image (ankane/pgvector is deprecated in favor of pgvector/pgvector).
    image: postgres:15
    container_name: agent_postgres
    environment:
      # SECURITY: hard-coded credentials are acceptable for local dev only —
      # move to an .env file or secret store before any shared deployment.
      POSTGRES_USER: user
      POSTGRES_PASSWORD: password
      POSTGRES_DB: agentdb
    volumes:
      - postgres_data:/var/lib/postgresql/data
    ports:
      - "5432:5432" # Expose only if needed externally
    # Healthcheck lets dependents wait until the DB actually accepts
    # connections, not merely until the container process starts.
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U user -d agentdb"]
      interval: 5s
      timeout: 3s
      retries: 5
    restart: unless-stopped
    # pgvector is enabled at runtime via `CREATE EXTENSION vector;` — it does
    # NOT require shared_preload_libraries (and the library name would be
    # 'vector', not 'pgvector', if preloading were ever wanted).

  redis:
    image: redis:7
    container_name: agent_redis
    ports:
      - "6379:6379" # Expose only if needed externally
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5
    restart: unless-stopped

  agent-worker:
    build: . # Assumes Dockerfile is in the current directory
    container_name: agent_worker
    # Wait for dependencies to be HEALTHY, not merely started; the short-form
    # `depends_on: [postgres, redis]` only orders container startup and the
    # worker would otherwise race the DB/broker on first boot.
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      # Override or add environment variables here.
      # Values are quoted so YAML keeps them as strings: unquoted 4 / 1 parse
      # as integers, which some Compose/schema tooling rejects for env maps.
      NUM_WORKERS: "4"      # Example: run 4 workers in this container (or scale replicas)
      PYTHONUNBUFFERED: "1" # Ensure logs appear immediately
      # Pass GPU capabilities if learning worker runs here (see Docker Compose GPU support docs)
    # volumes:
      # Mount code if needed for development: .:/app
    command: ["python", "your_main_script.py"] # Command to start workers
    restart: unless-stopped

  # Optional: Separate Learning Worker Service (if GPU resources are specific)
  # learning-worker:
  #   build: . # Or a specific Dockerfile for learning
  #   container_name: learning_worker
  #   depends_on:
  #     postgres:
  #       condition: service_healthy
  #   environment:
  #     # ... similar env vars ...
  #     LEARNING_INTERVAL_HOURS: "6"
  #     DEVICE: cuda
  #   deploy: # Requires Docker Swarm or Kubernetes for GPU allocation
  #     resources:
  #       reservations:
  #         devices:
  #           - driver: nvidia
  #             count: 1 # Request 1 GPU
  #             capabilities: [gpu]
  #   command: ["python", "your_learning_script.py"] # Command to start scheduler/learning
  #   restart: unless-stopped

volumes:
  postgres_data: