# Source: AfVaCL / docker-compose.yml — "Create docker-compose.yml" by tugaa, commit 788fd6a (verified)
# docker-compose.yml
#
# Stack: PostgreSQL 15 + Redis 7 + an agent worker built from the local Dockerfile.
# Indentation restored to standard 2-space Compose structure.
version: '3.8'

services:
  postgres:
    image: postgres:15  # Consider timescale/pgvector image if needed: ankane/pgvector
    container_name: agent_postgres
    environment:
      POSTGRES_USER: user
      # NOTE(review): plaintext credential — move to an env file or secret store before production use.
      POSTGRES_PASSWORD: password
      POSTGRES_DB: agentdb
    volumes:
      - postgres_data:/var/lib/postgresql/data
    ports:
      - "5432:5432"  # Expose only if needed externally
    restart: unless-stopped
    # For pgvector extension (if using ankane/pgvector image or standard postgres + setup):
    # command: postgres -c shared_preload_libraries='pg_stat_statements,pgvector'  # Example

  redis:
    image: redis:7
    container_name: agent_redis
    ports:
      - "6379:6379"  # Expose only if needed externally
    restart: unless-stopped

  agent-worker:
    build: .  # Assumes Dockerfile is in the current directory
    container_name: agent_worker
    depends_on:
      - postgres
      - redis
    environment:
      # Override or add environment variables here.
      # Values are quoted: Compose env vars are strings, and unquoted 4/1 parse as YAML integers.
      NUM_WORKERS: "4"  # Example: Run 4 workers in this container (or scale replicas)
      PYTHONUNBUFFERED: "1"  # Ensure logs appear immediately
      # Pass GPU capabilities if learning worker runs here (see Docker Compose GPU support docs)
    # volumes:
    #   - .:/app  # Mount code if needed for development
    command: ["python", "your_main_script.py"]  # Command to start workers
    restart: unless-stopped

  # Optional: Separate Learning Worker Service (if GPU resources are specific)
  # learning-worker:
  #   build: .  # Or a specific Dockerfile for learning
  #   container_name: learning_worker
  #   depends_on:
  #     - postgres
  #   environment:
  #     # ... similar env vars ...
  #     LEARNING_INTERVAL_HOURS: "6"
  #     DEVICE: cuda
  #   deploy:  # GPU device reservations are honored by "docker compose up" (Compose v2) as well as Swarm
  #     resources:
  #       reservations:
  #         devices:
  #           - driver: nvidia
  #             count: 1  # Request 1 GPU
  #             capabilities: [gpu]
  #   command: ["python", "your_learning_script.py"]  # Command to start scheduler/learning
  #   restart: unless-stopped

volumes:
  postgres_data: