---
# Docker Compose stack: Redis (broker/cache), FastAPI backend, Celery ML
# worker, and a Vite dev-server frontend, sharing ./storage via bind mounts.
# NOTE(review): the top-level `version` key is deprecated and ignored by
# Compose v2; kept only for compatibility with older docker-compose binaries.
version: '3.8'

services:
  # Redis - Message broker and cache
  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    healthcheck:
      # Gate dependent services (api, worker) until Redis answers PING.
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5

  # Backend API
  api:
    build:
      context: ./backend
      dockerfile: Dockerfile
    ports:
      - "8000:8000"
    environment:
      - REDIS_URL=redis://redis:6379/0
      - STORAGE_PATH=/app/storage
      - API_HOST=0.0.0.0
      - API_PORT=8000
      - CORS_ORIGINS=http://localhost:5173,http://localhost:3000
      - YOURMT3_DEVICE=cpu
    volumes:
      # Bind-mount source for live reload; storage is shared with the worker.
      - ./backend:/app
      - ./storage:/app/storage
    depends_on:
      redis:
        condition: service_healthy
    # --reload pairs with the ./backend bind mount for dev hot-reloading.
    command: uvicorn main:app --host 0.0.0.0 --port 8000 --reload

  # Celery Worker (GPU-enabled for ML processing)
  worker:
    build:
      context: ./backend
      dockerfile: Dockerfile.worker
    environment:
      - REDIS_URL=redis://redis:6379/0
      - STORAGE_PATH=/app/storage
      - GPU_ENABLED=true
    volumes:
      - ./backend:/app
      - ./storage:/app/storage
    depends_on:
      redis:
        condition: service_healthy
    # --concurrency=1: one task at a time (ML jobs are resource-heavy).
    command: celery -A tasks worker --loglevel=info --concurrency=1
    # Uncomment for GPU support (requires NVIDIA Docker runtime)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]

  # Frontend (React + Vite)
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
    ports:
      - "5173:5173"
    environment:
      - VITE_API_URL=http://localhost:8000
    volumes:
      - ./frontend:/app
      # Anonymous volume so the bind mount above does not shadow the
      # node_modules installed inside the image.
      - /app/node_modules
    command: npm run dev -- --host 0.0.0.0

volumes:
  redis_data:
  # NOTE(review): `storage` is declared but no service references it — the
  # services bind-mount ./storage directly. Confirm whether this named
  # volume is still needed before removing it.
  storage: