# Source: AI-Media-Studio / docker-compose.yml
# Author: yiyang-8
# Commit: 6adb512 — "🚀 Geek Base upgrade: full AI super-site feature set"
# AI Media Studio - Docker Compose configuration
# Geek Base — AI global super site
#
# Usage:
#   Start all services: docker-compose up -d
#   Follow logs:        docker-compose logs -f
#   Stop services:      docker-compose down

# NOTE: `version` is obsolete in the Compose Specification — Compose v2 ignores
# it (with a warning). Kept only for compatibility with legacy docker-compose v1.
version: '3.8'

services:
  # ============================================
  # AI Media Studio main service (WebUI + API)
  # ============================================
  ai-media-studio:
    build:
      context: .
      dockerfile: Dockerfile
    image: ai-media-studio:latest
    container_name: ai-media-studio
    restart: unless-stopped
    # GPU support — requires the NVIDIA Container Toolkit on the host.
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    # Port mappings (quoted so YAML never parses them as sexagesimal ints)
    ports:
      - "7860:7860"  # Gradio WebUI
      - "8000:8000"  # FastAPI
    # Bind mounts
    volumes:
      - ./models:/app/models    # model cache
      - ./outputs:/app/outputs  # generated output files
      - ./configs:/app/configs  # configuration files
    # Environment variables
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - CUDA_VISIBLE_DEVICES=0
      - TORCH_HOME=/app/models/torch
      - HF_HOME=/app/models/huggingface
      # NOTE(review): TRANSFORMERS_CACHE is deprecated upstream in favor of
      # HF_HOME; kept for older transformers versions — confirm before removing.
      - TRANSFORMERS_CACHE=/app/models/huggingface
      - GRADIO_SERVER_NAME=0.0.0.0
      - GRADIO_SERVER_PORT=7860
    # Health check against the FastAPI /health endpoint (curl must exist in the image)
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s  # allow time for model loading before first probe
    # Log rotation so container logs cannot grow unbounded
    logging:
      driver: "json-file"
      options:
        max-size: "100m"
        max-file: "3"

  # ============================================
  # WebUI-only service (optional)
  # NOTE(review): binds the same host port 7860 as the main service — the
  # `webui-only` profile is assumed to run INSTEAD of it, not alongside.
  # ============================================
  webui:
    build:
      context: .
      dockerfile: Dockerfile
    image: ai-media-studio:latest
    container_name: ai-media-studio-webui
    profiles:
      - webui-only
    restart: unless-stopped
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    ports:
      - "7860:7860"
    volumes:
      - ./models:/app/models
      - ./outputs:/app/outputs
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - TORCH_HOME=/app/models/torch
      - HF_HOME=/app/models/huggingface
    command: ["webui"]

  # ============================================
  # API-only service (optional)
  # NOTE(review): binds the same host port 8000 as the main service — the
  # `api-only` profile is assumed to run INSTEAD of it, not alongside.
  # ============================================
  api:
    build:
      context: .
      dockerfile: Dockerfile
    image: ai-media-studio:latest
    container_name: ai-media-studio-api
    profiles:
      - api-only
    restart: unless-stopped
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    ports:
      - "8000:8000"
    volumes:
      - ./models:/app/models
      - ./outputs:/app/outputs
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - TORCH_HOME=/app/models/torch
      - HF_HOME=/app/models/huggingface
    command: ["api"]

  # ============================================
  # Redis cache (optional — task queue backend)
  # ============================================
  redis:
    image: redis:7-alpine
    container_name: ai-media-studio-redis
    profiles:
      - with-redis
    restart: unless-stopped
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    command: redis-server --appendonly yes  # AOF persistence across restarts

  # ============================================
  # Nginx reverse proxy (optional — production)
  # ============================================
  nginx:
    image: nginx:alpine
    container_name: ai-media-studio-nginx
    profiles:
      - production
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./docker/nginx.conf:/etc/nginx/nginx.conf:ro  # read-only config
      - ./docker/ssl:/etc/nginx/ssl:ro                # read-only certificates
    depends_on:
      - ai-media-studio

# ============================================
# Named volumes
# ============================================
volumes:
  redis_data:

# ============================================
# Networks
# ============================================
networks:
  default:
    name: ai-media-studio-network