# ishraq-quran-backend / docker-compose.yml
# Commit 991ca47 — "Fresh start: Configure for HF Spaces" (nsakib161)
---
# Compose definition for the Quran transcription API service.
# NOTE: the top-level `version` key is obsolete under Compose v2 and is
# ignored (with a warning); kept here for compatibility with older tooling.
version: '3.8'

services:
  quran-api:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: quran-transcription-api
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal parsing of HOST:CONTAINER pairs.
      - "8888:8888"
    environment:
      - PYTHONUNBUFFERED=1
      # NOTE(review): CUDA_VISIBLE_DEVICES and COMPUTE_TYPE=float16 imply GPU
      # inference, but the GPU `deploy` reservation below is commented out —
      # confirm the container falls back gracefully on CPU-only hosts.
      - CUDA_VISIBLE_DEVICES=0
      - WHISPER_MODEL=OdyAsh/faster-whisper-base-ar-quran
      - COMPUTE_TYPE=float16
      - CORS_ORIGINS=http://localhost:3000,http://localhost:5173
      - LOG_LEVEL=INFO
    volumes:
      # Cache Hugging Face models locally
      - huggingface_cache:/root/.cache/huggingface
      # Log output
      - ./logs:/app/logs
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8888/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      # Generous startup grace period — model download/load on first boot.
      start_period: 40s
    # Uncomment for GPU support (requires nvidia-docker)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]

  # Optional: Redis for caching (future use)
  # redis:
  #   image: redis:7-alpine
  #   container_name: quran-redis
  #   ports:
  #     - "6379:6379"
  #   restart: unless-stopped

volumes:
  # Named volume so downloaded Hugging Face models survive container rebuilds.
  huggingface_cache:
    driver: local

networks:
  default:
    name: quran-network