# syntax=docker/dockerfile:1
# Use Python 3.11 for better performance and compatibility.
# Optimized for Hugging Face Spaces with GPU support.
FROM python:3.11-slim

# Set working directory (WORKDIR creates it if missing — no mkdir needed)
WORKDIR /app

# Runtime environment:
# - unbuffered stdout/stderr so Spaces log streaming works in real time
# - Gradio bound to all interfaces on the Spaces-required port 7860
# - model/framework caches redirected under /tmp (writable on Spaces)
# NOTE(review): TRANSFORMERS_CACHE is deprecated in newer transformers
# releases in favor of HF_HOME; kept for compatibility with older versions.
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PYTHONHASHSEED=random \
    GRADIO_SERVER_NAME=0.0.0.0 \
    GRADIO_SERVER_PORT=7860 \
    HF_HOME=/tmp/.cache/huggingface \
    TORCH_HOME=/tmp/.cache/torch \
    TRANSFORMERS_CACHE=/tmp/.cache/huggingface/transformers

# Copy requirements first for better Docker layer caching: the heavy
# install layer below is reused until requirements.txt itself changes.
COPY requirements.txt .

# Install system packages, Python dependencies, and purge build-only
# packages — all in ONE layer. The original file purged build-essential
# in a *separate, later* RUN, which can never shrink the image: files
# deleted in a later layer still exist in the earlier install layer.
# Doing install + purge in the same layer is the only single-stage way
# to actually drop the build toolchain from the final image.
# --no-install-recommends (DL3015) avoids pulling unneeded packages.
# PyTorch comes from the CUDA 11.8 wheel index for GPU Spaces.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        ffmpeg \
        git \
        libffi-dev \
        libsndfile1 \
        libsndfile1-dev \
        libssl-dev \
    && pip install --no-cache-dir --upgrade pip setuptools wheel \
    && pip install --no-cache-dir torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 \
    && pip install --no-cache-dir -r requirements.txt \
    && apt-get purge -y build-essential libffi-dev libsndfile1-dev libssl-dev \
    && apt-get autoremove -y \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Copy application files last: source edits only invalidate these cheap
# layers, not the dependency layers above.
COPY app.py .
COPY README.md .
# Create cache directories and run as a dedicated non-root user instead
# of leaving the container as root with world-writable (chmod 777) caches.
# UID 1000 matches the user Hugging Face Spaces runs Docker Spaces as.
RUN useradd -m -u 1000 appuser \
    && mkdir -p /tmp/.cache/huggingface /tmp/.cache/torch \
    && chown -R appuser:appuser /tmp/.cache /app
USER appuser

# Document the Gradio port. EXPOSE does not publish anything by itself;
# Hugging Face Spaces expects the app to listen on 7860.
EXPOSE 7860

# Health check — verify the Gradio server answers on its root endpoint.
# The long --start-period allows for model download and initialization
# on first boot before failures count against the retry budget.
HEALTHCHECK --interval=30s --timeout=15s --start-period=300s --retries=5 \
    CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:7860/')" 2>/dev/null || exit 1

# Exec-form CMD: python runs as PID 1 and receives SIGTERM directly on
# container stop (shell form would wrap it in /bin/sh -c).
CMD ["python", "-u", "app.py"]