# FASTAPI / Dockerfile
# Source: Hugging Face Space (author: tejani), commit 8b0a52d (verified), 1.56 kB.
# The lines above were web-UI residue ("raw / history / blame"); kept here as a
# comment header so the file remains a valid Dockerfile.
# syntax=docker/dockerfile:1
FROM python:3.10-slim

# Working directory for the app (created automatically if missing)
WORKDIR /app

# System dependencies: gcc/python3-dev for building wheels, git/git-lfs for the
# repo and model clones, libgl1/libglib2.0-0 required by OpenCV at runtime.
# --no-install-recommends keeps the layer small; the apt list cache is removed
# in the same layer so it never persists in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    gcc \
    git \
    git-lfs \
    libgl1 \
    libglib2.0-0 \
    python3-dev \
    && rm -rf /var/lib/apt/lists/*

# Initialize git-lfs so large model files are fetched on clone
RUN git lfs install

# Clone the fastsdcpu repository into /app (shallow clone: only HEAD is needed).
# NOTE(review): the clone is unpinned — consider `--branch <tag>` so rebuilds
# are reproducible.
RUN git clone --depth 1 https://github.com/rupeshs/fastsdcpu.git .

# Create writable directories for cache, configs, and results.
# NOTE(review): 777 appears intentional so the platform can run the container
# under an arbitrary non-root UID (e.g. Hugging Face Spaces); confirm before
# tightening permissions.
RUN mkdir -p /app/.cache/huggingface /app/configs /app/results && \
    chmod -R 777 /app/.cache /app/configs /app/results

# Hugging Face cache location, generic cache root, and OpenVINO telemetry opt-out
ENV HF_HOME=/app/.cache/huggingface \
    XDG_CACHE_HOME=/app/.cache \
    OPENVINO_TELEMETRY=0

# Install Python dependencies into a virtual environment.
# --no-cache-dir keeps pip's download cache out of the image layer;
# CPU-only torch is pinned and pulled from the PyTorch CPU wheel index.
RUN python -m venv env && \
    . env/bin/activate && \
    pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir torch==2.2.2 --index-url https://download.pytorch.org/whl/cpu && \
    pip install --no-cache-dir -r requirements.txt

# Make the venv the default Python environment at runtime: putting its bin dir
# first on PATH (and setting VIRTUAL_ENV, as `activate` would) lets the
# exec-form CMD below run the venv's python without shell activation.
ENV VIRTUAL_ENV=/app/env \
    PATH="/app/env/bin:$PATH"

# Download a default model (corrected URL for lcm-lora-sdv1-5); the build
# continues with a warning if the clone fails.
RUN git clone https://huggingface.co/latent-consistency/lcm-lora-sdv1-5 /models/lcm-lora-sdv1-5 || \
    echo "Model clone failed. Please check network or manually download the model."

# Register the downloaded model path with fastsdcpu
RUN echo "/models/lcm-lora-sdv1-5" > configs/lcm-lora-models.txt && \
    chmod 666 configs/lcm-lora-models.txt

# Document the API port (EXPOSE does not publish it)
EXPOSE 8000

# NOTE(review): no USER directive — the container runs as root unless the
# hosting platform overrides it; add a non-root user if self-hosting.
# Exec-form CMD: the venv's python (resolved via PATH) runs as PID 1 and
# receives SIGTERM directly on `docker stop`.
CMD ["python", "src/app.py", "--api"]