File size: 2,135 Bytes
a01ae0c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a53e306
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
# Pinned minor Python version on a slim Debian base keeps the image small and
# the build reproducible.
# NOTE(review): bullseye is Debian oldstable — consider python:3.11-slim-bookworm
# once the (currently disabled) system packages below are revalidated against it.
FROM python:3.11-slim-bullseye

# System dependencies, currently disabled. Kept for reference:
#   - `sed` enables the contrib/non-free apt components required for `unrar`
#   - libgl1 / libglib2.0-0 are common OpenCV runtime libraries
#   - apt list cleanup happens in the SAME RUN so the layer stays small
# RUN sed -i 's/main/main contrib non-free/' /etc/apt/sources.list && \
#     apt-get update && \
#     apt-get install -y --no-install-recommends \
#     unrar \
#     libgl1 \
#     libglib2.0-0 \
#     # ffmpeg for video processing
#     ffmpeg \
#     # git for transformers dependencies
#     git \
#     && rm -rf /var/lib/apt/lists/*

# Absolute working directory; created automatically if missing.
WORKDIR /app

# Upgrade packaging tooling first, in its own layer, so the heavier dependency
# installs below use a current resolver/wheel support and this layer caches
# independently of requirements.txt changes.
RUN pip install --no-cache-dir --upgrade pip setuptools wheel packaging

# Copy ONLY the requirements manifest before the application source so the
# expensive dependency layer is reused whenever source code changes but the
# dependency list does not.
#
# --find-links points at the CPU-only PyTorch wheel index so the slim image
# avoids multi-GB CUDA wheels. PyPI (https://pypi.org/simple) is already pip's
# default index, so the previous `--extra-index-url https://pypi.org/simple`
# was a no-op and has been dropped.
COPY requirements.txt .
RUN pip install --no-cache-dir \
    --find-links https://download.pytorch.org/whl/cpu \
    -r requirements.txt

# # Install transformers with specific version that works with BLIP-2
# RUN pip install --no-cache-dir \
#     accelerate==0.27.2 \
#     transformers==4.36.2 \
#     timm==0.9.12 \
#     einops==0.7.0 \
#     bitsandbytes==0.42.0  # Helps with memory optimization

# For BLIP-2 specifically, we need these. protobuf is pinned to the 3.20.x
# series and sentencepiece provides the tokenizer backend.
RUN pip install --no-cache-dir \
    protobuf==3.20.3 \
    sentencepiece==0.2.0

# Create the non-root runtime user BEFORE copying the app so no follow-up
# recursive chown is needed. UID 1000 matches the COPY --chown below.
RUN useradd -m -u 1000 user

# Copy application code already owned by the runtime user. Doing the chown at
# COPY time avoids a later `RUN chown -R`, which would rewrite file metadata
# in a new overlay layer and roughly double the app code's on-disk size.
# Numeric IDs are used so the instruction works regardless of /etc/passwd.
COPY --chown=1000:1000 . .

# Writable working directories for downloads/extraction/analysis output,
# owned by the runtime user in the same layer that creates them.
RUN mkdir -p /app/downloads /app/extracted /app/extracted_frames /app/analysis_results && \
    chown user:user /app/downloads /app/extracted /app/extracted_frames /app/analysis_results

# Drop root privileges for everything from here on (runtime included).
USER user

# Runtime environment: quieter logging, unbuffered Python output for container
# logs, and explicit CPU-only operation (no CUDA devices visible). Grouped
# into one ENV instruction — a single layer, key=value form throughout.
ENV HF_HUB_DISABLE_PROGRESS=1 \
    TF_CPP_MIN_LOG_LEVEL=3 \
    PYTORCH_NO_CUDA_MEMORY_CACHING=1 \
    TOKENIZERS_PARALLELISM=false \
    PYTHONUNBUFFERED=1 \
    CUDA_VISIBLE_DEVICES=""

# Pre-download the BLIP-2 model during build (optional)
# RUN python -c "from transformers import AutoModel; AutoModel.from_pretrained('Salesforce/blip2-opt-2.7b', trust_remote_code=True)"

# Documentation only (does not publish the port); run with `-p 7860:7860`.
EXPOSE 7860

# uvicorn takes an import spec of the form `module:attribute`, NOT a file
# path — the previous "test_ai_integration.py" would fail at startup with
# "Could not import module". Exec (JSON-array) form keeps uvicorn as PID 1 so
# it receives SIGTERM from `docker stop`.
# NOTE(review): assumes the ASGI application object in test_ai_integration.py
# is named `app` — confirm against the source file.
CMD ["uvicorn", "test_ai_integration:app", "--host", "0.0.0.0", "--port", "7860", "--workers", "1"]