Fola-AI committed on
Commit
519612a
·
1 Parent(s): 1bb33fd

Fix Dockerfile - use libgl1 for Debian Trixie compatibility

Browse files
Files changed (1) hide show
  1. Dockerfile +4 -21
Dockerfile CHANGED
@@ -2,12 +2,6 @@
2
  # FarmEyes - HuggingFace Spaces Dockerfile
3
  # =============================================================================
4
  # AI-Powered Crop Disease Detection for African Farmers
5
- #
6
- # This Dockerfile is optimized for HuggingFace Spaces free tier:
7
- # - Uses Python 3.10 slim image
8
- # - Installs llama-cpp-python for CPU inference
9
- # - Downloads N-ATLaS GGUF model at runtime (~4.92GB)
10
- # - Runs on port 7860 (HF Spaces default)
11
  # =============================================================================
12
 
13
  FROM python:3.10-slim
@@ -22,17 +16,16 @@ ENV HOST=0.0.0.0
22
  ENV PORT=7860
23
 
24
  # Install system dependencies
25
- # - ffmpeg: for audio processing (Whisper)
26
- # - libsm6, libxext6, libgl1: for OpenCV (image processing)
27
- # - build-essential, cmake: for compiling llama-cpp-python
28
  RUN apt-get update && apt-get install -y --no-install-recommends \
29
  ffmpeg \
30
  libsm6 \
31
  libxext6 \
32
- libgl1-mesa-glx \
33
  build-essential \
34
  cmake \
35
  git \
 
36
  && rm -rf /var/lib/apt/lists/*
37
 
38
  # Copy requirements first (for Docker cache optimization)
@@ -42,11 +35,9 @@ COPY requirements.txt .
42
  RUN pip install --no-cache-dir --upgrade pip
43
 
44
  # Install Python dependencies
45
- # Note: llama-cpp-python is compiled for CPU (no CUDA on free tier)
46
  RUN pip install --no-cache-dir -r requirements.txt
47
 
48
  # Install llama-cpp-python for CPU
49
- # This enables GGUF model inference
50
  RUN pip install --no-cache-dir llama-cpp-python
51
 
52
  # Copy all application code
@@ -58,13 +49,5 @@ RUN mkdir -p /app/uploads /app/temp
58
  # Expose port 7860 (HuggingFace Spaces default)
59
  EXPOSE 7860
60
 
61
- # Health check
62
- HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
63
- CMD curl -f http://localhost:7860/api/health || exit 1
64
-
65
  # Run the application
66
- # The app will:
67
- # 1. Start FastAPI server
68
- # 2. Download N-ATLaS GGUF model on first request (~5-15 min)
69
- # 3. Serve the web interface
70
- CMD ["python", "main.py"]
 
2
  # FarmEyes - HuggingFace Spaces Dockerfile
3
  # =============================================================================
4
  # AI-Powered Crop Disease Detection for African Farmers
 
 
 
 
 
 
5
  # =============================================================================
6
 
7
  FROM python:3.10-slim
 
16
  ENV PORT=7860
17
 
18
  # Install system dependencies
19
+ # Note: Using libgl1 instead of libgl1-mesa-glx for Debian Trixie compatibility
 
 
20
  RUN apt-get update && apt-get install -y --no-install-recommends \
21
  ffmpeg \
22
  libsm6 \
23
  libxext6 \
24
+ libgl1 \
25
  build-essential \
26
  cmake \
27
  git \
28
+ curl \
29
  && rm -rf /var/lib/apt/lists/*
30
 
31
  # Copy requirements first (for Docker cache optimization)
 
35
  RUN pip install --no-cache-dir --upgrade pip
36
 
37
  # Install Python dependencies
 
38
  RUN pip install --no-cache-dir -r requirements.txt
39
 
40
  # Install llama-cpp-python for CPU
 
41
  RUN pip install --no-cache-dir llama-cpp-python
42
 
43
  # Copy all application code
 
49
  # Expose port 7860 (HuggingFace Spaces default)
50
  EXPOSE 7860
51
 
 
 
 
 
52
  # Run the application
53
+ CMD ["python", "main.py"]