akryldigital committed on
Commit
0bc84fc
·
verified ·
1 Parent(s): f17dc4f

change python alias

Browse files
Files changed (1) hide show
  1. Dockerfile +15 -5
Dockerfile CHANGED
@@ -1,5 +1,6 @@
1
  # FROM python:3.11-slim
2
  # Use HuggingFace's pre-built GPU image (includes CUDA, PyTorch, Transformers)
 
3
  FROM huggingface/transformers-pytorch-gpu:latest
4
 
5
 
@@ -11,6 +12,16 @@ RUN apt-get update && apt-get install -y \
11
  curl \
12
  git \
13
  && rm -rf /var/lib/apt/lists/*
 
 
 
 
 
 
 
 
 
 
14
 
15
  # Copy requirements first (for better Docker layer caching)
16
  COPY requirements.txt ./
@@ -22,9 +33,9 @@ RUN pip3 install --no-cache-dir -r requirements.txt
22
  # This caches models in the Docker image for faster container startup
23
  # IMPORTANT: Set cache directory BEFORE downloading to ensure models are in /app/.cache/huggingface
24
  ENV HF_HOME=/app/.cache/huggingface
25
- ENV TRANSFORMERS_CACHE=/app/.cache/huggingface
26
- ENV HF_DATASETS_CACHE=/app/.cache/huggingface
27
  ENV HF_HUB_CACHE=/app/.cache/huggingface
 
 
28
  ENV SENTENCE_TRANSFORMERS_HOME=/app/.cache/huggingface
29
 
30
  COPY download_models.py ./
@@ -32,7 +43,7 @@ COPY src/config/settings.yaml ./src/config/settings.yaml
32
  RUN mkdir -p /app/.cache/huggingface && \
33
  chmod -R 755 /app/.cache && \
34
  chmod -R 755 /app/.cache/huggingface && \
35
- python download_models.py
36
 
37
  # Copy all application files (excluding .dockerignore patterns)
38
  COPY . .
@@ -51,8 +62,8 @@ RUN mkdir -p /app/.streamlit && \
51
 
52
 
53
  ENV STREAMLIT_CONFIG_HOME=/app/.streamlit
54
- ENV STREAMLIT_USER_BASE_PATH=/app/.cache/streamlit
55
  ENV STREAMLIT_BROWSER_GATHER_USAGE_STATS=false
 
56
 
57
  # Expose Streamlit port (HF Spaces maps to 7860 automatically)
58
  EXPOSE 8501
@@ -62,7 +73,6 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
62
  CMD curl --fail http://localhost:8501/_stcore/health || exit 1
63
 
64
  #temp development commands
65
- RUN pip3 install plotly google-genai
66
  RUN python3 -c "import torch; print(torch.cuda.is_available(), torch.cuda.get_device_name(0))"
67
  # RUN mkdir /app/conversations && chmod -R 777 conversations
68
  # RUN mkdir /app/feedback && chmod -R 777 feedback
 
1
  # FROM python:3.11-slim
2
  # Use HuggingFace's pre-built GPU image (includes CUDA, PyTorch, Transformers)
3
+ # FROM huggingface/transformers-pytorch-amd-gpu:latest
4
  FROM huggingface/transformers-pytorch-gpu:latest
5
 
6
 
 
12
  curl \
13
  git \
14
  && rm -rf /var/lib/apt/lists/*
15
+
16
+ # Upgrade Python to 3.9
17
+ # RUN apt-get update && \
18
+ # apt-get install -y python3.9 python3.9-distutils python3.9-dev && \
19
+ # update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 && \
20
+ # curl -sS https://bootstrap.pypa.io/get-pip.py | python3.9 && \
21
+ # rm -rf /var/lib/apt/lists/*
22
+
23
+ # Verify
24
+ RUN python3 -V
25
 
26
  # Copy requirements first (for better Docker layer caching)
27
  COPY requirements.txt ./
 
33
  # This caches models in the Docker image for faster container startup
34
  # IMPORTANT: Set cache directory BEFORE downloading to ensure models are in /app/.cache/huggingface
35
  ENV HF_HOME=/app/.cache/huggingface
 
 
36
  ENV HF_HUB_CACHE=/app/.cache/huggingface
37
+ ENV HF_DATASETS_CACHE=/app/.cache/huggingface
38
+ ENV TRANSFORMERS_CACHE=/app/.cache/huggingface
39
  ENV SENTENCE_TRANSFORMERS_HOME=/app/.cache/huggingface
40
 
41
  COPY download_models.py ./
 
43
  RUN mkdir -p /app/.cache/huggingface && \
44
  chmod -R 755 /app/.cache && \
45
  chmod -R 755 /app/.cache/huggingface && \
46
+ python3 download_models.py
47
 
48
  # Copy all application files (excluding .dockerignore patterns)
49
  COPY . .
 
62
 
63
 
64
  ENV STREAMLIT_CONFIG_HOME=/app/.streamlit
 
65
  ENV STREAMLIT_BROWSER_GATHER_USAGE_STATS=false
66
+ ENV STREAMLIT_USER_BASE_PATH=/app/.cache/streamlit
67
 
68
  # Expose Streamlit port (HF Spaces maps to 7860 automatically)
69
  EXPOSE 8501
 
73
  CMD curl --fail http://localhost:8501/_stcore/health || exit 1
74
 
75
  #temp development commands
 
76
  RUN python3 -c "import torch; print(torch.cuda.is_available(), torch.cuda.get_device_name(0))"
77
  # RUN mkdir /app/conversations && chmod -R 777 conversations
78
  # RUN mkdir /app/feedback && chmod -R 777 feedback