nihardon committed on
Commit
40250a0
·
verified ·
1 Parent(s): 4f57bb7

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +10 -8
Dockerfile CHANGED
@@ -1,22 +1,24 @@
1
- # 1. Use Python 3.9 (Standard Linux)
2
  FROM python:3.9
3
 
4
- # 2. Upgrade pip (Essential)
5
  RUN pip install --no-cache-dir --upgrade pip
6
 
7
- # 3. Install Hugging Face Hub FIRST (Pinned to safe version)
8
  RUN pip install "huggingface_hub<0.25.0"
9
 
10
- # 4. Install Llama-cpp-python via DIRECT URL
11
- # We point directly to the v0.2.55 manylinux wheel.
12
- # This bypasses the index search and GUARANTEES the correct file.
13
  RUN pip install --no-cache-dir \
14
- https://github.com/abetlen/llama-cpp-python/releases/download/v0.2.55/llama_cpp_python-0.2.55-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
 
 
15
 
16
  # 5. Install Gradio
17
  RUN pip install --no-cache-dir gradio
18
 
19
- # 6. Run
20
  WORKDIR /app
21
  COPY . .
22
  CMD ["python", "app.py"]
 
1
+ # 1. Use Python 3.9 (This is the version that worked for you)
2
  FROM python:3.9
3
 
4
+ # 2. Upgrade pip (Essential to find the correct wheels)
5
  RUN pip install --no-cache-dir --upgrade pip
6
 
7
+ # 3. Install the specific Hugging Face version (Prevents the crash)
8
  RUN pip install "huggingface_hub<0.25.0"
9
 
10
+ # 4. Install llama-cpp-python
11
+ # We use the EXACT method that worked in Turn 16.
12
+ # --prefer-binary makes pip prefer a prebuilt wheel (fast) over building from source.
13
  RUN pip install --no-cache-dir \
14
+ llama-cpp-python \
15
+ --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu \
16
+ --prefer-binary
17
 
18
  # 5. Install Gradio
19
  RUN pip install --no-cache-dir gradio
20
 
21
+ # 6. Run the app
22
  WORKDIR /app
23
  COPY . .
24
  CMD ["python", "app.py"]