CooLLaMACEO committed on
Commit
8e30e39
·
verified ·
1 Parent(s): ad138c6

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +10 -14
Dockerfile CHANGED
@@ -1,24 +1,20 @@
1
  FROM python:3.10-slim
2
 
3
- # Install system dependencies
 
4
  RUN apt-get update && apt-get install -y \
5
  build-essential \
6
- python3-dev \
7
- wget \
8
- libgomp1 \
9
  && rm -rf /var/lib/apt/lists/*
10
 
11
- # 1. DOWNLOAD MISTRAL 7.7GB MODEL
12
- # Note: I've updated the URL to the specific Q8_0 file
13
- RUN wget -q -O model.gguf https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF/resolve/main/mistral-7b-v0.1.Q4_K_M.gguf
14
 
15
- # 2. Install llama-cpp-python using pre-built wheel
16
- RUN pip install --no-cache-dir \
17
- https://huggingface.co/Luigi/llama-cpp-python-wheels-hf-spaces-free-cpu/resolve/main/llama_cpp_python-0.3.22-cp310-cp310-linux_x86_64.whl
18
 
19
- # 3. Install web server basics
20
- RUN pip install --no-cache-dir fastapi uvicorn
21
  COPY app.py .
 
 
 
22
 
23
- # Port 7860 for Hugging Face
24
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
# syntax=docker/dockerfile:1
FROM python:3.10-slim

WORKDIR /app

# build-essential: native extension build for llama-cpp-python; curl: model fetch.
# --no-install-recommends keeps the layer minimal, and the apt list cleanup is in
# the same RUN so the package cache never persists into an image layer.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        curl \
    && rm -rf /var/lib/apt/lists/*

# Runtime dependencies.
# NOTE(review): versions are unpinned, so rebuilds are not reproducible --
# pin exact versions (e.g. fastapi==x.y.z) once the working set is confirmed.
RUN pip install --no-cache-dir fastapi uvicorn llama-cpp-python

# Fetch the MPT-7B-chat Q2_K GGUF model at build time (direct download link).
# -f makes curl exit non-zero on an HTTP error instead of saving the server's
# HTML error page as "mpt-7b-q2.gguf", which would let a broken build succeed
# silently and only fail at runtime when the model is loaded.
RUN curl -fL "https://huggingface.co/maddes8cht/mosaicml-mpt-7b-chat-gguf/resolve/main/mosaicml-mpt-7b-chat-Q2_K.gguf?download=true" \
        -o mpt-7b-q2.gguf

COPY app.py .
COPY index.html .

# Drop root for the runtime process; copied files and the model stay
# root-owned but world-readable, which is all the server needs.
RUN useradd --system --uid 10001 --no-create-home --home /app app
USER app

# Port 7860 is the Hugging Face Spaces convention (EXPOSE is documentation
# only -- the app itself must bind 0.0.0.0:7860; verify app.py does so).
EXPOSE 7860

CMD ["python", "app.py"]