# syntax=docker/dockerfile:1
# Dockerfile
FROM python:3.10-slim

WORKDIR /app

# System packages: wget is used at build time for the model download, curl is
# kept for ad-hoc container probing. --no-install-recommends keeps the image
# small, and the apt list cache is removed in the same layer so it never
# persists in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Upgrade pip before any package install so every install below runs with the
# current resolver (the original upgraded pip only after llama-cpp-python was
# already installed).
RUN pip install --no-cache-dir --upgrade pip

# Install llama-cpp-python from its prebuilt CPU wheel index, avoiding a slow
# llama.cpp source build inside the image. Version pinned for reproducibility.
RUN pip install --no-cache-dir \
    --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu \
    llama-cpp-python==0.2.24

# Copy only the requirements manifest before the app source so the dependency
# layer stays cached when api.py changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Download the model at build time so the container starts without network
# access. NOTE(review): the download is not checksum-verified; if upstream
# could change, pin it with a SHA256 check in this same RUN.
RUN wget --progress=bar:force:noscroll -O capybarahermes-2.5-mistral-7b.Q5_K_M.gguf \
  https://huggingface.co/TheBloke/CapybaraHermes-2.5-Mistral-7B-GGUF/resolve/main/capybarahermes-2.5-mistral-7b.Q5_K_M.gguf

# Copy the API application file
COPY api.py .

# Run the server as an unprivileged user: everything above (apt, pip, model
# download) needed root, serving requests does not. Files stay root-owned on
# purpose — they are world-readable and the app only reads them; a
# `chown -R` here would duplicate the multi-GB model file into a new layer.
RUN useradd --system --uid 10001 --home /app appuser
USER appuser

# Documentation only (EXPOSE does not publish): the port Hugging Face Spaces
# routes to. 7860 > 1024, so the non-root user can bind it.
EXPOSE 7860

# Exec-form CMD so uvicorn is PID 1 and receives SIGTERM on `docker stop`.
CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"]