JohnLicode committed on
Commit
5ec1ef2
·
verified ·
1 Parent(s): 43817cf

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +6 -5
Dockerfile CHANGED
@@ -1,7 +1,7 @@
1
  FROM ollama/ollama:latest
2
 
3
  # 1. Install dependencies
4
- RUN apt-get update && apt-get install -y curl bash python3 python3-pip && rm -rf /var/lib/apt/lists/*
5
 
6
  # 2. Download your model
7
  RUN curl -L "https://huggingface.co/JohnLicode/johnli-qwen-gguf/resolve/main/johnli-qwen.gguf?download=true" -o /root/johnli-qwen.gguf
@@ -12,13 +12,14 @@ RUN echo 'FROM /root/johnli-qwen.gguf' > /root/Modelfile && \
12
  echo 'PARAMETER temperature 0.7' >> /root/Modelfile && \
13
  echo 'PARAMETER num_predict 256' >> /root/Modelfile
14
 
15
- # 4. Copy app files
16
  WORKDIR /app
17
  COPY requirements.txt .
18
- RUN pip3 install --no-cache-dir -r requirements.txt
 
19
  COPY app.py .
20
 
21
- # 5. Create startup script that runs both Ollama and Gradio
22
  RUN printf '#!/bin/bash\n\
23
  echo "Starting Ollama server..."\n\
24
  ollama serve &\n\
@@ -26,7 +27,7 @@ sleep 10\n\
26
  echo "Creating model..."\n\
27
  ollama create johnli-persona -f /root/Modelfile\n\
28
  echo "Starting Gradio app..."\n\
29
- python3 /app/app.py\n' > /start.sh && chmod +x /start.sh
30
 
31
  # 6. Port for Gradio
32
  EXPOSE 7860
 
1
  FROM ollama/ollama:latest
2
 
3
  # 1. Install dependencies
4
+ RUN apt-get update && apt-get install -y curl bash python3 python3-pip python3-venv && rm -rf /var/lib/apt/lists/*
5
 
6
  # 2. Download your model
7
  RUN curl -L "https://huggingface.co/JohnLicode/johnli-qwen-gguf/resolve/main/johnli-qwen.gguf?download=true" -o /root/johnli-qwen.gguf
 
12
  echo 'PARAMETER temperature 0.7' >> /root/Modelfile && \
13
  echo 'PARAMETER num_predict 256' >> /root/Modelfile
14
 
15
+ # 4. Setup Python virtual environment and install packages
16
  WORKDIR /app
17
  COPY requirements.txt .
18
+ RUN python3 -m venv /app/venv && \
19
+ /app/venv/bin/pip install --no-cache-dir -r requirements.txt
20
  COPY app.py .
21
 
22
+ # 5. Create startup script
23
  RUN printf '#!/bin/bash\n\
24
  echo "Starting Ollama server..."\n\
25
  ollama serve &\n\
 
27
  echo "Creating model..."\n\
28
  ollama create johnli-persona -f /root/Modelfile\n\
29
  echo "Starting Gradio app..."\n\
30
+ /app/venv/bin/python /app/app.py\n' > /start.sh && chmod +x /start.sh
31
 
32
  # 6. Port for Gradio
33
  EXPOSE 7860