# syntax=docker/dockerfile:1
# Hugging Face Space image: runs an Ollama server with a custom persona model
# (johnli-persona) behind a Gradio chat UI served on port 7860.
# Base image bundles the Ollama server binary and its default entrypoint.
# NOTE(review): ':latest' is not reproducible (hadolint DL3007) — pin a specific
# tag or digest once a known-good ollama version is confirmed.
FROM ollama/ollama:latest
# 1. Install runtime dependencies.
#    update+install run in one layer (avoids the stale-apt-cache bug), the list
#    cache is removed in the same layer so it never persists in the image, and
#    --no-install-recommends keeps the layer minimal. Packages sorted for diffs.
RUN apt-get update && apt-get install -y --no-install-recommends \
      bash \
      curl \
      python3 \
      python3-pip \
      python3-venv \
    && rm -rf /var/lib/apt/lists/*
# 2. Download the GGUF model weights at build time.
#    -f makes curl exit non-zero on HTTP errors (404/5xx) instead of saving the
#    error page as the model file, which would only surface later when
#    'ollama create' tries to load a corrupt GGUF.
RUN curl -fSL "https://huggingface.co/JohnLicode/johnli-qwen-gguf/resolve/main/johnli-qwen.gguf?download=true" \
      -o /root/johnli-qwen.gguf
# 3. Write the Ollama Modelfile describing the persona model: base weights,
#    system prompt, and sampling parameters.
RUN printf '%s\n' \
      'FROM /root/johnli-qwen.gguf' \
      'SYSTEM "You are JohnLi digital persona. Respond as John Lenard Libertad - a 22-year-old developer who follows Stoic philosophy. Be friendly, professional, and concise."' \
      'PARAMETER temperature 0.7' \
      'PARAMETER num_predict 256' \
      > /root/Modelfile
# 4. Set up an isolated Python virtual environment and install app packages.
#    requirements.txt is copied before app.py so the (slow) pip layer stays
#    cached when only application code changes; --no-cache-dir keeps the pip
#    download cache out of the image.
WORKDIR /app
COPY requirements.txt .
RUN python3 -m venv /app/venv && \
    /app/venv/bin/pip install --no-cache-dir -r requirements.txt
COPY app.py .
# 5. Create the startup script: boot the Ollama server in the background, wait
#    until its HTTP API actually answers (the previous fixed 'sleep 10' raced
#    the server on slow hosts and 'ollama create' could fail), register the
#    persona model, then exec Gradio so Python receives SIGTERM directly.
RUN printf '%s\n' \
      '#!/bin/bash' \
      'set -e' \
      'echo "Starting Ollama server..."' \
      'ollama serve &' \
      '# Poll the API readiness endpoint (default port 11434) up to 30s.' \
      'for i in $(seq 1 30); do' \
      '  curl -fsS http://127.0.0.1:11434/ >/dev/null 2>&1 && break' \
      '  sleep 1' \
      'done' \
      'echo "Creating model..."' \
      'ollama create johnli-persona -f /root/Modelfile' \
      'echo "Starting Gradio app..."' \
      '# exec replaces the shell so the app becomes the script process (signals).' \
      'exec /app/venv/bin/python /app/app.py' \
      > /start.sh && chmod +x /start.sh
# 6. Gradio UI port. EXPOSE is documentation only (it publishes nothing), but
#    Hugging Face Spaces and operators read it as the service contract.
EXPOSE 7860

# 7. Run. The base image sets ENTRYPOINT to the 'ollama' CLI; clearing it lets
#    our startup script run instead. CMD stays exec-form (JSON array) so it can
#    be overridden at 'docker run' and is not wrapped in an extra /bin/sh -c.
ENTRYPOINT []
CMD ["/bin/bash", "/start.sh"]