- Dockerfile +11 -6
Dockerfile
CHANGED
|
@@ -1,12 +1,17 @@
|
|
| 1 |
#FROM nvidia/cuda:12.8.0-cudnn-devel-ubuntu22.04
|
| 2 |
FROM ubuntu:latest
|
| 3 |
WORKDIR /app
|
| 4 |
-
COPY --chown=user . /app
|
| 5 |
# Update the package list and install curl
|
| 6 |
-
RUN apt-get update
|
| 7 |
-
RUN curl -
|
| 8 |
-
RUN
|
| 9 |
-
RUN
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
ENV OLLAMA_MODELS=/.ollama/model
|
| 11 |
ENV OLLAMA_HOST=0.0.0.0
|
| 12 |
ENV OLLAMA_ORIGINS="[*]"
|
|
@@ -15,4 +20,4 @@ EXPOSE 7860
|
|
| 15 |
EXPOSE 11434
|
| 16 |
|
| 17 |
|
| 18 |
-
CMD ollama serve & sleep
|
|
|
|
# syntax=docker/dockerfile:1
# Ollama model-server image (CPU build). Swap in the CUDA base below for GPU builds.
#FROM nvidia/cuda:12.8.0-cudnn-devel-ubuntu22.04
# Pinned tag instead of :latest (DL3007). 24.04 specifically: the
# `pip --break-system-packages` flag used below requires pip >= 23,
# which is what Ubuntu 24.04's python3-pip ships.
FROM ubuntu:24.04
WORKDIR /app
#COPY --chown=user . /app

# OS packages: update + install in ONE layer (a separate `update` layer goes
# stale in the build cache, DL3009), skip recommended packages, and remove the
# apt lists in the same layer so they never land in the image (DL3015).
# NOTE(review): the blanket `apt-get upgrade` was dropped (DL3005) — bump the
# base-image tag instead when newer OS packages are needed.
RUN apt-get update -y && apt-get install -y --no-install-recommends \
      build-essential \
      ca-certificates \
      curl \
      git \
      gzip \
      htop \
      micro \
      psmisc \
      python3 \
      python3-dev \
      python3-pip \
      tar \
      wget \
      yt-dlp \
    && rm -rf /var/lib/apt/lists/*

# Python dependency; --no-cache-dir keeps the pip cache out of the layer (DL3042).
RUN pip install --no-cache-dir --break-system-packages langchain-yt-dlp

# Install Ollama via the official installer; `which ollama` fails the build
# early if the binary did not land on PATH.
# NOTE(review): `curl | sh` executes an unpinned remote script — consider
# pinning a specific Ollama release and verifying its checksum instead.
RUN curl -fsSL https://ollama.com/install.sh | sh && which ollama

# World-writable state paths so the container works when the runtime assigns
# an arbitrary UID (e.g. Hugging Face Spaces). Consolidated into one layer;
# mkdir -p creates the parent, so one chmod -R covers both directories.
RUN mkdir -p /.ollama/model \
    && chmod -R 777 /.ollama \
    && touch /.gitconfig \
    && chmod 777 /.gitconfig

# Runtime configuration: model directory, listen on all interfaces, and allow
# all origins. "*" is Ollama's documented wildcard — the previous "[*]" is not
# a valid origin pattern and would reject cross-origin requests.
ENV OLLAMA_MODELS=/.ollama/model \
    OLLAMA_HOST=0.0.0.0 \
    OLLAMA_ORIGINS="*"

EXPOSE 11434

# Start the server, give it 5s to come up, pre-pull the models, then `wait`
# on the server so the container keeps running once the pulls finish
# (previously the shell exited after the last pull, killing the backgrounded
# `ollama serve`). The old backgrounded `export OLLAMA_HOST=…` was removed:
# it ran in its own subshell and therefore affected nothing. Exec form keeps
# sh as PID 1's direct child so `docker stop` signals reach it.
CMD ["/bin/sh", "-c", "ollama serve & sleep 5 && ollama pull codellama:13b && ollama pull starcoder2:15b && ollama pull wizardlm-uncensored:latest && ollama pull tinyllama:1.1b && ollama pull llama2-uncensored:latest && ollama pull qwen3-embedding:0.6b && ollama pull embeddinggemma:latest; wait"]