kataresearch committed on
Commit
844323b
·
1 Parent(s): 49e02cd

initial commit

Browse files
Files changed (3) hide show
  1. Dockerfile +42 -0
  2. README.md +1 -1
  3. entrypoint.sh +15 -0
Dockerfile ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Ollama server image: installs Ollama plus the NVIDIA container toolkit
# (best effort) and serves on port 11434 via entrypoint.sh.
# NOTE(review): 'latest' is not reproducible — consider pinning (e.g. ubuntu:22.04).
FROM ubuntu:latest

# Tools needed to add the NVIDIA apt repo and run the Ollama install script.
# --no-install-recommends keeps the layer small (ca-certificates is therefore
# listed explicitly for the HTTPS curl calls below); removing the apt lists
# avoids baking stale package metadata into the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        bash \
        ca-certificates \
        curl \
        gnupg \
    && rm -rf /var/lib/apt/lists/*

# Add the NVIDIA container toolkit package repository, keyed to this image's
# Ubuntu codename (read from /etc/os-release at build time).
RUN curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey \
        | gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
    && echo "deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://nvidia.github.io/libnvidia-container/stable/deb/ $(. /etc/os-release; echo $UBUNTU_CODENAME) main" \
        > /etc/apt/sources.list.d/nvidia-container-toolkit.list

# Install the NVIDIA container toolkit. The '|| true' is intentional: the
# image must still build where the toolkit is unavailable for this codename.
RUN apt-get update \
    && (apt-get install -y --no-install-recommends nvidia-container-toolkit || true) \
    && rm -rf /var/lib/apt/lists/*

# Install Ollama via its official install script.
RUN curl -fsSL https://ollama.com/install.sh | sh
# Kept for reference: pinned installer that worked around an embedding bug.
# RUN curl -fsSL https://ollama.com/install.sh | sed 's#https://ollama.com/download#https://github.com/jmorganca/ollama/releases/download/v0.1.29#' | sh

# Ollama data directory; world-writable so the container can run as an
# arbitrary non-root UID (e.g. on Hugging Face Spaces).
RUN mkdir -p /.ollama && chmod 777 /.ollama

WORKDIR /.ollama

# Entry point script pulls the requested models, then keeps the server alive.
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

# Ollama's default API port.
EXPOSE 11434

# Comma-separated list of models to pull at startup; override at run time
# (e.g. 'docker run -e model=llama3 ...').
# FIX: the original 'ENV model=${model}' referenced an undefined build-time
# variable and therefore always produced an empty value anyway.
ENV model=""

# Default command. Effectively unused: the entrypoint script ignores its
# arguments and starts 'ollama serve' itself; retained for compatibility.
CMD ["ollama", "serve"]
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  title: Ollama Server
3
- emoji: 🌍
4
  colorFrom: gray
5
  colorTo: red
6
  sdk: docker
 
1
  ---
2
  title: Ollama Server
3
+ emoji: 🦙⚡︎
4
  colorFrom: gray
5
  colorTo: red
6
  sdk: docker
entrypoint.sh ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
# Container entry point: start the Ollama server, pull the requested models,
# then stay attached to the server process so the container remains alive.
#
# Environment:
#   model - comma-separated list of model names to pull (may be empty/unset).

set -uo pipefail

# BUG FIX: 'ollama pull' talks to a running server, so the server must be
# started *before* any pull. The original script pulled (and interactively
# 'ollama run') each model first, then exec'd the server last.
echo "Starting Ollama server..."
ollama serve &
server_pid=$!

# Give the server a moment to start accepting requests.
sleep 5

# Split the comma-separated $model list and pull each non-empty entry.
IFS=',' read -ra models <<< "${model:-}"
for m in "${models[@]}"; do
  [[ -n "$m" ]] || continue
  echo "Pulling model: $m"
  # Best effort: a failed pull is reported but does not kill the server.
  ollama pull "$m" || echo "Warning: failed to pull model '$m'" >&2
done

# Block on the server in the foreground; propagate its exit status.
echo "Ollama server running (pid ${server_pid})."
wait "$server_pid"