AIencoder committed on
Commit
d2dc2bf
·
verified ·
1 Parent(s): 6bb3de9

Update entrypoint.sh

Browse files
Files changed (1) hide show
  1. entrypoint.sh +8 -37
entrypoint.sh CHANGED
@@ -1,46 +1,17 @@
1
  #!/bin/bash
 
2
 
3
- echo "============================================"
4
- echo "🔥 GOD CODING MACHINE - Docker Edition"
5
- echo " 18GB RAM | CPU | No Rate Limits!"
6
- echo "============================================"
7
- echo ""
8
-
9
- # Start Ollama in background
10
- echo "🚀 Starting Ollama server..."
11
  ollama serve &
12
 
13
- # Wait for Ollama to be ready
14
- echo "⏳ Waiting for Ollama..."
15
- for i in {1..60}; do
16
- if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
17
- echo "✅ Ollama is ready!"
18
- break
19
- fi
20
- sleep 1
21
- done
22
-
23
- # Pull models optimized for 18GB RAM + CPU
24
- echo ""
25
- echo "📦 Downloading models (first run takes ~5-10 min)..."
26
- echo ""
27
-
28
- # Main model - Qwen 2.5 Coder 7B (best quality for our RAM)
29
- echo "📥 Pulling Qwen2.5-Coder:7b (main model)..."
30
- ollama pull qwen2.5-coder:7b
31
 
32
- # Smaller fast model for quick tasks
33
- echo "📥 Pulling Qwen2.5-Coder:3b (fast model)..."
34
  ollama pull qwen2.5-coder:3b
35
 
36
- echo ""
37
- echo "============================================"
38
- echo "✅ All models ready!"
39
- echo "============================================"
40
  ollama list
41
- echo ""
42
- echo "🌐 Starting Gradio interface..."
43
- echo ""
44
 
45
- # Start the Gradio app
46
- python app.py
 
#!/bin/bash
# Container entrypoint: start the Ollama server in the background, wait for
# its HTTP API to come up, pull the coder model, then run the Gradio app
# (app.py) in the foreground as the container's main process.
set -euo pipefail

echo "Starting Ollama..."
ollama serve &

# Poll the API instead of a fixed 'sleep 10': startup time varies by host,
# and a blind sleep races on slow machines. The previous version of this
# script polled exactly this endpoint, so curl is available in the image.
echo "Waiting for Ollama to start..."
for _ in {1..60}; do
  if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
    break
  fi
  sleep 1
done

echo "Pulling models..."
ollama pull qwen2.5-coder:3b
echo "Models ready!"
ollama list

echo "Starting Gradio..."
# exec replaces the shell so the Python process receives container signals
# (e.g. SIGTERM on 'docker stop') directly instead of being orphaned.
exec python app.py