AIencoder committed on
Commit
0b1db20
·
verified ·
1 Parent(s): eab1485

Update entrypoint.sh

Browse files
Files changed (1) hide show
  1. entrypoint.sh +29 -18
entrypoint.sh CHANGED
@@ -1,26 +1,37 @@
1
  #!/bin/bash
2
 
3
- echo "Starting Ollama server..."
4
- ollama serve &
5
 
6
- echo "Waiting for Ollama to be ready..."
7
- while ! curl -s http://localhost:11434/api/tags > /dev/null; do
8
- sleep 1
9
- done
10
- echo "✅ Ollama is ready!"
11
 
12
- echo "📥 Downloading models..."
 
 
 
 
 
 
 
 
 
13
 
14
- ollama pull qwen2.5-coder:1.5b
15
- ollama pull qwen2.5-coder:3b
16
- ollama pull qwen2.5-coder:7b
17
- ollama pull deepseek-coder:1.3b
18
- ollama pull deepseek-coder:6.7b
19
- ollama pull starcoder2:3b
20
- ollama pull codegemma:2b
 
 
 
 
 
21
 
22
- echo "✅ Models ready!"
23
- ollama list
24
 
25
- echo "🚀 Starting App..."
26
  python3 /app.py
 
1
#!/bin/bash
# Entrypoint (llama.cpp edition): download the GGUF model files from
# Hugging Face into /models, then launch the app.
set -euo pipefail

echo "🔥 Axon v6 - llama.cpp Edition"
echo "📥 Downloading models..."

# Create models directory
mkdir -p /models

# Download GGUFs from HuggingFace. The heredoc delimiter is quoted ('EOF')
# so the shell does not expand anything inside the Python source.
python3 << 'EOF'
from huggingface_hub import hf_hub_download

# (repo_id, filename) pairs; all q4_k_m quantizations.
models = [
    ("Qwen/Qwen2.5-Coder-7B-Instruct-GGUF", "qwen2.5-coder-7b-instruct-q4_k_m.gguf"),
    ("Qwen/Qwen2.5-Coder-3B-Instruct-GGUF", "qwen2.5-coder-3b-instruct-q4_k_m.gguf"),
    ("Qwen/Qwen2.5-Coder-1.5B-Instruct-GGUF", "qwen2.5-coder-1.5b-instruct-q4_k_m.gguf"),
]

for repo, filename in models:
    # Fixed garbled f-strings: the placeholders had been lost ("(unknown)").
    print(f"📥 Downloading {filename}...")
    try:
        # local_dir_use_symlinks is deprecated and ignored by current
        # huggingface_hub; local_dir alone already writes real files.
        hf_hub_download(
            repo_id=repo,
            filename=filename,
            local_dir="/models",
        )
        print(f"✅ {filename} ready!")
    except Exception as e:
        # Best-effort: report the failure and keep downloading the rest.
        print(f"❌ Failed to download {filename}: {e}")

print("✅ All models downloaded!")
EOF

echo "🚀 Starting Axon..."
python3 /app.py