AIencoder committed on
Commit
f3703d9
Β·
verified Β·
1 Parent(s): 4dee3a6

Update start.sh

Browse files
Files changed (1) hide show
  1. start.sh +42 -11
start.sh CHANGED
@@ -1,10 +1,18 @@
1
  #!/bin/bash
2
  set -e
3
 
 
 
 
 
 
 
 
4
  echo "------------------------------------------------"
5
  echo "πŸ› οΈ Setting up Environment"
6
  echo "------------------------------------------------"
7
 
 
8
  mkdir -p /app/models/qwen2.5-omni-7b
9
 
10
  download_file () {
@@ -12,28 +20,51 @@ download_file () {
12
  FILENAME=$2
13
  DEST_PATH=$3
14
 
15
- if [ ! -f "$DEST_PATH" ]; then
16
- echo "⬇️ Downloading $FILENAME ..."
17
- python3 -c "
 
 
 
 
18
  from huggingface_hub import hf_hub_download
19
- import shutil, sys
 
 
20
  try:
21
- path = hf_hub_download(repo_id='$REPO_ID', filename='$FILENAME')
 
 
 
 
22
  shutil.copy(path, '$DEST_PATH')
 
23
  except Exception as e:
24
- print(f'Error downloading $FILENAME: {e}')
25
  sys.exit(1)
26
  "
27
- fi
28
  }
29
 
30
- # Qwen2.5-Omni-7B Q8_0 - Text + Images + Audio (near-lossless quality)
31
- download_file "ggml-org/Qwen2.5-Omni-7B-GGUF" "Qwen2.5-Omni-7B-Q8_0.gguf" "/app/models/qwen2.5-omni-7b/Qwen2.5-Omni-7B-Q8_0.gguf"
32
- download_file "ggml-org/Qwen2.5-Omni-7B-GGUF" "mmproj-Qwen2.5-Omni-7B-Q8_0.gguf" "/app/models/qwen2.5-omni-7b/mmproj-Qwen2.5-Omni-7B-Q8_0.gguf"
 
 
 
 
 
 
 
 
 
33
 
 
34
  echo "------------------------------------------------"
35
- echo "πŸš€ Starting Server"
36
  echo "------------------------------------------------"
 
 
 
37
 
38
  exec /usr/local/bin/llama-server \
39
  --models-dir /app/models \
 
1
#!/bin/bash
# start.sh — download the Qwen2.5-Omni-7B GGUF artifacts and launch llama-server.
#
# Fail fast: -e exits on errors, -u errors on unset variables, and
# -o pipefail makes a pipeline fail if any stage fails (plain `set -e`
# does not catch pipeline-stage failures).
set -euo pipefail

echo "================================================"
echo "🚀 Axon - Qwen2.5-Omni-7B Multimodal Server"
echo "================================================"
echo ""
echo "📋 Capabilities: Text | Images | Audio"
echo "🔧 Quantization: Q8_0 (near-lossless)"
echo ""
echo "------------------------------------------------"
echo "🛠️ Setting up Environment"
echo "------------------------------------------------"

# Create model directory structure
mkdir -p /app/models/qwen2.5-omni-7b
18
#######################################
# Download one file from a Hugging Face repo unless it is already present.
# Arguments:
#   $1 - repo id (e.g. "ggml-org/Qwen2.5-Omni-7B-GGUF")
#   $2 - filename inside the repo
#   $3 - destination path on disk
# Outputs:  progress messages to stdout
# Returns:  0 on success or skip; the python helper exits 1 on download
#           failure, which aborts the script under `set -e`.
#######################################
download_file () {
  local repo_id=$1
  local filename=$2
  local dest_path=$3

  if [ -f "$dest_path" ]; then
    echo "✅ Already exists: $(basename -- "$dest_path")"
    return 0
  fi

  echo "⬇️ Downloading $filename ..."
  # Pass values through the environment instead of interpolating shell
  # variables into Python source: a value containing a quote would otherwise
  # break (or inject into) the generated code. The quoted 'PY' delimiter
  # keeps the here-doc literal (no shell expansion inside it).
  HF_REPO_ID="$repo_id" HF_FILENAME="$filename" HF_DEST_PATH="$dest_path" \
  python3 - <<'PY'
import os
import shutil
import sys

from huggingface_hub import hf_hub_download

repo_id = os.environ["HF_REPO_ID"]
filename = os.environ["HF_FILENAME"]
dest_path = os.environ["HF_DEST_PATH"]

try:
    path = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        cache_dir="/app/.cache",
    )
    shutil.copy(path, dest_path)
    print(f"✅ Downloaded: {filename}")
except Exception as e:
    print(f"❌ Error downloading {filename}: {e}")
    sys.exit(1)
PY
}
47
 
48
echo ""
echo "📦 Downloading Qwen2.5-Omni-7B Q8_0..."
echo ""

# Fetch the model weights and the multimodal projector from the same repo;
# both land next to each other under the model directory.
hf_repo="ggml-org/Qwen2.5-Omni-7B-GGUF"
model_dir="/app/models/qwen2.5-omni-7b"
for artifact in "Qwen2.5-Omni-7B-Q8_0.gguf" "mmproj-Qwen2.5-Omni-7B-Q8_0.gguf"; do
  download_file "$hf_repo" "$artifact" "$model_dir/$artifact"
done

echo ""
echo "------------------------------------------------"
echo "🚀 Starting llama.cpp Server"
echo "------------------------------------------------"
echo ""
echo "🌐 Server will be available at http://0.0.0.0:7860"
echo ""
68
 
69
  exec /usr/local/bin/llama-server \
70
  --models-dir /app/models \