Spaces:
Running
Running
Update start_gradio.sh
Browse files- start_gradio.sh +44 -18
start_gradio.sh
CHANGED
|
@@ -1,6 +1,5 @@
|
|
| 1 |
#!/bin/bash
|
| 2 |
# 启动脚本:可选下载模型,并直接启动 Gradio 应用(内部调用 vLLM.LLM)
|
| 3 |
-
|
| 4 |
set -euo pipefail
|
| 5 |
|
| 6 |
MODEL_REPO="${MODEL_REPO:-stepfun-ai/Step-Audio-2-mini-Think}"
|
|
@@ -14,6 +13,10 @@ GPU_MEMORY_UTILIZATION=${GPU_MEMORY_UTILIZATION:-0.9}
|
|
| 14 |
TOKENIZER_MODE=${TOKENIZER_MODE:-step_audio_2}
|
| 15 |
SERVED_MODEL_NAME=${SERVED_MODEL_NAME:-step-audio-2-mini-think}
|
| 16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
echo "=========================================="
|
| 18 |
echo "Step Audio 2 Gradio 启动脚本"
|
| 19 |
echo "MODEL_REPO: $MODEL_REPO"
|
|
@@ -21,30 +24,49 @@ echo "MODEL_DIR : $MODEL_DIR"
|
|
| 21 |
echo "PRELOAD_MODEL: $PRELOAD_MODEL"
|
| 22 |
echo "HOST/PORT: $HOST:$GRADIO_PORT"
|
| 23 |
echo "TP: $TENSOR_PARALLEL_SIZE | MAX_LEN: $MAX_MODEL_LEN"
|
|
|
|
| 24 |
echo "=========================================="
|
| 25 |
|
| 26 |
download_model() {
|
| 27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
echo "[Download] 使用 huggingface-cli"
|
| 29 |
huggingface-cli download "$MODEL_REPO" --local-dir "$MODEL_DIR" --local-dir-use-symlinks False
|
| 30 |
else
|
| 31 |
echo "[Download] 使用 python + huggingface_hub"
|
| 32 |
python3 -c "
|
|
|
|
| 33 |
from huggingface_hub import snapshot_download
|
| 34 |
print('开始下载: $MODEL_REPO')
|
| 35 |
-
snapshot_download(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
print('下载完成')
|
| 37 |
"
|
| 38 |
fi
|
| 39 |
}
|
| 40 |
|
| 41 |
if [[ "$PRELOAD_MODEL" == "1" ]]; then
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
download_model
|
| 46 |
else
|
| 47 |
echo "检测到本地模型: $MODEL_DIR"
|
|
|
|
| 48 |
fi
|
| 49 |
export MODEL_PATH="$MODEL_DIR"
|
| 50 |
else
|
|
@@ -52,15 +74,19 @@ else
|
|
| 52 |
export MODEL_PATH="${MODEL_PATH:-$MODEL_REPO}"
|
| 53 |
fi
|
| 54 |
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
#!/bin/bash
|
| 2 |
# 启动脚本:可选下载模型,并直接启动 Gradio 应用(内部调用 vLLM.LLM)
|
|
|
|
| 3 |
set -euo pipefail
|
| 4 |
|
| 5 |
MODEL_REPO="${MODEL_REPO:-stepfun-ai/Step-Audio-2-mini-Think}"
|
|
|
|
| 13 |
TOKENIZER_MODE=${TOKENIZER_MODE:-step_audio_2}
|
| 14 |
SERVED_MODEL_NAME=${SERVED_MODEL_NAME:-step-audio-2-mini-think}
|
| 15 |
|
| 16 |
+
# 新增:设置缓存目录到可写位置
|
| 17 |
+
export HF_HOME="${HF_HOME:-/tmp/hf_cache}"
|
| 18 |
+
export XDG_CACHE_HOME="${XDG_CACHE_HOME:-/tmp/hf_cache}"
|
| 19 |
+
|
| 20 |
echo "=========================================="
|
| 21 |
echo "Step Audio 2 Gradio 启动脚本"
|
| 22 |
echo "MODEL_REPO: $MODEL_REPO"
|
|
|
|
| 24 |
echo "PRELOAD_MODEL: $PRELOAD_MODEL"
|
| 25 |
echo "HOST/PORT: $HOST:$GRADIO_PORT"
|
| 26 |
echo "TP: $TENSOR_PARALLEL_SIZE | MAX_LEN: $MAX_MODEL_LEN"
|
| 27 |
+
echo "缓存目录: $HF_HOME"
|
| 28 |
echo "=========================================="
|
| 29 |
|
| 30 |
download_model() {
  # Ensure the model target dir and the HF cache dir exist before any tool runs.
  mkdir -p "$MODEL_DIR" "$HF_HOME"

  echo "[Download] 开始下载模型到: $MODEL_DIR"
  echo "[Download] 缓存目录: $HF_HOME"

  # Preference cascade: modern `hf` CLI, then legacy `huggingface-cli`,
  # finally the Python API as a last resort.
  if command -v hf &> /dev/null; then
    echo "[Download] 使用 hf download 命令"
    hf download "$MODEL_REPO" --local-dir "$MODEL_DIR" --cache-dir "$HF_HOME"
  elif command -v huggingface-cli &> /dev/null; then
    echo "[Download] 使用 huggingface-cli"
    huggingface-cli download "$MODEL_REPO" --local-dir "$MODEL_DIR" --local-dir-use-symlinks False
  else
    echo "[Download] 使用 python + huggingface_hub"
    # Pass values through the environment instead of interpolating shell
    # variables into the Python source: a repo id or path containing a quote
    # or backslash would otherwise break (or inject code into) the snippet.
    # This also makes the `import os` actually used.
    MODEL_REPO="$MODEL_REPO" MODEL_DIR="$MODEL_DIR" HF_HOME="$HF_HOME" python3 -c "
import os
from huggingface_hub import snapshot_download
repo = os.environ['MODEL_REPO']
print('开始下载: ' + repo)
snapshot_download(
    repo_id=repo,
    local_dir=os.environ['MODEL_DIR'],
    local_dir_use_symlinks=False,
    cache_dir=os.environ['HF_HOME']
)
print('下载完成')
"
  fi
}
|
| 61 |
|
| 62 |
# Decide where the model comes from: a preloaded local copy or the hub repo id.
if [[ "$PRELOAD_MODEL" == "1" ]]; then
  # Completeness probe: the directory plus both key files must be present.
  if [[ -d "$MODEL_DIR" && -f "$MODEL_DIR/config.json" && -f "$MODEL_DIR/model.safetensors.index.json" ]]; then
    echo "检测到本地模型: $MODEL_DIR"
    echo "模型文件检查通过"
  else
    echo "模型未就绪或文件不完整,开始下载..."
    download_model
  fi
  export MODEL_PATH="$MODEL_DIR"
else
  # No preload: let vLLM resolve the repo id itself (unless MODEL_PATH is set).
  export MODEL_PATH="${MODEL_PATH:-$MODEL_REPO}"
fi
|
| 76 |
|
| 77 |
+
# Post-download sanity report (diagnostic only — must never abort the launch).
if [[ "$PRELOAD_MODEL" == "1" ]]; then
  echo "=== 模型文件验证 ==="
  # Best-effort listing: with `set -euo pipefail` in effect, a missing dir or
  # a SIGPIPE from head's early exit would otherwise kill the whole script
  # while printing diagnostics, so failures here are deliberately ignored.
  ls -la "$MODEL_DIR" 2>/dev/null | head -10 || true
  # Report each key file once instead of duplicating the if/else per file.
  for required in config.json model.safetensors.index.json; do
    if [[ -f "$MODEL_DIR/$required" ]]; then
      echo "✓ $required 存在"
    else
      echo "✗ $required 缺失"
    fi
  done
  echo "==================="
fi
|