moevis committed on
Commit
2a9a064
·
verified ·
1 Parent(s): 1892ef3

Update start_services.sh

Browse files
Files changed (1) hide show
  1. start_services.sh +18 -18
start_services.sh CHANGED
@@ -31,26 +31,26 @@ else
31
  echo "✓ Model already exists locally"
32
  fi
33
 
34
- # Step-Audio-R1 的 chat template
35
- CHAT_TEMPLATE='{%- macro render_content(content) -%}{%- if content is string -%}{{- content.replace("<audio_patch>\\n", "<audio_patch>") -}}{%- elif content is mapping -%}{{- content["'"'"'value'"'"'] if '"'"'value'"'"' in content else content["'"'"'text'"'"'] -}}{%- elif content is iterable -%}{%- for item in content -%}{%- if item.type == '"'"'text'"'"' -%}{{- item["'"'"'value'"'"'] if '"'"'value'"'"' in item else item["'"'"'text'"'"'] -}}{%- elif item.type == '"'"'audio'"'"' -%}<audio_patch>{%- endif -%}{%- endfor -%}{%- endif -%}{%- endmacro -%}{%- if tools -%}{{- '"'"'<|BOT|>system\\n'"'"' -}}{%- if messages[0]["'"'"'role'"'"'] == '"'"'system'"'"' -%}{{- render_content(messages[0]["'"'"'content'"'"']) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{{- '"'"'<|BOT|>tool_json_schemas\\n'"'"' + tools|tojson + '"'"'<|EOT|>'"'"' -}}{%- else -%}{%- if messages[0]["'"'"'role'"'"'] == '"'"'system'"'"' -%}{{- '"'"'<|BOT|>system\\n'"'"' + render_content(messages[0]["'"'"'content'"'"']) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- endif -%}{%- for message in messages -%}{%- if message["role"] == "user" -%}{{- '"'"'<|BOT|>human\\n'"'"' + render_content(message["content"]) + '"'"'<|EOT|>'"'"' -}}{%- elif message["role"] == "assistant" -%}{{- '"'"'<|BOT|>assistant\\n'"'"' + (render_content(message["content"]) if message["content"] else '"'"''"'"') -}}{%- set is_last_assistant = true -%}{%- for m in messages[loop.index:] -%}{%- if m["role"] == "assistant" -%}{%- set is_last_assistant = false -%}{%- endif -%}{%- endfor -%}{%- if not is_last_assistant -%}{{- '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- elif message["role"] == "function_output" -%}{%- else -%}{%- if not (loop.first and message["role"] == "system") -%}{{- '"'"'<|BOT|>'"'"' + message["role"] + '"'"'\\n'"'"' + render_content(message["content"]) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{- '"'"'<|BOT|>assistant\\n'"'"' -}}{%- endif -%}'
36
 
37
- echo "starting vllm server"
38
 
39
- # 后台启动 vLLM API
40
- python3 -m vllm.entrypoints.openai.api_server \
41
- --model "$MODEL_DIR" \
42
- --port "$API_PORT" \
43
- --host 0.0.0.0 \
44
- --max-model-len 65536 \
45
- --tensor-parallel-size 4 \
46
- --gpu-memory-utilization 0.85 \
47
- --trust-remote-code \
48
- --interleave-mm-strings \
49
- --chat-template "$CHAT_TEMPLATE" \
50
- &
51
 
52
- VLLM_PID=$!
53
- echo "vLLM started (PID: $VLLM_PID)"
54
 
55
 
56
  # 启动 Gradio (前台运行)
@@ -60,4 +60,4 @@ export MODEL_NAME="Step-Audio-R1"
60
  python3 app.py --host 0.0.0.0 --port "$GRADIO_PORT"
61
 
62
  # 清理
63
- trap 'kill $VLLM_PID' EXIT
 
31
  echo "✓ Model already exists locally"
32
  fi
33
 
34
+ # # Step-Audio-R1 的 chat template
35
+ # CHAT_TEMPLATE='{%- macro render_content(content) -%}{%- if content is string -%}{{- content.replace("<audio_patch>\\n", "<audio_patch>") -}}{%- elif content is mapping -%}{{- content["'"'"'value'"'"'] if '"'"'value'"'"' in content else content["'"'"'text'"'"'] -}}{%- elif content is iterable -%}{%- for item in content -%}{%- if item.type == '"'"'text'"'"' -%}{{- item["'"'"'value'"'"'] if '"'"'value'"'"' in item else item["'"'"'text'"'"'] -}}{%- elif item.type == '"'"'audio'"'"' -%}<audio_patch>{%- endif -%}{%- endfor -%}{%- endif -%}{%- endmacro -%}{%- if tools -%}{{- '"'"'<|BOT|>system\\n'"'"' -}}{%- if messages[0]["'"'"'role'"'"'] == '"'"'system'"'"' -%}{{- render_content(messages[0]["'"'"'content'"'"']) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{{- '"'"'<|BOT|>tool_json_schemas\\n'"'"' + tools|tojson + '"'"'<|EOT|>'"'"' -}}{%- else -%}{%- if messages[0]["'"'"'role'"'"'] == '"'"'system'"'"' -%}{{- '"'"'<|BOT|>system\\n'"'"' + render_content(messages[0]["'"'"'content'"'"']) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- endif -%}{%- for message in messages -%}{%- if message["role"] == "user" -%}{{- '"'"'<|BOT|>human\\n'"'"' + render_content(message["content"]) + '"'"'<|EOT|>'"'"' -}}{%- elif message["role"] == "assistant" -%}{{- '"'"'<|BOT|>assistant\\n'"'"' + (render_content(message["content"]) if message["content"] else '"'"''"'"') -}}{%- set is_last_assistant = true -%}{%- for m in messages[loop.index:] -%}{%- if m["role"] == "assistant" -%}{%- set is_last_assistant = false -%}{%- endif -%}{%- endfor -%}{%- if not is_last_assistant -%}{{- '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- elif message["role"] == "function_output" -%}{%- else -%}{%- if not (loop.first and message["role"] == "system") -%}{{- '"'"'<|BOT|>'"'"' + message["role"] + '"'"'\\n'"'"' + render_content(message["content"]) + '"'"'<|EOT|>'"'"' -}}{%- endif -%}{%- endif -%}{%- endfor -%}{%- if add_generation_prompt -%}{{- '"'"'<|BOT|>assistant\\n'"'"' -}}{%- endif -%}'
36
 
37
+ # echo "starting vllm server"
38
 
39
+ # # 后台启动 vLLM API
40
+ # python3 -m vllm.entrypoints.openai.api_server \
41
+ # --model "$MODEL_DIR" \
42
+ # --port "$API_PORT" \
43
+ # --host 0.0.0.0 \
44
+ # --max-model-len 65536 \
45
+ # --tensor-parallel-size 4 \
46
+ # --gpu-memory-utilization 0.85 \
47
+ # --trust-remote-code \
48
+ # --interleave-mm-strings \
49
+ # --chat-template "$CHAT_TEMPLATE" \
50
+ # &
51
 
52
+ # VLLM_PID=$!
53
+ # echo "vLLM started (PID: $VLLM_PID)"
54
 
55
 
56
  # 启动 Gradio (前台运行)
 
60
  python3 app.py --host 0.0.0.0 --port "$GRADIO_PORT"
61
 
62
  # 清理
63
+ # trap 'kill $VLLM_PID' EXIT