# Intern-S1-Pro Deployment Guide

The Intern-S1-Pro release is a 1T-parameter model stored in FP8 format. Deployment requires at least two 8-GPU H200 nodes, using either of the following parallelism configurations:

- Tensor Parallelism (TP)
- Data Parallelism (DP) + Expert Parallelism (EP)

> NOTE: The deployment examples in this guide are provided for reference only and may not represent the latest or most optimized configurations. Inference frameworks are under active development; always consult the official documentation from each framework’s maintainers to ensure peak performance and compatibility.

## LMDeploy

Requires `lmdeploy>=0.12.0`.
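
If a compatible release is not already installed, a plain PyPI install is usually sufficient (a minimal sketch; see the LMDeploy documentation for CUDA-specific wheels or source builds):

```bash
pip install "lmdeploy>=0.12.0"
```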
14
+ - Tensor Parallelism
15
+
16
+ ```bash
17
+ # start ray on node 0 and node 1
18
+
19
+ # node 0
20
+ lmdeploy serve api_server internlm/Intern-S1-Pro --backend pytorch --tp 16
21
+ ```
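
The `# start ray on node 0 and node 1` step above is left as a placeholder. A minimal sketch of bringing up the Ray cluster is shown below; the port is an arbitrary example and `${node0_ip}` is the head node's address, so adjust both for your environment and check the Ray documentation for your installed version:

```bash
# node 0: start the Ray head process (the port is an example value)
ray start --head --port=6379

# node 1: join the cluster by pointing at the head node
ray start --address=${node0_ip}:6379

# confirm that both nodes (16 GPUs in total) are visible before launching the server
ray status
```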

- Data Parallelism + Expert Parallelism

```bash
# node 0, proxy server
lmdeploy serve proxy --server-name ${proxy_server_ip} --server-port ${proxy_server_port} --routing-strategy 'min_expected_latency' --serving-strategy Hybrid

# node 0
export LMDEPLOY_DP_MASTER_ADDR=${node0_ip}
export LMDEPLOY_DP_MASTER_PORT=29555
lmdeploy serve api_server \
    internlm/Intern-S1-Pro \
    --backend pytorch \
    --tp 1 \
    --dp 16 \
    --ep 16 \
    --proxy-url http://${proxy_server_ip}:${proxy_server_port} \
    --nnodes 2 \
    --node-rank 0 \
    --reasoning-parser intern-s1 \
    --tool-call-parser qwen3

# node 1
export LMDEPLOY_DP_MASTER_ADDR=${node0_ip}
export LMDEPLOY_DP_MASTER_PORT=29555
lmdeploy serve api_server \
    internlm/Intern-S1-Pro \
    --backend pytorch \
    --tp 1 \
    --dp 16 \
    --ep 16 \
    --proxy-url http://${proxy_server_ip}:${proxy_server_port} \
    --nnodes 2 \
    --node-rank 1 \
    --reasoning-parser intern-s1 \
    --tool-call-parser qwen3
```
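
Once both `api_server` instances have registered with the proxy, clients send requests to the proxy address. The smoke test below is a sketch that assumes the proxy exposes the standard OpenAI-compatible routes and that the model is served under the name `internlm/Intern-S1-Pro`; use whatever `/v1/models` actually reports for your deployment:

```bash
# list the models registered with the proxy (quick health check)
curl http://${proxy_server_ip}:${proxy_server_port}/v1/models

# send a test chat request; the model name below is an assumption, see /v1/models
curl http://${proxy_server_ip}:${proxy_server_port}/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
          "model": "internlm/Intern-S1-Pro",
          "messages": [{"role": "user", "content": "Hello"}],
          "max_tokens": 64
        }'
```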

## vLLM

- Tensor Parallelism + Expert Parallelism

```bash
# start ray on node 0 and node 1

# node 0
export VLLM_ENGINE_READY_TIMEOUT_S=10000
vllm serve internlm/Intern-S1-Pro \
    --tensor-parallel-size 16 \
    --enable-expert-parallel \
    --distributed-executor-backend ray \
    --max-model-len 65536 \
    --trust-remote-code \
    --reasoning-parser deepseek_r1 \
    --enable-auto-tool-choice \
    --tool-call-parser hermes
```
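
Because the server is started with `--enable-auto-tool-choice` and `--tool-call-parser hermes`, it accepts OpenAI-style tool definitions. The request below is a hypothetical sketch: the `get_weather` tool is invented for illustration, and the port assumes vLLM's default of 8000 (adjust if you pass `--port`):

```bash
# hypothetical tool-calling request; tool, port, and model name are placeholders
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
          "model": "internlm/Intern-S1-Pro",
          "messages": [{"role": "user", "content": "What is the weather in Shanghai?"}],
          "tools": [{
            "type": "function",
            "function": {
              "name": "get_weather",
              "description": "Look up the current weather for a city",
              "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"]
              }
            }
          }],
          "tool_choice": "auto"
        }'
```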

- Data Parallelism + Expert Parallelism

```bash
# node 0
export VLLM_ENGINE_READY_TIMEOUT_S=10000
vllm serve internlm/Intern-S1-Pro \
    --all2all-backend deepep_low_latency \
    --tensor-parallel-size 1 \
    --enable-expert-parallel \
    --data-parallel-size 16 \
    --data-parallel-size-local 8 \
    --data-parallel-address ${node0_ip} \
    --data-parallel-rpc-port 13345 \
    --gpu-memory-utilization 0.8 \
    --mm-processor-cache-gb 0 \
    --media-io-kwargs '{"video": {"num_frames": 768, "fps": 2}}' \
    --max-model-len 65536 \
    --trust-remote-code \
    --api-server-count 8 \
    --reasoning-parser deepseek_r1 \
    --enable-auto-tool-choice \
    --tool-call-parser hermes

# node 1
export VLLM_ENGINE_READY_TIMEOUT_S=10000
vllm serve internlm/Intern-S1-Pro \
    --all2all-backend deepep_low_latency \
    --tensor-parallel-size 1 \
    --enable-expert-parallel \
    --data-parallel-size 16 \
    --data-parallel-size-local 8 \
    --data-parallel-start-rank 8 \
    --data-parallel-address ${node0_ip} \
    --data-parallel-rpc-port 13345 \
    --gpu-memory-utilization 0.8 \
    --mm-processor-cache-gb 0 \
    --media-io-kwargs '{"video": {"num_frames": 768, "fps": 2}}' \
    --max-model-len 65536 \
    --trust-remote-code \
    --headless \
    --reasoning-parser deepseek_r1 \
    --enable-auto-tool-choice \
    --tool-call-parser hermes
```

> NOTE: To prevent out-of-memory (OOM) errors, we limit the context length using `--max-model-len 65536`. For datasets requiring longer responses, you may increase this value as needed. Additionally, video inference can consume substantial memory in vLLM API server processes; we therefore recommend setting `--media-io-kwargs '{"video": {"num_frames": 768, "fps": 2}}'` to constrain preprocessing memory usage during video benchmarking.
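
For reference, a video request against the OpenAI-compatible endpoint might look like the sketch below. The `video_url` content type, the example URL, the port, and the served model name are all assumptions; verify the exact multimodal request schema against the vLLM documentation for your version:

```bash
# hypothetical video chat request (URL, port, and model name are placeholders)
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
          "model": "internlm/Intern-S1-Pro",
          "messages": [{
            "role": "user",
            "content": [
              {"type": "video_url", "video_url": {"url": "https://example.com/sample.mp4"}},
              {"type": "text", "text": "Describe what happens in this video."}
            ]
          }],
          "max_tokens": 256
        }'
```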

## SGLang

- Tensor Parallelism + Expert Parallelism

```bash
export DIST_ADDR=${master_node_ip}:${master_node_port}

# node 0
python3 -m sglang.launch_server \
    --model-path internlm/Intern-S1-Pro \
    --tp 16 \
    --ep 16 \
    --mem-fraction-static 0.85 \
    --trust-remote-code \
    --dist-init-addr ${DIST_ADDR} \
    --nnodes 2 \
    --attention-backend fa3 \
    --mm-attention-backend fa3 \
    --keep-mm-feature-on-device \
    --node-rank 0 \
    --reasoning-parser qwen3 \
    --tool-call-parser qwen

# node 1
python3 -m sglang.launch_server \
    --model-path internlm/Intern-S1-Pro \
    --tp 16 \
    --ep 16 \
    --mem-fraction-static 0.85 \
    --trust-remote-code \
    --dist-init-addr ${DIST_ADDR} \
    --nnodes 2 \
    --attention-backend fa3 \
    --mm-attention-backend fa3 \
    --keep-mm-feature-on-device \
    --node-rank 1 \
    --reasoning-parser qwen3 \
    --tool-call-parser qwen
```
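
Once both ranks are up, node 0 serves the HTTP API. The check below is a sketch that assumes SGLang's default port 30000 (the commands above do not pass `--port`) and uses the model path as the request's model name; adjust both to match your launch settings:

```bash
# liveness check on the serving node (port 30000 is SGLang's default, assumed here)
curl http://localhost:30000/health

# OpenAI-compatible test request; the model name is a placeholder
curl http://localhost:30000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
          "model": "internlm/Intern-S1-Pro",
          "messages": [{"role": "user", "content": "Hello"}],
          "max_tokens": 64
        }'
```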