Cialtion committed on
Commit
6588cb9
·
verified ·
1 Parent(s): 1d527f8

Upload main.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. main.py +33 -0
main.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from vllm import LLM, SamplingParams
from rt_templates import RTPrompts
from rt_tools import RTTools
import time

# 1. Initialize the model (RT-Qwen series).
#    enable_prefix_caching lets the shared prompt prefix be prefilled once and
#    reused by every parallel request; gpu_memory_utilization=0.6 leaves GPU
#    headroom for other processes.
MODEL_PATH = "./RT-Qwen2.5-1.5B-AWQ"
llm = LLM(model=MODEL_PATH, enable_prefix_caching=True, gpu_memory_utilization=0.6)

# Generation stops at any closing tag, so each head emits exactly one field.
stop_tokens = [
    "<|null|>", "</content>", "</function>",
    "</arg1>", "</arg2>", "</arg3>", "</arg4>", "</arg5>", "</arg6>",
]
sampling_params = SamplingParams(
    temperature=0.0,  # greedy decoding -> deterministic output
    max_tokens=16,
    stop=stop_tokens,
    include_stop_str_in_output=True,
)

# 2. Build the prompt (minimal query).
prompt_prefix = RTPrompts.SYSTEM_PROMPT.format(tools_json=RTTools.get_all())
user_query = RTPrompts.get_query("Add Bob with 123")
full_prompt = prompt_prefix + user_query

# 3. Simulate multi-head parallel decoding: each head opens one tag and the
#    model completes the corresponding field (content, function name, args).
#    BUGFIX: the last head was "<arg62>" (typo). It must be "<arg6>" to pair
#    with the "</arg6>" stop token above; otherwise that head never terminates
#    on its closing tag and only stops when max_tokens is exhausted.
heads = ["<content>", "<function>", "<arg1>", "<arg2>", "<arg3>", "<arg4>", "<arg5>", "<arg6>"]
prompts = [full_prompt + head for head in heads]

print("\n--- Parallel Decoding for: 'Add Bob with 123' ---")
start_time = time.perf_counter()

# vLLM handles prefix caching automatically: only the first request pays the
# full prefill; the remaining heads decode concurrently off the cached prefix.
outputs = llm.generate(prompts, sampling_params)

end_time = time.perf_counter()
print(f"Total Latency: {(end_time - start_time) * 1000:.2f} ms\n")

# 4. Print one decoded field per head.
for i, output in enumerate(outputs):
    text = output.outputs[0].text
    print(f"Head {i} [{heads[i]:<10}]: {text}")