SimpleTool / main.py
Cialtion's picture
Upload main.py with huggingface_hub
6588cb9 verified
raw
history blame
1.41 kB
"""Parallel multi-head decoding demo for RT-Qwen with vLLM prefix caching.

Launches one request per output "head" tag (<content>, <function>,
<arg1>..<arg6>) appended to a shared prompt prefix. vLLM's prefix caching
prefills the shared prefix once, so all heads decode concurrently.
"""
from vllm import LLM, SamplingParams
from rt_templates import RTPrompts
from rt_tools import RTTools
import time

# 1. Initialize the model (RT-Qwen series).
MODEL_PATH = "./RT-Qwen2.5-1.5B-AWQ"
llm = LLM(model=MODEL_PATH, enable_prefix_caching=True, gpu_memory_utilization=0.6)

# Each head stops as soon as it emits its own closing tag (or <|null|>).
stop_tokens = ["<|null|>", "</content>", "</function>", "</arg1>", "</arg2>", "</arg3>", "</arg4>", "</arg5>", "</arg6>"]
sampling_params = SamplingParams(
    temperature=0.0,  # greedy/deterministic decoding
    max_tokens=16,
    stop=stop_tokens,
    include_stop_str_in_output=True,
)

# 2. Build the prompt (minimal query).
prompt_prefix = RTPrompts.SYSTEM_PROMPT.format(tools_json=RTTools.get_all())
user_query = RTPrompts.get_query("Add Bob with 123")
full_prompt = prompt_prefix + user_query

# 3. Simulate multi-head parallel decoding: one request per output slot
#    (free-text content, function name, and arguments 1-6 → 8 heads total).
# BUG FIX: the last head was "<arg62>" (typo), which has no matching
# "</arg62>" entry in stop_tokens, so that request could never stop on its
# closing tag. Corrected to "<arg6>" to pair with "</arg6>" above.
heads = ["<content>", "<function>", "<arg1>", "<arg2>", "<arg3>", "<arg4>", "<arg5>", "<arg6>"]
prompts = [full_prompt + head for head in heads]

print("\n--- Parallel Decoding for: 'Add Bob with 123' ---")
start_time = time.perf_counter()
# vLLM handles prefix caching automatically: only the first request runs a
# full prefill; the remaining heads reuse the cached prefix and decode
# concurrently.
outputs = llm.generate(prompts, sampling_params)
end_time = time.perf_counter()
print(f"Total Latency: {(end_time - start_time)*1000:.2f} ms\n")

# 4. Print each head's decoded text.
for i, output in enumerate(outputs):
    text = output.outputs[0].text
    print(f"Head {i} [{heads[i]:<10}]: {text}")