"""Parallel constrained-decoding demo.

Builds one shared prompt prefix (system prompt + user query), then fans it
out into several generation requests, each seeded with a different opening
tag ("head") such as <content> or <function>. vLLM's prefix caching lets
all heads reuse the KV cache of the common prefix, so the per-head cost is
only the short continuation after the tag.
"""

import time

from vllm import LLM, SamplingParams

from rt_templates import RTPrompts
from rt_tools import RTTools

MODEL_PATH = "./RT-Qwen2.5-1.5B-AWQ"

# enable_prefix_caching is the point of this demo: all heads share the same
# prompt prefix, so its KV cache is computed once and reused.
llm = LLM(model=MODEL_PATH, enable_prefix_caching=True, gpu_memory_utilization=0.6)

# Each head's generation stops as soon as its closing tag (or <|null|>) is
# emitted; include_stop_str_in_output keeps the tag visible for parsing.
stop_tokens = [
    "<|null|>",
    "</content>",
    "</function>",
    "</arg1>",
    "</arg2>",
    "</arg3>",
    "</arg4>",
    "</arg5>",
    "</arg6>",
]
sampling_params = SamplingParams(
    temperature=0.0,  # greedy: deterministic output per head
    max_tokens=16,
    stop=stop_tokens,
    include_stop_str_in_output=True,
)

# Shared prefix: system prompt (with the tool schema inlined) + user query.
prompt_prefix = RTPrompts.SYSTEM_PROMPT.format(tools_json=RTTools.get_all())
user_query = RTPrompts.get_query("Add Bob with 123")
full_prompt = prompt_prefix + user_query

# One opening tag per head. BUG FIX: the last head was "<arg62>", a typo —
# it must be "<arg6>" to match the "</arg6>" stop token above.
heads = ["<content>", "<function>", "<arg1>", "<arg2>", "<arg3>", "<arg4>", "<arg5>", "<arg6>"]
prompts = [full_prompt + head for head in heads]

print("\n--- Parallel Decoding for: 'Add Bob with 123' ---")
start_time = time.perf_counter()

# One batched generate() call; vLLM schedules all heads together and the
# shared prefix is served from cache after the first computation.
outputs = llm.generate(prompts, sampling_params)

end_time = time.perf_counter()
print(f"Total Latency: {(end_time - start_time)*1000:.2f} ms\n")

for i, output in enumerate(outputs):
    text = output.outputs[0].text
    print(f"Head {i} [{heads[i]:<10}]: {text}")
|