"""Measure TTFT and TPOT by streaming from llama-server."""
|
|
| import argparse |
| import json |
| import subprocess |
| import sys |
| import time |
| import requests |
|
|
# Path to the llama-server binary (relative to the build directory).
SERVER_BIN = "./build/bin/llama-server"
# Local port the benchmark server is started on.
PORT = 8081
# Base prompt repeated 8x to produce a longer prefill for TTFT measurement.
PROMPT = "Explain the difference between machine learning and deep learning in detail." * 8
# Generation cap per request.
MAX_TOKENS = 128
# Default number of measurement runs averaged into the final JSON.
RUNS = 3
|
|
|
|
def wait_for_server(address, timeout=120):
    """Poll ``address``/health until the server reports ready.

    Args:
        address: Base URL of the llama-server instance (e.g. http://127.0.0.1:8081).
        timeout: Maximum wall-clock seconds to wait before giving up.

    Returns:
        True if the health endpoint returned HTTP 200 within the deadline,
        False otherwise.
    """
    # Use a wall-clock deadline rather than counting iterations: each failed
    # probe can itself take up to 2s (request timeout) plus the 1s sleep, so
    # range(timeout) iterations could wait ~3x longer than intended.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            if requests.get(f"{address}/health", timeout=2).status_code == 200:
                return True
        except requests.RequestException:
            # Server not accepting connections yet; keep polling.
            pass
        time.sleep(1)
    return False
|
|
|
|
def measure_once(address):
    """Run one streamed chat completion against the server and time it.

    Args:
        address: Base URL of the llama-server instance.

    Returns:
        Tuple ``(ttft, tpot, n_tokens, t_total)``:
        ttft     -- seconds from request start to the first content chunk
                    (falls back to t_total if no content ever arrived);
        tpot     -- mean seconds between subsequent chunks (0.0 if < 2 chunks);
        n_tokens -- number of non-empty content chunks received (approximates
                    the token count for typical one-token-per-chunk streaming);
        t_total  -- total wall time of the request.
    """
    payload = {
        "messages": [{"role": "user", "content": PROMPT}],
        "max_tokens": MAX_TOKENS,
        "stream": True,
        "temperature": 0.0,  # greedy decoding keeps runs comparable
    }
    t0 = time.perf_counter()
    ttft = None
    n_tokens = 0
    with requests.post(f"{address}/v1/chat/completions", json=payload,
                       stream=True, timeout=120) as resp:
        for raw in resp.iter_lines():
            if not raw:
                continue
            line = raw.decode("utf-8")
            # SSE frames look like "data: {...}"; ignore anything else.
            if not line.startswith("data:"):
                continue
            data = line[5:].strip()
            if data == "[DONE]":
                break
            try:
                chunk = json.loads(data)
                delta = chunk["choices"][0]["delta"].get("content", "")
            except (ValueError, KeyError, IndexError):
                continue  # tolerate malformed or role-only frames
            if delta:
                if ttft is None:
                    ttft = time.perf_counter() - t0
                n_tokens += 1
    t_total = time.perf_counter() - t0
    if ttft is None:
        # No content arrived at all; report the full duration as TTFT so
        # callers doing arithmetic on the result never see None.
        ttft = t_total
    # The first chunk is accounted for by TTFT; the remaining n-1 inter-chunk
    # gaps define TPOT.
    tpot = (t_total - ttft) / (n_tokens - 1) if n_tokens > 1 else 0.0
    return ttft, tpot, n_tokens, t_total
|
|
|
|
def main():
    """Start llama-server, run timed streaming requests, print a JSON summary.

    Emits a single JSON object with averaged ``ttft_ms``, ``tpot_ms`` and
    ``latency_ms`` to stdout; progress/diagnostics go to stderr. Exits with
    status 1 if the server never becomes healthy.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-m", "--model", required=True,
                        help="Path to the GGUF model file")
    parser.add_argument("--runs", type=int, default=RUNS,
                        help="Number of measurement runs to average")
    parser.add_argument("--device", default=None,
                        help="GGML device string, e.g. CUDA0 (default: all GPUs)")
    args = parser.parse_args()

    if args.runs < 1:
        parser.error("--runs must be >= 1")  # guard the averaging division below

    address = f"http://127.0.0.1:{PORT}"
    cmd = [SERVER_BIN, "-m", args.model, "-ngl", "99",
           "--port", str(PORT), "--log-disable"]
    if args.device:
        cmd += ["-dev", args.device]
    print(f"Starting server: {' '.join(cmd)}", file=sys.stderr)
    # Server output is discarded; pass --log-disable anyway to cut overhead.
    proc = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

    try:
        if not wait_for_server(address):
            print("ERROR: server did not start", file=sys.stderr)
            sys.exit(1)  # finally-block below reaps the process
        print("Server ready", file=sys.stderr)

        ttfts, tpots = [], []
        for i in range(args.runs):
            ttft, tpot, n_tokens, t_total = measure_once(address)
            ttfts.append(ttft * 1000)
            tpots.append(tpot * 1000)
            print(f" Run {i+1}: TTFT={ttft*1000:.1f}ms TPOT={tpot*1000:.1f}ms tokens={n_tokens}", file=sys.stderr)

        avg_ttft = sum(ttfts) / len(ttfts)
        avg_tpot = sum(tpots) / len(tpots)
        result = {"ttft_ms": round(avg_ttft, 1), "tpot_ms": round(avg_tpot, 1), "latency_ms": round(avg_ttft + avg_tpot, 1)}
        print(json.dumps(result))
    finally:
        # Ask the server to exit cleanly first; SIGKILL only as a last resort
        # so it can release the port and any GPU resources gracefully.
        proc.terminate()
        try:
            proc.wait(timeout=10)
        except subprocess.TimeoutExpired:
            proc.kill()
            proc.wait()
|
|
|
|
# Script entry point: run the benchmark only when executed directly.
if __name__ == "__main__":
    main()
|
|