import statistics
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from translator import make_openai_client
# Fixed benchmark corpus: five everyday sentences per source language,
# keyed by ISO 639-1 code.  run_benchmark() sends each sentence as one
# "round", so the number of sentences per language sets the round count.
BENCH_SENTENCES = {
    "ja": [
        "こんにちは、今日はいい天気ですね。",
        "この映画はとても面白かったです。",
        "明日の会議は何時からですか?",
        "日本の桜は本当に美しいですね。",
        "新しいレストランに行ってみましょう。",
    ],
    "en": [
        "Hello, the weather is nice today.",
        "That movie was really interesting.",
        "What time does tomorrow's meeting start?",
        "The cherry blossoms in Japan are truly beautiful.",
        "Let's try going to the new restaurant.",
    ],
    "zh": [
        "你好,今天天气真不错。",
        "那部电影真的很有意思。",
        "明天的会议几点开始?",
        "日本的樱花真的很美丽。",
        "我们去试试那家新餐厅吧。",
    ],
    "ko": [
        "안녕하세요, 오늘 날씨가 좋네요.",
        "그 영화 정말 재미있었어요.",
        "내일 회의는 몇 시부터인가요?",
        "일본의 벚꽃은 정말 아름답네요.",
        "새로운 레스토랑에 가볼까요?",
    ],
    "fr": [
        "Bonjour, il fait beau aujourd'hui.",
        "Ce film était vraiment intéressant.",
        "À quelle heure commence la réunion demain?",
        "Les cerisiers en fleurs au Japon sont magnifiques.",
        "Allons essayer le nouveau restaurant.",
    ],
    "de": [
        "Hallo, heute ist schönes Wetter.",
        "Der Film war wirklich interessant.",
        "Um wie viel Uhr beginnt das Meeting morgen?",
        "Die Kirschblüten in Japan sind wunderschön.",
        "Lass uns das neue Restaurant ausprobieren.",
    ],
}
def run_benchmark(models, source_lang, target_lang, timeout_s, prompt, result_callback):
    """Benchmark translation latency of several OpenAI-compatible models.

    Spawns a background daemon thread so the caller (e.g. a GUI event loop)
    is never blocked.  All models are tested concurrently; each model gets
    one request per benchmark sentence.  Formatted progress lines, a ranking
    by average time-to-first-token, and finally the literal sentinel string
    ``"__DONE__"`` are delivered through ``result_callback``.

    Args:
        models: list of dicts with keys ``name``, ``api_base``, ``api_key``,
            ``model``, and optionally ``proxy`` and ``no_system_role``.
        source_lang: BENCH_SENTENCES key; unknown codes fall back to "en".
        target_lang: target language code (used for display only).
        timeout_s: per-request timeout in seconds.
        prompt: translation instruction sent with every sentence.
        result_callback: callable taking one formatted output string; invoked
            from worker threads, so it must be thread-safe.

    Returns:
        None.  Results are reported asynchronously via ``result_callback``.
    """
    sentences = BENCH_SENTENCES.get(source_lang, BENCH_SENTENCES["en"])
    rounds = len(sentences)
    result_callback(
        f"Testing {len(models)} model(s) x {rounds} rounds | "
        f"timeout={timeout_s}s | {source_lang} -> {target_lang}\n"
        f"{'=' * 60}\n"
    )

    def _test_model(m):
        # Benchmark one model across all sentences; returns a stats dict.
        # On any setup/request failure the dict carries "error" != None.
        name = m["name"]
        lines = [f"Model: {name}", f" {'─' * 50}"]
        try:
            client = make_openai_client(
                m["api_base"],
                m["api_key"],
                proxy=m.get("proxy", "none"),
                timeout=timeout_s,
            )
            ttfts = []
            totals = []
            for i, text in enumerate(sentences):
                # Some backends reject a "system" role; fold the prompt
                # into the user message for those.
                if m.get("no_system_role"):
                    messages = [{"role": "user", "content": f"{prompt}\n{text}"}]
                else:
                    messages = [
                        {"role": "system", "content": prompt},
                        {"role": "user", "content": text},
                    ]
                try:
                    # Preferred path: streaming, so we can measure TTFT.
                    t0 = time.perf_counter()
                    stream = client.chat.completions.create(
                        model=m["model"],
                        messages=messages,
                        max_tokens=256,
                        temperature=0.3,
                        stream=True,
                    )
                    ttft = None
                    chunks = []
                    for chunk in stream:
                        # Providers may emit chunks with no choices (e.g.
                        # usage-only chunks); skip them instead of crashing
                        # into the non-streaming fallback.
                        if not chunk.choices:
                            continue
                        if ttft is None:
                            ttft = (time.perf_counter() - t0) * 1000
                        delta = chunk.choices[0].delta
                        if delta.content:
                            chunks.append(delta.content)
                    total_ms = (time.perf_counter() - t0) * 1000
                    result_text = "".join(chunks).strip()
                    ttft = ttft or total_ms
                except Exception:
                    # Fallback: some endpoints do not support streaming at
                    # all; retry the round non-streaming (TTFT == total).
                    t0 = time.perf_counter()
                    resp = client.chat.completions.create(
                        model=m["model"],
                        messages=messages,
                        max_tokens=256,
                        temperature=0.3,
                        stream=False,
                    )
                    total_ms = (time.perf_counter() - t0) * 1000
                    ttft = total_ms
                    # content may legitimately be None (e.g. refusals).
                    result_text = (resp.choices[0].message.content or "").strip()
                ttfts.append(ttft)
                totals.append(total_ms)
                lines.append(
                    f" Round {i + 1}: {total_ms:7.0f}ms "
                    f"(TTFT {ttft:6.0f}ms) | {result_text[:60]}"
                )
            avg_total = statistics.mean(totals)
            std_total = statistics.stdev(totals) if len(totals) > 1 else 0
            avg_ttft = statistics.mean(ttfts)
            std_ttft = statistics.stdev(ttfts) if len(ttfts) > 1 else 0
            lines.append(
                f" Avg: {avg_total:.0f}ms \u00b1 {std_total:.0f}ms "
                f"(TTFT: {avg_ttft:.0f}ms \u00b1 {std_ttft:.0f}ms)"
            )
            result_callback("\n".join(lines))
            return {
                "name": name,
                "avg_ttft": avg_ttft,
                "std_ttft": std_ttft,
                "avg_total": avg_total,
                "std_total": std_total,
                "error": None,
            }
        except Exception as e:
            # Report the first line of the error, truncated, and mark the
            # model as failed so it is excluded from the ranking.
            err_msg = str(e).split("\n")[0][:120]
            lines.append(f" FAILED: {err_msg}")
            result_callback("\n".join(lines))
            return {
                "name": name,
                "avg_ttft": 0,
                "std_ttft": 0,
                "avg_total": 0,
                "std_total": 0,
                "error": err_msg,
            }

    def _run_all():
        # Fan out one worker per model, then rank successes by avg TTFT.
        results = []
        # max(1, ...) — ThreadPoolExecutor raises ValueError on max_workers=0,
        # which would kill this thread silently and "__DONE__" would never fire.
        with ThreadPoolExecutor(max_workers=max(1, len(models))) as pool:
            futures = {pool.submit(_test_model, m): m for m in models}
            for fut in as_completed(futures):
                results.append(fut.result())
        ok = [r for r in results if not r["error"]]
        ok.sort(key=lambda r: r["avg_ttft"])
        result_callback(f"\n{'=' * 60}")
        result_callback("Ranking by Avg TTFT:")
        for i, r in enumerate(ok):
            result_callback(
                f" #{i + 1} TTFT {r['avg_ttft']:6.0f}ms \u00b1 {r['std_ttft']:4.0f}ms "
                f"Total {r['avg_total']:6.0f}ms \u00b1 {r['std_total']:4.0f}ms "
                f"{r['name']}"
            )
        failed = [r for r in results if r["error"]]
        for r in failed:
            result_callback(f" FAIL {r['name']}: {r['error']}")
        # Sentinel the UI layer watches for to know the benchmark is over.
        result_callback("__DONE__")

    threading.Thread(target=_run_all, daemon=True).start()