"""Helpers for collecting token usage from LLM API responses."""
from __future__ import annotations
from typing import Any, Dict, Optional
def new_token_usage() -> Dict[str, Any]:
    """Create a fresh, zeroed accumulator for token-usage bookkeeping.

    The returned dict carries overall counters plus empty per-stage and
    per-model sub-accumulators; each call returns independent dicts.
    """
    accumulator: Dict[str, Any] = dict.fromkeys(
        ("calls", "prompt_tokens", "completion_tokens", "total_tokens"), 0
    )
    accumulator["stages"] = {}
    accumulator["models"] = {}
    return accumulator
def _to_int(value: object, default: int = 0) -> int:
try:
return int(float(value))
except Exception:
return default
def _usage_get(usage_obj: object, key: str) -> Optional[int]:
    """Read *key* from a usage payload that is either a mapping or an object.

    Returns the value coerced to int (0 on coercion failure), or ``None``
    when the key / attribute is absent entirely.
    """
    if isinstance(usage_obj, dict):
        if key in usage_obj:
            return _to_int(usage_obj.get(key), default=0)
        return None
    try:
        raw = getattr(usage_obj, key)
    except AttributeError:
        return None
    return _to_int(raw, default=0)
def _extract_usage(resp: object) -> Dict[str, int]:
    """Pull token counts out of an LLM response (dict or object).

    Understands both OpenAI-style key names (``prompt_tokens`` /
    ``completion_tokens``) and Anthropic-style ones (``input_tokens`` /
    ``output_tokens``).  A missing total is derived as prompt + completion,
    and every count is clamped to be non-negative.
    """
    if isinstance(resp, dict):
        usage_obj = resp.get("usage")
    else:
        usage_obj = getattr(resp, "usage", None)
    if usage_obj is None:
        return {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}

    def first_present(*keys: str) -> Optional[int]:
        # First key that actually exists on the usage payload, else None.
        for candidate in keys:
            value = _usage_get(usage_obj, candidate)
            if value is not None:
                return value
        return None

    prompt_count = max(0, first_present("prompt_tokens", "input_tokens") or 0)
    completion_count = max(0, first_present("completion_tokens", "output_tokens") or 0)
    reported_total = first_present("total_tokens")
    total_count = max(
        0,
        (prompt_count + completion_count) if reported_total is None else reported_total,
    )
    return {
        "prompt_tokens": prompt_count,
        "completion_tokens": completion_count,
        "total_tokens": total_count,
    }
def _add_delta(bucket: Dict[str, Any], delta: Dict[str, int]) -> None:
    """Accumulate one call's token counts into *bucket* in place."""
    bucket["calls"] = _to_int(bucket.get("calls", 0)) + 1
    for key in ("prompt_tokens", "completion_tokens", "total_tokens"):
        bucket[key] = _to_int(bucket.get(key, 0)) + delta[key]


def record_token_usage(
    usage: Dict[str, Any],
    *,
    response: object,
    stage: Optional[str] = None,
    model: Optional[str] = None,
) -> Dict[str, Any]:
    """Record one LLM *response*'s token usage into the *usage* accumulator.

    Mutates *usage* in place and returns it.  The top-level counters are
    always bumped; when *stage* / *model* are given, the same counts are
    additionally accumulated under ``usage["stages"][stage]`` and
    ``usage["models"][model]``.
    """
    delta = _extract_usage(response)
    _add_delta(usage, delta)
    # Same accumulation for the optional per-stage and per-model buckets.
    for group_key, label in (("stages", stage), ("models", model)):
        if not label:
            continue
        bucket = usage.setdefault(group_key, {}).setdefault(
            label,
            {"calls": 0, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
        )
        _add_delta(bucket, delta)
    return usage
def _merge_bucket(dst: Dict[str, Any], src: Dict[str, Any]) -> None:
    """Add *src*'s call/token counters onto *dst* in place."""
    for key in ("calls", "prompt_tokens", "completion_tokens", "total_tokens"):
        dst[key] = _to_int(dst.get(key, 0)) + _to_int(src.get(key, 0))


def merge_token_usage(base: Dict[str, Any], extra: Dict[str, Any]) -> Dict[str, Any]:
    """Merge the usage accumulator *extra* into *base* (mutating *base*).

    Top-level counters and every per-stage / per-model sub-bucket are
    summed.  Non-dict sub-bucket entries in *extra* are skipped; an empty
    or falsy *extra* leaves *base* untouched.
    """
    if not extra:
        return base
    _merge_bucket(base, extra)
    # Per-stage and per-model buckets merge with identical logic.
    for group_key in ("stages", "models"):
        dst_group = base.setdefault(group_key, {})
        for name, stats in (extra.get(group_key) or {}).items():
            if not isinstance(stats, dict):
                continue
            dst = dst_group.setdefault(
                name,
                {"calls": 0, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
            )
            _merge_bucket(dst, stats)
    return base