| """Helpers for collecting token usage from LLM API responses.""" |
|
|
| from __future__ import annotations |
|
|
| from typing import Any, Dict, Optional |
|
|
|
|
def new_token_usage() -> Dict[str, Any]:
    """Create a fresh, zeroed token-usage accumulator.

    The returned dict carries overall counters plus per-``stage`` and
    per-``model`` sub-dicts that other helpers in this module fill in.
    A new dict (with new nested dicts) is built on every call so
    accumulators are never shared between callers.
    """
    counters: Dict[str, Any] = {
        "calls": 0,
        "prompt_tokens": 0,
        "completion_tokens": 0,
        "total_tokens": 0,
    }
    counters["stages"] = {}
    counters["models"] = {}
    return counters
|
|
|
|
| def _to_int(value: object, default: int = 0) -> int: |
| try: |
| return int(float(value)) |
| except Exception: |
| return default |
|
|
|
|
def _usage_get(usage_obj: object, key: str) -> Optional[int]:
    """Read one counter named *key* from a usage payload.

    Supports both mapping-style (``usage["prompt_tokens"]``) and
    attribute-style (``usage.prompt_tokens``) payloads.  Returns the
    value coerced to int when the key/attribute exists, else ``None``
    so callers can distinguish "absent" from "present but zero".
    """
    if isinstance(usage_obj, dict):
        if key in usage_obj:
            return _to_int(usage_obj.get(key), default=0)
        return None
    try:
        raw = getattr(usage_obj, key)
    except AttributeError:
        return None
    return _to_int(raw, default=0)
|
|
|
|
def _extract_usage(resp: object) -> Dict[str, int]:
    """Normalize the token counts carried by an LLM API response.

    The ``usage`` payload is looked up on dict or attribute-style
    responses.  ``prompt_tokens``/``completion_tokens`` are read first,
    falling back to the ``input_tokens``/``output_tokens`` field names.
    Missing counts default to 0; a missing total is reconstructed as
    prompt + completion; negative values are clamped to 0.
    """
    payload = (
        resp.get("usage") if isinstance(resp, dict) else getattr(resp, "usage", None)
    )
    if payload is None:
        return {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}

    def first_of(*names: str) -> Optional[int]:
        # Value of the first field name present on the payload, else None.
        # A present-but-zero field wins over a later alias.
        for name in names:
            found = _usage_get(payload, name)
            if found is not None:
                return found
        return None

    prompt = max(0, first_of("prompt_tokens", "input_tokens") or 0)
    completion = max(0, first_of("completion_tokens", "output_tokens") or 0)
    total = first_of("total_tokens")
    return {
        "prompt_tokens": prompt,
        "completion_tokens": completion,
        "total_tokens": max(0, total if total is not None else prompt + completion),
    }
|
|
|
|
def record_token_usage(
    usage: Dict[str, Any],
    *,
    response: object,
    stage: Optional[str] = None,
    model: Optional[str] = None,
) -> Dict[str, Any]:
    """Accumulate one API call's token counts into *usage* in place.

    Args:
        usage: An accumulator dict (see ``new_token_usage``); mutated.
        response: The raw API response; its usage payload is read via
            ``_extract_usage`` (missing counts contribute 0).
        stage: Optional label; when truthy, the counts are also tallied
            under ``usage["stages"][stage]``.
        model: Optional label; when truthy, the counts are also tallied
            under ``usage["models"][model]``.

    Returns:
        The same *usage* dict, for call chaining.
    """
    delta = _extract_usage(response)

    def _bump(bucket: Dict[str, Any]) -> None:
        # One call + the extracted counts; _to_int tolerates buckets
        # whose existing values were deserialized as strings/floats.
        bucket["calls"] = _to_int(bucket.get("calls", 0)) + 1
        for key in ("prompt_tokens", "completion_tokens", "total_tokens"):
            bucket[key] = _to_int(bucket.get(key, 0)) + delta[key]

    _bump(usage)

    # Mirror the same delta into the optional per-stage / per-model buckets.
    for label, group_key in ((stage, "stages"), (model, "models")):
        if not label:
            continue
        group = usage.setdefault(group_key, {})
        _bump(
            group.setdefault(
                label,
                {"calls": 0, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
            )
        )

    return usage
|
|
|
|
def merge_token_usage(base: Dict[str, Any], extra: Dict[str, Any]) -> Dict[str, Any]:
    """Fold the counters of *extra* into *base* in place.

    Top-level counters are summed, and the per-stage / per-model
    sub-buckets of *extra* are merged into the matching buckets of
    *base* (created as needed).  Non-dict bucket entries in *extra* are
    skipped.  A falsy *extra* leaves *base* untouched.

    Returns:
        The same *base* dict, for call chaining.
    """
    if not extra:
        return base

    counter_keys = ("calls", "prompt_tokens", "completion_tokens", "total_tokens")

    def _add(dst: Dict[str, Any], src: Dict[str, Any]) -> None:
        # Key-wise sum; _to_int tolerates counters that arrive as
        # strings/floats (e.g. after JSON round-trips).
        for key in counter_keys:
            dst[key] = _to_int(dst.get(key, 0)) + _to_int(src.get(key, 0))

    _add(base, extra)

    # Merge the optional per-stage and per-model breakdowns the same way.
    for group_key in ("stages", "models"):
        dst_group = base.setdefault(group_key, {})
        for name, stats in (extra.get(group_key) or {}).items():
            if not isinstance(stats, dict):
                continue
            _add(
                dst_group.setdefault(
                    name,
                    {"calls": 0, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
                ),
                stats,
            )

    return base
|
|
|
|