from __future__ import annotations

import os
from collections.abc import Iterator
from typing import Any, Optional

# Default Gemini model names; each can be overridden via the corresponding
# environment variable.
DEFAULT_GEMINI_TEXT_MODEL = os.getenv("GEMINI_TEXT_MODEL", "gemini-2.5-flash-lite-preview-09-2025")
DEFAULT_GEMINI_IMAGE_MODEL = os.getenv("GEMINI_IMAGE_MODEL", "gemini-2.5-flash-image-preview")
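# Example override (shell syntax; the model name and script name below are
# placeholders, not values this module requires):
#
#     GEMINI_TEXT_MODEL=gemini-2.0-flash python your_app.py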


def iter_response_parts(response: Any) -> Iterator[Any]:
| """Yield parts from a Gemini response, supporting multiple shapes.""" |
| parts = getattr(response, "parts", None) |
| if parts: |
| for part in parts: |
| yield part |
| return |
|
|
| candidates = getattr(response, "candidates", None) |
| if candidates: |
| for candidate in candidates: |
| content = getattr(candidate, "content", None) |
| if content is not None: |
| content_parts = getattr(content, "parts", None) |
| if content_parts: |
| for part in content_parts: |
| yield part |
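
# Usage sketch: one common way to recover the full text output is to join the
# ``text`` attribute of each part (``response`` here stands for the object
# returned by a Gemini ``generate_content`` call; client setup not shown):
#
#     text = "".join(
#         part.text
#         for part in iter_response_parts(response)
#         if getattr(part, "text", None)
#     )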


def extract_usage_tokens(response: Any) -> tuple[Optional[int], Optional[int]]:
    """Best-effort extraction of (input_tokens, output_tokens) usage counts.

    Returns (None, None) when the response carries no usage metadata; either
    element may be None on its own when that count is missing.
    """
    usage_metadata = getattr(response, "usage_metadata", None)
    if usage_metadata is None:
        return None, None

    # The metadata may expose counts under different attribute names, so try
    # the primary name first and fall back to the alternative spelling.
    input_tokens: Optional[int] = getattr(usage_metadata, "prompt_token_count", None)
    if not isinstance(input_tokens, int):
        input_tokens = getattr(usage_metadata, "input_token_count", None)

    output_tokens: Optional[int] = getattr(usage_metadata, "candidates_token_count", None)
    if not isinstance(output_tokens, int):
        output_tokens = getattr(usage_metadata, "output_token_count", None)

    return input_tokens, output_tokens
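
if __name__ == "__main__":
    # Smoke-test sketch: SimpleNamespace stand-ins that only mimic the
    # attributes read above (real objects come from the Gemini SDK; these
    # fakes are illustrative, not the SDK's actual types).
    from types import SimpleNamespace

    fake_response = SimpleNamespace(
        parts=None,
        candidates=[
            SimpleNamespace(
                content=SimpleNamespace(parts=[SimpleNamespace(text="hello")])
            )
        ],
        usage_metadata=SimpleNamespace(
            prompt_token_count=12, candidates_token_count=3
        ),
    )
    assert [p.text for p in iter_response_parts(fake_response)] == ["hello"]
    assert extract_usage_tokens(fake_response) == (12, 3)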