| """Embedding client wrapper.""" |
|
|
| from __future__ import annotations |
|
|
| from typing import Iterable, List |
|
|
| import numpy as np |
|
|
| from ..config import RuntimeConfig |
| from ..utils.retry import is_transient_api_error, retry_call |
|
|
|
|
class EmbeddingClient:
    """Thin wrapper around an OpenAI-compatible embeddings endpoint.

    Handles batching, per-batch progress output, response validation, and
    retry of transient failures via the project's ``retry_call`` helper.
    """

    def __init__(self, cfg: RuntimeConfig):
        """Initialize the client from runtime configuration.

        Args:
            cfg: Runtime configuration; must provide ``embedding_api_key``,
                ``embedding_base_url``, ``embedding_model``,
                ``embedding_batch_size``, ``api_progress``, and the
                ``api_retry_*`` knobs read in :meth:`embed_texts`.

        Raises:
            ValueError: If ``cfg.embedding_api_key`` is empty/missing.
            ImportError: If the ``openai`` package is not installed.
        """
        if not cfg.embedding_api_key:
            raise ValueError("embedding_api_key is required")
        try:
            # Imported lazily so the dependency is only required when this
            # client is actually constructed.
            from openai import OpenAI
        except Exception as e:
            raise ImportError(
                "openai package is required for embedding API calls. Install dependencies first."
            ) from e
        self.cfg = cfg
        self.client = OpenAI(base_url=cfg.embedding_base_url, api_key=cfg.embedding_api_key)

    def embed_texts(self, texts: Iterable[str]) -> np.ndarray:
        """Embed ``texts`` and return a float32 matrix of shape (len(texts), dim).

        Args:
            texts: Texts to embed. Empty/``None`` entries are replaced with a
                single space so the API accepts them; newlines are replaced
                with spaces.

        Returns:
            A 2-D ``np.float32`` array with one row per input text. For an
            empty input, a ``(0, 0)`` array is returned.

        Raises:
            RuntimeError: If the API returns empty data, a mismatched item
                count, or zero-width vectors (after retries are exhausted).
        """
        items = [t.replace("\n", " ") if t else " " for t in texts]
        if not items:
            return np.zeros((0, 0), dtype=np.float32)

        all_vectors: list[np.ndarray] = []
        batch_size = max(1, int(self.cfg.embedding_batch_size))
        total = len(items)
        for i in range(0, total, batch_size):
            batch = items[i : i + batch_size]
            start = i + 1  # 1-based index for human-readable progress output
            end = min(i + batch_size, total)
            if self.cfg.api_progress:
                print(f"[embedding] requesting vectors {start}-{end}/{total}", flush=True)

            # Loop variables are bound as keyword defaults (ruff B023) so the
            # closures are self-contained even though retry_call invokes them
            # synchronously within this iteration.
            def _call(batch: list[str] = batch) -> np.ndarray:
                """Perform one API round-trip and validate the response shape."""
                response = self.client.embeddings.create(input=batch, model=self.cfg.embedding_model)
                data = getattr(response, "data", None)
                if not data:
                    raise RuntimeError(
                        "Embedding API returned empty data. Check API key, model, quota, and base_url."
                    )
                if len(data) != len(batch):
                    raise RuntimeError(
                        f"Embedding API returned {len(data)} items for batch size {len(batch)}."
                    )
                vectors = np.array([v.embedding for v in data], dtype=np.float32)
                if vectors.ndim != 2 or vectors.shape[1] == 0:
                    raise RuntimeError(
                        "Embedding API returned empty vectors. Verify model name and account access."
                    )
                return vectors

            def _on_retry(
                attempt: int,
                err: Exception,
                delay: float,
                start: int = start,
                end: int = end,
                batch: list[str] = batch,
            ) -> None:
                """Log one retry event (always printed, independent of api_progress)."""
                # NOTE(review): assumes retry_call passes a 0-based attempt,
                # hence the `attempt + 1` for display — TODO confirm against
                # utils.retry.
                print(
                    f"[embedding][retry] batch={start}-{end}/{total} attempt={attempt + 1}/{max(1, int(self.cfg.api_retry_attempts))} "
                    f"sleep={delay:.1f}s err={err}",
                    flush=True,
                )

            vectors = retry_call(
                _call,
                max_attempts=max(1, int(self.cfg.api_retry_attempts)),
                base_delay_sec=float(self.cfg.api_retry_base_delay_sec),
                max_delay_sec=float(self.cfg.api_retry_max_delay_sec),
                jitter_sec=float(self.cfg.api_retry_jitter_sec),
                on_retry=_on_retry,
                # Validation failures above raise RuntimeError, so they are
                # retried alongside transient transport errors.
                retry_predicate=lambda e: is_transient_api_error(e) or isinstance(e, RuntimeError),
            )
            all_vectors.append(vectors)

        return np.vstack(all_vectors)
|
|