"""Minimal API smoke test for fic-agent.

Checks:
1) LLM chat completion
2) Embedding API call

Exit code:
0 = all requested checks passed
1 = at least one requested check failed
"""
| |
|
| | from __future__ import annotations |
| |
|
| | import argparse |
| | import sys |
| |
|
| | from fic_agent.config import RuntimeConfig |
| |
|
| |
|
| | def _mask_len(value: str | None) -> str: |
| | if not value: |
| | return "0" |
| | return str(len(value)) |
| |
|
| |
|
| | def _test_llm(cfg: RuntimeConfig) -> bool: |
| | if not cfg.llm_api_key: |
| | print("[LLM] FAIL: missing llm_api_key") |
| | return False |
| | try: |
| | from openai import OpenAI |
| | except Exception as e: |
| | print(f"[LLM] FAIL: openai import error: {e}") |
| | return False |
| |
|
| | try: |
| | client = OpenAI(base_url=cfg.llm_base_url, api_key=cfg.llm_api_key) |
| | resp = client.chat.completions.create( |
| | model=cfg.llm_model, |
| | messages=[ |
| | {"role": "system", "content": "You are a concise assistant."}, |
| | {"role": "user", "content": "Reply with exactly: API_OK"}, |
| | ], |
| | temperature=0.0, |
| | max_tokens=20, |
| | ) |
| | text = (resp.choices[0].message.content or "").strip() |
| | usage = getattr(resp, "usage", None) |
| | total = getattr(usage, "total_tokens", None) if usage is not None else None |
| | print(f"[LLM] PASS: model={cfg.llm_model} total_tokens={total} reply={text!r}") |
| | return True |
| | except Exception as e: |
| | print(f"[LLM] FAIL: {type(e).__name__}: {e}") |
| | return False |
| |
|
| |
|
| | def _test_embedding(cfg: RuntimeConfig) -> bool: |
| | if not cfg.embedding_api_key: |
| | print("[EMBED] FAIL: missing embedding_api_key") |
| | return False |
| | try: |
| | from openai import OpenAI |
| | except Exception as e: |
| | print(f"[EMBED] FAIL: openai import error: {e}") |
| | return False |
| |
|
| | try: |
| | client = OpenAI(base_url=cfg.embedding_base_url, api_key=cfg.embedding_api_key) |
| | resp = client.embeddings.create( |
| | model=cfg.embedding_model, |
| | input=["api smoke test"], |
| | ) |
| | data = getattr(resp, "data", None) or [] |
| | if not data: |
| | print("[EMBED] FAIL: empty data") |
| | return False |
| | vec = getattr(data[0], "embedding", None) |
| | dim = len(vec) if isinstance(vec, list) else 0 |
| | usage = getattr(resp, "usage", None) |
| | total = getattr(usage, "total_tokens", None) if usage is not None else None |
| | print(f"[EMBED] PASS: model={cfg.embedding_model} dim={dim} total_tokens={total}") |
| | return True |
| | except Exception as e: |
| | print(f"[EMBED] FAIL: {type(e).__name__}: {e}") |
| | return False |
| |
|
| |
|
def main() -> int:
    """Run the requested API smoke checks and return a process exit code.

    Returns 0 when every non-skipped check passed, 1 otherwise.
    """
    parser = argparse.ArgumentParser(description="Minimal API smoke test for fic-agent")
    parser.add_argument("--skip-llm", action="store_true", help="Skip LLM chat test")
    parser.add_argument("--skip-embedding", action="store_true", help="Skip embedding test")
    args = parser.parse_args()

    cfg = RuntimeConfig()
    # Echo effective configuration; key lengths only, never key material.
    print(
        "[CFG] "
        f"llm_base_url={cfg.llm_base_url} llm_model={cfg.llm_model} llm_key_len={_mask_len(cfg.llm_api_key)}"
    )
    print(
        "[CFG] "
        f"embedding_base_url={cfg.embedding_base_url} embedding_model={cfg.embedding_model} "
        f"embedding_key_len={_mask_len(cfg.embedding_api_key)}"
    )

    # Each check runs unconditionally when not skipped, even if an
    # earlier one already failed, so all failures get reported.
    passed = True
    if not args.skip_llm:
        passed = _test_llm(cfg) and passed
    if not args.skip_embedding:
        passed = _test_embedding(cfg) and passed

    verdict = "PASS" if passed else "FAIL"
    print(f"API smoke test: {verdict}")
    return 0 if passed else 1
| |
|
| |
|
# Script entry point: propagate main()'s status (0 = pass, 1 = fail) to the shell.
if __name__ == "__main__":
    sys.exit(main())
| |
|
| |
|