"""Tests for provider-aware `/model` validation in hermes_cli.models."""
from unittest.mock import patch
from hermes_cli.models import (
copilot_model_api_mode,
fetch_github_model_catalog,
curated_models_for_provider,
fetch_api_models,
github_model_reasoning_efforts,
normalize_copilot_model_id,
normalize_provider,
parse_model_input,
probe_api_models,
provider_label,
provider_model_ids,
validate_requested_model,
)
# -- helpers -----------------------------------------------------------------
# Stand-in for a live "/models" listing; the default mocked API response
# used by _validate below.
FAKE_API_MODELS = [
    "anthropic/claude-opus-4.6",
    "anthropic/claude-sonnet-4.5",
    "openai/gpt-5.4-pro",
    "openai/gpt-5.4",
    "google/gemini-3-pro-preview",
]
def _validate(model, provider="openrouter", api_models=FAKE_API_MODELS, **kw):
    """Run validate_requested_model with both API-probing helpers mocked out.

    ``api_models`` becomes the mocked catalog; pass ``None`` to simulate an
    unreachable API. Extra keyword args are forwarded verbatim.
    """
    resolved = kw.get("base_url", "") or "http://localhost:11434/v1"
    fake_probe = {
        "models": api_models,
        "probed_url": "http://localhost:11434/v1/models",
        "resolved_base_url": resolved,
        "suggested_base_url": None,
        "used_fallback": False,
    }
    with patch("hermes_cli.models.fetch_api_models", return_value=api_models):
        with patch("hermes_cli.models.probe_api_models", return_value=fake_probe):
            return validate_requested_model(model, provider, **kw)
# -- parse_model_input -------------------------------------------------------
class TestParseModelInput:
    """parse_model_input splits an optional 'provider:' prefix off a model string."""

    def test_plain_model_keeps_current_provider(self):
        assert parse_model_input("anthropic/claude-sonnet-4.5", "openrouter") == (
            "openrouter",
            "anthropic/claude-sonnet-4.5",
        )

    def test_provider_colon_model_switches_provider(self):
        assert parse_model_input("openrouter:anthropic/claude-sonnet-4.5", "nous") == (
            "openrouter",
            "anthropic/claude-sonnet-4.5",
        )

    def test_provider_alias_resolved(self):
        assert parse_model_input("glm:glm-5", "openrouter") == ("zai", "glm-5")

    def test_no_slash_no_colon_keeps_provider(self):
        assert parse_model_input("gpt-5.4", "openrouter") == ("openrouter", "gpt-5.4")

    def test_nous_provider_switch(self):
        assert parse_model_input("nous:hermes-3", "openrouter") == ("nous", "hermes-3")

    def test_empty_model_after_colon_keeps_current(self):
        assert parse_model_input("openrouter:", "nous") == ("nous", "openrouter:")

    def test_colon_at_start_keeps_current(self):
        assert parse_model_input(":something", "openrouter") == ("openrouter", ":something")

    def test_unknown_prefix_colon_not_treated_as_provider(self):
        """A colon only delimits a provider when the left side is a known provider."""
        assert parse_model_input("anthropic/claude-3.5-sonnet:beta", "openrouter") == (
            "openrouter",
            "anthropic/claude-3.5-sonnet:beta",
        )

    def test_http_url_not_treated_as_provider(self):
        assert parse_model_input("http://localhost:8080/model", "openrouter") == (
            "openrouter",
            "http://localhost:8080/model",
        )

    def test_custom_colon_model_single(self):
        """custom:model-name -> anonymous custom provider."""
        assert parse_model_input("custom:qwen-2.5", "openrouter") == ("custom", "qwen-2.5")

    def test_custom_triple_syntax(self):
        """custom:name:model -> named custom provider."""
        assert parse_model_input("custom:local-server:qwen-2.5", "openrouter") == (
            "custom:local-server",
            "qwen-2.5",
        )

    def test_custom_triple_spaces(self):
        """Whitespace around the triple-syntax segments is trimmed."""
        assert parse_model_input("custom: my-server : my-model ", "openrouter") == (
            "custom:my-server",
            "my-model",
        )

    def test_custom_triple_empty_model_falls_back(self):
        """custom:name: with no model -> no triple match; treated as bare custom."""
        # Empty model after the second colon, so the triple form does not match
        # and the remainder is kept as the model string.
        assert parse_model_input("custom:name:", "openrouter") == ("custom", "name:")
# -- curated_models_for_provider ---------------------------------------------
class TestCuratedModelsForProvider:
    """curated_models_for_provider returns the static per-provider model list."""

    def test_openrouter_returns_curated_list(self):
        curated = curated_models_for_provider("openrouter")
        assert len(curated) > 0
        assert any("claude" in entry[0] for entry in curated)

    def test_zai_returns_glm_models(self):
        curated = curated_models_for_provider("zai")
        assert any("glm" in entry[0] for entry in curated)

    def test_unknown_provider_returns_empty(self):
        assert curated_models_for_provider("totally-unknown") == []
# -- normalize_provider ------------------------------------------------------
class TestNormalizeProvider:
    """normalize_provider canonicalizes provider names and aliases."""

    def test_defaults_to_openrouter(self):
        for raw in (None, ""):
            assert normalize_provider(raw) == "openrouter"

    def test_known_aliases(self):
        aliases = {
            "glm": "zai",
            "kimi": "kimi-coding",
            "moonshot": "kimi-coding",
            "github-copilot": "copilot",
        }
        for alias, canonical in aliases.items():
            assert normalize_provider(alias) == canonical

    def test_case_insensitive(self):
        assert normalize_provider("OpenRouter") == "openrouter"
class TestProviderLabel:
    """provider_label maps provider IDs to display names, passing unknowns through."""

    def test_known_labels_and_auto(self):
        labels = {
            "anthropic": "Anthropic",
            "kimi": "Kimi / Moonshot",
            "copilot": "GitHub Copilot",
            "copilot-acp": "GitHub Copilot ACP",
            "auto": "Auto",
        }
        for provider_id, label in labels.items():
            assert provider_label(provider_id) == label

    def test_unknown_provider_preserves_original_name(self):
        assert provider_label("my-custom-provider") == "my-custom-provider"
# -- provider_model_ids ------------------------------------------------------
class TestProviderModelIds:
    """provider_model_ids: curated IDs, or the live GitHub catalog for Copilot."""

    def test_openrouter_returns_curated_list(self):
        ids = provider_model_ids("openrouter")
        assert len(ids) > 0
        # Curated OpenRouter IDs are always vendor-prefixed ("vendor/model").
        assert all("/" in mid for mid in ids)

    def test_unknown_provider_returns_empty(self):
        assert provider_model_ids("some-unknown-provider") == []

    def test_zai_returns_glm_models(self):
        assert "glm-5" in provider_model_ids("zai")

    def test_copilot_prefers_live_catalog(self):
        # With a GitHub token available, the live catalog replaces curated defaults.
        with patch("hermes_cli.auth.resolve_api_key_provider_credentials", return_value={"api_key": "gh-token"}), \
                patch("hermes_cli.models._fetch_github_models", return_value=["gpt-5.4", "claude-sonnet-4.6"]):
            assert provider_model_ids("copilot") == ["gpt-5.4", "claude-sonnet-4.6"]

    def test_copilot_acp_reuses_copilot_catalog(self):
        # copilot-acp shares the Copilot catalog rather than keeping its own list.
        with patch("hermes_cli.auth.resolve_api_key_provider_credentials", return_value={"api_key": "gh-token"}), \
                patch("hermes_cli.models._fetch_github_models", return_value=["gpt-5.4", "claude-sonnet-4.6"]):
            assert provider_model_ids("copilot-acp") == ["gpt-5.4", "claude-sonnet-4.6"]

    def test_copilot_acp_falls_back_to_copilot_defaults(self):
        # No token and no live catalog -> fall back to curated Copilot defaults,
        # which must not leak the provider name itself as a model ID.
        with patch("hermes_cli.auth.resolve_api_key_provider_credentials", side_effect=Exception("no token")), \
                patch("hermes_cli.models._fetch_github_models", return_value=None):
            ids = provider_model_ids("copilot-acp")
            assert "gpt-5.4" in ids
            assert "copilot-acp" not in ids
# -- fetch_api_models --------------------------------------------------------
class TestFetchApiModels:
    """fetch_api_models / probe_api_models against mocked urllib endpoints."""

    def test_returns_none_when_no_base_url(self):
        assert fetch_api_models("key", None) is None

    def test_returns_none_on_network_error(self):
        # Any urlopen failure is reported as "no models", never raised.
        with patch("hermes_cli.models.urllib.request.urlopen", side_effect=Exception("timeout")):
            assert fetch_api_models("key", "https://example.com/v1") is None

    def test_probe_api_models_tries_v1_fallback(self):
        # Minimal context-manager stub standing in for an HTTP response object.
        class _Resp:
            def __enter__(self):
                return self

            def __exit__(self, exc_type, exc, tb):
                return False

            def read(self):
                return b'{"data": [{"id": "local-model"}]}'

        calls = []

        def _fake_urlopen(req, timeout=5.0):
            # First probe (<base>/models) fails; only the /v1/models retry succeeds.
            calls.append(req.full_url)
            if req.full_url.endswith("/v1/models"):
                return _Resp()
            raise Exception("404")

        with patch("hermes_cli.models.urllib.request.urlopen", side_effect=_fake_urlopen):
            probe = probe_api_models("key", "http://localhost:8000")
            # Probed both URLs in order, then reported the /v1 base as resolved.
            assert calls == ["http://localhost:8000/models", "http://localhost:8000/v1/models"]
            assert probe["models"] == ["local-model"]
            assert probe["resolved_base_url"] == "http://localhost:8000/v1"
            assert probe["used_fallback"] is True

    def test_probe_api_models_uses_copilot_catalog(self):
        class _Resp:
            def __enter__(self):
                return self

            def __exit__(self, exc_type, exc, tb):
                return False

            def read(self):
                return b'{"data": [{"id": "gpt-5.4", "model_picker_enabled": true, "supported_endpoints": ["/responses"], "capabilities": {"type": "chat", "supports": {"reasoning_effort": ["low", "medium", "high"]}}}, {"id": "claude-sonnet-4.6", "model_picker_enabled": true, "supported_endpoints": ["/chat/completions"], "capabilities": {"type": "chat", "supports": {"reasoning_effort": ["low", "medium", "high"]}}}, {"id": "text-embedding-3-small", "model_picker_enabled": true, "capabilities": {"type": "embedding"}}]}'

        with patch("hermes_cli.models.urllib.request.urlopen", return_value=_Resp()) as mock_urlopen:
            probe = probe_api_models("gh-token", "https://api.githubcopilot.com")
            # Copilot is probed at <base>/models directly (no /v1 fallback), and
            # the embedding-type entry is filtered out of the chat-model list.
            assert mock_urlopen.call_args[0][0].full_url == "https://api.githubcopilot.com/models"
            assert probe["models"] == ["gpt-5.4", "claude-sonnet-4.6"]
            assert probe["resolved_base_url"] == "https://api.githubcopilot.com"
            assert probe["used_fallback"] is False

    def test_fetch_github_model_catalog_filters_non_chat_models(self):
        class _Resp:
            def __enter__(self):
                return self

            def __exit__(self, exc_type, exc, tb):
                return False

            def read(self):
                return b'{"data": [{"id": "gpt-5.4", "model_picker_enabled": true, "supported_endpoints": ["/responses"], "capabilities": {"type": "chat", "supports": {"reasoning_effort": ["low", "medium", "high"]}}}, {"id": "text-embedding-3-small", "model_picker_enabled": true, "capabilities": {"type": "embedding"}}]}'

        with patch("hermes_cli.models.urllib.request.urlopen", return_value=_Resp()):
            catalog = fetch_github_model_catalog("gh-token")
            assert catalog is not None
            # Only entries whose capabilities.type is "chat" survive the filter.
            assert [item["id"] for item in catalog] == ["gpt-5.4"]
class TestGithubReasoningEfforts:
    """github_model_reasoning_efforts reads supported efforts from the catalog."""

    def test_gpt5_supports_minimal_to_high(self):
        entry = {
            "id": "gpt-5.4",
            "capabilities": {
                "type": "chat",
                "supports": {"reasoning_effort": ["low", "medium", "high"]},
            },
            "supported_endpoints": ["/responses"],
        }
        efforts = github_model_reasoning_efforts("gpt-5.4", catalog=[entry])
        assert efforts == ["low", "medium", "high"]

    def test_legacy_catalog_reasoning_still_supported(self):
        # Older catalog entries expressed capabilities as a flat list of flags.
        legacy = [{"id": "openai/o3", "capabilities": ["reasoning"]}]
        efforts = github_model_reasoning_efforts("openai/o3", catalog=legacy)
        assert efforts == ["low", "medium", "high"]

    def test_non_reasoning_model_returns_empty(self):
        plain = [{"id": "gpt-4.1", "capabilities": {"type": "chat", "supports": {}}}]
        assert github_model_reasoning_efforts("gpt-4.1", catalog=plain) == []
class TestCopilotNormalization:
    """Copilot model-ID normalization and chat/responses API-mode routing."""

    def test_normalize_old_github_models_slug(self):
        catalog = [{"id": "gpt-4.1"}, {"id": "gpt-5.4"}]
        normalized = normalize_copilot_model_id("openai/gpt-4.1-mini", catalog=catalog)
        assert normalized == "gpt-4.1"

    def test_copilot_api_mode_gpt5_uses_responses(self):
        """GPT-5+ models should use the Responses API (matching opencode)."""
        for model_id in (
            "gpt-5.4",
            "gpt-5.4-mini",
            "gpt-5.3-codex",
            "gpt-5.2-codex",
            "gpt-5.2",
        ):
            assert copilot_model_api_mode(model_id) == "codex_responses"

    def test_copilot_api_mode_gpt5_mini_uses_chat(self):
        """gpt-5-mini is the exception -> uses Chat Completions."""
        assert copilot_model_api_mode("gpt-5-mini") == "chat_completions"

    def test_copilot_api_mode_non_gpt5_uses_chat(self):
        """Everything outside the GPT-5 family uses Chat Completions."""
        for model_id in (
            "gpt-4.1",
            "gpt-4o",
            "gpt-4o-mini",
            "claude-sonnet-4.6",
            "claude-opus-4.6",
            "gemini-2.5-pro",
        ):
            assert copilot_model_api_mode(model_id) == "chat_completions"

    def test_copilot_api_mode_with_catalog_both_endpoints(self):
        """When the catalog lists both endpoints, the model-ID pattern wins."""
        catalog = [{"id": "gpt-5.4", "supported_endpoints": ["/chat/completions", "/responses"]}]
        # gpt-5.4 routes to responses even though /chat/completions is listed too.
        assert copilot_model_api_mode("gpt-5.4", catalog=catalog) == "codex_responses"

    def test_copilot_api_mode_with_catalog_only_responses(self):
        catalog = [{
            "id": "gpt-5.4",
            "supported_endpoints": ["/responses"],
            "capabilities": {"type": "chat"},
        }]
        assert copilot_model_api_mode("gpt-5.4", catalog=catalog) == "codex_responses"
# -- validate β format checks -----------------------------------------------
class TestValidateFormatChecks:
    """Basic format validation that runs before any provider/API lookup."""

    def test_empty_model_rejected(self):
        result = _validate("")
        assert result["accepted"] is False
        assert "empty" in result["message"]

    def test_whitespace_only_rejected(self):
        result = _validate(" ")
        assert result["accepted"] is False

    def test_model_with_spaces_rejected(self):
        result = _validate("anthropic/ claude-opus")
        assert result["accepted"] is False

    def test_no_slash_model_still_probes_api(self):
        # A bare model name (no vendor/ prefix) is still checked against the API.
        result = _validate("gpt-5.4", api_models=["gpt-5.4", "gpt-5.4-pro"])
        assert result["accepted"] is True
        assert result["persist"] is True

    def test_no_slash_model_warns_if_not_in_api(self):
        # Renamed from "...rejected_if_not_in_api": the validation policy
        # accepts and persists unknown models, attaching a "not found" warning
        # instead of rejecting them (see TestValidateApiNotFound).
        result = _validate("gpt-5.4", api_models=["openai/gpt-5.4"])
        assert result["accepted"] is True
        assert "not found" in result["message"]
# -- validate β API found ----------------------------------------------------
class TestValidateApiFound:
    """Models present in the probed API listing are accepted cleanly."""

    def test_model_found_in_api(self):
        outcome = _validate("anthropic/claude-opus-4.6")
        for key in ("accepted", "persist", "recognized"):
            assert outcome[key] is True

    def test_model_found_for_custom_endpoint(self):
        outcome = _validate(
            "my-model",
            provider="openrouter",
            api_models=["my-model"],
            base_url="http://localhost:11434/v1",
        )
        for key in ("accepted", "persist", "recognized"):
            assert outcome[key] is True
# -- validate β API not found ------------------------------------------------
class TestValidateApiNotFound:
    """Models missing from the API listing are accepted with a warning."""

    def test_model_not_in_api_accepted_with_warning(self):
        outcome = _validate("anthropic/claude-nonexistent")
        assert outcome["accepted"] is True
        assert outcome["persist"] is True
        assert "not found" in outcome["message"]

    def test_warning_includes_suggestions(self):
        # A near-miss model ID should surface similar catalog entries.
        outcome = _validate("anthropic/claude-opus-4.5")
        assert outcome["accepted"] is True
        assert "Similar models" in outcome["message"]
# -- validate β API unreachable β accept and persist everything ----------------
class TestValidateApiFallback:
    """When the model API is unreachable, accept and persist everything with a warning."""

    def test_any_model_accepted_when_api_down(self):
        result = _validate("anthropic/claude-opus-4.6", api_models=None)
        assert result["accepted"] is True
        assert result["persist"] is True

    def test_unknown_model_also_accepted_when_api_down(self):
        """No hardcoded catalog gatekeeping -> accept, persist, and warn."""
        result = _validate("anthropic/claude-next-gen", api_models=None)
        assert result["accepted"] is True
        assert result["persist"] is True
        assert "could not reach" in result["message"].lower()

    def test_zai_model_accepted_when_api_down(self):
        result = _validate("glm-5", provider="zai", api_models=None)
        assert result["accepted"] is True
        assert result["persist"] is True

    def test_unknown_provider_accepted_when_api_down(self):
        result = _validate("some-model", provider="totally-unknown", api_models=None)
        assert result["accepted"] is True
        assert result["persist"] is True

    def test_custom_endpoint_warns_with_probed_url_and_v1_hint(self):
        # Probe fails at the bare base URL but a /v1 variant is suggested; the
        # warning should mention both the probed URL and the suggested /v1 base.
        with patch(
            "hermes_cli.models.probe_api_models",
            return_value={
                "models": None,
                "probed_url": "http://localhost:8000/v1/models",
                "resolved_base_url": "http://localhost:8000",
                "suggested_base_url": "http://localhost:8000/v1",
                "used_fallback": False,
            },
        ):
            result = validate_requested_model(
                "qwen3",
                "custom",
                api_key="local-key",
                base_url="http://localhost:8000",
            )
            assert result["accepted"] is True
            assert result["persist"] is True
            assert "http://localhost:8000/v1/models" in result["message"]
            assert "http://localhost:8000/v1" in result["message"]