Spaces:
Build error
Build error
Claude committed on
refactor: Sprint 3b — remove dead code, non-functional provider, unused fields
Browse files
- Remove non-functional VertexAPIKeyProvider (always returned is_configured=False)
- Remove VERTEX_API_KEY enum value and all references
- Remove ModelConfig.available_models field (always passed as [])
- Remove _MISTRAL_VISION_MODELS backward-compat alias
- Remove vertex_api_key from config, client_factory, model_registry
- Clean up related tests
https://claude.ai/code/session_012NCh8yLxMXkRmBYQgHCTik
- backend/app/config.py +0 -1
- backend/app/schemas/model_config.py +0 -3
- backend/app/services/ai/client_factory.py +1 -15
- backend/app/services/ai/model_registry.py +0 -4
- backend/app/services/ai/provider_mistral.py +0 -4
- backend/app/services/ai/provider_vertex_key.py +0 -61
- backend/app/services/job_runner.py +0 -1
- backend/tests/test_ai_analyzer.py +0 -19
- backend/tests/test_ai_providers.py +7 -44
- backend/tests/test_api_providers.py +1 -4
- backend/tests/test_provider_mistral.py +0 -6
backend/app/config.py
CHANGED
|
@@ -49,7 +49,6 @@ class Settings(BaseSettings):
|
|
| 49 |
# providers sont disponibles selon les clés présentes. Pas de AI_PROVIDER
|
| 50 |
# global : le provider est choisi par corpus depuis l'interface.
|
| 51 |
google_ai_studio_api_key: str | None = None
|
| 52 |
-
vertex_api_key: str | None = None
|
| 53 |
vertex_service_account_json: str | None = None
|
| 54 |
mistral_api_key: str | None = None
|
| 55 |
|
|
|
|
| 49 |
# providers sont disponibles selon les clés présentes. Pas de AI_PROVIDER
|
| 50 |
# global : le provider est choisi par corpus depuis l'interface.
|
| 51 |
google_ai_studio_api_key: str | None = None
|
|
|
|
| 52 |
vertex_service_account_json: str | None = None
|
| 53 |
mistral_api_key: str | None = None
|
| 54 |
|
backend/app/schemas/model_config.py
CHANGED
|
@@ -4,7 +4,6 @@ Schémas Pydantic pour la configuration et la découverte des modèles IA.
|
|
| 4 |
# 1. stdlib
|
| 5 |
from datetime import datetime
|
| 6 |
from enum import Enum
|
| 7 |
-
from typing import Any
|
| 8 |
|
| 9 |
# 2. third-party
|
| 10 |
from pydantic import BaseModel, ConfigDict, Field
|
|
@@ -12,7 +11,6 @@ from pydantic import BaseModel, ConfigDict, Field
|
|
| 12 |
|
| 13 |
class ProviderType(str, Enum):
|
| 14 |
GOOGLE_AI_STUDIO = "google_ai_studio"
|
| 15 |
-
VERTEX_API_KEY = "vertex_api_key"
|
| 16 |
VERTEX_SERVICE_ACCOUNT = "vertex_service_account"
|
| 17 |
MISTRAL = "mistral"
|
| 18 |
|
|
@@ -39,4 +37,3 @@ class ModelConfig(BaseModel):
|
|
| 39 |
provider: ProviderType
|
| 40 |
supports_vision: bool
|
| 41 |
last_fetched_at: datetime
|
| 42 |
-
available_models: list[dict[str, Any]] # cache sérialisé des ModelInfo
|
|
|
|
| 4 |
# 1. stdlib
|
| 5 |
from datetime import datetime
|
| 6 |
from enum import Enum
|
|
|
|
| 7 |
|
| 8 |
# 2. third-party
|
| 9 |
from pydantic import BaseModel, ConfigDict, Field
|
|
|
|
| 11 |
|
| 12 |
class ProviderType(str, Enum):
|
| 13 |
GOOGLE_AI_STUDIO = "google_ai_studio"
|
|
|
|
| 14 |
VERTEX_SERVICE_ACCOUNT = "vertex_service_account"
|
| 15 |
MISTRAL = "mistral"
|
| 16 |
|
|
|
|
| 37 |
provider: ProviderType
|
| 38 |
supports_vision: bool
|
| 39 |
last_fetched_at: datetime
|
|
|
backend/app/services/ai/client_factory.py
CHANGED
|
@@ -28,11 +28,10 @@ def build_client(provider_type: ProviderType) -> genai.Client:
|
|
| 28 |
|
| 29 |
Lit les variables d'environnement nécessaires selon le provider :
|
| 30 |
- GOOGLE_AI_STUDIO → GOOGLE_AI_STUDIO_API_KEY
|
| 31 |
-
- VERTEX_API_KEY → VERTEX_API_KEY
|
| 32 |
- VERTEX_SA → VERTEX_SERVICE_ACCOUNT_JSON
|
| 33 |
|
| 34 |
Args:
|
| 35 |
-
provider_type: type de provider (GOOGLE_AI_STUDIO,
|
| 36 |
VERTEX_SERVICE_ACCOUNT).
|
| 37 |
|
| 38 |
Returns:
|
|
@@ -51,19 +50,6 @@ def build_client(provider_type: ProviderType) -> genai.Client:
|
|
| 51 |
logger.debug("Client Google AI Studio créé")
|
| 52 |
return genai.Client(api_key=api_key)
|
| 53 |
|
| 54 |
-
if provider_type == ProviderType.VERTEX_API_KEY:
|
| 55 |
-
api_key = os.environ.get("VERTEX_API_KEY")
|
| 56 |
-
if not api_key:
|
| 57 |
-
raise RuntimeError(
|
| 58 |
-
"Variable d'environnement manquante : VERTEX_API_KEY"
|
| 59 |
-
)
|
| 60 |
-
logger.debug("Client Vertex AI Express (clé API) créé")
|
| 61 |
-
# vertexai=True route vers aiplatform.googleapis.com (Vertex AI).
|
| 62 |
-
# Sans vertexai=True, le SDK route vers generativelanguage.googleapis.com
|
| 63 |
-
# (Gemini Developer API) qui rejette les clés Vertex Express avec 403.
|
| 64 |
-
# project/location sont omis : mutually exclusive avec api_key dans le SDK.
|
| 65 |
-
return genai.Client(vertexai=True, api_key=api_key)
|
| 66 |
-
|
| 67 |
if provider_type == ProviderType.VERTEX_SERVICE_ACCOUNT:
|
| 68 |
sa_json_str = os.environ.get("VERTEX_SERVICE_ACCOUNT_JSON")
|
| 69 |
if not sa_json_str:
|
|
|
|
| 28 |
|
| 29 |
Lit les variables d'environnement nécessaires selon le provider :
|
| 30 |
- GOOGLE_AI_STUDIO → GOOGLE_AI_STUDIO_API_KEY
|
|
|
|
| 31 |
- VERTEX_SA → VERTEX_SERVICE_ACCOUNT_JSON
|
| 32 |
|
| 33 |
Args:
|
| 34 |
+
provider_type: type de provider (GOOGLE_AI_STUDIO,
|
| 35 |
VERTEX_SERVICE_ACCOUNT).
|
| 36 |
|
| 37 |
Returns:
|
|
|
|
| 50 |
logger.debug("Client Google AI Studio créé")
|
| 51 |
return genai.Client(api_key=api_key)
|
| 52 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
if provider_type == ProviderType.VERTEX_SERVICE_ACCOUNT:
|
| 54 |
sa_json_str = os.environ.get("VERTEX_SERVICE_ACCOUNT_JSON")
|
| 55 |
if not sa_json_str:
|
backend/app/services/ai/model_registry.py
CHANGED
|
@@ -17,7 +17,6 @@ logger = logging.getLogger(__name__)
|
|
| 17 |
# Noms lisibles par provider (pour l'interface)
|
| 18 |
_PROVIDER_DISPLAY_NAMES: dict[ProviderType, str] = {
|
| 19 |
ProviderType.GOOGLE_AI_STUDIO: "Google AI Studio",
|
| 20 |
-
ProviderType.VERTEX_API_KEY: "Vertex AI (clé API)",
|
| 21 |
ProviderType.VERTEX_SERVICE_ACCOUNT: "Vertex AI (compte de service)",
|
| 22 |
ProviderType.MISTRAL: "Mistral AI",
|
| 23 |
}
|
|
@@ -34,12 +33,10 @@ def _build_providers() -> list[AIProvider]:
|
|
| 34 |
|
| 35 |
from app.services.ai.provider_google_ai import GoogleAIProvider
|
| 36 |
from app.services.ai.provider_mistral import MistralProvider
|
| 37 |
-
from app.services.ai.provider_vertex_key import VertexAPIKeyProvider
|
| 38 |
from app.services.ai.provider_vertex_sa import VertexServiceAccountProvider
|
| 39 |
|
| 40 |
_providers_cache = [
|
| 41 |
GoogleAIProvider(),
|
| 42 |
-
VertexAPIKeyProvider(),
|
| 43 |
VertexServiceAccountProvider(),
|
| 44 |
MistralProvider(),
|
| 45 |
]
|
|
@@ -171,5 +168,4 @@ def build_model_config(corpus_id: str, selected_model_id: str) -> ModelConfig:
|
|
| 171 |
provider=selected.provider,
|
| 172 |
supports_vision=selected.supports_vision,
|
| 173 |
last_fetched_at=datetime.now(tz=timezone.utc),
|
| 174 |
-
available_models=[m.model_dump() for m in models],
|
| 175 |
)
|
|
|
|
| 17 |
# Noms lisibles par provider (pour l'interface)
|
| 18 |
_PROVIDER_DISPLAY_NAMES: dict[ProviderType, str] = {
|
| 19 |
ProviderType.GOOGLE_AI_STUDIO: "Google AI Studio",
|
|
|
|
| 20 |
ProviderType.VERTEX_SERVICE_ACCOUNT: "Vertex AI (compte de service)",
|
| 21 |
ProviderType.MISTRAL: "Mistral AI",
|
| 22 |
}
|
|
|
|
| 33 |
|
| 34 |
from app.services.ai.provider_google_ai import GoogleAIProvider
|
| 35 |
from app.services.ai.provider_mistral import MistralProvider
|
|
|
|
| 36 |
from app.services.ai.provider_vertex_sa import VertexServiceAccountProvider
|
| 37 |
|
| 38 |
_providers_cache = [
|
| 39 |
GoogleAIProvider(),
|
|
|
|
| 40 |
VertexServiceAccountProvider(),
|
| 41 |
MistralProvider(),
|
| 42 |
]
|
|
|
|
| 168 |
provider=selected.provider,
|
| 169 |
supports_vision=selected.supports_vision,
|
| 170 |
last_fetched_at=datetime.now(tz=timezone.utc),
|
|
|
|
| 171 |
)
|
backend/app/services/ai/provider_mistral.py
CHANGED
|
@@ -63,10 +63,6 @@ _MISTRAL_FALLBACK_MODELS: list[ModelInfo] = [
|
|
| 63 |
),
|
| 64 |
]
|
| 65 |
|
| 66 |
-
# Alias backward-compat (utilisé dans certains tests)
|
| 67 |
-
_MISTRAL_VISION_MODELS = _MISTRAL_FALLBACK_MODELS
|
| 68 |
-
|
| 69 |
-
|
| 70 |
def _is_ocr_model(model_id: str) -> bool:
|
| 71 |
"""Retourne True si le modèle utilise l'endpoint OCR dédié (pas chat completions)."""
|
| 72 |
return "ocr" in model_id.lower()
|
|
|
|
| 63 |
),
|
| 64 |
]
|
| 65 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
def _is_ocr_model(model_id: str) -> bool:
|
| 67 |
"""Retourne True si le modèle utilise l'endpoint OCR dédié (pas chat completions)."""
|
| 68 |
return "ocr" in model_id.lower()
|
backend/app/services/ai/provider_vertex_key.py
DELETED
|
@@ -1,61 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Provider Vertex AI — authentification via clé API Express Vertex (VERTEX_API_KEY).
|
| 3 |
-
|
| 4 |
-
ÉTAT : NON FONCTIONNEL — aiplatform.googleapis.com n'accepte pas les clés API.
|
| 5 |
-
|
| 6 |
-
Diagnostic :
|
| 7 |
-
- Sans vertexai=True → generativelanguage.googleapis.com → 403 (clé Vertex rejetée)
|
| 8 |
-
- Avec vertexai=True → aiplatform.googleapis.com → 401 UNAUTHENTICATED
|
| 9 |
-
"API keys are not supported by this API. Expected OAuth2 access token."
|
| 10 |
-
|
| 11 |
-
Cause : Vertex AI (aiplatform) n'accepte que OAuth2 / service account / ADC.
|
| 12 |
-
Les clés API (format AQ.Ab...) ne sont pas prises en charge par cette API.
|
| 13 |
-
|
| 14 |
-
Alternatives fonctionnelles :
|
| 15 |
-
1. Google AI Studio : GOOGLE_AI_STUDIO_API_KEY (clé AIza...) → fonctionne
|
| 16 |
-
2. Vertex AI Service Account : VERTEX_SERVICE_ACCOUNT_JSON → fonctionne
|
| 17 |
-
|
| 18 |
-
Ce provider est conservé pour la cohérence de l'interface mais is_configured()
|
| 19 |
-
retourne toujours False afin d'éviter des appels réseau voués à l'échec.
|
| 20 |
-
"""
|
| 21 |
-
# 1. stdlib
|
| 22 |
-
import logging
|
| 23 |
-
import os
|
| 24 |
-
|
| 25 |
-
# 3. local
|
| 26 |
-
from app.schemas.model_config import ModelInfo, ProviderType
|
| 27 |
-
from app.services.ai.base import AIProvider
|
| 28 |
-
|
| 29 |
-
logger = logging.getLogger(__name__)
|
| 30 |
-
|
| 31 |
-
_ENV_KEY = "VERTEX_API_KEY"
|
| 32 |
-
|
| 33 |
-
_UNAVAILABLE_MSG = (
|
| 34 |
-
"VERTEX_API_KEY définie mais aiplatform.googleapis.com n'accepte pas les "
|
| 35 |
-
"clés API (OAuth2 requis). Utilisez GOOGLE_AI_STUDIO_API_KEY pour le "
|
| 36 |
-
"Gemini Developer API, ou VERTEX_SERVICE_ACCOUNT_JSON pour Vertex AI."
|
| 37 |
-
)
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
class VertexAPIKeyProvider(AIProvider):
|
| 41 |
-
"""Provider Vertex AI via clé API Express — NON FONCTIONNEL.
|
| 42 |
-
|
| 43 |
-
aiplatform.googleapis.com exige OAuth2/service account ; les clés API
|
| 44 |
-
sont systématiquement rejetées avec 401 UNAUTHENTICATED.
|
| 45 |
-
Ce provider reste présent mais is_configured() retourne toujours False.
|
| 46 |
-
"""
|
| 47 |
-
|
| 48 |
-
@property
|
| 49 |
-
def provider_type(self) -> ProviderType:
|
| 50 |
-
return ProviderType.VERTEX_API_KEY
|
| 51 |
-
|
| 52 |
-
def is_configured(self) -> bool:
|
| 53 |
-
if os.environ.get(_ENV_KEY):
|
| 54 |
-
logger.warning(_UNAVAILABLE_MSG)
|
| 55 |
-
return False
|
| 56 |
-
|
| 57 |
-
def list_models(self) -> list[ModelInfo]:
|
| 58 |
-
raise RuntimeError(_UNAVAILABLE_MSG)
|
| 59 |
-
|
| 60 |
-
def generate_content(self, image_bytes: bytes, prompt: str, model_id: str, supports_vision: bool = True) -> str:
|
| 61 |
-
raise RuntimeError(_UNAVAILABLE_MSG)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
backend/app/services/job_runner.py
CHANGED
|
@@ -129,7 +129,6 @@ async def _run_job_impl(job_id: str, db: AsyncSession) -> None:
|
|
| 129 |
provider=ProviderType(model_db.provider_type),
|
| 130 |
supports_vision=model_db.supports_vision,
|
| 131 |
last_fetched_at=model_db.updated_at,
|
| 132 |
-
available_models=[],
|
| 133 |
)
|
| 134 |
|
| 135 |
# ── 5. Obtenir l'image pour l'IA ─────────────────────────────────────
|
|
|
|
| 129 |
provider=ProviderType(model_db.provider_type),
|
| 130 |
supports_vision=model_db.supports_vision,
|
| 131 |
last_fetched_at=model_db.updated_at,
|
|
|
|
| 132 |
)
|
| 133 |
|
| 134 |
# ── 5. Obtenir l'image pour l'IA ─────────────────────────────────────
|
backend/tests/test_ai_analyzer.py
CHANGED
|
@@ -71,7 +71,6 @@ def _make_model_config(provider: ProviderType = ProviderType.GOOGLE_AI_STUDIO) -
|
|
| 71 |
provider=provider,
|
| 72 |
supports_vision=True,
|
| 73 |
last_fetched_at=datetime.now(tz=timezone.utc),
|
| 74 |
-
available_models=[],
|
| 75 |
)
|
| 76 |
|
| 77 |
|
|
@@ -189,24 +188,6 @@ def test_build_client_google_ai_studio_missing_env(monkeypatch):
|
|
| 189 |
build_client(ProviderType.GOOGLE_AI_STUDIO)
|
| 190 |
|
| 191 |
|
| 192 |
-
def test_build_client_vertex_api_key(monkeypatch):
|
| 193 |
-
monkeypatch.setenv("VERTEX_API_KEY", "fake-vertex-key")
|
| 194 |
-
|
| 195 |
-
with patch("app.services.ai.client_factory.genai.Client") as mock_cls:
|
| 196 |
-
mock_cls.return_value = MagicMock()
|
| 197 |
-
client = build_client(ProviderType.VERTEX_API_KEY)
|
| 198 |
-
|
| 199 |
-
mock_cls.assert_called_once_with(vertexai=True, api_key="fake-vertex-key")
|
| 200 |
-
assert client is mock_cls.return_value
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
def test_build_client_vertex_api_key_missing_env(monkeypatch):
|
| 204 |
-
monkeypatch.delenv("VERTEX_API_KEY", raising=False)
|
| 205 |
-
|
| 206 |
-
with pytest.raises(RuntimeError, match="VERTEX_API_KEY"):
|
| 207 |
-
build_client(ProviderType.VERTEX_API_KEY)
|
| 208 |
-
|
| 209 |
-
|
| 210 |
def test_build_client_vertex_service_account(monkeypatch):
|
| 211 |
sa_json = json.dumps({
|
| 212 |
"type": "service_account",
|
|
|
|
| 71 |
provider=provider,
|
| 72 |
supports_vision=True,
|
| 73 |
last_fetched_at=datetime.now(tz=timezone.utc),
|
|
|
|
| 74 |
)
|
| 75 |
|
| 76 |
|
|
|
|
| 188 |
build_client(ProviderType.GOOGLE_AI_STUDIO)
|
| 189 |
|
| 190 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 191 |
def test_build_client_vertex_service_account(monkeypatch):
|
| 192 |
sa_json = json.dumps({
|
| 193 |
"type": "service_account",
|
backend/tests/test_ai_providers.py
CHANGED
|
@@ -16,7 +16,6 @@ from app.schemas.model_config import ModelConfig, ModelInfo, ProviderType
|
|
| 16 |
from app.services.ai.base import is_vision_model
|
| 17 |
from app.services.ai.model_registry import build_model_config, list_all_models
|
| 18 |
from app.services.ai.provider_google_ai import GoogleAIProvider
|
| 19 |
-
from app.services.ai.provider_vertex_key import VertexAPIKeyProvider
|
| 20 |
from app.services.ai.provider_vertex_sa import VertexServiceAccountProvider
|
| 21 |
|
| 22 |
# ---------------------------------------------------------------------------
|
|
@@ -114,7 +113,6 @@ def test_model_config_valid():
|
|
| 114 |
provider=ProviderType.GOOGLE_AI_STUDIO,
|
| 115 |
supports_vision=True,
|
| 116 |
last_fetched_at=datetime(2026, 3, 17, tzinfo=timezone.utc),
|
| 117 |
-
available_models=[],
|
| 118 |
)
|
| 119 |
assert cfg.corpus_id == "corpus-001"
|
| 120 |
assert cfg.supports_vision is True
|
|
@@ -223,38 +221,6 @@ def test_google_ai_provider_empty_list(monkeypatch):
|
|
| 223 |
assert models == []
|
| 224 |
|
| 225 |
|
| 226 |
-
# ---------------------------------------------------------------------------
|
| 227 |
-
# Tests — VertexAPIKeyProvider
|
| 228 |
-
# ---------------------------------------------------------------------------
|
| 229 |
-
|
| 230 |
-
def test_vertex_key_provider_not_configured(monkeypatch):
|
| 231 |
-
monkeypatch.delenv("VERTEX_API_KEY", raising=False)
|
| 232 |
-
assert VertexAPIKeyProvider().is_configured() is False
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
def test_vertex_key_provider_always_unavailable_even_with_key(monkeypatch):
|
| 236 |
-
"""aiplatform.googleapis.com rejette les clés API → is_configured() toujours False."""
|
| 237 |
-
monkeypatch.setenv("VERTEX_API_KEY", "fake-vertex-key")
|
| 238 |
-
assert VertexAPIKeyProvider().is_configured() is False
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
def test_vertex_key_provider_type():
|
| 242 |
-
assert VertexAPIKeyProvider().provider_type == ProviderType.VERTEX_API_KEY
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
def test_vertex_key_provider_list_models_raises(monkeypatch):
|
| 246 |
-
"""list_models() et generate_content() lèvent RuntimeError (provider indisponible)."""
|
| 247 |
-
monkeypatch.setenv("VERTEX_API_KEY", "fake-vertex-key")
|
| 248 |
-
with pytest.raises(RuntimeError, match="aiplatform"):
|
| 249 |
-
VertexAPIKeyProvider().list_models()
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
def test_vertex_key_provider_generate_content_raises(monkeypatch):
|
| 253 |
-
monkeypatch.setenv("VERTEX_API_KEY", "fake-vertex-key")
|
| 254 |
-
with pytest.raises(RuntimeError, match="aiplatform"):
|
| 255 |
-
VertexAPIKeyProvider().generate_content(b"img", "prompt", "gemini-2.0-flash")
|
| 256 |
-
|
| 257 |
-
|
| 258 |
# ---------------------------------------------------------------------------
|
| 259 |
# Tests — VertexServiceAccountProvider
|
| 260 |
# ---------------------------------------------------------------------------
|
|
@@ -348,7 +314,7 @@ def test_vertex_sa_provider_filters_non_generate_content(monkeypatch):
|
|
| 348 |
|
| 349 |
def test_list_all_models_no_providers_configured(monkeypatch):
|
| 350 |
monkeypatch.delenv("GOOGLE_AI_STUDIO_API_KEY", raising=False)
|
| 351 |
-
|
| 352 |
monkeypatch.delenv("VERTEX_SERVICE_ACCOUNT_JSON", raising=False)
|
| 353 |
result = list_all_models()
|
| 354 |
assert result == []
|
|
@@ -356,7 +322,7 @@ def test_list_all_models_no_providers_configured(monkeypatch):
|
|
| 356 |
|
| 357 |
def test_list_all_models_one_provider(monkeypatch):
|
| 358 |
monkeypatch.setenv("GOOGLE_AI_STUDIO_API_KEY", "fake-key")
|
| 359 |
-
|
| 360 |
monkeypatch.delenv("VERTEX_SERVICE_ACCOUNT_JSON", raising=False)
|
| 361 |
mock_model = _make_mock_model()
|
| 362 |
|
|
@@ -370,11 +336,10 @@ def test_list_all_models_one_provider(monkeypatch):
|
|
| 370 |
|
| 371 |
def test_list_all_models_aggregates_multiple_providers(monkeypatch):
|
| 372 |
"""Deux providers configurés → les deux listes sont agrégées.
|
| 373 |
-
VertexAPIKeyProvider est toujours indisponible (aiplatform n'accepte pas les clés).
|
| 374 |
On utilise Google AI Studio + Vertex Service Account pour tester l'agrégation.
|
| 375 |
"""
|
| 376 |
monkeypatch.setenv("GOOGLE_AI_STUDIO_API_KEY", "fake-key-ai")
|
| 377 |
-
|
| 378 |
monkeypatch.setenv("VERTEX_SERVICE_ACCOUNT_JSON", "{}") # déclenche is_configured()
|
| 379 |
|
| 380 |
models_ai = [ModelInfo(
|
|
@@ -404,7 +369,7 @@ def test_list_all_models_failing_provider_is_skipped(monkeypatch):
|
|
| 404 |
"""Un provider configuré qui échoue est ignoré ; l'autre est retourné."""
|
| 405 |
monkeypatch.setenv("GOOGLE_AI_STUDIO_API_KEY", "bad-key")
|
| 406 |
monkeypatch.setenv("VERTEX_SERVICE_ACCOUNT_JSON", "{}")
|
| 407 |
-
|
| 408 |
|
| 409 |
models_sa = [ModelInfo(
|
| 410 |
model_id="models/gemini-2.0-flash",
|
|
@@ -427,7 +392,7 @@ def test_list_all_models_failing_provider_is_skipped(monkeypatch):
|
|
| 427 |
|
| 428 |
def test_build_model_config_valid(monkeypatch):
|
| 429 |
monkeypatch.setenv("GOOGLE_AI_STUDIO_API_KEY", "fake-key")
|
| 430 |
-
|
| 431 |
monkeypatch.delenv("VERTEX_SERVICE_ACCOUNT_JSON", raising=False)
|
| 432 |
mock_model = _make_mock_model()
|
| 433 |
|
|
@@ -440,13 +405,11 @@ def test_build_model_config_valid(monkeypatch):
|
|
| 440 |
assert cfg.selected_model_display_name == "Gemini 1.5 Pro"
|
| 441 |
assert cfg.provider == ProviderType.GOOGLE_AI_STUDIO
|
| 442 |
assert cfg.supports_vision is True
|
| 443 |
-
assert len(cfg.available_models) == 1
|
| 444 |
-
assert isinstance(cfg.available_models[0], dict)
|
| 445 |
|
| 446 |
|
| 447 |
def test_build_model_config_unknown_model(monkeypatch):
|
| 448 |
monkeypatch.setenv("GOOGLE_AI_STUDIO_API_KEY", "fake-key")
|
| 449 |
-
|
| 450 |
monkeypatch.delenv("VERTEX_SERVICE_ACCOUNT_JSON", raising=False)
|
| 451 |
mock_model = _make_mock_model()
|
| 452 |
|
|
@@ -458,7 +421,7 @@ def test_build_model_config_unknown_model(monkeypatch):
|
|
| 458 |
|
| 459 |
def test_build_model_config_no_providers(monkeypatch):
|
| 460 |
monkeypatch.delenv("GOOGLE_AI_STUDIO_API_KEY", raising=False)
|
| 461 |
-
|
| 462 |
monkeypatch.delenv("VERTEX_SERVICE_ACCOUNT_JSON", raising=False)
|
| 463 |
with pytest.raises(ValueError, match="non disponible"):
|
| 464 |
build_model_config("corpus-001", "models/gemini-1.5-pro")
|
|
|
|
| 16 |
from app.services.ai.base import is_vision_model
|
| 17 |
from app.services.ai.model_registry import build_model_config, list_all_models
|
| 18 |
from app.services.ai.provider_google_ai import GoogleAIProvider
|
|
|
|
| 19 |
from app.services.ai.provider_vertex_sa import VertexServiceAccountProvider
|
| 20 |
|
| 21 |
# ---------------------------------------------------------------------------
|
|
|
|
| 113 |
provider=ProviderType.GOOGLE_AI_STUDIO,
|
| 114 |
supports_vision=True,
|
| 115 |
last_fetched_at=datetime(2026, 3, 17, tzinfo=timezone.utc),
|
|
|
|
| 116 |
)
|
| 117 |
assert cfg.corpus_id == "corpus-001"
|
| 118 |
assert cfg.supports_vision is True
|
|
|
|
| 221 |
assert models == []
|
| 222 |
|
| 223 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 224 |
# ---------------------------------------------------------------------------
|
| 225 |
# Tests — VertexServiceAccountProvider
|
| 226 |
# ---------------------------------------------------------------------------
|
|
|
|
| 314 |
|
| 315 |
def test_list_all_models_no_providers_configured(monkeypatch):
|
| 316 |
monkeypatch.delenv("GOOGLE_AI_STUDIO_API_KEY", raising=False)
|
| 317 |
+
|
| 318 |
monkeypatch.delenv("VERTEX_SERVICE_ACCOUNT_JSON", raising=False)
|
| 319 |
result = list_all_models()
|
| 320 |
assert result == []
|
|
|
|
| 322 |
|
| 323 |
def test_list_all_models_one_provider(monkeypatch):
|
| 324 |
monkeypatch.setenv("GOOGLE_AI_STUDIO_API_KEY", "fake-key")
|
| 325 |
+
|
| 326 |
monkeypatch.delenv("VERTEX_SERVICE_ACCOUNT_JSON", raising=False)
|
| 327 |
mock_model = _make_mock_model()
|
| 328 |
|
|
|
|
| 336 |
|
| 337 |
def test_list_all_models_aggregates_multiple_providers(monkeypatch):
|
| 338 |
"""Deux providers configurés → les deux listes sont agrégées.
|
|
|
|
| 339 |
On utilise Google AI Studio + Vertex Service Account pour tester l'agrégation.
|
| 340 |
"""
|
| 341 |
monkeypatch.setenv("GOOGLE_AI_STUDIO_API_KEY", "fake-key-ai")
|
| 342 |
+
|
| 343 |
monkeypatch.setenv("VERTEX_SERVICE_ACCOUNT_JSON", "{}") # déclenche is_configured()
|
| 344 |
|
| 345 |
models_ai = [ModelInfo(
|
|
|
|
| 369 |
"""Un provider configuré qui échoue est ignoré ; l'autre est retourné."""
|
| 370 |
monkeypatch.setenv("GOOGLE_AI_STUDIO_API_KEY", "bad-key")
|
| 371 |
monkeypatch.setenv("VERTEX_SERVICE_ACCOUNT_JSON", "{}")
|
| 372 |
+
|
| 373 |
|
| 374 |
models_sa = [ModelInfo(
|
| 375 |
model_id="models/gemini-2.0-flash",
|
|
|
|
| 392 |
|
| 393 |
def test_build_model_config_valid(monkeypatch):
|
| 394 |
monkeypatch.setenv("GOOGLE_AI_STUDIO_API_KEY", "fake-key")
|
| 395 |
+
|
| 396 |
monkeypatch.delenv("VERTEX_SERVICE_ACCOUNT_JSON", raising=False)
|
| 397 |
mock_model = _make_mock_model()
|
| 398 |
|
|
|
|
| 405 |
assert cfg.selected_model_display_name == "Gemini 1.5 Pro"
|
| 406 |
assert cfg.provider == ProviderType.GOOGLE_AI_STUDIO
|
| 407 |
assert cfg.supports_vision is True
|
|
|
|
|
|
|
| 408 |
|
| 409 |
|
| 410 |
def test_build_model_config_unknown_model(monkeypatch):
|
| 411 |
monkeypatch.setenv("GOOGLE_AI_STUDIO_API_KEY", "fake-key")
|
| 412 |
+
|
| 413 |
monkeypatch.delenv("VERTEX_SERVICE_ACCOUNT_JSON", raising=False)
|
| 414 |
mock_model = _make_mock_model()
|
| 415 |
|
|
|
|
| 421 |
|
| 422 |
def test_build_model_config_no_providers(monkeypatch):
|
| 423 |
monkeypatch.delenv("GOOGLE_AI_STUDIO_API_KEY", raising=False)
|
| 424 |
+
|
| 425 |
monkeypatch.delenv("VERTEX_SERVICE_ACCOUNT_JSON", raising=False)
|
| 426 |
with pytest.raises(ValueError, match="non disponible"):
|
| 427 |
build_model_config("corpus-001", "models/gemini-1.5-pro")
|
backend/tests/test_api_providers.py
CHANGED
|
@@ -26,21 +26,18 @@ _NOW = datetime.now(timezone.utc)
|
|
| 26 |
|
| 27 |
_PROVIDERS_ALL_UNAVAILABLE = [
|
| 28 |
{"provider_type": "google_ai_studio", "display_name": "Google AI Studio", "available": False, "model_count": 0},
|
| 29 |
-
{"provider_type": "vertex_api_key", "display_name": "Vertex AI (clé API)", "available": False, "model_count": 0},
|
| 30 |
{"provider_type": "vertex_service_account", "display_name": "Vertex AI (compte de service)", "available": False, "model_count": 0},
|
| 31 |
{"provider_type": "mistral", "display_name": "Mistral AI", "available": False, "model_count": 0},
|
| 32 |
]
|
| 33 |
|
| 34 |
_PROVIDERS_GOOGLE_ONLY = [
|
| 35 |
{"provider_type": "google_ai_studio", "display_name": "Google AI Studio", "available": True, "model_count": 2},
|
| 36 |
-
{"provider_type": "vertex_api_key", "display_name": "Vertex AI (clé API)", "available": False, "model_count": 0},
|
| 37 |
{"provider_type": "vertex_service_account", "display_name": "Vertex AI (compte de service)", "available": False, "model_count": 0},
|
| 38 |
{"provider_type": "mistral", "display_name": "Mistral AI", "available": False, "model_count": 0},
|
| 39 |
]
|
| 40 |
|
| 41 |
_PROVIDERS_GOOGLE_AND_MISTRAL = [
|
| 42 |
{"provider_type": "google_ai_studio", "display_name": "Google AI Studio", "available": True, "model_count": 3},
|
| 43 |
-
{"provider_type": "vertex_api_key", "display_name": "Vertex AI (clé API)", "available": False, "model_count": 0},
|
| 44 |
{"provider_type": "vertex_service_account", "display_name": "Vertex AI (compte de service)", "available": False, "model_count": 0},
|
| 45 |
{"provider_type": "mistral", "display_name": "Mistral AI", "available": True, "model_count": 2},
|
| 46 |
]
|
|
@@ -100,7 +97,7 @@ async def test_list_providers_returns_list(async_client, monkeypatch):
|
|
| 100 |
async def test_list_providers_count(async_client, monkeypatch):
|
| 101 |
monkeypatch.setattr("app.services.ai.model_registry.get_available_providers", lambda: _PROVIDERS_ALL_UNAVAILABLE)
|
| 102 |
data = (await async_client.get("/api/v1/providers")).json()
|
| 103 |
-
assert len(data) == 4  # 4 providers connus
|
| 104 |
|
| 105 |
|
| 106 |
@pytest.mark.asyncio
|
|
|
|
| 26 |
|
| 27 |
_PROVIDERS_ALL_UNAVAILABLE = [
|
| 28 |
{"provider_type": "google_ai_studio", "display_name": "Google AI Studio", "available": False, "model_count": 0},
|
|
|
|
| 29 |
{"provider_type": "vertex_service_account", "display_name": "Vertex AI (compte de service)", "available": False, "model_count": 0},
|
| 30 |
{"provider_type": "mistral", "display_name": "Mistral AI", "available": False, "model_count": 0},
|
| 31 |
]
|
| 32 |
|
| 33 |
_PROVIDERS_GOOGLE_ONLY = [
|
| 34 |
{"provider_type": "google_ai_studio", "display_name": "Google AI Studio", "available": True, "model_count": 2},
|
|
|
|
| 35 |
{"provider_type": "vertex_service_account", "display_name": "Vertex AI (compte de service)", "available": False, "model_count": 0},
|
| 36 |
{"provider_type": "mistral", "display_name": "Mistral AI", "available": False, "model_count": 0},
|
| 37 |
]
|
| 38 |
|
| 39 |
_PROVIDERS_GOOGLE_AND_MISTRAL = [
|
| 40 |
{"provider_type": "google_ai_studio", "display_name": "Google AI Studio", "available": True, "model_count": 3},
|
|
|
|
| 41 |
{"provider_type": "vertex_service_account", "display_name": "Vertex AI (compte de service)", "available": False, "model_count": 0},
|
| 42 |
{"provider_type": "mistral", "display_name": "Mistral AI", "available": True, "model_count": 2},
|
| 43 |
]
|
|
|
|
| 97 |
async def test_list_providers_count(async_client, monkeypatch):
|
| 98 |
monkeypatch.setattr("app.services.ai.model_registry.get_available_providers", lambda: _PROVIDERS_ALL_UNAVAILABLE)
|
| 99 |
data = (await async_client.get("/api/v1/providers")).json()
|
| 100 |
+
assert len(data) == 3 # 3 providers connus
|
| 101 |
|
| 102 |
|
| 103 |
@pytest.mark.asyncio
|
backend/tests/test_provider_mistral.py
CHANGED
|
@@ -20,7 +20,6 @@ from app.schemas.model_config import ProviderType
|
|
| 20 |
from app.services.ai.provider_mistral import (
|
| 21 |
MistralProvider,
|
| 22 |
_MISTRAL_FALLBACK_MODELS,
|
| 23 |
-
_MISTRAL_VISION_MODELS, # alias backward-compat
|
| 24 |
_model_supports_vision,
|
| 25 |
)
|
| 26 |
|
|
@@ -256,11 +255,6 @@ def test_list_models_raises_if_not_configured(monkeypatch):
|
|
| 256 |
MistralProvider().list_models()
|
| 257 |
|
| 258 |
|
| 259 |
-
def test_list_models_fallback_backward_compat():
|
| 260 |
-
"""_MISTRAL_VISION_MODELS est un alias de _MISTRAL_FALLBACK_MODELS."""
|
| 261 |
-
assert _MISTRAL_VISION_MODELS is _MISTRAL_FALLBACK_MODELS
|
| 262 |
-
|
| 263 |
-
|
| 264 |
# ---------------------------------------------------------------------------
|
| 265 |
# generate_content() — bifurcation vision / texte
|
| 266 |
# ---------------------------------------------------------------------------
|
|
|
|
| 20 |
from app.services.ai.provider_mistral import (
|
| 21 |
MistralProvider,
|
| 22 |
_MISTRAL_FALLBACK_MODELS,
|
|
|
|
| 23 |
_model_supports_vision,
|
| 24 |
)
|
| 25 |
|
|
|
|
| 255 |
MistralProvider().list_models()
|
| 256 |
|
| 257 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 258 |
# ---------------------------------------------------------------------------
|
| 259 |
# generate_content() — bifurcation vision / texte
|
| 260 |
# ---------------------------------------------------------------------------
|