"""
Router de testes de LLM
Endpoints para testar providers e modelos LLM
"""
import time

from fastapi import APIRouter, HTTPException

from api.models.requests import LLMGenerateRequest
from api.models.responses import LLMTestResponse
from api.utils.logger import setup_logger

router = APIRouter()
logger = setup_logger(__name__)

@router.post("/generate", response_model=LLMTestResponse)
async def test_llm_generate(request: LLMGenerateRequest):
    """
    **Test text generation with an LLM.**

    Useful for:
    - Checking that API keys are working
    - Trying out different models
    - Measuring latency and token usage
    """
    start_time = time.time()

    try:
        # Import the LLM manager lazily so the router still loads
        # even if the llm package is misconfigured
        from llm.llm_manager import LLMManager

        llm = LLMManager()

        # Check that the requested provider is available
        if not llm.is_provider_available(request.provider):
            raise HTTPException(
                status_code=503,
                detail=f"Provider '{request.provider}' is unavailable or not configured"
            )

        # Generate text
        result = await llm.generate(
            prompt=request.prompt,
            provider=request.provider,
            model=request.model,
            temperature=request.temperature,
            max_tokens=request.max_tokens
        )

        duration_ms = (time.time() - start_time) * 1000

        logger.info(
            f"✅ LLM test [{request.provider}] - "
            f"Tokens: {result.get('tokens', 0)} - "
            f"Duration: {duration_ms:.2f}ms"
        )

        return LLMTestResponse(
            provider=request.provider,
            model=result.get("model", request.model or "default"),
            success=True,
            response=result.get("text", ""),
            tokens_used=result.get("tokens"),
            duration_ms=duration_ms
        )

    except HTTPException:
        # Re-raise so the 503 above reaches the client instead of being
        # swallowed by the generic handler below
        raise
    except Exception as e:
        duration_ms = (time.time() - start_time) * 1000
        logger.error(f"❌ LLM test error: {str(e)}")

        return LLMTestResponse(
            provider=request.provider,
            model=request.model or "unknown",
            success=False,
            duration_ms=duration_ms,
            error=str(e)
        )
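
# A minimal usage sketch, assuming the API runs locally on port 8000 and this
# router is mounted under an "/llm" prefix (both are guesses; the mount point
# is configured where the router is included, which is not shown here):
#
#   import httpx
#
#   resp = httpx.post(
#       "http://localhost:8000/llm/generate",
#       json={"prompt": "Say hello", "provider": "groq", "temperature": 0.2},
#   )
#   print(resp.json())  # LLMTestResponse fields: provider, model, success, ...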

@router.get("/providers")
async def list_providers():
    """
    **List available LLM providers.**

    Shows which providers are configured and ready to use.
    """
    try:
        from llm.llm_manager import LLMManager

        llm = LLMManager()
        providers = llm.list_providers()

        return {
            "providers": providers,
            "total": len(providers),
            "default": llm.default_provider
        }
    except Exception as e:
        logger.error(f"Error listing providers: {e}")
        return {
            "providers": [],
            "total": 0,
            "error": str(e)
        }
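
# Example response shape, following the dict built above (provider names are
# illustrative; the actual list comes from LLMManager.list_providers()):
#
#   {"providers": ["groq", "openai"], "total": 2, "default": "groq"}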

@router.get("/models/{provider}")
async def list_models(provider: str):
    """
    **List the models available from a provider.**

    Args:
        provider: Provider name (groq/openai/anthropic)
    """
    try:
        from llm.llm_manager import LLMManager

        llm = LLMManager()

        if not llm.is_provider_available(provider):
            raise HTTPException(
                status_code=404,
                detail=f"Provider '{provider}' is unavailable"
            )

        models = llm.get_available_models(provider)

        return {
            "provider": provider,
            "models": models,
            "total": len(models)
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error listing models: {e}")
        raise HTTPException(status_code=500, detail=str(e))
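
# Example call, assuming the same local "/llm" mount point as above (the model
# name is illustrative; the real list comes from get_available_models()):
#
#   GET /llm/models/groq
#   -> {"provider": "groq", "models": ["llama-3.1-8b-instant"], "total": 1}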