"""模型接口 - OpenAI兼容的模型列表端点"""
import time
from typing import Dict, Any, List, Optional
from fastapi import APIRouter, HTTPException, Depends
from app.models.grok_models import Models
from app.core.auth import auth_manager
from app.core.logger import logger
router = APIRouter(tags=["模型"])
@router.get("/models")
async def list_models(_: Optional[str] = Depends(auth_manager.verify)) -> Dict[str, Any]:
    """Return the available models in OpenAI-compatible list format.

    Returns:
        A dict with ``"object": "list"`` and a ``"data"`` array of model
        entries, each carrying id/created/owned_by plus the per-model
        defaults read from ``Models.get_model_info``.

    Raises:
        HTTPException: 500 with a structured error payload on unexpected failure.
    """
    try:
        logger.debug("[Models] 请求模型列表")
        # One timestamp shared by every entry so the listing is self-consistent.
        timestamp = int(time.time())
        model_data: List[Dict[str, Any]] = []
        for model in Models:
            model_id = model.value
            config = Models.get_model_info(model_id)
            model_data.append({
                "id": model_id,
                "object": "model",
                "created": timestamp,
                "owned_by": "x-ai",
                # Fallbacks mirror the defaults used elsewhere in this module.
                "display_name": config.get("display_name", model_id),
                "description": config.get("description", ""),
                "raw_model_path": config.get("raw_model_path", f"xai/{model_id}"),
                "default_temperature": config.get("default_temperature", 1.0),
                "default_max_output_tokens": config.get("default_max_output_tokens", 8192),
                "supported_max_output_tokens": config.get("supported_max_output_tokens", 131072),
                "default_top_p": config.get("default_top_p", 0.95),
            })
        logger.debug(f"[Models] 返回 {len(model_data)} 个模型")
        return {"object": "list", "data": model_data}
    except Exception as e:
        # logger.exception records the traceback; plain logger.error dropped it.
        logger.exception(f"[Models] 获取列表失败: {e}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": {
                    "message": f"Failed to retrieve models: {e}",
                    "type": "internal_error",
                    "code": "model_list_error",
                }
            },
        ) from e
@router.get("/models/{model_id}")
async def get_model(model_id: str, _: Optional[str] = Depends(auth_manager.verify)) -> Dict[str, Any]:
    """Return a single model's metadata in OpenAI-compatible format.

    Args:
        model_id: Identifier of the model to look up.

    Returns:
        The model entry dict (id/created/owned_by plus per-model defaults).

    Raises:
        HTTPException: 404 if ``model_id`` is unknown; 500 with a structured
            error payload on any other unexpected failure.
    """
    try:
        logger.debug(f"[Models] 请求模型: {model_id}")
        # Reject unknown models with an OpenAI-style 404 error payload.
        if not Models.is_valid_model(model_id):
            logger.warning(f"[Models] 模型不存在: {model_id}")
            raise HTTPException(
                status_code=404,
                detail={
                    "error": {
                        "message": f"Model '{model_id}' not found",
                        "type": "invalid_request_error",
                        "code": "model_not_found",
                    }
                },
            )
        timestamp = int(time.time())
        config = Models.get_model_info(model_id)
        model_info = {
            "id": model_id,
            "object": "model",
            "created": timestamp,
            "owned_by": "x-ai",
            # Fallbacks mirror the defaults used in the /models list endpoint.
            "display_name": config.get("display_name", model_id),
            "description": config.get("description", ""),
            "raw_model_path": config.get("raw_model_path", f"xai/{model_id}"),
            "default_temperature": config.get("default_temperature", 1.0),
            "default_max_output_tokens": config.get("default_max_output_tokens", 8192),
            "supported_max_output_tokens": config.get("supported_max_output_tokens", 131072),
            "default_top_p": config.get("default_top_p", 0.95),
        }
        logger.debug(f"[Models] 返回模型: {model_id}")
        return model_info
    except HTTPException:
        # Pass our own 404 (and any other HTTPException) through untouched.
        raise
    except Exception as e:
        # logger.exception records the traceback; plain logger.error dropped it.
        logger.exception(f"[Models] 获取模型失败: {e}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": {
                    "message": f"Failed to retrieve model: {e}",
                    "type": "internal_error",
                    "code": "model_retrieve_error",
                }
            },
        ) from e