Update main.py
main.py CHANGED

@@ -1,5 +1,5 @@
 # ===================================================================
-# main.py (
+# main.py (final fix: corrected function definition order)
 # ===================================================================
 
 import json
@@ -35,7 +35,6 @@ class ChatMessage(BaseModel):
     role: str
     content: Union[str, List[Dict[str, Any]]]
    reasoning_content: Optional[str] = None
-# ... (other models remain unchanged)
 class ChatCompletionRequest(BaseModel):
     model: str
     messages: List[ChatMessage]
@@ -43,22 +42,18 @@ class ChatCompletionRequest(BaseModel):
     temperature: Optional[float] = None
     max_tokens: Optional[int] = None
     top_p: Optional[float] = None
-
 class ModelInfo(BaseModel):
     id: str
     object: str = "model"
     created: int
     owned_by: str
-
 class ModelList(BaseModel):
     object: str = "list"
     data: List[ModelInfo]
-
 class ChatCompletionChoice(BaseModel):
     message: ChatMessage
     index: int = 0
     finish_reason: str = "stop"
-
 class ChatCompletionResponse(BaseModel):
     id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4().hex}")
     object: str = "chat.completion"
@@ -66,12 +61,10 @@ class ChatCompletionResponse(BaseModel):
     model: str
     choices: List[ChatCompletionChoice]
     usage: Dict[str, int] = Field(default_factory=lambda: {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0})
-
 class StreamChoice(BaseModel):
     delta: Dict[str, Any] = Field(default_factory=dict)
     index: int = 0
     finish_reason: Optional[str] = None
-
 class StreamResponse(BaseModel):
     id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4().hex}")
     object: str = "chat.completion.chunk"
@@ -115,30 +108,7 @@ def load_codegeex_tokens_from_secrets():
     except Exception as e:
         print(f"FATAL: Error loading CodeGeeX tokens: {e}")
 
-
-async def startup():
-    print("Starting CodeGeeX OpenAI API Adapter server...")
-    load_client_api_keys_from_secrets()
-    load_codegeex_tokens_from_secrets()
-    print("Server initialization completed.")
-
-# --- THE FIX: Add a root endpoint for the health check ---
-@app.get("/")
-def health_check():
-    """Health check endpoint for Hugging Face."""
-    return {"status": "ok", "message": "CodeGeeX API Adapter is running."}
-# ---------------------------------------------------------
-
-# --- Original routes ---
-def get_models_list_response() -> ModelList:
-    return ModelList(data=[ModelInfo(id=model, created=int(time.time()), owned_by="anthropic") for model in CODEGEEX_MODELS])
-
-@app.get("/v1/models", response_model=ModelList)
-async def list_v1_models(_: None = Depends(authenticate_client)):
-    return get_models_list_response()
-
-# ... (all other functions and routes match the previous file; no changes needed)
-# The rest of the original code is unchanged.
+# --- Core utility functions and authentication (make sure these are defined before use) ---
 def get_best_codegeex_token() -> Optional[CodeGeeXToken]:
     with token_rotation_lock:
         now = time.time()
@@ -173,6 +143,25 @@ async def authenticate_client(auth: Optional[HTTPAuthorizationCredentials] = Dep
     if not auth or not auth.credentials: raise HTTPException(status_code=401, detail="API key required.", headers={"WWW-Authenticate": "Bearer"})
     if auth.credentials not in VALID_CLIENT_KEYS: raise HTTPException(status_code=403, detail="Invalid client API key.")
 
+# --- FastAPI events and routes ---
+@app.on_event("startup")
+async def startup():
+    print("Starting CodeGeeX OpenAI API Adapter server...")
+    load_client_api_keys_from_secrets()
+    load_codegeex_tokens_from_secrets()
+    print("Server initialization completed.")
+
+@app.get("/")
+def health_check():
+    return {"status": "ok", "message": "CodeGeeX API Adapter is running."}
+
+def get_models_list_response() -> ModelList:
+    return ModelList(data=[ModelInfo(id=model, created=int(time.time()), owned_by="anthropic") for model in CODEGEEX_MODELS])
+
+@app.get("/v1/models", response_model=ModelList)
+async def list_v1_models(_: None = Depends(authenticate_client)):
+    return get_models_list_response()
+
 @app.get("/models", response_model=ModelList)
 async def list_models_no_auth():
     return get_models_list_response()
@@ -258,3 +247,4 @@ async def chat_completions(request: ChatCompletionRequest, _: None = Depends(aut
             elif status_code in [429, 500, 502, 503, 504]: token["error_count"] += 1
         except Exception as e:
             with token_rotation_lock: token["error_count"] += 1
+
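The reordering is not cosmetic: `Depends(authenticate_client)` appears as a default argument in the route signatures, and Python evaluates default arguments and decorator expressions at module import time, so any route referencing `authenticate_client` must come after its definition. A minimal standalone sketch of the failure mode (not the full adapter; the dependency body here is a stand-in):

# Minimal sketch of why definition order matters for FastAPI dependencies:
# Depends(...) is evaluated when the decorated function is defined, i.e.
# at import time, not at request time.
from fastapi import Depends, FastAPI, HTTPException

app = FastAPI()

# Define the dependency first, as the commit now does.
async def authenticate_client() -> None:
    # Stand-in for the real key check; this sketch always rejects.
    raise HTTPException(status_code=401, detail="API key required.")

@app.get("/v1/models")
async def list_v1_models(_: None = Depends(authenticate_client)):
    return {"object": "list", "data": []}

# If list_v1_models were defined above authenticate_client, importing this
# module would fail with: NameError: name 'authenticate_client' is not defined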
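Once deployed, the endpoints this commit adds or moves can be smoke-tested from a client. The base URL, port, and key below are placeholders, not values from this repo (7860 is only the usual Hugging Face Spaces default):

# Hypothetical smoke test; adjust BASE and the Bearer key for your deployment.
import requests

BASE = "http://localhost:7860"

# Root health check: no auth required.
print(requests.get(f"{BASE}/").json())
# expected: {"status": "ok", "message": "CodeGeeX API Adapter is running."}

# /v1/models requires a Bearer key accepted by authenticate_client;
# /models is the unauthenticated variant.
resp = requests.get(f"{BASE}/v1/models",
                    headers={"Authorization": "Bearer sk-example-key"})
print(resp.status_code, resp.json())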