Commit
·
931c600
1
Parent(s):
68ad966
feat(core): 添加对 GLM-4.6 模型及其思维模型的支持
Browse files- app/core/config.py +2 -0
- app/core/openai.py +2 -0
- app/core/zai_transformer.py +5 -1
app/core/config.py
CHANGED
|
@@ -20,6 +20,8 @@ class Settings(BaseSettings):
|
|
| 20 |
THINKING_MODEL: str = os.getenv("THINKING_MODEL", "GLM-4.5-Thinking")
|
| 21 |
SEARCH_MODEL: str = os.getenv("SEARCH_MODEL", "GLM-4.5-Search")
|
| 22 |
AIR_MODEL: str = os.getenv("AIR_MODEL", "GLM-4.5-Air")
|
|
|
|
|
|
|
| 23 |
|
| 24 |
# Server Configuration
|
| 25 |
LISTEN_PORT: int = int(os.getenv("LISTEN_PORT", "8080"))
|
|
|
|
| 20 |
THINKING_MODEL: str = os.getenv("THINKING_MODEL", "GLM-4.5-Thinking")
|
| 21 |
SEARCH_MODEL: str = os.getenv("SEARCH_MODEL", "GLM-4.5-Search")
|
| 22 |
AIR_MODEL: str = os.getenv("AIR_MODEL", "GLM-4.5-Air")
|
| 23 |
+
GLM_46_MODEL: str = os.getenv("GLM_46_MODEL", "GLM-4.6")
|
| 24 |
+
GLM_46_THINKING_MODEL: str = os.getenv("GLM_46_THINKING_MODEL", "GLM-4.6-Thinking")
|
| 25 |
|
| 26 |
# Server Configuration
|
| 27 |
LISTEN_PORT: int = int(os.getenv("LISTEN_PORT", "8080"))
|
app/core/openai.py
CHANGED
|
@@ -33,6 +33,8 @@ async def list_models():
|
|
| 33 |
Model(id=settings.THINKING_MODEL, created=current_time, owned_by="z.ai"),
|
| 34 |
Model(id=settings.SEARCH_MODEL, created=current_time, owned_by="z.ai"),
|
| 35 |
Model(id=settings.AIR_MODEL, created=current_time, owned_by="z.ai"),
|
|
|
|
|
|
|
| 36 |
]
|
| 37 |
)
|
| 38 |
return response
|
|
|
|
| 33 |
Model(id=settings.THINKING_MODEL, created=current_time, owned_by="z.ai"),
|
| 34 |
Model(id=settings.SEARCH_MODEL, created=current_time, owned_by="z.ai"),
|
| 35 |
Model(id=settings.AIR_MODEL, created=current_time, owned_by="z.ai"),
|
| 36 |
+
Model(id=settings.GLM_46_MODEL, created=current_time, owned_by="z.ai"),
|
| 37 |
+
Model(id=settings.GLM_46_THINKING_MODEL, created=current_time, owned_by="z.ai"),
|
| 38 |
]
|
| 39 |
)
|
| 40 |
return response
|
app/core/zai_transformer.py
CHANGED
|
@@ -256,6 +256,8 @@ class ZAITransformer:
|
|
| 256 |
settings.THINKING_MODEL: "0727-360B-API", # GLM-4.5-Thinking
|
| 257 |
settings.SEARCH_MODEL: "0727-360B-API", # GLM-4.5-Search
|
| 258 |
settings.AIR_MODEL: "0727-106B-API", # GLM-4.5-Air
|
|
|
|
|
|
|
| 259 |
}
|
| 260 |
|
| 261 |
async def get_token(self) -> str:
|
|
@@ -310,7 +312,9 @@ class ZAITransformer:
|
|
| 310 |
|
| 311 |
# 确定请求的模型特性
|
| 312 |
requested_model = request.get("model", settings.PRIMARY_MODEL)
|
| 313 |
-
is_thinking = requested_model == settings.THINKING_MODEL or request.get("reasoning", False)
|
|
|
|
|
|
|
| 314 |
is_search = requested_model == settings.SEARCH_MODEL
|
| 315 |
is_air = requested_model == settings.AIR_MODEL
|
| 316 |
|
|
|
|
| 256 |
settings.THINKING_MODEL: "0727-360B-API", # GLM-4.5-Thinking
|
| 257 |
settings.SEARCH_MODEL: "0727-360B-API", # GLM-4.5-Search
|
| 258 |
settings.AIR_MODEL: "0727-106B-API", # GLM-4.5-Air
|
| 259 |
+
settings.GLM_46_MODEL: "GLM-4-6-API-V1", # GLM-4.6
|
| 260 |
+
settings.GLM_46_THINKING_MODEL: "GLM-4-6-API-V1", # GLM-4.6-Thinking
|
| 261 |
}
|
| 262 |
|
| 263 |
async def get_token(self) -> str:
|
|
|
|
| 312 |
|
| 313 |
# 确定请求的模型特性
|
| 314 |
requested_model = request.get("model", settings.PRIMARY_MODEL)
|
| 315 |
+
is_thinking = (requested_model == settings.THINKING_MODEL or
|
| 316 |
+
requested_model == settings.GLM_46_THINKING_MODEL or
|
| 317 |
+
request.get("reasoning", False))
|
| 318 |
is_search = requested_model == settings.SEARCH_MODEL
|
| 319 |
is_air = requested_model == settings.AIR_MODEL
|
| 320 |
|