Upload proxy_handler.py

proxy_handler.py · +41 -30 · CHANGED
@@ -27,7 +27,7 @@ class ProxyHandler:
         if not self.client.is_closed:
             await self.client.aclose()

-    # --- Text utilities
+    # --- Text utilities (REVISED _clean_answer_from_edit) ---
     def _clean_thinking(self, s: str) -> str:
         if not s: return ""
         s = re.sub(r'<details[^>]*>.*?</details>', '', s, flags=re.DOTALL)
@@ -35,9 +35,26 @@ class ProxyHandler:
         s = re.sub(r'<[^>]+>', '', s)
         s = re.sub(r'^\s*>\s*', '', s, flags=re.MULTILINE)
         return s
-    def _clean_answer(self, s: str) -> str:
+
+    def _clean_answer(self, s: str, from_edit_content: bool = False) -> str:
+        """
+        Cleans the answer string.
+        If from_edit_content is True, it extracts only the content after the last </details> tag.
+        """
         if not s: return ""
-        return re.sub(r"<details[^>]*>.*?</details>", "", s, flags=re.DOTALL)
+
+        if from_edit_content:
+            # Find the position of the last </details> tag
+            last_details_pos = s.rfind('</details>')
+            if last_details_pos != -1:
+                # Extract everything after the tag
+                s = s[last_details_pos + len('</details>'):]
+
+        # General cleanup for any remaining <details> blocks (just in case)
+        s = re.sub(r"<details[^>]*>.*?</details>", "", s, flags=re.DOTALL)
+        return s.lstrip()  # Use lstrip to remove leading whitespace like '\n' but keep internal space
+
+    # ... Other methods like _serialize_msgs, _prep_upstream remain the same ...
     def _serialize_msgs(self, msgs) -> list:
         out = []
         for m in msgs:
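To sanity-check the revised _clean_answer, the same logic can be exercised as a standalone function. A minimal sketch; the sample <details> markup below is an assumption about what upstream edit_content frames look like, not something this diff documents:

import re

def clean_answer(s: str, from_edit_content: bool = False) -> str:
    # Free-function mirror of the revised method above, for testing only.
    if not s:
        return ""
    if from_edit_content:
        # edit_content resends the whole message; keep only what follows
        # the last closed <details> (thinking) block.
        last = s.rfind('</details>')
        if last != -1:
            s = s[last + len('</details>'):]
    # Drop any stray <details> blocks that remain.
    s = re.sub(r"<details[^>]*>.*?</details>", "", s, flags=re.DOTALL)
    return s.lstrip()

sample = '<details open><summary>Thinking</summary>step 1</details>\nThe answer is 42.'
assert clean_answer(sample, from_edit_content=True) == 'The answer is 42.'
assert clean_answer('plain delta text') == 'plain delta text'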
@@ -53,7 +70,7 @@ class ProxyHandler:
         body = { "stream": True, "model": model, "messages": self._serialize_msgs(req.messages), "background_tasks": {"title_generation": True, "tags_generation": True}, "chat_id": str(uuid.uuid4()), "features": {"image_generation": False, "code_interpreter": False, "web_search": False, "auto_web_search": False, "enable_thinking": True,}, "id": str(uuid.uuid4()), "mcp_servers": ["deep-web-search"], "model_item": {"id": model, "name": "GLM-4.5", "owned_by": "openai"}, "params": {}, "tool_servers": [], "variables": {"{{USER_NAME}}": "User", "{{USER_LOCATION}}": "Unknown", "{{CURRENT_DATETIME}}": time.strftime("%Y-%m-%d %H:%M:%S"),},}
         headers = { "Content-Type": "application/json", "Authorization": f"Bearer {ck}", "User-Agent": ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36"), "Accept": "application/json, text/event-stream", "Accept-Language": "zh-CN", "sec-ch-ua": '"Not)A;Brand";v="8", "Chromium";v="138", "Google Chrome";v="138"', "sec-ch-ua-mobile": "?0", "sec-ch-ua-platform": '"macOS"', "x-fe-version": "prod-fe-1.0.53", "Origin": "https://chat.z.ai", "Referer": "https://chat.z.ai/",}
         return body, headers, ck
-
+
     # ---------- stream ----------
     async def stream_proxy_response(self, req: ChatCompletionRequest) -> AsyncGenerator[str, None]:
         ck = None
@@ -65,8 +82,8 @@ class ProxyHandler:

             async with self.client.stream("POST", settings.UPSTREAM_URL, json=body, headers=headers) as resp:
                 if resp.status_code != 200:
-                    await cookie_manager.mark_cookie_failed(ck)
-                    err_msg = ...
+                    await cookie_manager.mark_cookie_failed(ck); err_body = await resp.aread()
+                    err_msg = f"Error: {resp.status_code} - {err_body.decode(errors='ignore')}"
                     err = {"id": comp_id, "object": "chat.completion.chunk", "created": int(time.time()), "model": req.model, "choices": [{"index": 0, "delta": {"content": err_msg}, "finish_reason": "stop"}],}
                     yield f"data: {json.dumps(err)}\n\n"; yield "data: [DONE]\n\n"; return
                 await cookie_manager.mark_cookie_success(ck)
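Note the error branch now calls await resp.aread() before formatting the message: with an httpx streaming response the body is not buffered until it is explicitly read. A minimal sketch of that pattern, with a placeholder URL:

import httpx

async def read_error(url: str = "https://example.invalid/api") -> str | None:
    async with httpx.AsyncClient() as client:
        async with client.stream("POST", url, json={}) as resp:
            if resp.status_code != 200:
                body = await resp.aread()  # buffer the streamed body first
                return f"Error: {resp.status_code} - {body.decode(errors='ignore')}"
    return None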
@@ -77,15 +94,14 @@ class ProxyHandler:
                     if not line or not line.startswith('data: '): continue
                     payload_str = line[6:]
                     if payload_str == '[DONE]':
-                        if think_open:
-                            yield f"data: {json.dumps({'id': comp_id, 'object': 'chat.completion.chunk', 'created': int(time.time()), 'model': req.model, 'choices': [{'index': 0, 'delta': {'content': '</think>'}, 'finish_reason': None}]})}\n\n"
-                        yield f"data: {json.dumps({'id': comp_id, 'object': 'chat.completion.chunk', 'created': int(time.time()), 'model': req.model, 'choices': [{'index': 0, 'delta': {}, 'finish_reason': 'stop'}]})}\n\n"
-                        yield "data: [DONE]\n\n"; return
+                        if think_open: yield f"data: {json.dumps({'id': comp_id, 'object': 'chat.completion.chunk', 'created': int(time.time()), 'model': req.model, 'choices': [{'index': 0, 'delta': {'content': '</think>'}, 'finish_reason': None}]})}\n\n"
+                        yield f"data: {json.dumps({'id': comp_id, 'object': 'chat.completion.chunk', 'created': int(time.time()), 'model': req.model, 'choices': [{'index': 0, 'delta': {}, 'finish_reason': 'stop'}]})}\n\n"; yield "data: [DONE]\n\n"; return
                     try:
                         dat = json.loads(payload_str).get("data", {})
                     except (json.JSONDecodeError, AttributeError): continue

-                    #
+                    # FIX: Differentiate content source
+                    is_edit = "edit_content" in dat
                     content = dat.get("delta_content", "") or dat.get("edit_content", "")
                     new_phase = dat.get("phase")

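The new is_edit flag distinguishes incremental delta_content frames from edit_content frames, which restate the whole message so far. A sketch of that routing in isolation; the field names follow the diff, the sample frames are made up:

import json

def parse_sse_line(line: str):
    # Returns (phase, content, is_edit) for a data frame, or None to skip it.
    if not line.startswith('data: '):
        return None
    payload = line[6:]
    if payload == '[DONE]':
        return None
    try:
        dat = json.loads(payload).get("data", {})
    except (json.JSONDecodeError, AttributeError):
        return None
    is_edit = "edit_content" in dat  # edit frames replace, delta frames append
    content = dat.get("delta_content", "") or dat.get("edit_content", "")
    return dat.get("phase"), content, is_edit

print(parse_sse_line('data: {"data": {"phase": "answer", "delta_content": "Hel"}}'))       # ('answer', 'Hel', False)
print(parse_sse_line('data: {"data": {"phase": "answer", "edit_content": "full text"}}'))  # ('answer', 'full text', True)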
@@ -108,15 +124,14 @@ class ProxyHandler:
                             think_open = True
                         text_to_yield = self._clean_thinking(content)
                     elif current_content_phase == "answer":
-                        text_to_yield = self._clean_answer(content)
+                        # FIX: Use the new cleaning logic
+                        text_to_yield = self._clean_answer(content, from_edit_content=is_edit)

                     if text_to_yield:
                         content_payload = {"id": comp_id, "object": "chat.completion.chunk", "created": int(time.time()), "model": req.model, "choices": [{"index": 0, "delta": {"content": text_to_yield}, "finish_reason": None}],}
                         yield f"data: {json.dumps(content_payload)}\n\n"
         except Exception:
-            logger.exception("Stream error")
-            # You might want to yield an error to the client here as well
-            raise
+            logger.exception("Stream error"); raise

     # ---------- non-stream ----------
     async def non_stream_proxy_response(self, req: ChatCompletionRequest) -> ChatCompletionResponse:
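For context, the stream loop brackets thinking output in <think>/</think>: this hunk sets think_open, and the [DONE] fix-up earlier closes a dangling tag. The state machine below reproduces that behavior in isolation; the assumption that phases arrive as "thinking" followed by "answer" is inferred from the diff, not documented upstream:

def bracket_thinking(frames):
    # frames: iterable of (phase, text) pairs, already cleaned.
    think_open = False
    for phase, text in frames:
        if phase == "thinking" and not think_open:
            think_open = True
            yield "<think>"
        elif phase == "answer" and think_open:
            think_open = False
            yield "</think>"
        yield text
    if think_open:
        yield "</think>"  # mirrors the [DONE] fix-up above

print(''.join(bracket_thinking([("thinking", "hmm"), ("answer", "Hi")])))  # <think>hmm</think>Hi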
@@ -128,8 +143,7 @@ class ProxyHandler:

             async with self.client.stream("POST", settings.UPSTREAM_URL, json=body, headers=headers) as resp:
                 if resp.status_code != 200:
-                    await cookie_manager.mark_cookie_failed(ck)
-                    error_detail = await resp.text()
+                    await cookie_manager.mark_cookie_failed(ck); error_detail = await resp.text()
                     raise HTTPException(resp.status_code, f"Upstream error: {error_detail}")
                 await cookie_manager.mark_cookie_success(ck)

@@ -143,27 +157,25 @@ class ProxyHandler:
                         dat = json.loads(payload_str).get("data", {})
                     except (json.JSONDecodeError, AttributeError): continue

-
+                    is_edit = "edit_content" in dat
                     content = dat.get("delta_content") or dat.get("edit_content")
                     new_phase = dat.get("phase")

-                    if new_phase:
-                        phase_cur = new_phase
-
+                    if new_phase: phase_cur = new_phase
                     if content and phase_cur:
-                        full_content.append((phase_cur, content))
+                        # Store the content along with its source (is_edit)
+                        full_content.append((phase_cur, content, is_edit))
                     else: continue
             break

         think_buf = []
         answer_buf = []
-        for phase, content in full_content:
+        for phase, content, is_edit in full_content:
             if phase == "thinking":
                 think_buf.append(self._clean_thinking(content))
             elif phase == "answer":
-                #
-
-                answer_buf.append(self._clean_answer(content))
+                # FIX: Use the new cleaning logic
+                answer_buf.append(self._clean_answer(content, from_edit_content=is_edit))

         ans_text = ''.join(answer_buf)
         final_content = ans_text
@@ -171,16 +183,15 @@ class ProxyHandler:
         if settings.SHOW_THINK_TAGS and think_buf:
             think_text = ''.join(think_buf).strip()
             if think_text:
-
-                final_content = f"<think>{think_text}</think>\n{ans_text}"
+                # No longer need to manually add newline, as .lstrip() in _clean_answer handles it
+                final_content = f"<think>{think_text}</think>{ans_text}"

         return ChatCompletionResponse(
             id=f"chatcmpl-{uuid.uuid4().hex[:29]}", created=int(time.time()), model=req.model,
             choices=[{"index": 0, "message": {"role": "assistant", "content": final_content}, "finish_reason": "stop"}],
         )
     except Exception:
-        logger.exception("Non-stream processing failed")
-        raise
+        logger.exception("Non-stream processing failed"); raise

     # ---------- FastAPI entry ----------
     async def handle_chat_completion(self, req: ChatCompletionRequest):
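Putting the non-stream pieces together: full_content now carries (phase, content, is_edit) tuples, and the final message optionally gets a <think> prefix. A self-contained sketch of that assembly, using stand-ins for the cleaning helpers and made-up frame data:

import re

full_content = [
    ("thinking", "> consider the question", False),
    ("answer", "<details>steps</details>\nParis.", True),
]

think_buf, answer_buf = [], []
for phase, content, is_edit in full_content:
    if phase == "thinking":
        # Stand-in for _clean_thinking: strip leading blockquote markers.
        think_buf.append(re.sub(r'^\s*>\s*', '', content, flags=re.MULTILINE))
    elif phase == "answer":
        # Stand-in for _clean_answer: keep only text after the last </details>.
        pos = content.rfind('</details>')
        if is_edit and pos != -1:
            content = content[pos + len('</details>'):].lstrip()
        answer_buf.append(content)

ans_text = ''.join(answer_buf)
think_text = ''.join(think_buf).strip()
final_content = f"<think>{think_text}</think>{ans_text}" if think_text else ans_text
print(final_content)  # <think>consider the question</think>Paris.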