KaThaNg commited on
Commit
1f13653
·
verified ·
1 Parent(s): 5600c52

Update proxy_server.py

Browse files
Files changed (1) hide show
  1. proxy_server.py +80 -41
proxy_server.py CHANGED
@@ -12,6 +12,7 @@ from fastapi.responses import StreamingResponse, JSONResponse
12
  from fastapi.middleware.cors import CORSMiddleware
13
  from loguru import logger
14
  from typing import AsyncGenerator, Set, Optional, Dict, Any, List
 
15
 
16
  # --- Logging Configuration ---
17
  logger.remove()
@@ -49,9 +50,19 @@ async def lifespan(app: FastAPI):
49
  timeout_config = httpx.Timeout(connect=CONNECT_TIMEOUT, read=READ_TIMEOUT, write=WRITE_TIMEOUT, pool=POOL_TIMEOUT)
50
  proxy_config = {"http://": HTTP_PROXY, "https://": HTTP_PROXY} if HTTP_PROXY else None
51
 
52
- logger.info(f"Initializing httpx client. Target Endpoint: {OPENAI_API_ENDPOINT}")
 
 
 
 
 
 
 
 
 
 
53
  if proxy_config:
54
- logger.info(f"Using outbound proxy: {HTTP_PROXY}")
55
  if not OPENAI_API_KEY:
56
  logger.warning("OPENAI_API_KEY is not set. Requests to the target endpoint might fail if it requires authentication.")
57
  if not VALID_API_KEYS:
@@ -101,9 +112,13 @@ async def get_api_key(key: Optional[str] = Security(api_key_header)) -> str:
101
  logger.warning("API key missing from request header.")
102
  raise HTTPException(status_code=401, detail=f"API Key required in header '{API_KEY_NAME}'")
103
  if key not in VALID_API_KEYS:
104
- logger.warning(f"Invalid API key received: '{key[:4]}...'")
 
 
105
  raise HTTPException(status_code=401, detail="Invalid or expired API Key")
106
- logger.debug(f"Valid API key received: '{key[:4]}...'")
 
 
107
  return key
108
 
109
  # --- Format Conversion Logic ---
@@ -143,7 +158,9 @@ def claude_request_to_openai_payload(claude_request: Dict[str, Any]) -> Dict[str
143
  # Add other relevant parameter mappings here (e.g., presence_penalty, frequency_penalty)
144
  }
145
 
146
- logger.debug("Converted Claude request to OpenAI payload.")
 
 
147
  return openai_payload
148
 
149
  def openai_response_to_claude_response(openai_response: Dict[str, Any], claude_request_id: str) -> Dict[str, Any]:
@@ -182,23 +199,24 @@ def openai_response_to_claude_response(openai_response: Dict[str, Any], claude_r
182
  "output_tokens": completion_tokens,
183
  },
184
  }
185
- logger.debug("Converted non-streaming OpenAI response to Claude format.")
186
  return claude_response
187
  except (KeyError, IndexError, TypeError) as e:
188
- logger.error(f"Error converting non-streaming OpenAI response: {e}\nOriginal response: {openai_response}")
189
- # Re-raise or return a structured error for the client
 
190
  raise ValueError(f"Failed to parse OpenAI response: {e}")
191
 
192
  async def stream_openai_response_to_claude_events(openai_response: httpx.Response, claude_request_id: str, requested_model: str) -> AsyncGenerator[str, None]:
193
  """Converts an OpenAI SSE stream to Claude API SSE format."""
194
  message_id = claude_request_id # Use the original request ID for consistency
195
- accumulated_content = ""
196
  openai_finish_reason = None
197
  input_tokens = 0 # Will be updated if usage info is sent
198
  output_tokens = 0 # Will be updated if usage info is sent
199
  last_ping_time = time.time()
200
 
201
- logger.debug(f"Starting Claude SSE stream conversion for request ID: {message_id}")
202
 
203
  # 1. Send message_start event
204
  yield f"event: message_start\ndata: {json.dumps({'type': 'message_start', 'message': {'id': message_id, 'type': 'message', 'role': 'assistant', 'content': [], 'model': requested_model, 'stop_reason': None, 'stop_sequence': None, 'usage': {'input_tokens': 0, 'output_tokens': 0}}})}\n\n"
@@ -216,7 +234,7 @@ async def stream_openai_response_to_claude_events(openai_response: httpx.Respons
216
  if line.startswith("data:"):
217
  data_str = line[len("data: "):].strip()
218
  if data_str == "[DONE]":
219
- logger.debug("Received [DONE] marker from OpenAI stream.")
220
  break # End of OpenAI stream
221
 
222
  try:
@@ -231,25 +249,25 @@ async def stream_openai_response_to_claude_events(openai_response: httpx.Respons
231
  # Check for finish reason in the chunk
232
  if choices[0].get("finish_reason"):
233
  openai_finish_reason = choices[0].get("finish_reason")
234
- logger.debug(f"Received OpenAI finish_reason: {openai_finish_reason}")
235
 
236
  # Check for usage update (some models send it at the end)
237
  usage_update = data.get("usage")
238
  if usage_update:
239
  input_tokens = usage_update.get("prompt_tokens", input_tokens)
240
  output_tokens = usage_update.get("completion_tokens", output_tokens)
241
- logger.debug(f"Received usage update: input={input_tokens}, output={output_tokens}")
242
 
243
  if content_chunk:
244
- accumulated_content += content_chunk
245
  # 4. Send content_block_delta for the text chunk
246
  yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': content_chunk}})}\n\n"
247
 
248
  except json.JSONDecodeError:
249
- logger.warning(f"Could not decode JSON from stream line: {data_str}")
250
  continue
251
  except Exception as e:
252
- logger.error(f"Error processing stream data chunk: {e}")
253
  continue # Skip this chunk
254
 
255
  # Send periodic pings
@@ -259,11 +277,11 @@ async def stream_openai_response_to_claude_events(openai_response: httpx.Respons
259
  last_ping_time = current_time
260
 
261
  except httpx.ReadTimeout:
262
- logger.error("Timeout reading from OpenAI stream.")
263
  openai_finish_reason = "error_timeout" # Custom reason
264
  yield f"event: error\ndata: {json.dumps({'type': 'error', 'error': {'type': 'overloaded_error', 'message': 'Proxy timed out waiting for OpenAI stream'}})}\n\n"
265
  except Exception as e:
266
- logger.exception(f"Unexpected error during stream processing: {e}")
267
  openai_finish_reason = "error_exception" # Custom reason
268
  yield f"event: error\ndata: {json.dumps({'type': 'error', 'error': {'type': 'internal_server_error', 'message': f'Proxy stream processing error: {e}'}})}\n\n"
269
  finally:
@@ -279,7 +297,7 @@ async def stream_openai_response_to_claude_events(openai_response: httpx.Respons
279
  }
280
  claude_stop_reason = stop_reason_map.get(openai_finish_reason, "stop_sequence") # Default
281
 
282
- logger.debug(f"Stream finished. OpenAI finish reason: {openai_finish_reason}, mapped Claude stop reason: {claude_stop_reason}")
283
 
284
  # 5. Send content_block_stop
285
  yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': 0})}\n\n"
@@ -293,14 +311,15 @@ async def stream_openai_response_to_claude_events(openai_response: httpx.Respons
293
  'stop_sequence': None # OpenAI doesn't provide this
294
  },
295
  'usage': {
296
- 'output_tokens': output_tokens if output_tokens > 0 else len(accumulated_content.split()) # Rough estimate if needed
 
297
  }
298
  }
299
  yield f"event: message_delta\ndata: {json.dumps(final_delta)}\n\n"
300
 
301
  # 7. Send message_stop
302
  yield f"event: message_stop\ndata: {json.dumps({'type': 'message_stop'})}\n\n"
303
- logger.info(f"Completed sending Claude SSE stream for request ID: {message_id}")
304
 
305
 
306
  def create_error_response(status_code: int, error_type: str, message: str) -> JSONResponse:
@@ -327,7 +346,9 @@ async def proxy_claude_to_openai(request: Request):
327
  request_id = f"msg_{uuid.uuid4().hex[:24]}" # Generate a unique ID for logging/tracking
328
  try:
329
  claude_request_data = await request.json()
 
330
  logger.info(f"[{request_id}] Received request. Stream: {claude_request_data.get('stream', False)}. Model: {claude_request_data.get('model')}")
 
331
  except json.JSONDecodeError:
332
  logger.error(f"[{request_id}] Invalid JSON received in request body.")
333
  return create_error_response(400, "invalid_request_error", "Invalid JSON data in request body.")
@@ -348,10 +369,16 @@ async def proxy_claude_to_openai(request: Request):
348
  # Add other headers if needed
349
  }
350
  if OPENAI_API_KEY:
 
 
351
  headers["Authorization"] = f"Bearer {OPENAI_API_KEY}"
 
352
 
353
  try:
354
- logger.debug(f"[{request_id}] Sending request to OpenAI endpoint: {OPENAI_API_ENDPOINT}")
 
 
 
355
  # Build the request to the target endpoint
356
  target_request = client.build_request(
357
  method="POST",
@@ -370,7 +397,7 @@ async def proxy_claude_to_openai(request: Request):
370
 
371
  # Process the response based on streaming or non-streaming
372
  if is_streaming:
373
- logger.info(f"[{request_id}] OpenAI response is streaming. Starting SSE conversion.")
374
  return StreamingResponse(
375
  stream_openai_response_to_claude_events(response, request_id, requested_model),
376
  media_type="text/event-stream",
@@ -381,15 +408,18 @@ async def proxy_claude_to_openai(request: Request):
381
  }
382
  )
383
  else:
384
- logger.info(f"[{request_id}] OpenAI response is non-streaming. Converting.")
385
- # --- FIX: Remove await here ---
386
  openai_response_data = response.json()
387
- logger.debug(f"[{request_id}] Non-streaming response from OpenAI: {json.dumps(openai_response_data)}")
 
 
 
388
  try:
389
  claude_response_data = openai_response_to_claude_response(openai_response_data, request_id)
390
  return JSONResponse(content=claude_response_data)
391
  except ValueError as e:
392
- logger.error(f"[{request_id}] Failed to convert OpenAI non-streaming response: {e}")
393
  return create_error_response(500, "api_error", f"Error processing response from upstream API: {e}")
394
  except Exception as e:
395
  logger.exception(f"[{request_id}] Unexpected error converting non-streaming response: {e}")
@@ -399,39 +429,48 @@ async def proxy_claude_to_openai(request: Request):
399
  # --- Error Handling for Target API Request ---
400
  except httpx.HTTPStatusError as e:
401
  status_code = e.response.status_code
 
402
  try:
403
  # Try reading error details as JSON first
404
  error_detail = e.response.json()
 
405
  except json.JSONDecodeError:
406
  # If not JSON, read as text
407
- error_detail = e.response.text # Use .text instead of await .aread() as body is likely read
408
- logger.error(f"[{request_id}] HTTP error from target endpoint ({status_code}): {error_detail}")
 
 
 
409
 
410
  # Map common HTTP errors to Claude error types
 
411
  if status_code == 400:
412
- err_type, msg = "invalid_request_error", f"Upstream API Bad Request: {error_detail}"
413
  elif status_code == 401:
414
- err_type, msg = "authentication_error", "Authentication failed with upstream API (check OPENAI_API_KEY)."
415
  elif status_code == 403:
416
- err_type, msg = "permission_error", f"Forbidden by upstream API: {error_detail}"
417
  elif status_code == 429:
418
- err_type, msg = "rate_limit_error", "Rate limit exceeded with upstream API."
419
- elif status_code == 500:
420
- err_type, msg = "api_error", "Upstream API Internal Server Error."
421
- elif status_code == 503:
422
- err_type, msg = "overloaded_error", "Upstream API is overloaded or unavailable."
423
  else:
424
- err_type, msg = "api_error", f"Upstream API error ({status_code}): {error_detail}"
425
 
426
- # Return error response immediately, even if original request was for streaming
427
  return create_error_response(status_code, err_type, msg)
428
 
429
  except httpx.TimeoutException:
 
430
  logger.error(f"[{request_id}] Request to target endpoint timed out ({READ_TIMEOUT}s).")
 
431
  return create_error_response(504, "api_error", "Gateway Timeout: Request to upstream API timed out.")
432
  except httpx.RequestError as e:
433
- logger.error(f"[{request_id}] Network error connecting to target endpoint: {e}")
434
- return create_error_response(502, "api_error", f"Bad Gateway: Network error connecting to upstream API: {e}")
 
 
 
435
  except Exception as e:
436
  logger.exception(f"[{request_id}] Unexpected error during proxy operation: {e}") # Use logger.exception to include traceback
437
  return create_error_response(500, "internal_server_error", f"Internal Server Error: {e}")
 
12
  from fastapi.middleware.cors import CORSMiddleware
13
  from loguru import logger
14
  from typing import AsyncGenerator, Set, Optional, Dict, Any, List
15
+ from urllib.parse import urlparse # Import for parsing the target URL
16
 
17
  # --- Logging Configuration ---
18
  logger.remove()
 
50
  timeout_config = httpx.Timeout(connect=CONNECT_TIMEOUT, read=READ_TIMEOUT, write=WRITE_TIMEOUT, pool=POOL_TIMEOUT)
51
  proxy_config = {"http://": HTTP_PROXY, "https://": HTTP_PROXY} if HTTP_PROXY else None
52
 
53
+ # --- FIX: Hide full target endpoint URL from logs ---
54
+ # Parse the URL to get only the hostname for logging
55
+ try:
56
+ parsed_url = urlparse(OPENAI_API_ENDPOINT)
57
+ target_host = parsed_url.netloc # e.g., api.openai.com
58
+ except Exception:
59
+ target_host = "[Invalid Target URL]" # Handle potential parsing errors
60
+
61
+ logger.info(f"Initializing httpx client. Target Host: {target_host}") # Log only the host
62
+ # --- End Fix ---
63
+
64
  if proxy_config:
65
+ logger.info(f"Using outbound proxy: {HTTP_PROXY}") # Proxy URL might still be sensitive depending on config
66
  if not OPENAI_API_KEY:
67
  logger.warning("OPENAI_API_KEY is not set. Requests to the target endpoint might fail if it requires authentication.")
68
  if not VALID_API_KEYS:
 
112
  logger.warning("API key missing from request header.")
113
  raise HTTPException(status_code=401, detail=f"API Key required in header '{API_KEY_NAME}'")
114
  if key not in VALID_API_KEYS:
115
+ # --- FIX: Avoid logging the invalid key directly ---
116
+ logger.warning(f"Invalid API key received (length: {len(key)}).")
117
+ # --- End Fix ---
118
  raise HTTPException(status_code=401, detail="Invalid or expired API Key")
119
+ # --- FIX: Avoid logging the valid key directly ---
120
+ logger.debug(f"Valid API key received (length: {len(key)}).")
121
+ # --- End Fix ---
122
  return key
123
 
124
  # --- Format Conversion Logic ---
 
158
  # Add other relevant parameter mappings here (e.g., presence_penalty, frequency_penalty)
159
  }
160
 
161
+ # --- FIX: Avoid logging potentially large/sensitive payload ---
162
+ # logger.debug("Converted Claude request to OpenAI payload.") # Keep this simple
163
+ # --- End Fix ---
164
  return openai_payload
165
 
166
  def openai_response_to_claude_response(openai_response: Dict[str, Any], claude_request_id: str) -> Dict[str, Any]:
 
199
  "output_tokens": completion_tokens,
200
  },
201
  }
202
+ logger.debug(f"[{claude_request_id}] Converted non-streaming OpenAI response to Claude format.")
203
  return claude_response
204
  except (KeyError, IndexError, TypeError) as e:
205
+ logger.error(f"[{claude_request_id}] Error converting non-streaming OpenAI response: {e}")
206
+ # Avoid logging the full original response here as it might be large/sensitive
207
+ # logger.error(f"Original response snippet: {str(openai_response)[:200]}...") # Optional: log a snippet
208
  raise ValueError(f"Failed to parse OpenAI response: {e}")
209
 
210
  async def stream_openai_response_to_claude_events(openai_response: httpx.Response, claude_request_id: str, requested_model: str) -> AsyncGenerator[str, None]:
211
  """Converts an OpenAI SSE stream to Claude API SSE format."""
212
  message_id = claude_request_id # Use the original request ID for consistency
213
+ accumulated_content_len = 0 # Track length instead of full content
214
  openai_finish_reason = None
215
  input_tokens = 0 # Will be updated if usage info is sent
216
  output_tokens = 0 # Will be updated if usage info is sent
217
  last_ping_time = time.time()
218
 
219
+ logger.debug(f"[{message_id}] Starting Claude SSE stream conversion.")
220
 
221
  # 1. Send message_start event
222
  yield f"event: message_start\ndata: {json.dumps({'type': 'message_start', 'message': {'id': message_id, 'type': 'message', 'role': 'assistant', 'content': [], 'model': requested_model, 'stop_reason': None, 'stop_sequence': None, 'usage': {'input_tokens': 0, 'output_tokens': 0}}})}\n\n"
 
234
  if line.startswith("data:"):
235
  data_str = line[len("data: "):].strip()
236
  if data_str == "[DONE]":
237
+ logger.debug(f"[{message_id}] Received [DONE] marker from OpenAI stream.")
238
  break # End of OpenAI stream
239
 
240
  try:
 
249
  # Check for finish reason in the chunk
250
  if choices[0].get("finish_reason"):
251
  openai_finish_reason = choices[0].get("finish_reason")
252
+ logger.debug(f"[{message_id}] Received OpenAI finish_reason: {openai_finish_reason}")
253
 
254
  # Check for usage update (some models send it at the end)
255
  usage_update = data.get("usage")
256
  if usage_update:
257
  input_tokens = usage_update.get("prompt_tokens", input_tokens)
258
  output_tokens = usage_update.get("completion_tokens", output_tokens)
259
+ logger.debug(f"[{message_id}] Received usage update: input={input_tokens}, output={output_tokens}")
260
 
261
  if content_chunk:
262
+ accumulated_content_len += len(content_chunk)
263
  # 4. Send content_block_delta for the text chunk
264
  yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': content_chunk}})}\n\n"
265
 
266
  except json.JSONDecodeError:
267
+ logger.warning(f"[{message_id}] Could not decode JSON from stream line: {data_str}")
268
  continue
269
  except Exception as e:
270
+ logger.error(f"[{message_id}] Error processing stream data chunk: {e}")
271
  continue # Skip this chunk
272
 
273
  # Send periodic pings
 
277
  last_ping_time = current_time
278
 
279
  except httpx.ReadTimeout:
280
+ logger.error(f"[{message_id}] Timeout reading from OpenAI stream.")
281
  openai_finish_reason = "error_timeout" # Custom reason
282
  yield f"event: error\ndata: {json.dumps({'type': 'error', 'error': {'type': 'overloaded_error', 'message': 'Proxy timed out waiting for OpenAI stream'}})}\n\n"
283
  except Exception as e:
284
+ logger.exception(f"[{message_id}] Unexpected error during stream processing: {e}")
285
  openai_finish_reason = "error_exception" # Custom reason
286
  yield f"event: error\ndata: {json.dumps({'type': 'error', 'error': {'type': 'internal_server_error', 'message': f'Proxy stream processing error: {e}'}})}\n\n"
287
  finally:
 
297
  }
298
  claude_stop_reason = stop_reason_map.get(openai_finish_reason, "stop_sequence") # Default
299
 
300
+ logger.debug(f"[{message_id}] Stream finished. OpenAI finish reason: {openai_finish_reason}, mapped Claude stop reason: {claude_stop_reason}")
301
 
302
  # 5. Send content_block_stop
303
  yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': 0})}\n\n"
 
311
  'stop_sequence': None # OpenAI doesn't provide this
312
  },
313
  'usage': {
314
+ # Use accumulated length as a rough proxy if output_tokens not updated
315
+ 'output_tokens': output_tokens if output_tokens > 0 else (accumulated_content_len // 4) # Very rough estimate
316
  }
317
  }
318
  yield f"event: message_delta\ndata: {json.dumps(final_delta)}\n\n"
319
 
320
  # 7. Send message_stop
321
  yield f"event: message_stop\ndata: {json.dumps({'type': 'message_stop'})}\n\n"
322
+ logger.info(f"[{message_id}] Completed sending Claude SSE stream.")
323
 
324
 
325
  def create_error_response(status_code: int, error_type: str, message: str) -> JSONResponse:
 
346
  request_id = f"msg_{uuid.uuid4().hex[:24]}" # Generate a unique ID for logging/tracking
347
  try:
348
  claude_request_data = await request.json()
349
+ # --- FIX: Avoid logging potentially large/sensitive request data ---
350
  logger.info(f"[{request_id}] Received request. Stream: {claude_request_data.get('stream', False)}. Model: {claude_request_data.get('model')}")
351
+ # --- End Fix ---
352
  except json.JSONDecodeError:
353
  logger.error(f"[{request_id}] Invalid JSON received in request body.")
354
  return create_error_response(400, "invalid_request_error", "Invalid JSON data in request body.")
 
369
  # Add other headers if needed
370
  }
371
  if OPENAI_API_KEY:
372
+ # --- FIX: Avoid logging API key ---
373
+ # logger.debug(f"[{request_id}] Adding Authorization header to upstream request.") # Log presence, not value
374
  headers["Authorization"] = f"Bearer {OPENAI_API_KEY}"
375
+ # --- End Fix ---
376
 
377
  try:
378
+ # --- FIX: Hide full target endpoint URL and payload from logs ---
379
+ logger.debug(f"[{request_id}] Sending request to upstream API...")
380
+ # --- End Fix ---
381
+
382
  # Build the request to the target endpoint
383
  target_request = client.build_request(
384
  method="POST",
 
397
 
398
  # Process the response based on streaming or non-streaming
399
  if is_streaming:
400
+ logger.info(f"[{request_id}] Upstream response is streaming. Starting SSE conversion.")
401
  return StreamingResponse(
402
  stream_openai_response_to_claude_events(response, request_id, requested_model),
403
  media_type="text/event-stream",
 
408
  }
409
  )
410
  else:
411
+ logger.info(f"[{request_id}] Upstream response is non-streaming. Converting.")
412
+ # FIX: Remove await here
413
  openai_response_data = response.json()
414
+ # --- FIX: Avoid logging full response data ---
415
+ # logger.debug(f"[{request_id}] Non-streaming response from OpenAI: {json.dumps(openai_response_data)}")
416
+ logger.debug(f"[{request_id}] Received non-streaming response from upstream.")
417
+ # --- End Fix ---
418
  try:
419
  claude_response_data = openai_response_to_claude_response(openai_response_data, request_id)
420
  return JSONResponse(content=claude_response_data)
421
  except ValueError as e:
422
+ logger.error(f"[{request_id}] Failed to convert upstream non-streaming response: {e}")
423
  return create_error_response(500, "api_error", f"Error processing response from upstream API: {e}")
424
  except Exception as e:
425
  logger.exception(f"[{request_id}] Unexpected error converting non-streaming response: {e}")
 
429
  # --- Error Handling for Target API Request ---
430
  except httpx.HTTPStatusError as e:
431
  status_code = e.response.status_code
432
+ error_detail_text = "[Could not decode error response]" # Default message
433
  try:
434
  # Try reading error details as JSON first
435
  error_detail = e.response.json()
436
+ error_detail_text = json.dumps(error_detail) # Convert back to string for logging snippet
437
  except json.JSONDecodeError:
438
  # If not JSON, read as text
439
+ error_detail_text = e.response.text # Use .text instead of await .aread() as body is likely read
440
+
441
+ # --- FIX: Log error snippet, avoid full potentially sensitive detail ---
442
+ logger.error(f"[{request_id}] HTTP error from target endpoint ({status_code}). Response snippet: {error_detail_text[:200]}...")
443
+ # --- End Fix ---
444
 
445
  # Map common HTTP errors to Claude error types
446
+ # Use generic messages in production to avoid leaking upstream details
447
  if status_code == 400:
448
+ err_type, msg = "invalid_request_error", f"Upstream API reported Bad Request ({status_code})."
449
  elif status_code == 401:
450
+ err_type, msg = "authentication_error", f"Authentication failed with upstream API ({status_code})."
451
  elif status_code == 403:
452
+ err_type, msg = "permission_error", f"Forbidden by upstream API ({status_code})."
453
  elif status_code == 429:
454
+ err_type, msg = "rate_limit_error", f"Rate limit exceeded with upstream API ({status_code})."
455
+ elif status_code >= 500:
456
+ err_type, msg = "api_error", f"Upstream API unavailable or encountered an error ({status_code})."
 
 
457
  else:
458
+ err_type, msg = "api_error", f"Received unexpected error from upstream API ({status_code})."
459
 
460
+ # Return error response immediately
461
  return create_error_response(status_code, err_type, msg)
462
 
463
  except httpx.TimeoutException:
464
+ # --- FIX: Hide target endpoint URL from timeout log ---
465
  logger.error(f"[{request_id}] Request to target endpoint timed out ({READ_TIMEOUT}s).")
466
+ # --- End Fix ---
467
  return create_error_response(504, "api_error", "Gateway Timeout: Request to upstream API timed out.")
468
  except httpx.RequestError as e:
469
+ # --- FIX: Hide target endpoint URL from request error log ---
470
+ # The exception 'e' might contain the URL, so log a generic message
471
+ logger.error(f"[{request_id}] Network error connecting to target endpoint: {type(e).__name__}")
472
+ # --- End Fix ---
473
+ return create_error_response(502, "api_error", f"Bad Gateway: Network error connecting to upstream API.")
474
  except Exception as e:
475
  logger.exception(f"[{request_id}] Unexpected error during proxy operation: {e}") # Use logger.exception to include traceback
476
  return create_error_response(500, "internal_server_error", f"Internal Server Error: {e}")