Update proxy_server.py
proxy_server.py  CHANGED  (+69 -73)
@@ -17,10 +17,11 @@ from typing import AsyncGenerator, Set, Optional, Dict, Any, List
 # --- Logging Configuration ---
 logger.remove()
 log_level = os.getenv("LOG_LEVEL", "INFO").upper()
-logger.add(sys.stderr, level=log_level)
+# Ensure DEBUG level enables detailed logging
+logger.add(sys.stderr, level=log_level, format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>")
 
 # --- Environment Variable Configuration ---
-# Target OpenAI API endpoint (can be OpenAI's official API or another compatible proxy)
+# Target OpenAI API endpoint (can be OpenAI's official API or another compatible proxy like chat-api)
 OPENAI_API_ENDPOINT = os.getenv("OPENAI_API_ENDPOINT", "https://api.openai.com/v1/chat/completions")
 # API Key for authenticating with the target OpenAI endpoint (if required by the endpoint)
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
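The reconfigured sink applies the new format to every message, while LOG_LEVEL still decides which messages are emitted at all. A standalone sketch of that interaction (same loguru calls as the module; the format string is shortened here for readability):

    import sys
    from loguru import logger

    logger.remove()  # Drop the default sink, as the module does
    logger.add(sys.stderr, level="INFO", format="{time:HH:mm:ss} | {level: <8} | {message}")

    logger.debug("suppressed: DEBUG is below the INFO threshold")
    logger.info("emitted: INFO meets the threshold")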
@@ -50,9 +51,8 @@ async def lifespan(app: FastAPI):
     timeout_config = httpx.Timeout(connect=CONNECT_TIMEOUT, read=READ_TIMEOUT, write=WRITE_TIMEOUT, pool=POOL_TIMEOUT)
     proxy_config = {"http://": HTTP_PROXY, "https://": HTTP_PROXY} if HTTP_PROXY else None
 
-    #
+    # Completely hide target endpoint info from logs
     logger.info("Initializing httpx client for upstream requests.")  # Generic message
-    # --- End Fix ---
 
     if proxy_config:
         logger.info(f"Using outbound proxy: {HTTP_PROXY}")  # Proxy URL might still be sensitive
@@ -97,27 +97,24 @@ async def get_api_key(key: Optional[str] = Security(api_key_header)) -> str:
     """Validate the API key provided in the request header."""
     if not VALID_API_KEYS:
         logger.warning("No PROXY_API_KEYS configured. Allowing request.")
-
-        # raise HTTPException(status_code=500, detail="Server configuration error: PROXY_API_KEYS not set.")
-        return "unsecured_dummy_key"  # Return a dummy value
+        return "unsecured_dummy_key"
 
     if key is None:
         logger.warning("API key missing from request header.")
         raise HTTPException(status_code=401, detail=f"API Key required in header '{API_KEY_NAME}'")
     if key not in VALID_API_KEYS:
-        # --- FIX: Avoid logging the invalid key directly ---
         logger.warning(f"Invalid API key received (length: {len(key)}).")
-        # --- End Fix ---
         raise HTTPException(status_code=401, detail="Invalid or expired API Key")
-    # --- FIX: Avoid logging the valid key directly ---
     logger.debug(f"Valid API key received (length: {len(key)}).")
-    # --- End Fix ---
     return key
 
 # --- Format Conversion Logic ---
 
-def claude_request_to_openai_payload(claude_request: Dict[str, Any]) -> Dict[str, Any]:
-    """Converts a Claude API request body to OpenAI API format."""
+def claude_request_to_openai_payload(claude_request: Dict[str, Any], client_api_key: str) -> Dict[str, Any]:
+    """
+    Converts a Claude API request body to OpenAI API format.
+    Optionally adds a 'user' field based on the client's API key.
+    """
     messages = []
     system_prompt = claude_request.get("system")
     if system_prompt:
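Only fragments of the converter appear in this diff, so the following pairing is an illustration of the visible logic (system prompt folded into the messages list, the new user field, optional parameters copied when present), not a dump of actual behavior:

    claude_request = {
        "model": "gpt-3.5-turbo",
        "system": "You are terse.",
        "messages": [{"role": "user", "content": "Hi"}],
        "max_tokens": 256,
    }
    # Expected OpenAI-format payload, per the visible conversion logic:
    # {
    #     "model": "gpt-3.5-turbo",
    #     "messages": [
    #         {"role": "system", "content": "You are terse."},
    #         {"role": "user", "content": "Hi"},
    #     ],
    #     "stream": False,
    #     "user": "proxy_user_<client key>",
    #     "max_tokens": 256,
    # }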
@@ -153,6 +150,10 @@ def claude_request_to_openai_payload(claude_request: Dict[str, Any]) -> Dict[str, Any]:
         "model": claude_request.get("model", "gpt-3.5-turbo"),  # Use model from request or default
         "messages": messages,
         "stream": claude_request.get("stream", False),
+        # --- Potential Fix: Add user field for chat-api ---
+        # Use the client's API key as the user identifier, or derive one if needed
+        "user": f"proxy_user_{client_api_key}",  # Example: prefixing the key
+        # --- End Potential Fix ---
         # Optional parameters - only include if present in Claude request
         **({ "max_tokens": v } if (v := claude_request.get("max_tokens")) is not None else {}),
         **({ "temperature": v } if (v := claude_request.get("temperature")) is not None else {}),
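Note that the new user field forwards the client's raw proxy key to the upstream service, which shares a secret across a trust boundary. If that matters for a deployment, a stable hash keeps the per-client identity without the leak; a sketch of that alternative (not part of this commit):

    import hashlib

    def derive_user_id(client_api_key: str) -> str:
        # Non-reversible but stable per-client identifier for the 'user' field
        return "proxy_user_" + hashlib.sha256(client_api_key.encode()).hexdigest()[:16]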
@@ -164,6 +165,7 @@ def claude_request_to_openai_payload(claude_request: Dict[str, Any]) -> Dict[str, Any]:
     # logger.debug("Converted Claude request to OpenAI payload.")  # Keep this simple
     return openai_payload
 
+# ... (openai_response_to_claude_response and stream_openai_response_to_claude_events remain the same) ...
 def openai_response_to_claude_response(openai_response: Dict[str, Any], claude_request_id: str) -> Dict[str, Any]:
     """Converts a non-streaming OpenAI response to Claude API format."""
     try:
@@ -204,8 +206,6 @@ def openai_response_to_claude_response(openai_response: Dict[str, Any], claude_request_id: str) -> Dict[str, Any]:
         return claude_response
     except (KeyError, IndexError, TypeError) as e:
         logger.error(f"[{claude_request_id}] Error converting non-streaming OpenAI response: {e}")
-        # Avoid logging the full original response here as it might be large/sensitive
-        # logger.error(f"Original response snippet: {str(openai_response)[:200]}...")  # Optional: log a snippet
         raise ValueError(f"Failed to parse OpenAI response: {e}")
 
 async def stream_openai_response_to_claude_events(openai_response: httpx.Response, claude_request_id: str, requested_model: str) -> AsyncGenerator[str, None]:
@@ -304,7 +304,6 @@ async def stream_openai_response_to_claude_events(openai_response: httpx.Response, claude_request_id: str, requested_model: str) -> AsyncGenerator[str, None]:
         yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': 0})}\n\n"
 
         # 6. Send message_delta with final usage and stop reason
-        # Note: Usage might be estimated if not provided by OpenAI at the end
         final_delta = {
             'type': 'message_delta',
             'delta': {
@@ -312,8 +311,7 @@ async def stream_openai_response_to_claude_events(openai_response: httpx.Response, claude_request_id: str, requested_model: str) -> AsyncGenerator[str, None]:
                 'stop_sequence': None  # OpenAI doesn't provide this
             },
             'usage': {
-
-                'output_tokens': output_tokens if output_tokens > 0 else (accumulated_content_len // 4)  # Very rough estimate
+                'output_tokens': output_tokens if output_tokens > 0 else (accumulated_content_len // 4)  # Rough estimate
             }
         }
         yield f"event: message_delta\ndata: {json.dumps(final_delta)}\n\n"
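The fallback divides accumulated characters by four, the common rough heuristic of about four characters per English token:

    accumulated_content_len = 1000  # characters streamed so far
    output_tokens = 0               # upstream provided no usage block
    estimate = accumulated_content_len // 4  # -> 250 tokens reported to the client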
@@ -337,8 +335,8 @@ def create_error_response(status_code: int, error_type: str, message: str) -> JSONResponse:
     )
 
 # --- Main Proxy Endpoint ---
-@app.post("/v1/messages", dependencies=[Depends(get_api_key)])
-async def proxy_claude_to_openai(request: Request):
+@app.post("/v1/messages")  # Removed dependency here, will get API key manually
+async def proxy_claude_to_openai(request: Request, client_api_key: str = Depends(get_api_key)):  # Get API key via Depends
     """
     Receives a Claude-formatted request, proxies it to OpenAI,
     and returns a Claude-formatted response.
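Moving the dependency into the signature keeps authentication behavior identical for callers while exposing the validated key to the handler body. A minimal smoke test against a local instance (this assumes the default port 7860 and that API_KEY_NAME resolves to "x-api-key", neither of which is shown in this diff):

    import httpx

    resp = httpx.post(
        "http://localhost:7860/v1/messages",
        headers={"x-api-key": "one-of-the-configured-PROXY_API_KEYS"},
        json={
            "model": "gpt-3.5-turbo",
            "max_tokens": 16,
            "messages": [{"role": "user", "content": "ping"}],
        },
    )
    print(resp.status_code, resp.json())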
@@ -347,15 +345,15 @@ async def proxy_claude_to_openai(request: Request):
     request_id = f"msg_{uuid.uuid4().hex[:24]}"  # Generate a unique ID for logging/tracking
     try:
         claude_request_data = await request.json()
-        # Avoid logging potentially large/sensitive request data
         logger.info(f"[{request_id}] Received request. Stream: {claude_request_data.get('stream', False)}. Model: {claude_request_data.get('model')}")
     except json.JSONDecodeError:
         logger.error(f"[{request_id}] Invalid JSON received in request body.")
         return create_error_response(400, "invalid_request_error", "Invalid JSON data in request body.")
 
-    # Convert request format
+    # Convert request format, passing the client's API key
     try:
-        openai_payload = claude_request_to_openai_payload(claude_request_data)
+        # Pass the validated client_api_key to the conversion function
+        openai_payload = claude_request_to_openai_payload(claude_request_data, client_api_key)
     except Exception as e:
         logger.error(f"[{request_id}] Failed to convert Claude request to OpenAI format: {e}")
         return create_error_response(400, "invalid_request_error", f"Failed to process request data: {e}")
@@ -364,33 +362,54 @@ async def proxy_claude_to_openai(request: Request):
     requested_model = openai_payload.get("model", "unknown_model")  # For response generation
 
     # Prepare headers for the target OpenAI endpoint
-    headers = {
-        "Content-Type": "application/json",
-        # Add other headers if needed
-    }
+    # --- Start Detailed Logging (DEBUG Level) ---
+    target_headers = { "Content-Type": "application/json" }
     if OPENAI_API_KEY:
-        headers["Authorization"] = f"Bearer {OPENAI_API_KEY}"
-        #
-
+        target_headers["Authorization"] = f"Bearer {OPENAI_API_KEY}"
+        # Avoid logging the actual key value even in debug
+        logger.debug(f"[{request_id}] Added Authorization header to upstream request.")
+    else:
+        logger.debug(f"[{request_id}] No OPENAI_API_KEY configured for upstream request.")
+
+    # Log headers and payload if log level is DEBUG
+    if logger.level("DEBUG").no >= logger.level(log_level).no:  # Check if DEBUG is enabled
+        # Redact Authorization header before logging
+        logged_headers = target_headers.copy()
+        if "Authorization" in logged_headers:
+            logged_headers["Authorization"] = "Bearer [REDACTED]"
+        logger.debug(f"[{request_id}] Sending request to upstream API.")
+        logger.debug(f"[{request_id}] Upstream Headers: {json.dumps(logged_headers)}")
+        # Be cautious logging the full payload due to size/sensitivity
+        try:
+            # Log payload safely using json.dumps for proper formatting
+            payload_str = json.dumps(openai_payload, indent=2)
+            # Limit payload log size if needed
+            max_log_len = 1024  # Log max 1KB of payload
+            if len(payload_str) > max_log_len:
+                logger.debug(f"[{request_id}] Upstream Payload (truncated): {payload_str[:max_log_len]}...")
+            else:
+                logger.debug(f"[{request_id}] Upstream Payload: {payload_str}")
+        except Exception as log_e:
+            logger.warning(f"[{request_id}] Could not serialize or log upstream payload: {log_e}")
+    else:
+        logger.debug(f"[{request_id}] Sending request to upstream API...")  # Generic message for INFO level
+    # --- End Detailed Logging ---
 
-    try:
-        # Hide full target endpoint URL and payload from logs
-        logger.debug(f"[{request_id}] Sending request to upstream API...")
-
+    try:
         # Build the request to the target endpoint
         target_request = client.build_request(
             method="POST",
             url=OPENAI_API_ENDPOINT,
-            headers=headers,
+            headers=target_headers,  # Use the prepared headers
             json=openai_payload,
-            # Timeout is handled globally by the client
         )
 
         # Send the request and handle the response
         response = await client.send(target_request, stream=is_streaming)
 
         # Check for HTTP errors from the target endpoint *before* processing the body
-        response.raise_for_status()
+        response.raise_for_status()  # Raises exception for 4xx/5xx errors
 
         # Process the response based on streaming or non-streaming
         if is_streaming:
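The DEBUG guard compares loguru's numeric level values: logger.level(name).no returns the severity number, so the detailed branch runs only when the configured level is DEBUG (10) or lower, such as TRACE (5). A standalone check:

    from loguru import logger

    print(logger.level("DEBUG").no)  # 10
    print(logger.level("INFO").no)   # 20
    # 10 >= 20 is False, so at LOG_LEVEL=INFO the generic message is logged instead
    print(logger.level("DEBUG").no >= logger.level("INFO").no)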
@@ -406,9 +425,7 @@ async def proxy_claude_to_openai(request: Request):
             )
         else:
             logger.info(f"[{request_id}] Upstream response is non-streaming. Converting.")
-
-            openai_response_data = response.json()
-            # Avoid logging full response data
+            openai_response_data = response.json()  # No await needed
             logger.debug(f"[{request_id}] Received non-streaming response from upstream.")
             try:
                 claude_response_data = openai_response_to_claude_response(openai_response_data, request_id)
@@ -424,66 +441,46 @@ async def proxy_claude_to_openai(request: Request):
     # --- Error Handling for Target API Request ---
     except httpx.HTTPStatusError as e:
         status_code = e.response.status_code
-        error_detail_text = "[Could not decode error response]"
+        error_detail_text = "[Could not decode error response]"
         try:
-            # Try reading error details as JSON first
             error_detail = e.response.json()
-            error_detail_text = json.dumps(error_detail)
+            error_detail_text = json.dumps(error_detail)
         except json.JSONDecodeError:
-            # If not JSON, read as text
             try:
-                # Read text synchronously since the body is likely already loaded for non-streaming error responses
                 error_detail_text = e.response.text
             except Exception:
                 logger.warning(f"[{request_id}] Could not read error response body as text.")
 
-
-        # Log error snippet, avoid full potentially sensitive detail
         logger.error(f"[{request_id}] HTTP error from target endpoint ({status_code}). Response snippet: {error_detail_text[:200]}...")
 
-        # Map the upstream status code to a Claude-style error type
-        if status_code == 400:
-            err_type, msg = "invalid_request_error", f"Upstream API Bad Request ({status_code})."
-        elif status_code == 401:
-            err_type, msg = "authentication_error", f"Authentication failed with upstream API ({status_code})."
-        elif status_code == 403:
-            err_type, msg = "permission_error", f"Forbidden by upstream API ({status_code})."
-        elif status_code == 429:
-            err_type, msg = "rate_limit_error", f"Rate limit exceeded with upstream API ({status_code})."
-        elif status_code >= 500:
-            err_type, msg = "api_error", f"Upstream API unavailable or encountered an error ({status_code})."
-        else:
-            err_type, msg = "api_error", f"Received unexpected error from upstream API ({status_code})."
+        if status_code == 400: err_type, msg = "invalid_request_error", f"Upstream API Bad Request ({status_code})."
+        elif status_code == 401: err_type, msg = "authentication_error", f"Authentication failed with upstream API ({status_code})."
+        elif status_code == 403: err_type, msg = "permission_error", f"Forbidden by upstream API ({status_code})."
+        elif status_code == 429: err_type, msg = "rate_limit_error", f"Rate limit exceeded with upstream API ({status_code})."
+        elif status_code >= 500: err_type, msg = "api_error", f"Upstream API unavailable or encountered an error ({status_code})."
+        else: err_type, msg = "api_error", f"Received unexpected error from upstream API ({status_code})."
 
-        # Return error response immediately
         return create_error_response(status_code, err_type, msg)
 
     except httpx.TimeoutException:
-        # Hide target endpoint URL from timeout log
         logger.error(f"[{request_id}] Request to target endpoint timed out ({READ_TIMEOUT}s).")
         return create_error_response(504, "api_error", "Gateway Timeout: Request to upstream API timed out.")
     except httpx.RequestError as e:
-        # Hide target endpoint URL from request error log
-        # The exception 'e' might contain the URL, so log a generic message
         logger.error(f"[{request_id}] Network error connecting to target endpoint: {type(e).__name__}")
         return create_error_response(502, "api_error", f"Bad Gateway: Network error connecting to upstream API.")
     except Exception as e:
-        logger.exception(f"[{request_id}] Unexpected error during proxy operation: {e}")
+        logger.exception(f"[{request_id}] Unexpected error during proxy operation: {e}")
         return create_error_response(500, "internal_server_error", f"Internal Server Error: {e}")
 
 # --- Health Check Endpoint ---
 @app.get("/health", summary="Health Check", tags=["Management"])
 async def health_check():
     """Returns the operational status of the proxy server."""
-    # Could add checks here (e.g., try connecting to target endpoint)
     return {"status": "healthy"}
 
 # --- Run with Uvicorn (for local development) ---
-# In production (Docker), Uvicorn will be started via CMD
 if __name__ == "__main__":
     import uvicorn
-    # Load .env file for local development if python-dotenv is installed
     try:
         from dotenv import load_dotenv
         load_dotenv()
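As a worked example of the mapping above: an upstream 429 reaches the caller via create_error_response(429, "rate_limit_error", ...). That helper's body sits outside this diff; assuming it emits Anthropic's documented error envelope, the client would receive JSON shaped like:

    {
        "type": "error",
        "error": {
            "type": "rate_limit_error",
            "message": "Rate limit exceeded with upstream API (429)."
        }
    }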
@@ -497,22 +494,21 @@ if __name__ == "__main__":
         log_level = os.getenv("LOG_LEVEL", "INFO").upper()
         # Reconfigure logger if level changed
         logger.remove()
-        logger.add(sys.stderr, level=log_level)
+        logger.add(sys.stderr, level=log_level, format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>")
         logger.info(f"Log level set to: {log_level}")
         logger.info(f"Valid Proxy API Keys configured: {len(VALID_API_KEYS)}")
     except ImportError:
         logger.info("python-dotenv not installed, skipping .env file loading.")
 
-    # Use port 7860 for consistency with Dockerfile default
     port = int(os.getenv("PORT", 7860))
     host = os.getenv("HOST", "0.0.0.0")
     log_config_level = log_level.lower() if log_level in ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "TRACE"] else "info"
 
     logger.info(f"Starting Uvicorn server on {host}:{port}")
     uvicorn.run(
-        "proxy_server:app",
+        "proxy_server:app",
         host=host,
         port=port,
-        reload=True,
-        log_level=log_config_level
+        reload=True,
+        log_level=log_config_level
     )
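One detail worth noting here: uvicorn.run() supports reload=True only when the application is passed as an import string ("proxy_server:app") rather than as an app object, which is why the string form survives the whitespace-only changes above.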