Dmitry Beresnev committed on
Commit
90f1c82
·
1 Parent(s): 950f41b

add requests logging

Browse files
Files changed (1) hide show
  1. app.py +84 -2
app.py CHANGED
@@ -3,14 +3,16 @@ import signal
3
  import os
4
  import time
5
  import asyncio
6
- from typing import Optional, Dict, List
7
  from dataclasses import dataclass, field
8
  from collections import OrderedDict
9
  from datetime import datetime, timedelta
10
  import hashlib
 
 
11
 
12
  import aiohttp
13
- from fastapi import FastAPI, HTTPException, BackgroundTasks
14
  from fastapi.openapi.utils import get_openapi
15
  from pydantic import BaseModel, Field
16
  from duckduckgo_search import DDGS
@@ -108,6 +110,8 @@ BASE_PORT = int(os.getenv("BASE_PORT", "8080"))
108
  PRELOAD_MODELS = os.getenv("PRELOAD_MODELS", "").split(",") if os.getenv("PRELOAD_MODELS") else []
109
  WEB_SEARCH_CACHE_TTL = int(os.getenv("WEB_SEARCH_CACHE_TTL", "3600")) # 1 hour
110
  REQUEST_TIMEOUT = int(os.getenv("REQUEST_TIMEOUT", "300")) # 5 minutes
 
 
111
 
112
 
113
  @dataclass
@@ -361,6 +365,84 @@ metrics = PerformanceMetrics()
361
  # HTTP session for connection pooling (will be initialized in startup)
362
  http_session: Optional[aiohttp.ClientSession] = None
363
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
364
 
365
  class ModelSwitchRequest(BaseModel):
366
  """Request to switch the active LLM model."""
 
3
  import os
4
  import time
5
  import asyncio
6
+ from typing import Optional, Dict, List, Any
7
  from dataclasses import dataclass, field
8
  from collections import OrderedDict
9
  from datetime import datetime, timedelta
10
  import hashlib
11
+ import json
12
+ import uuid
13
 
14
  import aiohttp
15
+ from fastapi import FastAPI, HTTPException, BackgroundTasks, Request
16
  from fastapi.openapi.utils import get_openapi
17
  from pydantic import BaseModel, Field
18
  from duckduckgo_search import DDGS
 
110
  PRELOAD_MODELS = os.getenv("PRELOAD_MODELS", "").split(",") if os.getenv("PRELOAD_MODELS") else []
111
  WEB_SEARCH_CACHE_TTL = int(os.getenv("WEB_SEARCH_CACHE_TTL", "3600")) # 1 hour
112
  REQUEST_TIMEOUT = int(os.getenv("REQUEST_TIMEOUT", "300")) # 5 minutes
113
+ LOG_REQUEST_BODY = os.getenv("LOG_REQUEST_BODY", "1") == "1"
114
+ LOG_REQUEST_BODY_MAX_CHARS = int(os.getenv("LOG_REQUEST_BODY_MAX_CHARS", "2000"))
115
 
116
 
117
  @dataclass
 
365
  # HTTP session for connection pooling (will be initialized in startup)
366
  http_session: Optional[aiohttp.ClientSession] = None
367
 
368
+ SENSITIVE_HEADERS = {"authorization", "proxy-authorization", "x-api-key", "api-key"}
369
+ SENSITIVE_FIELDS = {"authorization", "api_key", "api-key", "password", "token"}
370
+
371
+
372
+ def _redact_headers(headers: Dict[str, str]) -> Dict[str, str]:
373
+ """Redact sensitive headers before logging."""
374
+ redacted = {}
375
+ for key, value in headers.items():
376
+ if key.lower() in SENSITIVE_HEADERS:
377
+ redacted[key] = "[redacted]"
378
+ else:
379
+ redacted[key] = value
380
+ return redacted
381
+
382
+
383
+ def _redact_json(obj: Any) -> Any:
384
+ """Redact sensitive fields in JSON-like structures."""
385
+ if isinstance(obj, dict):
386
+ redacted = {}
387
+ for key, value in obj.items():
388
+ if str(key).lower() in SENSITIVE_FIELDS:
389
+ redacted[key] = "[redacted]"
390
+ else:
391
+ redacted[key] = _redact_json(value)
392
+ return redacted
393
+ if isinstance(obj, list):
394
+ return [_redact_json(item) for item in obj]
395
+ return obj
396
+
397
+
398
+ def _format_body_for_log(content_type: str, body: bytes) -> str:
399
+ """Format request body for logging with size limits and redaction."""
400
+ if not body:
401
+ return ""
402
+
403
+ text_preview: str
404
+ if "application/json" in (content_type or ""):
405
+ try:
406
+ parsed = json.loads(body.decode("utf-8"))
407
+ redacted = _redact_json(parsed)
408
+ text_preview = json.dumps(redacted, ensure_ascii=True)
409
+ except Exception:
410
+ text_preview = body.decode("utf-8", errors="replace")
411
+ else:
412
+ text_preview = body.decode("utf-8", errors="replace")
413
+
414
+ if len(text_preview) > LOG_REQUEST_BODY_MAX_CHARS:
415
+ text_preview = text_preview[:LOG_REQUEST_BODY_MAX_CHARS] + "...[truncated]"
416
+ return text_preview
417
+
418
+
419
+ @app.middleware("http")
420
+ async def log_received_request(request: Request, call_next):
421
+ """Log incoming requests and responses with basic metadata."""
422
+ request_id = uuid.uuid4().hex[:12]
423
+ start = time.perf_counter()
424
+
425
+ body_text = ""
426
+ if LOG_REQUEST_BODY:
427
+ body_bytes = await request.body()
428
+ body_text = _format_body_for_log(request.headers.get("content-type", ""), body_bytes)
429
+
430
+ headers = _redact_headers(dict(request.headers))
431
+ client_host = request.client.host if request.client else "-"
432
+ query = f"?{request.url.query}" if request.url.query else ""
433
+
434
+ logger.info(
435
+ f"➡️ {request_id} {request.method} {request.url.path}{query} "
436
+ f"from {client_host} ua={headers.get('user-agent', '-')}"
437
+ )
438
+ if body_text:
439
+ logger.info(f" body={body_text}")
440
+
441
+ response = await call_next(request)
442
+ elapsed_ms = (time.perf_counter() - start) * 1000
443
+ logger.info(f"⬅️ {request_id} {response.status_code} {elapsed_ms:.1f}ms")
444
+ return response
445
+
446
 
447
  class ModelSwitchRequest(BaseModel):
448
  """Request to switch the active LLM model."""