import asyncio import aiohttp import logging import os import random from typing import List, Dict, Optional, Any, Set from fastapi import FastAPI, HTTPException, BackgroundTasks from fastapi.responses import StreamingResponse, JSONResponse from pydantic import BaseModel, Field import uvicorn from datetime import datetime import json import time from collections import defaultdict, deque import threading from contextlib import asynccontextmanager from dotenv import load_dotenv # Load environment variables from .env file load_dotenv() # Setup logging logging.basicConfig( level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) logger = logging.getLogger(__name__) # Error handling class ErrorHandler: """Handle and translate OpenRouter errors to user-friendly chatcsvandpdf messages""" @staticmethod def get_user_friendly_error(status_code: int, error_message: str, model: str = None) -> dict: """Convert OpenRouter errors to chatcsvandpdf branded error messages""" friendly_messages = { 400: { "message": "Invalid request format. Please check your message and try again.", "suggestion": "Verify that your request parameters are correctly formatted." }, 401: { "message": "Authentication issue with chatcsvandpdf service.", "suggestion": "This is a temporary service issue. Please try again in a moment." }, 402: { "message": "chatcsvandpdf service is temporarily at capacity.", "suggestion": "Please try again in a few minutes or use a different model." }, 403: { "message": "Your message was flagged by our content moderation system.", "suggestion": "Please rephrase your message and avoid potentially harmful content." }, 408: { "message": "Request timed out. The model took too long to respond.", "suggestion": "Try shortening your message or using a faster model." }, 429: { "message": f"Rate limit reached for {'model ' + model if model else 'this service'}. Please try again later.", "suggestion": "chatcsvandpdf is currently experiencing high demand. Please wait a moment and retry, or try a different model." }, 502: { "message": f"The {'model ' + model if model else 'selected model'} is currently unavailable.", "suggestion": "This model is temporarily down. Please try a different model or wait a few minutes." }, 503: { "message": "No available providers meet your requirements.", "suggestion": "Try adjusting your provider preferences or use a different model." } } # Default error for unknown status codes if status_code not in friendly_messages: return { "message": "chatcsvandpdf service encountered an unexpected issue.", "suggestion": "Please try again. If the problem persists, contact support.", "technical_info": f"Error {status_code}: {error_message}" } error_info = friendly_messages[status_code].copy() # Add specific handling for rate limiting if status_code == 429: if "free" in str(model).lower(): error_info["message"] = f"Free model {model} is currently rate-limited." error_info["suggestion"] = "Free models have usage limits. Try again in a few minutes or upgrade to a premium model." elif "quota" in error_message.lower() or "credit" in error_message.lower(): error_info["message"] = "chatcsvandpdf service quota reached." error_info["suggestion"] = "Our service is at capacity. Please try again later." # Add model-specific messaging for 502 errors if status_code == 502 and model: error_info["message"] = f"Model {model} is temporarily unavailable." error_info["suggestion"] = "This model is experiencing issues. Try another model or wait a few minutes." return error_info # Pydantic models class Message(BaseModel): role: str = Field(..., description="Role: 'system', 'user', or 'assistant'") content: str = Field(..., description="Message content") class ProviderPreferences(BaseModel): sort: Optional[str] = Field(None, description="Sort by 'price', 'throughput', or 'latency'") order: Optional[List[str]] = Field(None, description="Specific provider order") allow_fallbacks: Optional[bool] = Field(True, description="Allow fallback providers") require_parameters: Optional[bool] = Field(False, description="Require all parameters support") data_collection: Optional[str] = Field("allow", description="'allow' or 'deny' data collection") only: Optional[List[str]] = Field(None, description="Only use these providers") ignore: Optional[List[str]] = Field(None, description="Ignore these providers") quantizations: Optional[List[str]] = Field(None, description="Required quantization levels") max_price: Optional[Dict[str, float]] = Field(None, description="Maximum pricing constraints") class ChatRequest(BaseModel): model: str = Field(..., description="Model ID (e.g., 'openai/gpt-3.5-turbo')") messages: List[Message] = Field(..., description="List of messages") system_prompt: Optional[str] = Field(None, description="System prompt (will be added as system message)") max_tokens: Optional[int] = Field(1000, description="Maximum tokens to generate") temperature: Optional[float] = Field(0.7, description="Temperature (0-2)") top_p: Optional[float] = Field(1.0, description="Top-p sampling") frequency_penalty: Optional[float] = Field(0.0, description="Frequency penalty") presence_penalty: Optional[float] = Field(0.0, description="Presence penalty") stream: Optional[bool] = Field(False, description="Enable streaming response") provider: Optional[ProviderPreferences] = Field(None, description="Provider routing preferences") class Config: json_schema_extra = { "example": { "model": "openai/gpt-3.5-turbo", "messages": [ {"role": "user", "content": "Hello, how are you?"} ], "system_prompt": "You are a helpful assistant.", "max_tokens": 1000, "temperature": 0.7, "stream": False } } class ChatResponse(BaseModel): success: bool model: str choices: List[Dict[str, Any]] usage: Optional[Dict[str, Any]] response_time: float provider_used: Optional[str] = None timestamp: str class ModelValidator: def __init__(self): self.valid_models: Set[str] = set() self.last_updated: float = 0 self.update_interval: float = 3600 # Update every hour self.models_endpoint = "https://xce009-inference-test.hf.space/api/free-models/names" self.lock = threading.Lock() async def fetch_valid_models(self) -> Set[str]: """Fetch valid model names from the inference service""" try: async with aiohttp.ClientSession() as session: async with session.get( self.models_endpoint, timeout=aiohttp.ClientTimeout(total=10) ) as response: if response.status == 200: data = await response.json() # ✅ Extract from "models" key models_list = data.get("models", []) models = set() for item in models_list: if isinstance(item, dict) and "id" in item: models.add(item["id"]) elif isinstance(item, str): models.add(item) logger.info(f"Fetched {len(models)} valid models from inference service") return models else: logger.error(f"Failed to fetch models: HTTP {response.status}") return set() except Exception as e: logger.error(f"Error fetching valid models: {str(e)}") return set() async def update_models_if_needed(self): """Update the valid models list if needed""" current_time = time.time() with self.lock: if current_time - self.last_updated > self.update_interval or not self.valid_models: logger.info("Updating valid models list...") new_models = await self.fetch_valid_models() if new_models: # Only update if we got valid data self.valid_models = new_models self.last_updated = current_time logger.info(f"Updated valid models list with {len(self.valid_models)} models") def is_valid_model(self, model_name: str) -> bool: """Check if a model name is valid""" with self.lock: return model_name in self.valid_models def get_valid_models(self) -> List[str]: """Get list of valid models""" with self.lock: return sorted(list(self.valid_models)) class APIKeyManager: """Manages multiple API keys with rotation and rate limiting""" def __init__(self, api_keys: List[str]): if not api_keys: raise ValueError("At least one API key is required") self.api_keys = api_keys self.key_stats = {key: {"requests": 0, "errors": 0, "last_used": 0} for key in api_keys} self.current_index = 0 self.lock = threading.Lock() # Rate limiting per key (rough estimate) self.rate_limits = {key: deque() for key in api_keys} self.max_requests_per_minute = 60 # Conservative estimate logger.info(f"Initialized API key manager with {len(api_keys)} keys") def get_next_key(self) -> str: """Get the next available API key using round-robin with rate limiting""" with self.lock: current_time = time.time() # Try to find a key that's not rate limited for _ in range(len(self.api_keys)): key = self.api_keys[self.current_index] # Clean old requests from rate limit tracker while (self.rate_limits[key] and current_time - self.rate_limits[key][0] > 60): self.rate_limits[key].popleft() # Check if this key can handle more requests if len(self.rate_limits[key]) < self.max_requests_per_minute: self.rate_limits[key].append(current_time) self.key_stats[key]["requests"] += 1 self.key_stats[key]["last_used"] = current_time # Move to next key for next request self.current_index = (self.current_index + 1) % len(self.api_keys) return key # Try next key self.current_index = (self.current_index + 1) % len(self.api_keys) # If all keys are rate limited, use the one with the oldest request oldest_key = min(self.api_keys, key=lambda k: self.key_stats[k]["last_used"]) self.key_stats[oldest_key]["requests"] += 1 self.key_stats[oldest_key]["last_used"] = current_time return oldest_key def record_error(self, api_key: str): """Record an error for an API key""" with self.lock: if api_key in self.key_stats: self.key_stats[api_key]["errors"] += 1 def get_stats(self) -> Dict: """Get statistics for all API keys""" with self.lock: return dict(self.key_stats) class InferenceClient: """High-performance inference client with connection pooling and enhanced error handling""" def __init__(self, key_manager: APIKeyManager, model_validator: ModelValidator): self.key_manager = key_manager self.model_validator = model_validator self.base_url = "https://openrouter.ai/api/v1" self.session_pool = {} self.max_connections = 100 self.max_connections_per_host = 20 self.error_handler = ErrorHandler() async def get_session(self, api_key: str) -> aiohttp.ClientSession: """Get or create a session for the API key""" if api_key not in self.session_pool: connector = aiohttp.TCPConnector( limit=self.max_connections, limit_per_host=self.max_connections_per_host, keepalive_timeout=30, enable_cleanup_closed=True, ttl_dns_cache=300, use_dns_cache=True ) timeout = aiohttp.ClientTimeout( total=60, connect=10, sock_read=30 ) headers = { "Authorization": f"Bearer {api_key}", "Content-Type": "application/json", "HTTP-Referer": "https://chatcsvandpdf.com", "X-Title": "chatcsvandpdf API" } self.session_pool[api_key] = aiohttp.ClientSession( connector=connector, timeout=timeout, headers=headers, raise_for_status=False ) return self.session_pool[api_key] def _should_retry_with_different_key(self, status_code: int) -> bool: """Determine if we should retry with a different API key""" retry_codes = {401, 402, 429} # Auth issues, credits, rate limits return status_code in retry_codes async def chat_completion(self, request: ChatRequest, max_retries: int = 2) -> Dict[str, Any]: """Send chat completion request with enhanced error handling and retries""" start_time = time.time() # Update models list if needed await self.model_validator.update_models_if_needed() # Validate model - if no models loaded, skip validation if self.model_validator.valid_models and not self.model_validator.is_valid_model(request.model): valid_models = self.model_validator.get_valid_models() return { "success": False, "error": f"Model '{request.model}' is not available in chatcsvandpdf.", "suggestion": f"Try one of these available models: {', '.join(valid_models[:5])}{'...' if len(valid_models) > 5 else ''}", "response_time": time.time() - start_time } last_error = None # Try with different API keys if needed for attempt in range(max_retries + 1): api_key = self.key_manager.get_next_key() try: session = await self.get_session(api_key) # Prepare messages messages = [] if request.system_prompt: messages.append({"role": "system", "content": request.system_prompt}) messages.extend([msg.dict() for msg in request.messages]) # Prepare request payload payload = { "model": request.model, "messages": messages, "max_tokens": request.max_tokens, "temperature": request.temperature, "top_p": request.top_p, "frequency_penalty": request.frequency_penalty, "presence_penalty": request.presence_penalty, "stream": request.stream } # Add provider preferences if specified if request.provider: provider_dict = request.provider.dict(exclude_none=True) if provider_dict: payload["provider"] = provider_dict logger.debug(f"Attempt {attempt + 1}: Sending request to {request.model} with key ending in ...{api_key[-4:]}") async with session.post(f"{self.base_url}/chat/completions", json=payload) as response: response_time = time.time() - start_time if response.status == 200: result = await response.json() # Extract provider information if available provider_used = None if "model" in result and "/" in result["model"]: provider_used = result["model"].split("/")[0] return { "success": True, "data": result, "response_time": response_time, "provider_used": provider_used, "api_key_used": api_key[-4:] } else: error_data = await response.text() logger.warning(f"API error {response.status} on attempt {attempt + 1}: {error_data}") # Parse error response if JSON try: error_json = json.loads(error_data) original_error = error_json.get("error", {}).get("message", error_data) except: original_error = error_data # Record error for this key self.key_manager.record_error(api_key) # Check if we should retry with a different key if self._should_retry_with_different_key(response.status) and attempt < max_retries: last_error = { "status": response.status, "message": original_error, "attempt": attempt + 1 } # Wait briefly before retry await asyncio.sleep(min(2 ** attempt, 5)) # Exponential backoff, max 5s continue else: # Final attempt or non-retryable error error_info = self.error_handler.get_user_friendly_error( response.status, original_error, request.model ) return { "success": False, "error": error_info["message"], "suggestion": error_info["suggestion"], "response_time": response_time, "attempts_made": attempt + 1 } except asyncio.TimeoutError: logger.warning(f"Timeout on attempt {attempt + 1} with key ...{api_key[-4:]}") self.key_manager.record_error(api_key) if attempt < max_retries: last_error = {"status": 408, "message": "Request timeout", "attempt": attempt + 1} await asyncio.sleep(min(2 ** attempt, 5)) continue else: return { "success": False, "error": "chatcsvandpdf service timed out processing your request.", "suggestion": "Try shortening your message or using a different model.", "response_time": time.time() - start_time, "attempts_made": attempt + 1 } except Exception as e: logger.error(f"Request failed on attempt {attempt + 1} with key ...{api_key[-4:]}: {str(e)}") self.key_manager.record_error(api_key) if attempt < max_retries: last_error = {"status": 500, "message": str(e), "attempt": attempt + 1} await asyncio.sleep(min(2 ** attempt, 5)) continue else: return { "success": False, "error": "chatcsvandpdf service encountered an unexpected issue.", "suggestion": "Please try again. If the problem persists, contact support.", "response_time": time.time() - start_time, "attempts_made": attempt + 1 } # If we get here, all attempts failed if last_error: error_info = self.error_handler.get_user_friendly_error( last_error["status"], last_error["message"], request.model ) return { "success": False, "error": error_info["message"], "suggestion": error_info["suggestion"], "response_time": time.time() - start_time, "attempts_made": max_retries + 1 } else: return { "success": False, "error": "chatcsvandpdf service is currently unavailable.", "suggestion": "Please try again later.", "response_time": time.time() - start_time } async def stream_chat_completion(self, request: ChatRequest): """Stream chat completion response with enhanced error handling""" # Update models list if needed await self.model_validator.update_models_if_needed() # Validate model - if no models loaded, skip validation if self.model_validator.valid_models and not self.model_validator.is_valid_model(request.model): valid_models = self.model_validator.get_valid_models() error_msg = f"Model '{request.model}' is not available in chatcsvandpdf. Try: {', '.join(valid_models[:3])}" yield f"data: {json.dumps({'error': error_msg})}\n\n".encode() return api_key = self.key_manager.get_next_key() try: session = await self.get_session(api_key) # Prepare messages messages = [] if request.system_prompt: messages.append({"role": "system", "content": request.system_prompt}) messages.extend([msg.dict() for msg in request.messages]) # Prepare request payload payload = { "model": request.model, "messages": messages, "max_tokens": request.max_tokens, "temperature": request.temperature, "top_p": request.top_p, "frequency_penalty": request.frequency_penalty, "presence_penalty": request.presence_penalty, "stream": True } if request.provider: provider_dict = request.provider.dict(exclude_none=True) if provider_dict: payload["provider"] = provider_dict async with session.post(f"{self.base_url}/chat/completions", json=payload) as response: if response.status == 200: async for chunk in response.content.iter_chunked(1024): if chunk: yield chunk else: error_data = await response.text() self.key_manager.record_error(api_key) # Parse error and provide user-friendly message try: error_json = json.loads(error_data) original_error = error_json.get("error", {}).get("message", error_data) except: original_error = error_data error_info = self.error_handler.get_user_friendly_error( response.status, original_error, request.model ) yield f"data: {json.dumps({'error': error_info['message'], 'suggestion': error_info['suggestion']})}\n\n".encode() except asyncio.TimeoutError: logger.error(f"Streaming timeout with key ...{api_key[-4:]}") self.key_manager.record_error(api_key) yield f"data: {json.dumps({'error': 'chatcsvandpdf request timed out. Try a shorter message or different model.'})}\n\n".encode() except Exception as e: logger.error(f"Streaming failed with key ...{api_key[-4:]}: {str(e)}") self.key_manager.record_error(api_key) yield f"data: {json.dumps({'error': 'chatcsvandpdf service encountered an issue. Please try again.'})}\n\n".encode() async def close_all_sessions(self): """Close all aiohttp sessions""" for session in self.session_pool.values(): await session.close() self.session_pool.clear() # Global variables client: Optional[InferenceClient] = None key_manager: Optional[APIKeyManager] = None model_validator: Optional[ModelValidator] = None @asynccontextmanager async def lifespan(app: FastAPI): """Startup and shutdown events""" global client, key_manager, model_validator # Startup logger.info("Starting chatcsvandpdf API...") # Load API keys from environment api_keys_str = os.getenv("OPENROUTER_API_KEYS", "") if not api_keys_str: raise ValueError("OPENROUTER_API_KEYS environment variable is required") api_keys = [key.strip() for key in api_keys_str.split(",") if key.strip()] if not api_keys: raise ValueError("No valid API keys found in OPENROUTER_API_KEYS") # Initialize components model_validator = ModelValidator() key_manager = APIKeyManager(api_keys) client = InferenceClient(key_manager, model_validator) # Initial model fetch await model_validator.update_models_if_needed() logger.info(f"API initialized with {len(api_keys)} keys and {len(model_validator.get_valid_models())} available models") yield # Shutdown logger.info("Shutting down...") if client: await client.close_all_sessions() # Create FastAPI app app = FastAPI( title="chatcsvandpdf API", description="High-performance chat completions API with model validation and multiple key rotation", version="1.0.0", lifespan=lifespan ) @app.get("/", response_model=Dict) async def root(): """Root endpoint with API information""" return { "message": "chatcsvandpdf API", "version": "1.0.0", "endpoints": { "chat": "/api/chat", "chat_stream": "/api/chat (with stream=true)", "models": "/api/models", "stats": "/api/stats", "health": "/health" }, "features": [ "Multiple API key rotation", "Model validation", "Connection pooling", "Parallel processing", "Provider routing", "Streaming support", "Rate limiting", "Enhanced error handling" ] } @app.get("/api/models") async def get_available_models(): """Get list of available models""" if not model_validator: raise HTTPException(status_code=503, detail="Service not initialized") await model_validator.update_models_if_needed() valid_models = model_validator.get_valid_models() return { "models": valid_models, "total_count": len(valid_models), "last_updated": datetime.fromtimestamp(model_validator.last_updated).isoformat() if model_validator.last_updated > 0 else "Never" } @app.post("/api/chat", response_model=ChatResponse) async def chat_completion(request: ChatRequest): """Send chat completion request with enhanced error handling""" if not client: raise HTTPException(status_code=503, detail="chatcsvandpdf service is starting up. Please try again in a moment.") try: # Handle streaming requests if request.stream: return StreamingResponse( client.stream_chat_completion(request), media_type="text/plain", headers={"Cache-Control": "no-cache", "Connection": "keep-alive"} ) # Handle regular requests result = await client.chat_completion(request) if result["success"]: return ChatResponse( success=True, model=request.model, choices=result["data"].get("choices", []), usage=result["data"].get("usage"), response_time=result["response_time"], provider_used=result.get("provider_used"), timestamp=datetime.now().isoformat() ) else: # Return user-friendly error message error_detail = result["error"] if "suggestion" in result: error_detail += f" {result['suggestion']}" # Determine appropriate HTTP status code status_code = 400 if "not available" in result["error"] else 503 raise HTTPException(status_code=status_code, detail=error_detail) except HTTPException: raise except Exception as e: logger.error(f"Unexpected error in chat_completion: {str(e)}") raise HTTPException( status_code=503, detail="chatcsvandpdf service encountered an unexpected issue. Please try again." ) @app.get("/api/stats", response_model=Dict) async def get_api_stats(): """Get API key usage statistics""" if not key_manager: raise HTTPException(status_code=503, detail="Service not initialized") stats = key_manager.get_stats() # Calculate summary statistics total_requests = sum(stat["requests"] for stat in stats.values()) total_errors = sum(stat["errors"] for stat in stats.values()) error_rate = (total_errors / total_requests * 100) if total_requests > 0 else 0 return { "summary": { "total_keys": len(stats), "total_requests": total_requests, "total_errors": total_errors, "error_rate_percent": round(error_rate, 2) }, "key_stats": { f"key_...{key[-4:]}": { "requests": stat["requests"], "errors": stat["errors"], "error_rate": round((stat["errors"] / stat["requests"] * 100) if stat["requests"] > 0 else 0, 2), "last_used": datetime.fromtimestamp(stat["last_used"]).isoformat() if stat["last_used"] > 0 else "Never" } for key, stat in stats.items() } } @app.get("/health") async def health_check(): """Health check endpoint""" if not client or not key_manager or not model_validator: return JSONResponse( status_code=503, content={ "status": "unhealthy", "message": "Service not initialized", "timestamp": datetime.now().isoformat() } )