# llm-api-proxy / src/rotator_library/providers/openai_provider.py
# Author: Mirrowel
# Commit d51ae99: Refactor logging to use a dedicated logger across providers
# and enhance model retrieval with httpx.
import httpx
import logging
from typing import List
from .provider_interface import ProviderInterface
# Dedicated library logger: isolated from the host application's root logger.
lib_logger = logging.getLogger('rotator_library')
lib_logger.propagate = False # Ensure this logger doesn't propagate to root
# Attach a NullHandler only if no handler is configured yet, so records are
# silently discarded unless the host application explicitly opts in.
if not lib_logger.handlers:
    lib_logger.addHandler(logging.NullHandler())
class OpenAIProvider(ProviderInterface):
    """
    Provider implementation for the OpenAI API.
    """
    async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
        """
        Fetches the list of available models from the OpenAI API.

        Args:
            api_key: OpenAI API key, sent as a Bearer token.
            client: Shared httpx.AsyncClient used to issue the request.

        Returns:
            Model IDs prefixed with "openai/" (e.g. "openai/gpt-4o"), or an
            empty list if the request fails for any network or HTTP reason.
        """
        try:
            response = await client.get(
                "https://api.openai.com/v1/models",
                headers={"Authorization": f"Bearer {api_key}"}
            )
            response.raise_for_status()
            return [f"openai/{model['id']}" for model in response.json().get("data", [])]
        except httpx.HTTPError as e:
            # raise_for_status() raises httpx.HTTPStatusError, which is NOT a
            # subclass of httpx.RequestError — catching only RequestError let
            # HTTP 4xx/5xx responses (e.g. a 401 on a bad key) propagate.
            # httpx.HTTPError is the common base of both, restoring the
            # log-and-return-empty contract. Lazy %-args avoid building the
            # message unless the record is actually emitted.
            lib_logger.error("Failed to fetch OpenAI models: %s", e)
            return []