Mirrowel committed on
Commit
d51ae99
·
1 Parent(s): 522249e

feat: Refactor logging to use a dedicated logger across providers and enhance model retrieval with httpx

Browse files
src/rotator_library/client.py CHANGED
@@ -1,10 +1,21 @@
1
  import asyncio
2
  import json
 
3
  import litellm
4
  from litellm.litellm_core_utils.token_counter import token_counter
5
  import logging
6
  from typing import List, Dict, Any, AsyncGenerator
7
 
 
 
 
 
 
 
 
 
 
 
8
  from .usage_manager import UsageManager
9
  from .failure_logger import log_failure
10
  from .error_handler import is_server_error, is_unrecoverable_error
@@ -26,6 +37,7 @@ class RotatingClient:
26
  self._provider_instances = {
27
  name: plugin() for name, plugin in PROVIDER_PLUGINS.items()
28
  }
 
29
 
30
  async def _streaming_wrapper(self, stream: Any, key: str, model: str) -> AsyncGenerator[Any, None]:
31
  """
@@ -34,7 +46,7 @@ class RotatingClient:
34
  """
35
  try:
36
  async for chunk in stream:
37
- #logging.info(f"STREAM CHUNK: {chunk}")
38
  # Convert the litellm chunk object to a dictionary
39
  chunk_dict = chunk.dict()
40
 
@@ -43,13 +55,13 @@ class RotatingClient:
43
 
44
  # Safely check for usage data in the chunk
45
  if hasattr(chunk, 'usage') and chunk.usage:
46
- logging.info(f"Usage found in chunk for key ...{key[-4:]}: {chunk.usage}")
47
  self.usage_manager.record_success(key, model, chunk)
48
 
49
  finally:
50
  # Signal the end of the stream
51
  yield "data: [DONE]\n\n"
52
- logging.info("STREAM FINISHED and [DONE] signal sent.")
53
 
54
 
55
  async def acompletion(self, **kwargs) -> Any:
@@ -139,29 +151,37 @@ class RotatingClient:
139
  """
140
  Returns a list of available models for a specific provider, with caching.
141
  """
 
142
  if provider in self._model_list_cache:
 
143
  return self._model_list_cache[provider]
144
 
145
  api_key = self.api_keys.get(provider, [None])[0]
146
  if not api_key:
 
147
  return []
148
 
149
  if provider in self._provider_instances:
150
- models = await self._provider_instances[provider].get_models(api_key)
 
 
151
  self._model_list_cache[provider] = models
152
  return models
153
  else:
154
- logging.warning(f"Model list fetching not implemented for provider: {provider}")
155
  return []
156
 
157
  async def get_all_available_models(self, grouped: bool = True) -> Any:
158
  """
159
  Returns a list of all available models, either grouped by provider or as a flat list.
160
  """
 
161
  all_provider_models = {}
162
  for provider in self.api_keys.keys():
 
163
  all_provider_models[provider] = await self.get_available_models(provider)
164
 
 
165
  if grouped:
166
  return all_provider_models
167
  else:
 
1
  import asyncio
2
  import json
3
+ import httpx
4
  import litellm
5
  from litellm.litellm_core_utils.token_counter import token_counter
6
  import logging
7
  from typing import List, Dict, Any, AsyncGenerator
8
 
9
+ # Set up a dedicated logger for the library
10
+ lib_logger = logging.getLogger('rotator_library')
11
+ lib_logger.propagate = False
12
+
13
+ # You might want to add a handler if you want to see these logs specifically
14
+ # For example, a NullHandler to avoid "No handler found" warnings if the
15
+ # main app doesn't configure this logger.
16
+ if not lib_logger.handlers:
17
+ lib_logger.addHandler(logging.NullHandler())
18
+
19
  from .usage_manager import UsageManager
20
  from .failure_logger import log_failure
21
  from .error_handler import is_server_error, is_unrecoverable_error
 
37
  self._provider_instances = {
38
  name: plugin() for name, plugin in PROVIDER_PLUGINS.items()
39
  }
40
+ self.http_client = httpx.AsyncClient()
41
 
42
  async def _streaming_wrapper(self, stream: Any, key: str, model: str) -> AsyncGenerator[Any, None]:
43
  """
 
46
  """
47
  try:
48
  async for chunk in stream:
49
+ #lib_logger.info(f"STREAM CHUNK: {chunk}")
50
  # Convert the litellm chunk object to a dictionary
51
  chunk_dict = chunk.dict()
52
 
 
55
 
56
  # Safely check for usage data in the chunk
57
  if hasattr(chunk, 'usage') and chunk.usage:
58
+ lib_logger.info(f"Usage found in chunk for key ...{key[-4:]}: {chunk.usage}")
59
  self.usage_manager.record_success(key, model, chunk)
60
 
61
  finally:
62
  # Signal the end of the stream
63
  yield "data: [DONE]\n\n"
64
+ lib_logger.info("STREAM FINISHED and [DONE] signal sent.")
65
 
66
 
67
  async def acompletion(self, **kwargs) -> Any:
 
151
  """
152
  Returns a list of available models for a specific provider, with caching.
153
  """
154
+ lib_logger.info(f"Getting available models for provider: {provider}")
155
  if provider in self._model_list_cache:
156
+ lib_logger.info(f"Returning cached models for provider: {provider}")
157
  return self._model_list_cache[provider]
158
 
159
  api_key = self.api_keys.get(provider, [None])[0]
160
  if not api_key:
161
+ lib_logger.warning(f"No API key for provider: {provider}")
162
  return []
163
 
164
  if provider in self._provider_instances:
165
+ lib_logger.info(f"Calling get_models for provider: {provider}")
166
+ models = await self._provider_instances[provider].get_models(api_key, self.http_client)
167
+ lib_logger.info(f"Got {len(models)} models for provider: {provider}")
168
  self._model_list_cache[provider] = models
169
  return models
170
  else:
171
+ lib_logger.warning(f"Model list fetching not implemented for provider: {provider}")
172
  return []
173
 
174
  async def get_all_available_models(self, grouped: bool = True) -> Any:
175
  """
176
  Returns a list of all available models, either grouped by provider or as a flat list.
177
  """
178
+ lib_logger.info("Getting all available models...")
179
  all_provider_models = {}
180
  for provider in self.api_keys.keys():
181
+ lib_logger.info(f"Getting models for provider: {provider}")
182
  all_provider_models[provider] = await self.get_available_models(provider)
183
 
184
+ lib_logger.info("Finished getting all available models.")
185
  if grouped:
186
  return all_provider_models
187
  else:
src/rotator_library/failure_logger.py CHANGED
@@ -9,8 +9,9 @@ def setup_failure_logger():
9
  if not os.path.exists(log_dir):
10
  os.makedirs(log_dir)
11
 
12
- logger = logging.getLogger('failure_logger')
13
- logger.setLevel(logging.ERROR)
 
14
 
15
  # Prevent logs from propagating to the root logger
16
  logger.propagate = False
@@ -34,12 +35,13 @@ def setup_failure_logger():
34
 
35
  handler.setFormatter(JsonFormatter())
36
 
37
- # Add handler only if it hasn't been added before
38
- if not logger.handlers:
39
  logger.addHandler(handler)
40
 
41
  return logger
42
 
 
43
  failure_logger = setup_failure_logger()
44
 
45
  def log_failure(api_key: str, model: str, attempt: int, error: Exception, request_data: dict):
 
9
  if not os.path.exists(log_dir):
10
  os.makedirs(log_dir)
11
 
12
+ # Use the same named logger as the rest of the library
13
+ logger = logging.getLogger('rotator_library')
14
+ logger.setLevel(logging.INFO) # Set to INFO to capture all levels
15
 
16
  # Prevent logs from propagating to the root logger
17
  logger.propagate = False
 
35
 
36
  handler.setFormatter(JsonFormatter())
37
 
38
+ # Add handler only if it hasn't been added before, and is not a NullHandler
39
+ if not any(isinstance(h, RotatingFileHandler) for h in logger.handlers):
40
  logger.addHandler(handler)
41
 
42
  return logger
43
 
44
+ # Initialize the logger for failures
45
  failure_logger = setup_failure_logger()
46
 
47
  def log_failure(api_key: str, model: str, attempt: int, error: Exception, request_data: dict):
src/rotator_library/providers/anthropic_provider.py CHANGED
@@ -1,18 +1,23 @@
1
- import requests
2
  import logging
3
  from typing import List
4
  from .provider_interface import ProviderInterface
5
 
 
 
 
 
 
6
  class AnthropicProvider(ProviderInterface):
7
  """
8
  Provider implementation for the Anthropic API.
9
  """
10
- async def get_models(self, api_key: str) -> List[str]:
11
  """
12
  Fetches the list of available models from the Anthropic API.
13
  """
14
  try:
15
- response = requests.get(
16
  "https://api.anthropic.com/v1/models",
17
  headers={
18
  "x-api-key": api_key,
@@ -21,6 +26,6 @@ class AnthropicProvider(ProviderInterface):
21
  )
22
  response.raise_for_status()
23
  return [f"anthropic/{model['id']}" for model in response.json().get("data", [])]
24
- except requests.RequestException as e:
25
- logging.error(f"Failed to fetch Anthropic models: {e}")
26
  return []
 
1
+ import httpx
2
  import logging
3
  from typing import List
4
  from .provider_interface import ProviderInterface
5
 
6
+ lib_logger = logging.getLogger('rotator_library')
7
+ lib_logger.propagate = False # Ensure this logger doesn't propagate to root
8
+ if not lib_logger.handlers:
9
+ lib_logger.addHandler(logging.NullHandler())
10
+
11
  class AnthropicProvider(ProviderInterface):
12
  """
13
  Provider implementation for the Anthropic API.
14
  """
15
+ async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
16
  """
17
  Fetches the list of available models from the Anthropic API.
18
  """
19
  try:
20
+ response = await client.get(
21
  "https://api.anthropic.com/v1/models",
22
  headers={
23
  "x-api-key": api_key,
 
26
  )
27
  response.raise_for_status()
28
  return [f"anthropic/{model['id']}" for model in response.json().get("data", [])]
29
+ except httpx.RequestError as e:
30
+ lib_logger.error(f"Failed to fetch Anthropic models: {e}")
31
  return []
src/rotator_library/providers/bedrock_provider.py CHANGED
@@ -1,12 +1,18 @@
 
1
  import logging
2
  from typing import List
3
  from .provider_interface import ProviderInterface
4
 
 
 
 
 
 
5
  class BedrockProvider(ProviderInterface):
6
  """
7
  Provider implementation for AWS Bedrock.
8
  """
9
- async def get_models(self, api_key: str) -> List[str]:
10
  """
11
  Returns a hardcoded list of common Bedrock models, as there is no
12
  simple, unauthenticated API endpoint to list them.
@@ -14,7 +20,7 @@ class BedrockProvider(ProviderInterface):
14
  # Note: Listing Bedrock models typically requires AWS credentials and boto3.
15
  # For a simple, key-based proxy, we'll list common models.
16
  # This can be expanded with full AWS authentication if needed.
17
- logging.info("Returning hardcoded list for Bedrock. Full discovery requires AWS auth.")
18
  return [
19
  "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
20
  "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
 
1
+ import httpx
2
  import logging
3
  from typing import List
4
  from .provider_interface import ProviderInterface
5
 
6
+ lib_logger = logging.getLogger('rotator_library')
7
+ lib_logger.propagate = False # Ensure this logger doesn't propagate to root
8
+ if not lib_logger.handlers:
9
+ lib_logger.addHandler(logging.NullHandler())
10
+
11
  class BedrockProvider(ProviderInterface):
12
  """
13
  Provider implementation for AWS Bedrock.
14
  """
15
+ async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
16
  """
17
  Returns a hardcoded list of common Bedrock models, as there is no
18
  simple, unauthenticated API endpoint to list them.
 
20
  # Note: Listing Bedrock models typically requires AWS credentials and boto3.
21
  # For a simple, key-based proxy, we'll list common models.
22
  # This can be expanded with full AWS authentication if needed.
23
+ lib_logger.info("Returning hardcoded list for Bedrock. Full discovery requires AWS auth.")
24
  return [
25
  "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
26
  "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
src/rotator_library/providers/chutes_provider.py CHANGED
@@ -1,23 +1,28 @@
1
- import requests
2
  import logging
3
  from typing import List
4
  from .provider_interface import ProviderInterface
5
 
 
 
 
 
 
6
  class ChutesProvider(ProviderInterface):
7
  """
8
  Provider implementation for the chutes.ai API.
9
  """
10
- async def get_models(self, api_key: str) -> List[str]:
11
  """
12
  Fetches the list of available models from the chutes.ai API.
13
  """
14
  try:
15
- response = requests.get(
16
  "https://llm.chutes.ai/v1/models",
17
  headers={"Authorization": f"Bearer {api_key}"}
18
  )
19
  response.raise_for_status()
20
  return [f"chutes/{model['id']}" for model in response.json().get("data", [])]
21
- except requests.RequestException as e:
22
- logging.error(f"Failed to fetch chutes.ai models: {e}")
23
- return []
 
1
+ import httpx
2
  import logging
3
  from typing import List
4
  from .provider_interface import ProviderInterface
5
 
6
+ lib_logger = logging.getLogger('rotator_library')
7
+ lib_logger.propagate = False # Ensure this logger doesn't propagate to root
8
+ if not lib_logger.handlers:
9
+ lib_logger.addHandler(logging.NullHandler())
10
+
11
  class ChutesProvider(ProviderInterface):
12
  """
13
  Provider implementation for the chutes.ai API.
14
  """
15
+ async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
16
  """
17
  Fetches the list of available models from the chutes.ai API.
18
  """
19
  try:
20
+ response = await client.get(
21
  "https://llm.chutes.ai/v1/models",
22
  headers={"Authorization": f"Bearer {api_key}"}
23
  )
24
  response.raise_for_status()
25
  return [f"chutes/{model['id']}" for model in response.json().get("data", [])]
26
+ except httpx.RequestError as e:
27
+ lib_logger.error(f"Failed to fetch chutes.ai models: {e}")
28
+ return []
src/rotator_library/providers/cohere_provider.py CHANGED
@@ -1,23 +1,28 @@
1
- import requests
2
  import logging
3
  from typing import List
4
  from .provider_interface import ProviderInterface
5
 
 
 
 
 
 
6
  class CohereProvider(ProviderInterface):
7
  """
8
  Provider implementation for the Cohere API.
9
  """
10
- async def get_models(self, api_key: str) -> List[str]:
11
  """
12
  Fetches the list of available models from the Cohere API.
13
  """
14
  try:
15
- response = requests.get(
16
  "https://api.cohere.ai/v1/models",
17
  headers={"Authorization": f"Bearer {api_key}"}
18
  )
19
  response.raise_for_status()
20
  return [f"cohere/{model['name']}" for model in response.json().get("models", [])]
21
- except requests.RequestException as e:
22
- logging.error(f"Failed to fetch Cohere models: {e}")
23
  return []
 
1
import httpx
import logging
from typing import List
from .provider_interface import ProviderInterface

# Dedicated library logger; a NullHandler avoids "No handler found" warnings
# when the host application has not configured the 'rotator_library' logger.
lib_logger = logging.getLogger('rotator_library')
lib_logger.propagate = False  # Ensure this logger doesn't propagate to root
if not lib_logger.handlers:
    lib_logger.addHandler(logging.NullHandler())

class CohereProvider(ProviderInterface):
    """
    Provider implementation for the Cohere API.
    """
    async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
        """
        Fetches the list of available models from the Cohere API.

        Args:
            api_key: Bearer token used to authenticate the request.
            client: Shared httpx.AsyncClient used to issue the request.

        Returns:
            A list of model names prefixed with "cohere/", or an empty list
            if the request fails for any reason.
        """
        try:
            response = await client.get(
                "https://api.cohere.ai/v1/models",
                headers={"Authorization": f"Bearer {api_key}"}
            )
            response.raise_for_status()
            return [f"cohere/{model['name']}" for model in response.json().get("models", [])]
        # httpx.HTTPError is the common base of RequestError (transport
        # failures) and HTTPStatusError (raised by raise_for_status()).
        # Catching only RequestError would let 4xx/5xx responses escape.
        except httpx.HTTPError as e:
            lib_logger.error(f"Failed to fetch Cohere models: {e}")
            return []
src/rotator_library/providers/gemini_provider.py CHANGED
@@ -1,23 +1,28 @@
1
- import requests
2
  import logging
3
  from typing import List
4
  from .provider_interface import ProviderInterface
5
 
 
 
 
 
 
6
  class GeminiProvider(ProviderInterface):
7
  """
8
  Provider implementation for the Google Gemini API.
9
  """
10
- async def get_models(self, api_key: str) -> List[str]:
11
  """
12
  Fetches the list of available models from the Google Gemini API.
13
  """
14
  try:
15
- response = requests.get(
16
  "https://generativelanguage.googleapis.com/v1beta/models",
17
  headers={"x-goog-api-key": api_key}
18
  )
19
  response.raise_for_status()
20
  return [f"gemini/{model['name'].replace('models/', '')}" for model in response.json().get("models", [])]
21
- except requests.RequestException as e:
22
- logging.error(f"Failed to fetch Gemini models: {e}")
23
  return []
 
1
import httpx
import logging
from typing import List
from .provider_interface import ProviderInterface

# Dedicated library logger; a NullHandler avoids "No handler found" warnings
# when the host application has not configured the 'rotator_library' logger.
lib_logger = logging.getLogger('rotator_library')
lib_logger.propagate = False  # Ensure this logger doesn't propagate to root
if not lib_logger.handlers:
    lib_logger.addHandler(logging.NullHandler())

class GeminiProvider(ProviderInterface):
    """
    Provider implementation for the Google Gemini API.
    """
    async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
        """
        Fetches the list of available models from the Google Gemini API.

        Args:
            api_key: API key sent via the x-goog-api-key header.
            client: Shared httpx.AsyncClient used to issue the request.

        Returns:
            A list of model names prefixed with "gemini/" (the "models/"
            prefix stripped), or an empty list if the request fails.
        """
        try:
            response = await client.get(
                "https://generativelanguage.googleapis.com/v1beta/models",
                headers={"x-goog-api-key": api_key}
            )
            response.raise_for_status()
            return [f"gemini/{model['name'].replace('models/', '')}" for model in response.json().get("models", [])]
        # httpx.HTTPError is the common base of RequestError (transport
        # failures) and HTTPStatusError (raised by raise_for_status()).
        # Catching only RequestError would let 4xx/5xx responses escape.
        except httpx.HTTPError as e:
            lib_logger.error(f"Failed to fetch Gemini models: {e}")
            return []
src/rotator_library/providers/groq_provider.py CHANGED
@@ -1,23 +1,28 @@
1
- import requests
2
  import logging
3
  from typing import List
4
  from .provider_interface import ProviderInterface
5
 
 
 
 
 
 
6
  class GroqProvider(ProviderInterface):
7
  """
8
  Provider implementation for the Groq API.
9
  """
10
- async def get_models(self, api_key: str) -> List[str]:
11
  """
12
  Fetches the list of available models from the Groq API.
13
  """
14
  try:
15
- response = requests.get(
16
  "https://api.groq.com/openai/v1/models",
17
  headers={"Authorization": f"Bearer {api_key}"}
18
  )
19
  response.raise_for_status()
20
  return [f"groq/{model['id']}" for model in response.json().get("data", [])]
21
- except requests.RequestException as e:
22
- logging.error(f"Failed to fetch Groq models: {e}")
23
  return []
 
1
import httpx
import logging
from typing import List
from .provider_interface import ProviderInterface

# Dedicated library logger; a NullHandler avoids "No handler found" warnings
# when the host application has not configured the 'rotator_library' logger.
lib_logger = logging.getLogger('rotator_library')
lib_logger.propagate = False  # Ensure this logger doesn't propagate to root
if not lib_logger.handlers:
    lib_logger.addHandler(logging.NullHandler())

class GroqProvider(ProviderInterface):
    """
    Provider implementation for the Groq API.
    """
    async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
        """
        Fetches the list of available models from the Groq API.

        Args:
            api_key: Bearer token used to authenticate the request.
            client: Shared httpx.AsyncClient used to issue the request.

        Returns:
            A list of model ids prefixed with "groq/", or an empty list
            if the request fails for any reason.
        """
        try:
            response = await client.get(
                "https://api.groq.com/openai/v1/models",
                headers={"Authorization": f"Bearer {api_key}"}
            )
            response.raise_for_status()
            return [f"groq/{model['id']}" for model in response.json().get("data", [])]
        # httpx.HTTPError is the common base of RequestError (transport
        # failures) and HTTPStatusError (raised by raise_for_status()).
        # Catching only RequestError would let 4xx/5xx responses escape.
        except httpx.HTTPError as e:
            lib_logger.error(f"Failed to fetch Groq models: {e}")
            return []
src/rotator_library/providers/mistral_provider.py CHANGED
@@ -1,23 +1,28 @@
1
- import requests
2
  import logging
3
  from typing import List
4
  from .provider_interface import ProviderInterface
5
 
 
 
 
 
 
6
  class MistralProvider(ProviderInterface):
7
  """
8
  Provider implementation for the Mistral API.
9
  """
10
- async def get_models(self, api_key: str) -> List[str]:
11
  """
12
  Fetches the list of available models from the Mistral API.
13
  """
14
  try:
15
- response = requests.get(
16
  "https://api.mistral.ai/v1/models",
17
  headers={"Authorization": f"Bearer {api_key}"}
18
  )
19
  response.raise_for_status()
20
  return [f"mistral/{model['id']}" for model in response.json().get("data", [])]
21
- except requests.RequestException as e:
22
- logging.error(f"Failed to fetch Mistral models: {e}")
23
  return []
 
1
import httpx
import logging
from typing import List
from .provider_interface import ProviderInterface

# Dedicated library logger; a NullHandler avoids "No handler found" warnings
# when the host application has not configured the 'rotator_library' logger.
lib_logger = logging.getLogger('rotator_library')
lib_logger.propagate = False  # Ensure this logger doesn't propagate to root
if not lib_logger.handlers:
    lib_logger.addHandler(logging.NullHandler())

class MistralProvider(ProviderInterface):
    """
    Provider implementation for the Mistral API.
    """
    async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
        """
        Fetches the list of available models from the Mistral API.

        Args:
            api_key: Bearer token used to authenticate the request.
            client: Shared httpx.AsyncClient used to issue the request.

        Returns:
            A list of model ids prefixed with "mistral/", or an empty list
            if the request fails for any reason.
        """
        try:
            response = await client.get(
                "https://api.mistral.ai/v1/models",
                headers={"Authorization": f"Bearer {api_key}"}
            )
            response.raise_for_status()
            return [f"mistral/{model['id']}" for model in response.json().get("data", [])]
        # httpx.HTTPError is the common base of RequestError (transport
        # failures) and HTTPStatusError (raised by raise_for_status()).
        # Catching only RequestError would let 4xx/5xx responses escape.
        except httpx.HTTPError as e:
            lib_logger.error(f"Failed to fetch Mistral models: {e}")
            return []
src/rotator_library/providers/openai_provider.py CHANGED
@@ -1,23 +1,28 @@
1
- import requests
2
  import logging
3
  from typing import List
4
  from .provider_interface import ProviderInterface
5
 
 
 
 
 
 
6
  class OpenAIProvider(ProviderInterface):
7
  """
8
  Provider implementation for the OpenAI API.
9
  """
10
- async def get_models(self, api_key: str) -> List[str]:
11
  """
12
  Fetches the list of available models from the OpenAI API.
13
  """
14
  try:
15
- response = requests.get(
16
  "https://api.openai.com/v1/models",
17
  headers={"Authorization": f"Bearer {api_key}"}
18
  )
19
  response.raise_for_status()
20
  return [f"openai/{model['id']}" for model in response.json().get("data", [])]
21
- except requests.RequestException as e:
22
- logging.error(f"Failed to fetch OpenAI models: {e}")
23
  return []
 
1
import httpx
import logging
from typing import List
from .provider_interface import ProviderInterface

# Dedicated library logger; a NullHandler avoids "No handler found" warnings
# when the host application has not configured the 'rotator_library' logger.
lib_logger = logging.getLogger('rotator_library')
lib_logger.propagate = False  # Ensure this logger doesn't propagate to root
if not lib_logger.handlers:
    lib_logger.addHandler(logging.NullHandler())

class OpenAIProvider(ProviderInterface):
    """
    Provider implementation for the OpenAI API.
    """
    async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
        """
        Fetches the list of available models from the OpenAI API.

        Args:
            api_key: Bearer token used to authenticate the request.
            client: Shared httpx.AsyncClient used to issue the request.

        Returns:
            A list of model ids prefixed with "openai/", or an empty list
            if the request fails for any reason.
        """
        try:
            response = await client.get(
                "https://api.openai.com/v1/models",
                headers={"Authorization": f"Bearer {api_key}"}
            )
            response.raise_for_status()
            return [f"openai/{model['id']}" for model in response.json().get("data", [])]
        # httpx.HTTPError is the common base of RequestError (transport
        # failures) and HTTPStatusError (raised by raise_for_status()).
        # Catching only RequestError would let 4xx/5xx responses escape.
        except httpx.HTTPError as e:
            lib_logger.error(f"Failed to fetch OpenAI models: {e}")
            return []
src/rotator_library/providers/openrouter_provider.py CHANGED
@@ -1,23 +1,28 @@
1
- import requests
2
  import logging
3
  from typing import List
4
  from .provider_interface import ProviderInterface
5
 
 
 
 
 
 
6
  class OpenRouterProvider(ProviderInterface):
7
  """
8
  Provider implementation for the OpenRouter API.
9
  """
10
- async def get_models(self, api_key: str) -> List[str]:
11
  """
12
  Fetches the list of available models from the OpenRouter API.
13
  """
14
  try:
15
- response = requests.get(
16
  "https://openrouter.ai/api/v1/models",
17
  headers={"Authorization": f"Bearer {api_key}"}
18
  )
19
  response.raise_for_status()
20
  return [f"openrouter/{model['id']}" for model in response.json().get("data", [])]
21
- except requests.RequestException as e:
22
- logging.error(f"Failed to fetch OpenRouter models: {e}")
23
  return []
 
1
import httpx
import logging
from typing import List
from .provider_interface import ProviderInterface

# Dedicated library logger; a NullHandler avoids "No handler found" warnings
# when the host application has not configured the 'rotator_library' logger.
lib_logger = logging.getLogger('rotator_library')
lib_logger.propagate = False  # Ensure this logger doesn't propagate to root
if not lib_logger.handlers:
    lib_logger.addHandler(logging.NullHandler())

class OpenRouterProvider(ProviderInterface):
    """
    Provider implementation for the OpenRouter API.
    """
    async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
        """
        Fetches the list of available models from the OpenRouter API.

        Args:
            api_key: Bearer token used to authenticate the request.
            client: Shared httpx.AsyncClient used to issue the request.

        Returns:
            A list of model ids prefixed with "openrouter/", or an empty
            list if the request fails for any reason.
        """
        try:
            response = await client.get(
                "https://openrouter.ai/api/v1/models",
                headers={"Authorization": f"Bearer {api_key}"}
            )
            response.raise_for_status()
            return [f"openrouter/{model['id']}" for model in response.json().get("data", [])]
        # httpx.HTTPError is the common base of RequestError (transport
        # failures) and HTTPStatusError (raised by raise_for_status()).
        # Catching only RequestError would let 4xx/5xx responses escape.
        except httpx.HTTPError as e:
            lib_logger.error(f"Failed to fetch OpenRouter models: {e}")
            return []
src/rotator_library/providers/provider_interface.py CHANGED
@@ -1,5 +1,6 @@
1
  from abc import ABC, abstractmethod
2
- from typing import List, Any
 
3
 
4
  class ProviderInterface(ABC):
5
  """
@@ -8,12 +9,13 @@ class ProviderInterface(ABC):
8
  """
9
 
10
  @abstractmethod
11
- async def get_models(self, api_key: str) -> List[str]:
12
  """
13
  Fetches the list of available model names from the provider's API.
14
 
15
  Args:
16
  api_key: The API key required for authentication.
 
17
 
18
  Returns:
19
  A list of model name strings.
 
1
  from abc import ABC, abstractmethod
2
+ from typing import List
3
+ import httpx
4
 
5
  class ProviderInterface(ABC):
6
  """
 
9
  """
10
 
11
  @abstractmethod
12
+ async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
13
  """
14
  Fetches the list of available model names from the provider's API.
15
 
16
  Args:
17
  api_key: The API key required for authentication.
18
+ client: An httpx.AsyncClient instance for making requests.
19
 
20
  Returns:
21
  A list of model name strings.
src/rotator_library/pyproject.toml CHANGED
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
 
5
  [project]
6
  name = "rotating-api-key-client"
7
- version = "0.5.2"
8
  authors = [
9
  { name="Mirrowel", email="nuh@uh.com" },
10
  ]
@@ -19,7 +19,7 @@ classifiers = [
19
  dependencies = [
20
  "litellm",
21
  "filelock",
22
- "requests",
23
  ]
24
 
25
  [project.urls]
 
4
 
5
  [project]
6
  name = "rotating-api-key-client"
7
+ version = "0.5.5"
8
  authors = [
9
  { name="Mirrowel", email="nuh@uh.com" },
10
  ]
 
19
  dependencies = [
20
  "litellm",
21
  "filelock",
22
+ "httpx"
23
  ]
24
 
25
  [project.urls]
src/rotator_library/usage_manager.py CHANGED
@@ -1,11 +1,17 @@
1
  import json
2
  import os
3
  import time
 
4
  from datetime import date, datetime
5
  from typing import Dict, List, Optional, Any
6
  from filelock import FileLock
7
  import litellm
8
 
 
 
 
 
 
9
  class UsageManager:
10
  """
11
  Manages daily and global usage statistics and cooldowns for API keys.
@@ -109,7 +115,7 @@ class UsageManager:
109
  cost = litellm.completion_cost(completion_response=completion_response)
110
  daily_model_data["approx_cost"] += cost
111
  except Exception as e:
112
- print(f"Warning: Could not calculate cost for model {model}: {e}")
113
 
114
  key_data["last_used_ts"] = time.time()
115
  self._save_usage()
 
1
  import json
2
  import os
3
  import time
4
+ import logging
5
  from datetime import date, datetime
6
  from typing import Dict, List, Optional, Any
7
  from filelock import FileLock
8
  import litellm
9
 
10
+ lib_logger = logging.getLogger('rotator_library')
11
+ lib_logger.propagate = False # Ensure this logger doesn't propagate to root
12
+ if not lib_logger.handlers:
13
+ lib_logger.addHandler(logging.NullHandler())
14
+
15
  class UsageManager:
16
  """
17
  Manages daily and global usage statistics and cooldowns for API keys.
 
115
  cost = litellm.completion_cost(completion_response=completion_response)
116
  daily_model_data["approx_cost"] += cost
117
  except Exception as e:
118
+ lib_logger.warning(f"Could not calculate cost for model {model}: {e}")
119
 
120
  key_data["last_used_ts"] = time.time()
121
  self._save_usage()