# sto-rai / src/utils/init_openai.py
# Author: yiiilonggg — "Init files" (commit b151e60)
import functools
import random
import time
from typing import Any, Dict

from openai import OpenAI, RateLimitError
from openai.types.chat.chat_completion import ChatCompletion
# Module-level OpenAI client; None until init_client() is called successfully.
# (String annotation: the value starts as None, so a plain `OpenAI` would be wrong.)
client: "OpenAI | None" = None
# Chat model name sent with every completion request.
MODEL_ = 'gpt-4o'
def init_client(api_key: str) -> bool:
    """Initialise the module-level OpenAI ``client``.

    Args:
        api_key: OpenAI API key used to authenticate the client.

    Returns:
        True if the client was constructed successfully, False otherwise.
    """
    global client
    try:
        client = OpenAI(api_key=api_key)
    # Narrowed from a bare ``except:``, which would also swallow
    # SystemExit/KeyboardInterrupt; construction failures still just
    # report False to the caller.
    except Exception:
        return False
    return True
# from OpenAI's website
# define a retry decorator
# from OpenAI's website
# define a retry decorator
def __retry_with_exponential_backoff(
    func,
    initial_delay: float = 1,
    exponential_base: float = 2,
    jitter: bool = True,
    max_retries: int = 10,
    errors: tuple = (RateLimitError,),
):
    """Retry ``func`` with exponential backoff.

    Calls ``func`` until it returns; each exception listed in ``errors``
    triggers a sleep that grows geometrically (base ``exponential_base``,
    optionally jittered) before the next attempt. Any other exception
    propagates immediately.

    Args:
        func: The callable to wrap.
        initial_delay: Seconds to wait after the first failure.
        exponential_base: Multiplier applied to the delay on each retry.
        jitter: If True, randomises each delay to avoid thundering herds.
        max_retries: Attempts allowed before giving up.
        errors: Exception types that should trigger a retry.

    Raises:
        Exception: When more than ``max_retries`` retries are needed.
    """
    # ``wraps`` preserves the wrapped function's name/docstring, which the
    # plain wrapper previously clobbered.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        num_retries = 0
        delay = initial_delay
        # Loop until a successful response, or until max_retries is exceeded.
        while True:
            try:
                return func(*args, **kwargs)
            # Retry only on the configured error types; everything else
            # propagates naturally (the former ``except Exception: raise``
            # clause was a no-op and has been dropped).
            except errors:
                num_retries += 1
                if num_retries > max_retries:
                    raise Exception(
                        f"Maximum number of retries ({max_retries}) exceeded."
                    )
                # Grow the delay geometrically; jitter (a bool used as 0/1)
                # randomises the factor in [base, 2*base) when enabled.
                delay *= exponential_base * (1 + jitter * random.random())
                time.sleep(delay)
    return wrapper
@__retry_with_exponential_backoff
def completions_with_backoff(
    # Annotation corrected: this is passed straight through as the chat
    # ``messages`` payload, which is a list of message dicts, not one dict.
    messages: 'list[dict[str, Any]]',
    temperature: float = 0,
    max_tokens: int = 8192
) -> ChatCompletion:
    """Create a chat completion with MODEL_, retrying on rate limits.

    Requires init_client() to have been called first; otherwise the
    module-level ``client`` is still None and this call will fail.

    Args:
        messages: Chat messages forwarded to the completions endpoint.
        temperature: Sampling temperature forwarded to the API.
        max_tokens: Completion-length cap forwarded to the API.

    Returns:
        The ChatCompletion response from the OpenAI API.
    """
    kwargs = {
        'model': MODEL_,
        'messages': messages,
        'temperature': temperature,
        'max_tokens': max_tokens
    }
    return client.chat.completions.create(**kwargs)