# NOTE(review): paste artifact removed — these lines originally read
# "Spaces:" / "Runtime error" / "Runtime error" (copied from a run log).
| from langchain_openai import OpenAI | |
| import openai | |
| import sys | |
| import os | |
| import requests | |
| from json import JSONDecodeError | |
| import time | |
| sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))) | |
| from config import * | |
class LLM_API_Call:
    """Facade that selects and wraps an LLM backend by name.

    Supported backends:
      * ``"openai"`` -> :class:`OpenAI_API_Call` configured from ``LLM_CONFIG``
      * ``"gilas"``  -> :class:`Gilas_API_Call` configured from ``GILAS_CONFIG``
      * anything else -> a langchain ``OpenAI`` instance built from ``LLM_CONFIG``
    """

    def __init__(self, type) -> None:
        # `type` shadows the builtin, but the name is kept so existing
        # keyword callers (LLM_API_Call(type=...)) keep working.
        if type == "openai":
            # BUGFIX: the original read LLM_CONFIG[""] (empty-string key),
            # which raises KeyError at runtime. The key is assumed to be
            # "api_key" — TODO confirm against config.py.
            self.llm = OpenAI_API_Call(api_key=LLM_CONFIG["api_key"],
                                       model=LLM_CONFIG["model"])
        elif type == "gilas":
            self.llm = Gilas_API_Call(api_keys=GILAS_API_KEYS,
                                      model=GILAS_CONFIG["model"],
                                      base_url=GILAS_CONFIG["base_url"])
        else:
            # Fallback: hand the whole config straight to langchain's OpenAI.
            self.llm = OpenAI(**LLM_CONFIG)

    def get_LLM_response(self, prompt: str) -> str:
        """Send *prompt* to the selected backend and return its reply text."""
        return self.llm.invoke(prompt)
class OpenAI_API_Call:
    """Stateful chat wrapper around the openai ChatCompletion endpoint.

    Accumulates the full conversation history and replays it on every call,
    so the model sees all prior turns.
    """

    def __init__(self, api_key, model="gpt-4"):
        # NOTE(review): openai.ChatCompletion is the pre-1.0 SDK surface;
        # confirm the pinned openai version still exposes it.
        self.api_key = api_key
        openai.api_key = api_key
        self.model = model
        self.conversation = []

    def add_message(self, role, content):
        """Append one chat turn (role + content) to the running history."""
        message = {"role": role, "content": content}
        self.conversation.append(message)

    def get_response(self):
        """Send the accumulated history and return the assistant's reply text."""
        completion = openai.ChatCompletion.create(
            model=self.model,
            messages=self.conversation,
        )
        choice = completion['choices'][0]
        return choice['message']['content']

    def invoke(self, user_input):
        """Record the user turn, fetch a reply, record it, and return it."""
        self.add_message("user", user_input)
        reply = self.get_response()
        self.add_message("assistant", reply)
        return reply
class Gilas_API_Call:
    """Chat-completions client for the Gilas API with key rotation and retry.

    Keeps the whole conversation and replays it on every request. On failure
    it rotates through ``api_keys``; when every key has failed it sleeps for
    an escalating back-off and retries, up to ``max_retries`` rounds.
    """

    def __init__(self, api_keys, base_url, model="gpt-4o-mini"):
        self.api_keys = api_keys
        self.base_url = base_url
        self.model = model
        self.headers = {
            "Content-Type": "application/json"
        }
        self.conversation = []
        # Seconds to wait after a full key-rotation fails; grows by 30s
        # per round (persists across invoke() calls by design — TODO confirm).
        self.retry_wait_time = 30

    def add_message(self, role, content):
        """Append one chat turn to the running history."""
        self.conversation.append({"role": role, "content": content})

    def get_response(self, api_key):
        """POST the history to /chat/completions using *api_key*.

        Returns the assistant reply text.
        Raises Exception on a non-200 status or an unexpected payload shape.
        """
        self.headers["Authorization"] = f"Bearer {api_key}"
        data = {
            "model": self.model,
            "messages": self.conversation
        }
        response = requests.post(
            url=f"{self.base_url}/chat/completions",
            headers=self.headers,
            json=data
        )
        if response.status_code != 200:
            raise Exception(f"Gilas API call failed: {response.status_code} - {response.text}")
        try:
            return response.json()['choices'][0]['message']['content']
        except (KeyError, IndexError, ValueError) as e:
            # ValueError also covers json.JSONDecodeError (non-JSON body).
            raise Exception(f"Unexpected API response format: {e}")

    def invoke(self, user_input, max_retries=3):
        """Record the user turn and return the assistant reply.

        Rotates API keys on error; after all keys fail, sleeps with an
        escalating back-off and counts one retry round. Raises Exception
        after *max_retries* failed rounds.
        """
        self.add_message("user", user_input)
        retries = 0
        while retries < max_retries:
            for i, api_key in enumerate(self.api_keys):
                try:
                    response = self.get_response(api_key)
                    self.add_message("assistant", response)
                    return response
                # FIX: the original caught (JSONDecodeError, Exception) —
                # JSONDecodeError is already an Exception subclass, so the
                # tuple is redundant; a single broad catch is equivalent.
                except Exception as e:
                    print(f"Error encountered with API key {api_key}: {e}. Trying next key...")
                    # After the last key fails, back off before the next round.
                    if i == len(self.api_keys) - 1:
                        print(f"All keys failed. Retrying oldest key after {self.retry_wait_time} seconds...")
                        time.sleep(self.retry_wait_time)
                        self.retry_wait_time += 30  # Increase wait time for next retry
                        retries += 1
        raise Exception(f"Failed to get a valid response after {max_retries} retries.")