Spaces:
Running
Running
"""
API Handler - Obsługa komunikacji z OpenAI API
"""
| from openai import OpenAI | |
| import time | |
| class APIHandler: | |
| """Obs艂uguje komunikacj臋 z OpenAI API""" | |
| def __init__(self, api_key): | |
| self.api_key = api_key | |
| self.client = None | |
| if api_key: | |
| self.client = OpenAI(api_key=api_key) | |
| def set_api_key(self, api_key): | |
| """Ustawia nowy klucz API""" | |
| self.api_key = api_key | |
| self.client = OpenAI(api_key=api_key) | |
| def validate_api_key(self): | |
| """ | |
| Waliduje klucz API poprzez pr贸b臋 pobrania listy modeli | |
| Returns: | |
| tuple: (success: bool, message: str) | |
| """ | |
| if not self.api_key: | |
| return False, "Klucz API jest pusty" | |
| try: | |
| self.client = OpenAI(api_key=self.api_key) | |
| # Pr贸ba pobrania modeli jako test po艂膮czenia | |
| models = self.client.models.list() | |
| return True, "Klucz API jest poprawny" | |
| except Exception as e: | |
| error_msg = str(e) | |
| if "401" in error_msg or "Incorrect API key" in error_msg: | |
| return False, "Nieprawid艂owy klucz API" | |
| elif "429" in error_msg: | |
| return False, "Przekroczono limit zapyta艅" | |
| else: | |
| return False, f"B艂膮d po艂膮czenia: {error_msg[:100]}" | |
| def get_available_models(self): | |
| """ | |
| Pobiera list臋 dost臋pnych modeli (GPT, o1, fine-tuned) | |
| Returns: | |
| list: Lista nazw modeli | |
| """ | |
| if not self.client: | |
| return ["gpt-4o", "gpt-4", "gpt-3.5-turbo"] # Domy艣lne modele | |
| try: | |
| models = self.client.models.list() | |
| # Filtruj modele OpenAI (GPT, o1, fine-tuned) | |
| # Wykluczamy tylko modele innych firm (np. whisper, dall-e, tts, embeddings) | |
| excluded_prefixes = ('whisper-', 'dall-e', 'tts-', 'text-embedding', 'babbage', 'davinci') | |
| openai_models = [ | |
| model.id for model in models.data | |
| if not model.id.startswith(excluded_prefixes) | |
| ] | |
| # Sortuj z priorytetem dla popularnych modeli | |
| priority_models = ["gpt-4o", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo", "o1", "o1-mini", "o1-preview"] | |
| sorted_models = [] | |
| # Dodaj modele priorytetowe, je艣li istniej膮 | |
| for pm in priority_models: | |
| if pm in openai_models: | |
| sorted_models.append(pm) | |
| openai_models.remove(pm) | |
| # Posortuj pozosta艂e modele | |
| # Fine-tuned modele (ft:...) na ko艅cu, alfabetycznie | |
| standard_models = [m for m in openai_models if not m.startswith('ft:')] | |
| finetuned_models = [m for m in openai_models if m.startswith('ft:')] | |
| sorted_models.extend(sorted(standard_models)) | |
| sorted_models.extend(sorted(finetuned_models)) | |
| return sorted_models if sorted_models else ["gpt-4o", "gpt-4", "gpt-3.5-turbo"] | |
| except Exception as e: | |
| print(f"B艂膮d pobierania modeli: {e}") | |
| return ["gpt-4o", "gpt-4", "gpt-3.5-turbo"] | |
| def generate_response(self, prompt, model="gpt-4o", temperature=0.1, max_tokens=2000, top_p=1.0): | |
| """ | |
| Generuje odpowied藕 z OpenAI API z automatycznym fallback dla nowych modeli | |
| Args: | |
| prompt: Tekst promptu systemowego | |
| model: Model OpenAI | |
| temperature: Temperatura (0.0-2.0) | |
| max_tokens: Maksymalna d艂ugo艣膰 odpowiedzi | |
| top_p: Nucleus sampling parameter | |
| Returns: | |
| str: Wygenerowana odpowied藕 lub komunikat b艂臋du | |
| """ | |
| if not self.client: | |
| return "ERROR: Brak po艂膮czenia z API (nieprawid艂owy klucz)" | |
| # Przygotuj parametry API - najpierw spr贸buj ze starym API (max_tokens) | |
| api_params = { | |
| "model": model, | |
| "messages": [ | |
| {"role": "system", "content": prompt}, | |
| {"role": "user", "content": "Please provide your response based on the system prompt."} | |
| ], | |
| "temperature": temperature, | |
| "max_tokens": max_tokens, # Starsze modele (gpt-4, gpt-3.5, fine-tuned) | |
| "top_p": top_p | |
| } | |
| try: | |
| # Pierwsza pr贸ba: u偶yj max_tokens (kompatybilno艣膰 ze starszymi modelami) | |
| response = self.client.chat.completions.create(**api_params) | |
| return response.choices[0].message.content | |
| except Exception as e: | |
| error_msg = str(e) | |
| # Automatyczny fallback: je艣li b艂膮d dotyczy max_tokens, prze艂膮cz na max_completion_tokens | |
| if "max_tokens" in error_msg and "max_completion_tokens" in error_msg: | |
| try: | |
| # Usu艅 stary parametr i dodaj nowy | |
| api_params.pop("max_tokens") | |
| api_params["max_completion_tokens"] = max_tokens | |
| # Usu艅 r贸wnie偶 temperature i top_p (nowe modele ich nie akceptuj膮) | |
| api_params.pop("temperature", None) | |
| api_params.pop("top_p", None) | |
| # Pon贸w zapytanie z nowymi parametrami | |
| response = self.client.chat.completions.create(**api_params) | |
| return response.choices[0].message.content | |
| except Exception as retry_error: | |
| return f"ERROR: Retry failed: {str(retry_error)[:200]}" | |
| # Standardowa obs艂uga b艂臋d贸w | |
| if "429" in error_msg: | |
| return f"ERROR: Rate limit exceeded - poczekaj chwil臋" | |
| elif "insufficient_quota" in error_msg: | |
| return f"ERROR: Brak 艣rodk贸w na koncie OpenAI" | |
| elif "invalid_request_error" in error_msg: | |
| return f"ERROR: Nieprawid艂owe parametry: {error_msg[:200]}" | |
| else: | |
| return f"ERROR: {error_msg[:200]}" | |
| def estimate_cost(self, model, num_responses, avg_prompt_tokens=500, avg_completion_tokens=1000): | |
| """ | |
| Szacuje koszt testu | |
| Args: | |
| model: Nazwa modelu | |
| num_responses: Liczba odpowiedzi (dla obu prompt贸w 艂膮cznie) | |
| avg_prompt_tokens: 艢rednia liczba token贸w w prompcie | |
| avg_completion_tokens: 艢rednia liczba token贸w w odpowiedzi | |
| Returns: | |
| float: Szacunkowy koszt w USD | |
| """ | |
| # Ceny za 1M token贸w (stan na grudzie艅 2024) | |
| pricing = { | |
| "gpt-4o": {"input": 2.50, "output": 10.00}, | |
| "gpt-4-turbo": {"input": 10.00, "output": 30.00}, | |
| "gpt-4": {"input": 30.00, "output": 60.00}, | |
| "gpt-3.5-turbo": {"input": 0.50, "output": 1.50}, | |
| "o1-preview": {"input": 15.00, "output": 60.00}, | |
| "o1-mini": {"input": 3.00, "output": 12.00}, | |
| "o1": {"input": 15.00, "output": 60.00} | |
| } | |
| # Znajd藕 odpowiedni cennik | |
| model_pricing = None | |
| for key in pricing: | |
| if key in model: | |
| model_pricing = pricing[key] | |
| break | |
| if not model_pricing: | |
| model_pricing = pricing["gpt-4o"] # Domy艣lnie gpt-4o | |
| # Oblicz koszt | |
| input_cost = (avg_prompt_tokens * num_responses * model_pricing["input"]) / 1_000_000 | |
| output_cost = (avg_completion_tokens * num_responses * model_pricing["output"]) / 1_000_000 | |
| total_cost = input_cost + output_cost | |
| return round(total_cost, 4) | |