File size: 7,521 Bytes
e4b5062
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bcb100b
e4b5062
 
 
 
bcb100b
e4b5062
bcb100b
e4b5062
 
 
 
 
 
 
bcb100b
 
 
 
f748f4a
 
bcb100b
 
 
 
 
f748f4a
bcb100b
 
 
e4b5062
bcb100b
e4b5062
 
bcb100b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e4b5062
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f748f4a
e4b5062
 
 
 
f748f4a
 
 
 
e4b5062
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
"""
API Handler - Obs艂uga komunikacji z OpenAI API
"""

from openai import OpenAI
import time


class APIHandler:
    """Handles all communication with the OpenAI API.

    Responsibilities: client lifecycle, API-key validation, model
    discovery, chat-completion requests (with an automatic fallback for
    models that require ``max_completion_tokens``), and rough cost
    estimation. User-facing messages are intentionally in Polish.
    """

    # Fallback shown whenever no client is available or a listing fails.
    DEFAULT_MODELS = ["gpt-4o", "gpt-4", "gpt-3.5-turbo"]

    def __init__(self, api_key):
        """Create a handler; a client is built only for a non-empty key.

        Args:
            api_key: OpenAI API key. Empty/None leaves ``client`` as None.
        """
        self.api_key = api_key
        self.client = None
        if api_key:
            self.client = OpenAI(api_key=api_key)

    def set_api_key(self, api_key):
        """Replace the API key and rebuild the client.

        Mirrors ``__init__``: an empty/None key resets the client to
        None instead of constructing one with an invalid key (previously
        an empty key still produced a client, defeating the
        ``if not self.client`` guards elsewhere).
        """
        self.api_key = api_key
        self.client = OpenAI(api_key=api_key) if api_key else None

    def validate_api_key(self):
        """Validate the API key by attempting to list available models.

        Returns:
            tuple: (success: bool, message: str) - message is in Polish.
        """
        if not self.api_key:
            return False, "Klucz API jest pusty"

        try:
            self.client = OpenAI(api_key=self.api_key)
            # Listing models is a cheap authenticated call that proves
            # the key works; the result itself is not needed here.
            self.client.models.list()
            return True, "Klucz API jest poprawny"
        except Exception as e:
            error_msg = str(e)
            # Map the most common failure modes to friendly messages.
            if "401" in error_msg or "Incorrect API key" in error_msg:
                return False, "Nieprawid艂owy klucz API"
            elif "429" in error_msg:
                return False, "Przekroczono limit zapyta艅"
            else:
                return False, f"B艂膮d po艂膮czenia: {error_msg[:100]}"

    def get_available_models(self):
        """Fetch the list of available models (GPT, o1, fine-tuned).

        Returns:
            list: Model ids, popular models first, then remaining
            standard models alphabetically, then fine-tuned (``ft:``)
            models alphabetically. Falls back to DEFAULT_MODELS when no
            client is configured or the API call fails.
        """
        if not self.client:
            return list(self.DEFAULT_MODELS)  # fresh copy for the caller

        try:
            models = self.client.models.list()
            # Keep chat-capable OpenAI models; drop audio/image/embedding
            # and legacy completion families.
            excluded_prefixes = ('whisper-', 'dall-e', 'tts-', 'text-embedding', 'babbage', 'davinci')

            candidates = [
                m.id for m in models.data
                if not m.id.startswith(excluded_prefixes)
            ]

            # Popular models first, in a fixed order.
            priority_models = ["gpt-4o", "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo", "o1", "o1-mini", "o1-preview"]
            ordered = []
            for pm in priority_models:
                if pm in candidates:
                    ordered.append(pm)
                    candidates.remove(pm)

            # Remaining models alphabetically; fine-tuned (ft:...) last.
            standard = sorted(m for m in candidates if not m.startswith('ft:'))
            finetuned = sorted(m for m in candidates if m.startswith('ft:'))
            ordered.extend(standard)
            ordered.extend(finetuned)

            return ordered if ordered else list(self.DEFAULT_MODELS)
        except Exception as e:
            print(f"B艂膮d pobierania modeli: {e}")
            return list(self.DEFAULT_MODELS)

    def generate_response(self, prompt, model="gpt-4o", temperature=0.1, max_tokens=2000, top_p=1.0):
        """Generate a response from the OpenAI API, with an automatic
        fallback for newer models.

        Args:
            prompt: System-prompt text.
            model: OpenAI model id.
            temperature: Sampling temperature (0.0-2.0).
            max_tokens: Maximum response length.
            top_p: Nucleus-sampling parameter.

        Returns:
            str: The generated response, or an "ERROR: ..." message.
        """
        if not self.client:
            return "ERROR: Brak po艂膮czenia z API (nieprawid艂owy klucz)"

        # First attempt uses the legacy max_tokens parameter for
        # compatibility with gpt-4 / gpt-3.5 / fine-tuned models.
        api_params = {
            "model": model,
            "messages": [
                {"role": "system", "content": prompt},
                {"role": "user", "content": "Please provide your response based on the system prompt."}
            ],
            "temperature": temperature,
            "max_tokens": max_tokens,
            "top_p": top_p
        }

        try:
            response = self.client.chat.completions.create(**api_params)
            return response.choices[0].message.content

        except Exception as e:
            error_msg = str(e)

            # Automatic fallback: newer models reject max_tokens and
            # demand max_completion_tokens (and fixed sampling params).
            if "max_tokens" in error_msg and "max_completion_tokens" in error_msg:
                try:
                    api_params.pop("max_tokens")
                    api_params["max_completion_tokens"] = max_tokens
                    # Newer (o1-family) models also reject these.
                    api_params.pop("temperature", None)
                    api_params.pop("top_p", None)

                    response = self.client.chat.completions.create(**api_params)
                    return response.choices[0].message.content

                except Exception as retry_error:
                    return f"ERROR: Retry failed: {str(retry_error)[:200]}"

            # Standard error handling (no f-prefix needed on literals).
            if "429" in error_msg:
                return "ERROR: Rate limit exceeded - poczekaj chwil臋"
            elif "insufficient_quota" in error_msg:
                return "ERROR: Brak 艣rodk贸w na koncie OpenAI"
            elif "invalid_request_error" in error_msg:
                return f"ERROR: Nieprawid艂owe parametry: {error_msg[:200]}"
            else:
                return f"ERROR: {error_msg[:200]}"

    def estimate_cost(self, model, num_responses, avg_prompt_tokens=500, avg_completion_tokens=1000):
        """Estimate the cost of a test run.

        Args:
            model: Model name (matched by substring against known tiers).
            num_responses: Number of responses (both prompts combined).
            avg_prompt_tokens: Average prompt length in tokens.
            avg_completion_tokens: Average response length in tokens.

        Returns:
            float: Estimated cost in USD, rounded to 4 decimal places.
        """
        # Prices per 1M tokens (as of December 2024).
        pricing = {
            "gpt-4o-mini": {"input": 0.15, "output": 0.60},
            "gpt-4o": {"input": 2.50, "output": 10.00},
            "gpt-4-turbo": {"input": 10.00, "output": 30.00},
            "gpt-4": {"input": 30.00, "output": 60.00},
            "gpt-3.5-turbo": {"input": 0.50, "output": 1.50},
            "o1-preview": {"input": 15.00, "output": 60.00},
            "o1-mini": {"input": 3.00, "output": 12.00},
            "o1": {"input": 15.00, "output": 60.00},
        }

        # Match the longest key first so e.g. "gpt-4o-mini" is not
        # priced as "gpt-4o" and "o1-mini" is not priced as "o1".
        model_pricing = next(
            (pricing[key] for key in sorted(pricing, key=len, reverse=True) if key in model),
            pricing["gpt-4o"],  # default to gpt-4o rates for unknown models
        )

        input_cost = (avg_prompt_tokens * num_responses * model_pricing["input"]) / 1_000_000
        output_cost = (avg_completion_tokens * num_responses * model_pricing["output"]) / 1_000_000
        return round(input_cost + output_cost, 4)