habulaj commited on
Commit
100df4e
·
verified ·
1 Parent(s): 491c9ae

Create core.py

Browse files
Files changed (1) hide show
  1. gemini_client/core.py +965 -0
gemini_client/core.py ADDED
@@ -0,0 +1,965 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ #########################################
3
+ # Code Modified to use curl_cffi
4
+ #########################################
5
+ import asyncio
6
+ import json
7
+ import os
8
+ import random
9
+ import re
10
+ import string
11
+ from pathlib import Path
12
+ from datetime import datetime
13
+ from typing import Dict, List, Tuple, Union, Optional
14
+
15
+ from gemini_client.enums import Endpoint, Headers, Model
16
+
17
+ # Use curl_cffi for requests
18
+ from curl_cffi import CurlError
19
+ from curl_cffi.requests import AsyncSession
20
+ # Import common request exceptions (curl_cffi often wraps these)
21
+ from requests.exceptions import RequestException, Timeout, HTTPError
22
+
23
+ # For image models using validation. Adjust based on organization internal pydantic.
24
+ # Updated import for Pydantic V2
25
+ from pydantic import BaseModel, field_validator
26
+
27
+ # Rich is retained for logging within image methods.
28
+ from rich.console import Console
29
+ from rich.markdown import Markdown
30
+
31
+ console = Console()
32
+
33
+ #########################################
34
+ # New Enums and functions for endpoints,
35
+ # headers, models, file upload and images.
36
+ #########################################
37
+
38
+ #########################################
39
+ # Cookie loading and Chatbot classes
40
+ #########################################
41
+
42
+ from gemini_client.utils import upload_file, load_cookies, save_cookies
43
+
44
class Chatbot:
    """
    Synchronous wrapper for the AsyncChatbot class.

    Owns a private event loop and drives the async client with
    ``run_until_complete`` so callers get a plain blocking API for
    authentication, conversation management, and message sending.

    Attributes:
        loop (asyncio.AbstractEventLoop): Private event loop used to run async tasks.
        secure_1psid (str): Authentication cookie.
        secure_1psidts (str): Authentication cookie.
        async_chatbot (AsyncChatbot): Underlying asynchronous chatbot instance.
    """
    def __init__(
        self,
        cookie_path: str,
        proxy: Optional[Union[str, Dict[str, str]]] = None,  # Allow string or dict proxy
        timeout: int = 20,
        model: Model = Model.UNSPECIFIED,
        impersonate: str = "chrome110"  # curl_cffi browser profile
    ):
        # BUGFIX: the previous code grabbed the *running* loop when one existed
        # and then called run_until_complete() on it, which always raises
        # "This event loop is already running". A synchronous wrapper cannot
        # block inside a live loop, so fail fast with a clear message instead.
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No loop running in this thread: create and install a private one.
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)
        else:
            raise RuntimeError(
                "Chatbot cannot be used from within a running event loop; "
                "use AsyncChatbot.create() instead."
            )

        self.secure_1psid, self.secure_1psidts, additional_cookies = load_cookies(cookie_path)
        self.async_chatbot = self.loop.run_until_complete(
            AsyncChatbot.create(self.secure_1psid, self.secure_1psidts, proxy, timeout, model, impersonate, additional_cookies, cookie_path)
        )

    def save_conversation(self, file_path: str, conversation_name: str):
        """Persist the current conversation state under *conversation_name*."""
        return self.loop.run_until_complete(
            self.async_chatbot.save_conversation(file_path, conversation_name)
        )

    def load_conversations(self, file_path: str) -> List[Dict]:
        """Return every conversation saved in *file_path* (empty list if none)."""
        return self.loop.run_until_complete(
            self.async_chatbot.load_conversations(file_path)
        )

    def load_conversation(self, file_path: str, conversation_name: str) -> bool:
        """Restore a saved conversation; returns True on success."""
        return self.loop.run_until_complete(
            self.async_chatbot.load_conversation(file_path, conversation_name)
        )

    def refresh_cookie(self) -> bool:
        """
        Proactively rotate the __Secure-1PSIDTS cookie and save it automatically.
        Useful for refreshing the cookie before it expires.

        Returns:
            bool: True if the cookie was refreshed successfully, False otherwise.
        """
        return self.loop.run_until_complete(
            self.async_chatbot.refresh_cookie()
        )

    def ask(
        self,
        message: str,
        image: Optional[Union[bytes, str, Path]] = None,
        video: Optional[Union[bytes, str, Path]] = None,
        audio: Optional[Union[bytes, str, Path]] = None
    ) -> dict:
        """Send *message* (optionally with one media attachment) and block for the reply."""
        # Delegate media handling entirely to the async ask method.
        return self.loop.run_until_complete(self.async_chatbot.ask(message, image=image, video=video, audio=audio))
114
+
115
class AsyncChatbot:
    """
    Asynchronous chatbot client for interacting with Google Gemini using curl_cffi.

    This class manages authentication, session state, conversation history,
    and sending/receiving messages (including images) asynchronously.
    Instances should be obtained via the AsyncChatbot.create() factory, which
    also fetches the SNlM0e session token.

    Attributes:
        headers (dict): HTTP headers for requests.
        _reqid (int): Request identifier for Gemini API.
        SNlM0e (str): Session token required for API requests.
        conversation_id (str): Current conversation ID.
        response_id (str): Current response ID.
        choice_id (str): Current choice ID.
        proxy (str | dict | None): Proxy configuration as supplied by the caller.
        proxies_dict (dict | None): Proxy dictionary normalized for curl_cffi.
        secure_1psid (str): Authentication cookie.
        secure_1psidts (str): Authentication cookie.
        session (AsyncSession): curl_cffi session for HTTP requests.
        timeout (int): Request timeout in seconds.
        model (Model): Selected Gemini model.
        impersonate (str): Browser profile for curl_cffi to impersonate.
        cookie_path (str | None): Cookie file path used for auto-saving rotated cookies.
        additional_cookies (dict): Extra cookies merged into the session (e.g. COMPASS).
    """
    # __slots__ fixes the attribute set (saves per-instance memory and catches typos).
    # NOTE(review): the "headers" slot is never assigned in __init__ (headers live
    # on the session object) — confirm whether it is still needed.
    __slots__ = [
        "headers",
        "_reqid",
        "SNlM0e",
        "conversation_id",
        "response_id",
        "choice_id",
        "proxy",  # Store the original proxy config
        "proxies_dict",  # Store the curl_cffi-compatible proxy dict
        "secure_1psidts",
        "secure_1psid",
        "session",
        "timeout",
        "model",
        "impersonate",  # Store impersonate setting
        "cookie_path",  # Path to cookie file for auto-saving
        "additional_cookies",  # Store additional cookies
    ]
156
+
157
+ def __init__(
158
+ self,
159
+ secure_1psid: str,
160
+ secure_1psidts: str,
161
+ proxy: Optional[Union[str, Dict[str, str]]] = None, # Allow string or dict proxy
162
+ timeout: int = 20,
163
+ model: Model = Model.UNSPECIFIED,
164
+ impersonate: str = "chrome110", # Added impersonate
165
+ additional_cookies: Optional[Dict[str, str]] = None, # Additional cookies like COMPASS
166
+ cookie_path: Optional[str] = None, # Path to cookie file for auto-saving
167
+ ):
168
+ headers = Headers.GEMINI.value.copy()
169
+ if model != Model.UNSPECIFIED:
170
+ headers.update(model.model_header)
171
+ self._reqid = int("".join(random.choices(string.digits, k=7))) # Increased length for less collision chance
172
+ self.proxy = proxy # Store original proxy setting
173
+ self.impersonate = impersonate # Store impersonate setting
174
+
175
+ # Prepare proxy dictionary for curl_cffi
176
+ self.proxies_dict = None
177
+ if isinstance(proxy, str):
178
+ self.proxies_dict = {"http": proxy, "https": proxy} # curl_cffi uses http/https keys
179
+ elif isinstance(proxy, dict):
180
+ self.proxies_dict = proxy # Assume it's already in the correct format
181
+
182
+ self.conversation_id = ""
183
+ self.response_id = ""
184
+ self.choice_id = ""
185
+ self.secure_1psid = secure_1psid
186
+ self.secure_1psidts = secure_1psidts
187
+
188
+ # Prepare cookies dict with required cookies and any additional ones
189
+ cookies_dict = {
190
+ "__Secure-1PSID": secure_1psid,
191
+ "__Secure-1PSIDTS": secure_1psidts
192
+ }
193
+ if additional_cookies:
194
+ cookies_dict.update(additional_cookies)
195
+
196
+ # Initialize curl_cffi AsyncSession
197
+ self.session = AsyncSession(
198
+ headers=headers,
199
+ cookies=cookies_dict,
200
+ proxies=self.proxies_dict,
201
+ timeout=timeout,
202
+ impersonate=self.impersonate
203
+ # verify and http2 are handled automatically by curl_cffi
204
+ )
205
+ # No need to set proxies/headers/cookies again, done in constructor
206
+
207
+ self.timeout = timeout # Store timeout for potential direct use in requests
208
+ self.model = model
209
+ self.SNlM0e = None # Initialize SNlM0e
210
+ self.cookie_path = cookie_path # Store cookie path for auto-saving
211
+ self.additional_cookies = additional_cookies or {} # Store additional cookies
212
+
213
+ @classmethod
214
+ async def create(
215
+ cls,
216
+ secure_1psid: str,
217
+ secure_1psidts: str,
218
+ proxy: Optional[Union[str, Dict[str, str]]] = None, # Allow string or dict proxy
219
+ timeout: int = 20,
220
+ model: Model = Model.UNSPECIFIED,
221
+ impersonate: str = "chrome110", # Added impersonate
222
+ additional_cookies: Optional[Dict[str, str]] = None, # Additional cookies like COMPASS
223
+ cookie_path: Optional[str] = None, # Path to cookie file for auto-saving
224
+ ) -> "AsyncChatbot":
225
+ """
226
+ Factory method to create and initialize an AsyncChatbot instance.
227
+ Fetches the necessary SNlM0e value asynchronously.
228
+ """
229
+ instance = cls(secure_1psid, secure_1psidts, proxy, timeout, model, impersonate, additional_cookies, cookie_path)
230
+ try:
231
+ instance.SNlM0e = await instance.__get_snlm0e()
232
+ except Exception as e:
233
+ # Log the error and re-raise or handle appropriately
234
+ console.log(f"[red]Error during AsyncChatbot initialization (__get_snlm0e): {e}[/red]", style="bold red")
235
+ # Optionally close the session if initialization fails critically
236
+ await instance.session.close() # Use close() for AsyncSession
237
+ raise # Re-raise the exception to signal failure
238
+ return instance
239
+
240
+ async def refresh_cookie(self) -> bool:
241
+ """
242
+ Atualiza proativamente o cookie __Secure-1PSIDTS e salva automaticamente.
243
+ Útil para atualizar o cookie antes que expire.
244
+
245
+ Returns:
246
+ bool: True se o cookie foi atualizado com sucesso, False caso contrário.
247
+ """
248
+ try:
249
+ await self.__rotate_cookies()
250
+ # Atualizar SNlM0e após atualizar o cookie
251
+ self.SNlM0e = await self.__get_snlm0e()
252
+ return True
253
+ except Exception as e:
254
+ console.log(f"[red]Falha ao atualizar cookie proativamente: {e}[/red]")
255
+ return False
256
+
257
+ async def save_conversation(self, file_path: str, conversation_name: str) -> None:
258
+ # Logic remains the same
259
+ conversations = await self.load_conversations(file_path)
260
+ conversation_data = {
261
+ "conversation_name": conversation_name,
262
+ "_reqid": self._reqid,
263
+ "conversation_id": self.conversation_id,
264
+ "response_id": self.response_id,
265
+ "choice_id": self.choice_id,
266
+ "SNlM0e": self.SNlM0e,
267
+ "model_name": self.model.model_name, # Save the model used
268
+ "timestamp": datetime.now().isoformat(), # Add timestamp
269
+ }
270
+
271
+ found = False
272
+ for i, conv in enumerate(conversations):
273
+ if conv.get("conversation_name") == conversation_name:
274
+ conversations[i] = conversation_data # Update existing
275
+ found = True
276
+ break
277
+ if not found:
278
+ conversations.append(conversation_data) # Add new
279
+
280
+ try:
281
+ # Ensure directory exists
282
+ Path(file_path).parent.mkdir(parents=True, exist_ok=True)
283
+ with open(file_path, "w", encoding="utf-8") as f:
284
+ json.dump(conversations, f, indent=4, ensure_ascii=False)
285
+ except IOError as e:
286
+ console.log(f"[red]Error saving conversation to {file_path}: {e}[/red]")
287
+ raise
288
+
289
+ async def load_conversations(self, file_path: str) -> List[Dict]:
290
+ # Logic remains the same
291
+ if not os.path.isfile(file_path):
292
+ return []
293
+ try:
294
+ with open(file_path, 'r', encoding="utf-8") as f:
295
+ return json.load(f)
296
+ except (json.JSONDecodeError, IOError) as e:
297
+ console.log(f"[red]Error loading conversations from {file_path}: {e}[/red]")
298
+ return []
299
+
300
+ async def load_conversation(self, file_path: str, conversation_name: str) -> bool:
301
+ # Logic remains the same, but update headers on the session
302
+ conversations = await self.load_conversations(file_path)
303
+ for conversation in conversations:
304
+ if conversation.get("conversation_name") == conversation_name:
305
+ try:
306
+ self._reqid = conversation["_reqid"]
307
+ self.conversation_id = conversation["conversation_id"]
308
+ self.response_id = conversation["response_id"]
309
+ self.choice_id = conversation["choice_id"]
310
+ self.SNlM0e = conversation["SNlM0e"]
311
+ if "model_name" in conversation:
312
+ try:
313
+ self.model = Model.from_name(conversation["model_name"])
314
+ # Update headers in the session if model changed
315
+ self.session.headers.update(self.model.model_header)
316
+ except ValueError as e:
317
+ console.log(f"[yellow]Warning: Model '{conversation['model_name']}' from saved conversation not found. Using current model '{self.model.model_name}'. Error: {e}[/yellow]")
318
+
319
+ console.log(f"Loaded conversation '{conversation_name}'")
320
+ return True
321
+ except KeyError as e:
322
+ console.log(f"[red]Error loading conversation '{conversation_name}': Missing key {e}[/red]")
323
+ return False
324
+ console.log(f"[yellow]Conversation '{conversation_name}' not found in {file_path}[/yellow]")
325
+ return False
326
+
327
+ async def __get_snlm0e(self):
328
+ """Fetches the SNlM0e value required for API requests using curl_cffi."""
329
+ if not self.secure_1psid:
330
+ raise ValueError("__Secure-1PSID cookie is required.")
331
+
332
+ try:
333
+ # Use the session's get method
334
+ resp = await self.session.get(
335
+ Endpoint.INIT.value,
336
+ timeout=self.timeout # Timeout is already set in session, but can override
337
+ # follow_redirects is handled automatically by curl_cffi
338
+ )
339
+ resp.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
340
+
341
+ # Check for authentication issues
342
+ if "Sign in to continue" in resp.text or "accounts.google.com" in str(resp.url):
343
+ raise PermissionError("Authentication failed. Cookies might be invalid or expired. Please update them.")
344
+
345
+ # Regex to find the SNlM0e value
346
+ snlm0e_match = re.search(r'''["']SNlM0e["']\s*:\s*["'](.*?)["']''', resp.text)
347
+ if not snlm0e_match:
348
+ error_message = "SNlM0e value not found in response."
349
+ if resp.status_code == 429:
350
+ error_message += " Rate limit likely exceeded."
351
+ else:
352
+ error_message += f" Response status: {resp.status_code}. Check cookie validity and network."
353
+ raise ValueError(error_message)
354
+
355
+ # Try to refresh PSIDTS if needed
356
+ if not self.secure_1psidts and "PSIDTS" not in self.session.cookies:
357
+ try:
358
+ # Attempt to rotate cookies to get a fresh PSIDTS
359
+ await self.__rotate_cookies()
360
+ except Exception as e:
361
+ console.log(f"[yellow]Warning: Could not refresh PSIDTS cookie: {e}[/yellow]")
362
+ # Continue anyway as some accounts don't need PSIDTS
363
+
364
+ return snlm0e_match.group(1)
365
+
366
+ except Timeout as e: # Catch requests.exceptions.Timeout
367
+ raise TimeoutError(f"Request timed out while fetching SNlM0e: {e}") from e
368
+ except (RequestException, CurlError) as e: # Catch general request errors and Curl specific errors
369
+ raise ConnectionError(f"Network error while fetching SNlM0e: {e}") from e
370
+ except HTTPError as e: # Catch requests.exceptions.HTTPError
371
+ # Tentar atualizar cookie automaticamente se for erro de autenticação
372
+ if e.response.status_code in (401, 403):
373
+ console.log(f"[yellow]Erro de autenticação ao buscar SNlM0e (status {e.response.status_code}). Tentando atualizar cookie...[/yellow]")
374
+ try:
375
+ await self.__rotate_cookies()
376
+ # Retentar buscar SNlM0e após atualizar cookie
377
+ resp = await self.session.get(
378
+ Endpoint.INIT.value,
379
+ timeout=self.timeout
380
+ )
381
+ resp.raise_for_status()
382
+
383
+ if "Sign in to continue" in resp.text or "accounts.google.com" in str(resp.url):
384
+ raise PermissionError("Authentication failed. Cookies might be invalid or expired. Please update them.")
385
+
386
+ snlm0e_match = re.search(r'''["']SNlM0e["']\s*:\s*["'](.*?)["']''', resp.text)
387
+ if snlm0e_match:
388
+ console.log("[green]✓ SNlM0e obtido com sucesso após atualização do cookie![/green]")
389
+ return snlm0e_match.group(1)
390
+ else:
391
+ raise ValueError("SNlM0e value not found in response after cookie update.")
392
+ except Exception as rotate_error:
393
+ console.log(f"[red]Falha ao atualizar cookie: {rotate_error}[/red]")
394
+ raise PermissionError(f"Authentication failed (status {e.response.status_code}). Cookie update failed. {e}") from e
395
+ else:
396
+ raise Exception(f"HTTP error {e.response.status_code} while fetching SNlM0e: {e}") from e
397
+
398
    async def __rotate_cookies(self):
        """
        Rotates the __Secure-1PSIDTS cookie and saves it automatically.
        Returns the new cookie value if successful.

        Posts a fixed placeholder payload to the rotate-cookies endpoint; on
        success the fresh cookie is stored on the instance and the session,
        and persisted via save_cookies() when cookie_path is configured.

        Raises:
            ValueError: if the response carries no new __Secure-1PSIDTS cookie.
            Exception: any network/HTTP error from the POST (logged, then re-raised).
        """
        try:
            # Log messages below are user-facing and intentionally in Portuguese.
            console.log("[yellow]Atualizando cookie __Secure-1PSIDTS automaticamente...[/yellow]")
            # The rotation endpoint expects this literal placeholder body.
            response = await self.session.post(
                Endpoint.ROTATE_COOKIES.value,
                headers=Headers.ROTATE_COOKIES.value,
                data='[000,"-0000000000000000000"]',
                timeout=self.timeout
            )
            response.raise_for_status()

            # Walrus: bind the new cookie value only when the server actually set one.
            if new_1psidts := response.cookies.get("__Secure-1PSIDTS"):
                self.secure_1psidts = new_1psidts
                self.session.cookies.set("__Secure-1PSIDTS", new_1psidts)

                # Automatically persist to the cookie file when a path is configured.
                if self.cookie_path:
                    try:
                        save_cookies(
                            self.cookie_path,
                            self.secure_1psid,
                            self.secure_1psidts,
                            self.additional_cookies
                        )
                        console.log("[green]✓ Cookie atualizado e salvo automaticamente![/green]")
                    except Exception as save_error:
                        # Rotation itself succeeded; failing to persist is only a warning.
                        console.log(f"[yellow]Aviso: Cookie atualizado mas não foi possível salvar no arquivo: {save_error}[/yellow]")

                return new_1psidts
            else:
                raise ValueError("Novo cookie __Secure-1PSIDTS não encontrado na resposta")
        except Exception as e:
            # Log every failure mode, then let the caller decide how to recover.
            console.log(f"[red]Falha ao atualizar cookie: {e}[/red]")
            raise
436
+
437
+
438
+ async def ask(
439
+ self,
440
+ message: str,
441
+ image: Optional[Union[bytes, str, Path]] = None,
442
+ video: Optional[Union[bytes, str, Path]] = None,
443
+ audio: Optional[Union[bytes, str, Path]] = None
444
+ ) -> dict:
445
+ """
446
+ Sends a message to Google Gemini and returns the response using curl_cffi.
447
+
448
+ Parameters:
449
+ message: str
450
+ The message to send.
451
+ image: Optional[Union[bytes, str, Path]]
452
+ Optional image data (bytes) or path to an image file to include.
453
+ video: Optional[Union[bytes, str, Path]]
454
+ Optional video data (bytes) or path to a video file to include.
455
+ audio: Optional[Union[bytes, str, Path]]
456
+ Optional audio data (bytes) or path to an audio file to include.
457
+
458
+ Returns:
459
+ dict: A dictionary containing the response content and metadata.
460
+ """
461
+ if self.SNlM0e is None:
462
+ raise RuntimeError("AsyncChatbot not properly initialized. Call AsyncChatbot.create()")
463
+
464
+ params = {
465
+ "bl": "boq_assistant-bard-web-server_20240625.13_p0",
466
+ "_reqid": str(self._reqid),
467
+ "rt": "c",
468
+ }
469
+
470
+ # Handle media upload (image, video, or audio) - only one at a time
471
+ media_upload_id = None
472
+ media_filename = None
473
+ media_mime_type = None
474
+ media_file = None
475
+ media_type = None # Initialize to None
476
+
477
+ # Determine which media type to use (priority: image > video > audio)
478
+ if image:
479
+ media_file = image
480
+ media_type = "image"
481
+ elif video:
482
+ media_file = video
483
+ media_type = "video"
484
+ elif audio:
485
+ media_file = audio
486
+ media_type = "audio"
487
+
488
+ if media_file:
489
+ try:
490
+ # Get filename and mime type if file is a path
491
+ if not isinstance(media_file, bytes):
492
+ file_path = Path(media_file)
493
+ media_filename = file_path.name
494
+ import mimetypes
495
+ media_mime_type, _ = mimetypes.guess_type(str(file_path))
496
+ if not media_mime_type:
497
+ # Fallback based on file extension
498
+ ext = file_path.suffix.lower()
499
+ if media_type == "video" or ext in ['.mp4', '.avi', '.mov', '.webm', '.mkv']:
500
+ media_mime_type = "video/mp4"
501
+ elif media_type == "audio" or ext in ['.mp3', '.wav', '.ogg', '.flac', '.aac', '.m4a']:
502
+ media_mime_type = "audio/mpeg"
503
+ else:
504
+ media_mime_type = "image/jpeg"
505
+ else:
506
+ # Default filenames for bytes
507
+ if media_type == "video":
508
+ media_filename = "video.mp4"
509
+ media_mime_type = "video/mp4"
510
+ elif media_type == "audio":
511
+ media_filename = "audio.mp3"
512
+ media_mime_type = "audio/mpeg"
513
+ else:
514
+ media_filename = "image.jpg"
515
+ media_mime_type = "image/jpeg"
516
+
517
+ # Pass proxy and impersonate settings to upload_file
518
+ media_upload_id = await upload_file(media_file, proxy=self.proxies_dict, impersonate=self.impersonate)
519
+ console.log(f"{media_type.capitalize()} uploaded successfully. ID: {media_upload_id}")
520
+ except Exception as e:
521
+ console.log(f"[red]Error uploading {media_type}: {e}[/red]")
522
+ return {"content": f"Error uploading {media_type}: {e}", "error": True}
523
+
524
+ # Always start a new conversation (no history/context)
525
+ # Reset conversation IDs to ensure each message is independent
526
+ self.conversation_id = ""
527
+ self.response_id = ""
528
+ self.choice_id = ""
529
+
530
+ # Prepare message structure
531
+ if media_upload_id:
532
+ # Structure when media (image/video/audio) is included (based on real Gemini request):
533
+ # The structure has many more fields than just the message
534
+ # Element 0: [message, 0, null, [[[media_id, 1, null, mime_type], filename, null*7, [0]]], null, null, 0]
535
+ # Element 1: ["pt"] - language
536
+ # Element 2: ["", "", "", null, null, null, null, null, null, ""] - empty strings array
537
+ # Element 3+: Many more fields (tokens, hashes, etc.)
538
+ media_data = [
539
+ [media_upload_id, 1, None, media_mime_type],
540
+ media_filename,
541
+ None, None, None, None, None, None,
542
+ [0]
543
+ ]
544
+ # Build the complete structure with all required fields
545
+ message_struct = [
546
+ [message, 0, None, [media_data], None, None, 0], # Element 0: message with media
547
+ ["pt"], # Element 1: language
548
+ ["", "", "", None, None, None, None, None, None, ""], # Element 2: empty strings array
549
+ # Note: The real request has many more fields (tokens, hashes, etc.) but
550
+ # we'll start with the minimal required structure and see if it works
551
+ ]
552
+ else:
553
+ # Even without media, start fresh conversation
554
+ message_struct = [
555
+ [message],
556
+ None,
557
+ ["", "", ""], # Empty conversation IDs for new conversation
558
+ ]
559
+
560
+ # Prepare request data
561
+ # Use separators to match exact format (no spaces) like Gemini expects
562
+ inner_json = json.dumps(message_struct, ensure_ascii=False, separators=(',', ':'))
563
+ data = {
564
+ "f.req": json.dumps([None, inner_json], ensure_ascii=False, separators=(',', ':')),
565
+ "at": self.SNlM0e,
566
+ }
567
+
568
+ try:
569
+ # Debug: log the request structure if there's media
570
+ if media_upload_id:
571
+ console.log(f"[cyan]Debug - Message struct with {media_type}:[/cyan] {json.dumps(message_struct, indent=2)[:500]}")
572
+ console.log(f"[cyan]Debug - {media_type.capitalize()} ID:[/cyan] {media_upload_id}")
573
+ console.log(f"[cyan]Debug - {media_type.capitalize()} filename:[/cyan] {media_filename}")
574
+ console.log(f"[cyan]Debug - {media_type.capitalize()} mime type:[/cyan] {media_mime_type}")
575
+
576
+ # Send request
577
+ # Use longer timeout for videos and audios (they take longer to process)
578
+ request_timeout = self.timeout
579
+ if media_upload_id:
580
+ if media_type == "video":
581
+ request_timeout = max(self.timeout, 300) # At least 5 minutes for videos (they can be slow)
582
+ elif media_type == "audio":
583
+ request_timeout = max(self.timeout, 180) # At least 3 minutes for audios
584
+ else:
585
+ request_timeout = max(self.timeout, 60) # At least 1 minute for images
586
+
587
+ resp = await self.session.post(
588
+ Endpoint.GENERATE.value,
589
+ params=params,
590
+ data=data,
591
+ timeout=request_timeout,
592
+ )
593
+
594
+ # Check status before raising
595
+ if resp.status_code != 200:
596
+ # Detectar erros de autenticação e tentar atualizar cookie automaticamente
597
+ if resp.status_code in (401, 403):
598
+ console.log(f"[yellow]Erro de autenticação detectado (status {resp.status_code}). Tentando atualizar cookie automaticamente...[/yellow]")
599
+ try:
600
+ # Tentar atualizar o cookie
601
+ await self.__rotate_cookies()
602
+ # Atualizar SNlM0e com o novo cookie
603
+ self.SNlM0e = await self.__get_snlm0e()
604
+ # Atualizar o token na requisição
605
+ data["at"] = self.SNlM0e
606
+ # Retentar a requisição uma vez
607
+ console.log("[cyan]Retentando requisição com cookie atualizado...[/cyan]")
608
+ resp = await self.session.post(
609
+ Endpoint.GENERATE.value,
610
+ params=params,
611
+ data=data,
612
+ timeout=request_timeout,
613
+ )
614
+ if resp.status_code == 200:
615
+ console.log("[green]✓ Requisição bem-sucedida após atualização do cookie![/green]")
616
+ else:
617
+ console.log(f"[red]Ainda recebendo status {resp.status_code} após atualização do cookie[/red]")
618
+ except Exception as rotate_error:
619
+ console.log(f"[red]Falha ao atualizar cookie automaticamente: {rotate_error}[/red]")
620
+
621
+ if resp.status_code != 200:
622
+ console.log(f"[red]Non-200 status code: {resp.status_code}[/red]")
623
+ console.log(f"[yellow]Response headers:[/yellow] {dict(resp.headers)}")
624
+ console.log(f"[yellow]Response text (first 3000 chars):[/yellow]\n{resp.text[:3000]}")
625
+ console.log(f"[yellow]Request URL:[/yellow] {resp.url}")
626
+ console.log(f"[yellow]Request params:[/yellow] {params}")
627
+ # Log the data being sent (sanitized)
628
+ debug_data = data.copy()
629
+ if 'f.req' in debug_data:
630
+ # Show first 500 chars of f.req
631
+ console.log(f"[yellow]f.req (first 500 chars):[/yellow] {debug_data['f.req'][:500]}")
632
+
633
+ resp.raise_for_status()
634
+
635
+ # Process response
636
+ lines = resp.text.splitlines()
637
+ if len(lines) < 3:
638
+ raise ValueError(f"Unexpected response format. Status: {resp.status_code}. Content: {resp.text[:200]}...")
639
+
640
+ # Find the line with the response data - process all JSON lines
641
+ body = None
642
+ body_index = 0
643
+ response_json = None # Store the full response_json for model extraction
644
+ all_parsed_parts = [] # Store all parsed parts for model search
645
+
646
+ # Try to parse all JSON lines in the response
647
+ for line_index, line in enumerate(lines):
648
+ # Skip empty lines and the prefix line
649
+ if not line.strip() or line.startswith(")]}'") or line.isdigit():
650
+ continue
651
+
652
+ # Try to parse JSON lines
653
+ if line.startswith("["):
654
+ try:
655
+ parsed_json = json.loads(line)
656
+ response_json = parsed_json # Store for later use
657
+
658
+ # Process all parts in this JSON array
659
+ for part_index, part in enumerate(parsed_json):
660
+ try:
661
+ if isinstance(part, list) and len(part) > 2:
662
+ # part[2] might be a string that needs parsing
663
+ if isinstance(part[2], str):
664
+ main_part = json.loads(part[2])
665
+ else:
666
+ main_part = part[2]
667
+
668
+ # Store all parsed parts for model search
669
+ if main_part:
670
+ all_parsed_parts.append(main_part)
671
+
672
+ # Check if this part contains conversation data
673
+ if main_part and isinstance(main_part, list) and len(main_part) > 4 and main_part[4]:
674
+ body = main_part
675
+ body_index = part_index
676
+ except (IndexError, TypeError, json.JSONDecodeError, AttributeError):
677
+ continue
678
+
679
+ # If we found a body, stop looking
680
+ if body:
681
+ break
682
+ except json.JSONDecodeError:
683
+ continue
684
+
685
+ if not body:
686
+ return {"content": "Failed to parse response body. No valid data found.", "error": True}
687
+
688
+ # Extract data from the response
689
+ try:
690
+ # Extract main content
691
+ content = ""
692
+ if len(body) > 4 and len(body[4]) > 0 and len(body[4][0]) > 1:
693
+ content = body[4][0][1][0] if len(body[4][0][1]) > 0 else ""
694
+
695
+ # Extract conversation metadata
696
+ conversation_id = body[1][0] if len(body) > 1 and len(body[1]) > 0 else self.conversation_id
697
+ response_id = body[1][1] if len(body) > 1 and len(body[1]) > 1 else self.response_id
698
+
699
+ # Extract additional data
700
+ factualityQueries = body[3] if len(body) > 3 else None
701
+ textQuery = body[2][0] if len(body) > 2 and body[2] else ""
702
+
703
+ # Extract choices
704
+ choices = []
705
+ if len(body) > 4:
706
+ for candidate in body[4]:
707
+ if len(candidate) > 1 and isinstance(candidate[1], list) and len(candidate[1]) > 0:
708
+ choices.append({"id": candidate[0], "content": candidate[1][0]})
709
+
710
+ choice_id = choices[0]["id"] if choices else self.choice_id
711
+
712
+ # Extract images - multiple possible formats
713
+ images = []
714
+
715
+ # Format 1: Regular web images
716
+ if len(body) > 4 and len(body[4]) > 0 and len(body[4][0]) > 4 and body[4][0][4]:
717
+ for img_data in body[4][0][4]:
718
+ try:
719
+ img_url = img_data[0][0][0]
720
+ img_alt = img_data[2] if len(img_data) > 2 else ""
721
+ img_title = img_data[1] if len(img_data) > 1 else "[Image]"
722
+ images.append({"url": img_url, "alt": img_alt, "title": img_title})
723
+ except (IndexError, TypeError):
724
+ console.log("[yellow]Warning: Could not parse image data structure (format 1).[/yellow]")
725
+ continue
726
+
727
+ # Format 2: Generated images in standard location
728
+ generated_images = []
729
+ if len(body) > 4 and len(body[4]) > 0 and len(body[4][0]) > 12 and body[4][0][12]:
730
+ try:
731
+ # Path 1: Check for images in [12][7][0]
732
+ if body[4][0][12][7] and body[4][0][12][7][0]:
733
+ # This is the standard path for generated images
734
+ for img_index, img_data in enumerate(body[4][0][12][7][0]):
735
+ try:
736
+ img_url = img_data[0][3][3]
737
+ img_title = f"[Generated Image {img_index+1}]"
738
+ img_alt = img_data[3][5][0] if len(img_data[3]) > 5 and len(img_data[3][5]) > 0 else ""
739
+ generated_images.append({"url": img_url, "alt": img_alt, "title": img_title})
740
+ except (IndexError, TypeError):
741
+ continue
742
+
743
+ # If we found images, but they might be in a different part of the response
744
+ if not generated_images:
745
+ # Look for image generation data in other response parts
746
+ for part_index, part in enumerate(response_json):
747
+ if part_index <= body_index:
748
+ continue
749
+ try:
750
+ img_part = json.loads(part[2])
751
+ if img_part[4][0][12][7][0]:
752
+ for img_index, img_data in enumerate(img_part[4][0][12][7][0]):
753
+ try:
754
+ img_url = img_data[0][3][3]
755
+ img_title = f"[Generated Image {img_index+1}]"
756
+ img_alt = img_data[3][5][0] if len(img_data[3]) > 5 and len(img_data[3][5]) > 0 else ""
757
+ generated_images.append({"url": img_url, "alt": img_alt, "title": img_title})
758
+ except (IndexError, TypeError):
759
+ continue
760
+ break
761
+ except (IndexError, TypeError, json.JSONDecodeError):
762
+ continue
763
+ except (IndexError, TypeError):
764
+ pass
765
+
766
+ # Format 3: Alternative location for generated images
767
+ if len(generated_images) == 0 and len(body) > 4 and len(body[4]) > 0:
768
+ try:
769
+ # Try to find images in candidate[4] structure
770
+ candidate = body[4][0]
771
+ if len(candidate) > 22 and candidate[22]:
772
+ # Look for URLs in the candidate[22] field
773
+ import re
774
+ content = candidate[22][0] if isinstance(candidate[22], list) and len(candidate[22]) > 0 else str(candidate[22])
775
+ urls = re.findall(r'https?://[^\s]+', content)
776
+ for i, url in enumerate(urls):
777
+ # Clean up URL if it ends with punctuation
778
+ if url[-1] in ['.', ',', ')', ']', '}', '"', "'"]:
779
+ url = url[:-1]
780
+ generated_images.append({
781
+ "url": url,
782
+ "title": f"[Generated Image {i+1}]",
783
+ "alt": ""
784
+ })
785
+ except (IndexError, TypeError) as e:
786
+ console.log(f"[yellow]Warning: Could not parse alternative image structure: {e}[/yellow]")
787
+
788
+ # Format 4: Look for image URLs in the text content
789
+ if len(images) == 0 and len(generated_images) == 0 and content:
790
+ try:
791
+ import re
792
+ # Look for image URLs in the content - try multiple patterns
793
+
794
+ # Pattern 1: Standard image URLs
795
+ urls = re.findall(r'(https?://[^\s]+\.(jpg|jpeg|png|gif|webp))', content.lower())
796
+
797
+ # Pattern 2: Google image URLs (which might not have extensions)
798
+ google_urls = re.findall(r'(https?://lh\d+\.googleusercontent\.com/[^\s]+)', content)
799
+
800
+ # Pattern 3: General URLs that might be images
801
+ general_urls = re.findall(r'(https?://[^\s]+)', content)
802
+
803
+ # Combine all found URLs
804
+ all_urls = []
805
+ if urls:
806
+ all_urls.extend([url_tuple[0] for url_tuple in urls])
807
+ if google_urls:
808
+ all_urls.extend(google_urls)
809
+
810
+ # Add general URLs only if we didn't find any specific image URLs
811
+ if not all_urls and general_urls:
812
+ all_urls = general_urls
813
+
814
+ # Process all found URLs
815
+ if all_urls:
816
+ for i, url in enumerate(all_urls):
817
+ # Clean up URL if it ends with punctuation
818
+ if url[-1] in ['.', ',', ')', ']', '}', '"', "'"]:
819
+ url = url[:-1]
820
+ images.append({
821
+ "url": url,
822
+ "title": f"[Image in Content {i+1}]",
823
+ "alt": ""
824
+ })
825
+ console.log(f"[green]Found {len(all_urls)} potential image URLs in content.[/green]")
826
+ except Exception as e:
827
+ console.log(f"[yellow]Warning: Error extracting URLs from content: {e}[/yellow]")
828
+
829
+ # Combine all images
830
+ all_images = images + generated_images
831
+
832
+ # Extract model name from response
833
+ # Model appears near the end of the structure, look for strings like "Fast", "3 Pro", "Thinking"
834
+ model_name = self.model.model_name # Default fallback
835
+ try:
836
def find_model_in_structure(obj, depth=0, max_depth=15):
    """Recursively scan a nested list/dict payload for a model-name string.

    A string qualifies as a model name when the element immediately after it
    in the same list is a boolean — the layout this response format uses for
    model metadata. Lists are walked from the tail first, because the model
    name tends to appear near the end of the structure.

    Args:
        obj: Arbitrarily nested lists/dicts (and scalars) to search.
        depth: Current recursion depth; internal bookkeeping.
        max_depth: Recursion cutoff to avoid runaway descent.

    Returns:
        The matched model-name string, or None when nothing matches.
    """
    if depth > max_depth:
        return None

    # Exact names seen in responses; loose substrings for variants like
    # "Gemini Pro" (only accepted for short strings to avoid false hits).
    known_names = ("Fast", "3 Pro", "Thinking", "Pro", "Exp", "Raciocinio")
    loose_patterns = ("Pro", "Fast", "Thinking")

    if isinstance(obj, list):
        for idx in reversed(range(len(obj))):
            entry = obj[idx]
            # Typical pattern: model name string immediately followed by a bool.
            next_is_bool = idx + 1 < len(obj) and isinstance(obj[idx + 1], bool)
            if isinstance(entry, str):
                if entry in known_names:
                    if next_is_bool:
                        return entry
                elif next_is_bool and len(entry) < 20:
                    if any(pattern in entry for pattern in loose_patterns):
                        return entry
            elif isinstance(entry, (list, dict)):
                hit = find_model_in_structure(entry, depth + 1, max_depth)
                if hit:
                    return hit
        return None

    if isinstance(obj, dict):
        for child in obj.values():
            hit = find_model_in_structure(child, depth + 1, max_depth)
            if hit:
                return hit

    return None
866
+
867
+ # Search in the body structure first
868
+ found_model = find_model_in_structure(body)
869
+
870
+ # If not found in body, search in all parsed parts
871
+ if not found_model:
872
+ for parsed_part in all_parsed_parts:
873
+ found_model = find_model_in_structure(parsed_part)
874
+ if found_model:
875
+ break
876
+
877
+ # If still not found, search in the full response_json structure
878
+ if not found_model and response_json:
879
+ found_model = find_model_in_structure(response_json)
880
+
881
+ if found_model:
882
+ model_name = found_model
883
+ except Exception as e:
884
+ console.log(f"[yellow]Warning: Could not extract model name from response: {e}[/yellow]")
885
+ # Use default model name
886
+
887
+ # Prepare results
888
+ results = {
889
+ "content": content,
890
+ "conversation_id": conversation_id,
891
+ "response_id": response_id,
892
+ "factualityQueries": factualityQueries,
893
+ "textQuery": textQuery,
894
+ "choices": choices,
895
+ "images": all_images,
896
+ "model": model_name, # Use extracted model name from response
897
+ "error": False,
898
+ }
899
+
900
+ # Don't update state - we want each message to be independent (no conversation history)
901
+ # Reset IDs to ensure next message is a new conversation
902
+ self.conversation_id = ""
903
+ self.response_id = ""
904
+ self.choice_id = ""
905
+ self._reqid += random.randint(1000, 9000)
906
+
907
+ return results
908
+
909
+ except (IndexError, TypeError) as e:
910
+ console.log(f"[red]Error extracting data from response: {e}[/red]")
911
+ return {"content": f"Error extracting data from response: {e}", "error": True}
912
+
913
+ except json.JSONDecodeError as e:
914
+ console.log(f"[red]Error parsing JSON response: {e}[/red]")
915
+ return {"content": f"Error parsing JSON response: {e}. Response: {resp.text[:200]}...", "error": True}
916
+ except Timeout as e:
917
+ console.log(f"[red]Request timed out: {e}[/red]")
918
+ return {"content": f"Request timed out: {e}", "error": True}
919
+ except (RequestException, CurlError) as e:
920
+ error_msg = f"Network error: {e}"
921
+ # Try to get more details if it's an HTTPError wrapped
922
+ if hasattr(e, 'response') and e.response is not None:
923
+ try:
924
+ error_msg += f"\nStatus: {e.response.status_code}"
925
+ error_msg += f"\nResponse: {e.response.text[:1000]}"
926
+ console.log(f"[red]{error_msg}[/red]")
927
+ console.log(f"[yellow]Full response text (first 2000 chars):[/yellow]\n{e.response.text[:2000]}")
928
+ except:
929
+ pass
930
+ console.log(f"[red]{error_msg}[/red]")
931
+ return {"content": error_msg, "error": True}
932
+ except HTTPError as e:
933
+ # Detectar erros de autenticação e tentar atualizar cookie automaticamente
934
+ if e.response.status_code in (401, 403):
935
+ console.log(f"[yellow]Erro de autenticação detectado (status {e.response.status_code}). Tentando atualizar cookie automaticamente...[/yellow]")
936
+ try:
937
+ # Tentar atualizar o cookie
938
+ await self.__rotate_cookies()
939
+ # Atualizar SNlM0e com o novo cookie
940
+ self.SNlM0e = await self.__get_snlm0e()
941
+ # Retentar a requisição original
942
+ console.log("[cyan]Retentando requisição com cookie atualizado...[/cyan]")
943
+ return await self.ask(message, image=image, video=video, audio=audio)
944
+ except Exception as rotate_error:
945
+ console.log(f"[red]Falha ao atualizar cookie automaticamente: {rotate_error}[/red]")
946
+
947
+ error_details = f"HTTP error {e.response.status_code}: {e}"
948
+ try:
949
+ error_text = e.response.text[:1000] if hasattr(e.response, 'text') else str(e.response)
950
+ error_details += f"\nResponse: {error_text}"
951
+ console.log(f"[red]{error_details}[/red]")
952
+ # Log full response for debugging
953
+ if hasattr(e.response, 'text'):
954
+ console.log(f"[yellow]Full response text (first 2000 chars):[/yellow]\n{e.response.text[:2000]}")
955
+ except:
956
+ pass
957
+ return {"content": error_details, "error": True}
958
+ except Exception as e:
959
+ console.log(f"[red]An unexpected error occurred during ask: {e}[/red]", style="bold red")
960
+ return {"content": f"An unexpected error occurred: {e}", "error": True}
961
+
962
+
963
+ #########################################
964
+ # Imports for refactored classes
965
+ #########################################