Upload 49 files
Browse files- gemini_webapi/__init__.py +6 -0
- gemini_webapi/__pycache__/__init__.cpython-313.pyc +0 -0
- gemini_webapi/__pycache__/client.cpython-313.pyc +0 -0
- gemini_webapi/__pycache__/constants.cpython-313.pyc +0 -0
- gemini_webapi/__pycache__/exceptions.cpython-313.pyc +0 -0
- gemini_webapi/client.py +1871 -0
- gemini_webapi/components/__init__.py +3 -0
- gemini_webapi/components/__pycache__/__init__.cpython-313.pyc +0 -0
- gemini_webapi/components/__pycache__/gem_mixin.cpython-313.pyc +0 -0
- gemini_webapi/components/gem_mixin.py +311 -0
- gemini_webapi/constants.py +142 -0
- gemini_webapi/exceptions.py +62 -0
- gemini_webapi/types/__init__.py +11 -0
- gemini_webapi/types/__pycache__/__init__.cpython-313.pyc +0 -0
- gemini_webapi/types/__pycache__/availablemodel.cpython-313.pyc +0 -0
- gemini_webapi/types/__pycache__/candidate.cpython-313.pyc +0 -0
- gemini_webapi/types/__pycache__/chathistory.cpython-313.pyc +0 -0
- gemini_webapi/types/__pycache__/chatinfo.cpython-313.pyc +0 -0
- gemini_webapi/types/__pycache__/gem.cpython-313.pyc +0 -0
- gemini_webapi/types/__pycache__/grpc.cpython-313.pyc +0 -0
- gemini_webapi/types/__pycache__/image.cpython-313.pyc +0 -0
- gemini_webapi/types/__pycache__/modeloutput.cpython-313.pyc +0 -0
- gemini_webapi/types/__pycache__/video.cpython-313.pyc +0 -0
- gemini_webapi/types/availablemodel.py +24 -0
- gemini_webapi/types/candidate.py +58 -0
- gemini_webapi/types/chathistory.py +53 -0
- gemini_webapi/types/chatinfo.py +18 -0
- gemini_webapi/types/gem.py +132 -0
- gemini_webapi/types/grpc.py +34 -0
- gemini_webapi/types/image.py +234 -0
- gemini_webapi/types/modeloutput.py +58 -0
- gemini_webapi/types/video.py +177 -0
- gemini_webapi/utils/__init__.py +14 -0
- gemini_webapi/utils/__pycache__/__init__.cpython-313.pyc +0 -0
- gemini_webapi/utils/__pycache__/decorators.cpython-313.pyc +0 -0
- gemini_webapi/utils/__pycache__/get_access_token.cpython-313.pyc +0 -0
- gemini_webapi/utils/__pycache__/load_browser_cookies.cpython-313.pyc +0 -0
- gemini_webapi/utils/__pycache__/logger.cpython-313.pyc +0 -0
- gemini_webapi/utils/__pycache__/parsing.cpython-313.pyc +0 -0
- gemini_webapi/utils/__pycache__/rotate_1psidts.cpython-313.pyc +0 -0
- gemini_webapi/utils/__pycache__/upload_file.cpython-313.pyc +0 -0
- gemini_webapi/utils/decorators.py +99 -0
- gemini_webapi/utils/get_access_token.py +202 -0
- gemini_webapi/utils/load_browser_cookies.py +82 -0
- gemini_webapi/utils/logger.py +37 -0
- gemini_webapi/utils/parsing.py +265 -0
- gemini_webapi/utils/rotate_1psidts.py +102 -0
- gemini_webapi/utils/temp/.cached_1psidts_g.a0007Qjc5GP_JJ8G6lqxKsBvwooBDG0kaQQpdrq1eVMavCuae6YHM71QR0oHtpOONkPxs87_PQACgYKAZcSARISFQHGX2MiBLscUC-RI65KuaeNsGHqgxoVAUF8yKrfh50pYTc-6ectdvp0W-we0076.txt +1 -0 — ⚠ SECURITY: this file contains a cached __Secure-1PSIDTS session credential; it should be deleted from the upload and the underlying Google session cookie rotated immediately.
- gemini_webapi/utils/upload_file.py +121 -0
gemini_webapi/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
|
| 3 |
+
from .client import GeminiClient, ChatSession
|
| 4 |
+
from .exceptions import *
|
| 5 |
+
from .types import *
|
| 6 |
+
from .utils import set_log_level, logger
|
gemini_webapi/__pycache__/__init__.cpython-313.pyc
ADDED
|
Binary file (380 Bytes). View file
|
|
|
gemini_webapi/__pycache__/client.cpython-313.pyc
ADDED
|
Binary file (73.4 kB). View file
|
|
|
gemini_webapi/__pycache__/constants.cpython-313.pyc
ADDED
|
Binary file (5.27 kB). View file
|
|
|
gemini_webapi/__pycache__/exceptions.cpython-313.pyc
ADDED
|
Binary file (2.17 kB). View file
|
|
|
gemini_webapi/client.py
ADDED
|
@@ -0,0 +1,1871 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import codecs
|
| 3 |
+
import io
|
| 4 |
+
import random
|
| 5 |
+
import re
|
| 6 |
+
import time
|
| 7 |
+
from asyncio import Task
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from typing import Any, AsyncGenerator, Optional
|
| 10 |
+
|
| 11 |
+
import orjson as json
|
| 12 |
+
from curl_cffi.requests import AsyncSession, Cookies, Response
|
| 13 |
+
from curl_cffi.requests.exceptions import ReadTimeout
|
| 14 |
+
|
| 15 |
+
from .components import GemMixin
|
| 16 |
+
from .constants import (
|
| 17 |
+
Endpoint,
|
| 18 |
+
ErrorCode,
|
| 19 |
+
GRPC,
|
| 20 |
+
Model,
|
| 21 |
+
TEMPORARY_CHAT_FLAG_INDEX,
|
| 22 |
+
STREAMING_FLAG_INDEX,
|
| 23 |
+
GEM_FLAG_INDEX,
|
| 24 |
+
)
|
| 25 |
+
from .exceptions import (
|
| 26 |
+
APIError,
|
| 27 |
+
AuthError,
|
| 28 |
+
GeminiError,
|
| 29 |
+
ModelInvalid,
|
| 30 |
+
TemporarilyBlocked,
|
| 31 |
+
TimeoutError,
|
| 32 |
+
UsageLimitExceeded,
|
| 33 |
+
)
|
| 34 |
+
from .types import (
|
| 35 |
+
Candidate,
|
| 36 |
+
Gem,
|
| 37 |
+
GeneratedImage,
|
| 38 |
+
ModelOutput,
|
| 39 |
+
RPCData,
|
| 40 |
+
WebImage,
|
| 41 |
+
AvailableModel,
|
| 42 |
+
ChatInfo,
|
| 43 |
+
ChatTurn,
|
| 44 |
+
ChatHistory,
|
| 45 |
+
GeneratedVideo,
|
| 46 |
+
)
|
| 47 |
+
from .utils import (
|
| 48 |
+
extract_json_from_response,
|
| 49 |
+
get_access_token,
|
| 50 |
+
get_delta_by_fp_len,
|
| 51 |
+
get_nested_value,
|
| 52 |
+
logger,
|
| 53 |
+
parse_file_name,
|
| 54 |
+
parse_response_by_frame,
|
| 55 |
+
rotate_1psidts,
|
| 56 |
+
running,
|
| 57 |
+
upload_file,
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
# Anchored pattern for responses whose body is a googleusercontent "card content"
# placeholder URL. NOTE(review): usage is not visible in this chunk — presumably
# used to detect card-only model responses; confirm against the send/parse code.
_CARD_CONTENT_RE = re.compile(r"^http://googleusercontent\.com/card_content/\d+")
# Unanchored pattern for inline googleusercontent artifact placeholder URLs,
# including any trailing newlines (so a substitution removes the blank line too).
_ARTIFACTS_RE = re.compile(r"http://googleusercontent\.com/\w+/\d+\n*")
# Default positional metadata list for requests. NOTE(review): field meanings are
# dictated by the unofficial wire format and are not documented here — verify
# against the request-building code before changing length or element order.
_DEFAULT_METADATA: list[Any] = ["", "", "", None, None, None, None, None, None, ""]
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class GeminiClient(GemMixin):
    """
    Async requests client interface for gemini.google.com.

    `secure_1psid` must be provided unless the optional dependency `browser-cookie3` is installed, and
    you have logged in to google.com in your local browser.

    Parameters
    ----------
    secure_1psid: `str`, optional
        __Secure-1PSID cookie value.
    secure_1psidts: `str`, optional
        __Secure-1PSIDTS cookie value, some Google accounts don't require this value, provide only if it's in the cookie list.
    proxy: `str`, optional
        Proxy URL.
    kwargs: `dict`, optional
        Additional arguments which will be passed to the http client.
        Refer to `curl_cffi.requests.AsyncSession` for more information.

    Raises
    ------
    `ValueError`
        If `browser-cookie3` is installed but cookies for google.com are not found in your local browser storage.
    """

    # Fix the instance attribute set: saves per-instance memory and turns
    # attribute-name typos into AttributeError instead of silent new attributes.
    __slots__ = [
        # -- authentication / transport state --
        "_cookies",
        "proxy",
        "_running",
        "client",
        "access_token",
        "build_label",
        "session_id",
        # -- lifecycle / background-task configuration --
        "timeout",
        "auto_close",
        "close_delay",
        "close_task",
        "auto_refresh",
        "refresh_interval",
        "refresh_task",
        "verbose",
        "watchdog_timeout",
        "_lock",
        "_reqid",
        # -- cached server-side state --
        "_gems",  # From GemMixin
        "_available_models",
        "_recent_chats",
        "kwargs",
    ]
|
| 114 |
+
|
| 115 |
+
def __init__(
    self,
    secure_1psid: str | None = None,
    secure_1psidts: str | None = None,
    proxy: str | None = None,
    **kwargs,
):
    """
    Set up in-memory client state only; no network traffic happens here.
    Call `init()` afterwards to authenticate and open the HTTP session.
    """
    super().__init__()

    # Transport / authentication state (populated for real by `init()`).
    self._cookies = Cookies()
    self.proxy = proxy
    self.client: AsyncSession | None = None
    self.access_token: str | None = None
    self.build_label: str | None = None
    self.session_id: str | None = None
    self._running: bool = False

    # Lifecycle defaults; `init()` overwrites these from its arguments.
    self.timeout: float = 600
    self.auto_close: bool = False
    self.close_delay: float = 600
    self.close_task: Task | None = None
    self.auto_refresh: bool = True
    self.refresh_interval: float = 600
    self.refresh_task: Task | None = None
    self.verbose: bool = True
    self.watchdog_timeout: float = 90

    # Serializes init/refresh critical sections; request counter starts random.
    self._lock = asyncio.Lock()
    self._reqid: int = random.randint(10000, 99999)

    # Server-side caches, filled lazily.
    self._available_models: list[AvailableModel] | None = None
    self._recent_chats: list[ChatInfo] | None = None
    self.kwargs = kwargs

    # 1PSIDTS is only meaningful alongside 1PSID, hence the nesting.
    if secure_1psid:
        self._cookies.set("__Secure-1PSID", secure_1psid, domain=".google.com")
        if secure_1psidts:
            self._cookies.set(
                "__Secure-1PSIDTS", secure_1psidts, domain=".google.com"
            )
|
| 152 |
+
|
| 153 |
+
@property
def cookies(self) -> Cookies:
    """
    Cookies for the current session: the live HTTP client's jar while
    connected, otherwise the locally stored copy.
    """
    if self.client:
        return self.client.cookies
    return self._cookies

@cookies.setter
def cookies(self, value: Cookies | dict):
    # Merge into the local jar first, then mirror into the live client (if any).
    if isinstance(value, Cookies):
        self._cookies.update(value)
    elif isinstance(value, dict):
        for name, cookie_value in value.items():
            self._cookies.set(name, cookie_value, domain=".google.com")

    if self.client:
        self.client.cookies.update(self._cookies)
|
| 170 |
+
|
| 171 |
+
async def init(
    self,
    timeout: float = 600,
    auto_close: bool = False,
    close_delay: float = 600,
    auto_refresh: bool = True,
    refresh_interval: float = 600,
    verbose: bool = True,
    watchdog_timeout: float = 90,
) -> None:
    """
    Get SNlM0e value as access token. Without this token posting will fail with 400 bad request.

    Parameters
    ----------
    timeout: `float`, optional
        Request timeout of the client in seconds. Used to limit the max waiting time when sending a request.
    auto_close: `bool`, optional
        If `True`, the client will close connections and clear resource usage after a certain period
        of inactivity. Useful for always-on services.
    close_delay: `float`, optional
        Time to wait before auto-closing the client in seconds. Effective only if `auto_close` is `True`.
    auto_refresh: `bool`, optional
        If `True`, will schedule a task to automatically refresh cookies and access token in the background.
    refresh_interval: `float`, optional
        Time interval for background cookie and access token refresh in seconds.
        Effective only if `auto_refresh` is `True`.
    verbose: `bool`, optional
        If `True`, will print more information in logs.
    watchdog_timeout: `float`, optional
        Timeout in seconds for shadow retry watchdog. If no data receives from stream but connection is active,
        client will retry automatically after this duration.
    """

    # The lock serializes init against the background refresh task.
    async with self._lock:
        # Idempotent: a second concurrent/沿-途 call is a no-op once running.
        if self._running:
            return

        try:
            self.verbose = verbose
            self.watchdog_timeout = watchdog_timeout
            # Authenticate first; only on success do we adopt the new session.
            access_token, build_label, session_id, session = await get_access_token(
                base_cookies=self.cookies,
                proxy=self.proxy,
                verbose=self.verbose,
                verify=self.kwargs.get("verify", True),
            )

            session.timeout = timeout
            self.client = session
            # Persist any cookies the auth flow set on the new session.
            self._cookies.update(self.client.cookies)
            self.access_token = access_token
            self.build_label = build_label
            self.session_id = session_id
            self._running = True
            # Fresh per-session request id, as the web client does.
            self._reqid = random.randint(10000, 99999)

            self.timeout = timeout
            self.auto_close = auto_close
            self.close_delay = close_delay
            if self.auto_close:
                await self.reset_close_task()

            self.auto_refresh = auto_refresh
            self.refresh_interval = refresh_interval

            # Replace (never stack) any previous refresh task.
            if self.refresh_task:
                self.refresh_task.cancel()
                self.refresh_task = None

            if self.auto_refresh:
                self.refresh_task = asyncio.create_task(self.start_auto_refresh())

            # Warm-up RPCs (model list, settings, recent chats).
            await self._init_rpc()

            if self.verbose:
                logger.success("Gemini client initialized successfully.")
        except Exception:
            # Roll back to a fully-closed state so a retry starts clean.
            await self.close()
            raise
|
| 251 |
+
|
| 252 |
+
async def close(self, delay: float = 0) -> None:
    """
    Close the client after a certain period of inactivity, or call manually to close immediately.

    Parameters
    ----------
    delay: `float`, optional
        Time to wait before closing the client in seconds.
    """

    if delay:
        await asyncio.sleep(delay)

    self._running = False

    # BUGFIX: when close() runs *as* the scheduled close_task (see
    # reset_close_task), cancelling self.close_task would cancel the current
    # task — CancelledError would then fire at the `await self.client.close()`
    # below, leaving the HTTP client open and `self.client` dangling.
    current = asyncio.current_task()
    if self.close_task is not None and self.close_task is not current:
        self.close_task.cancel()
    self.close_task = None

    if self.refresh_task:
        self.refresh_task.cancel()
        self.refresh_task = None

    if self.client:
        # Snapshot cookies so a later re-init can reuse the session.
        self._cookies.update(self.client.cookies)
        await self.client.close()
        self.client = None
|
| 279 |
+
|
| 280 |
+
async def reset_close_task(self) -> None:
    """
    Restart the inactivity timer: drop any pending auto-close and
    schedule a fresh one `close_delay` seconds out.
    """

    pending = self.close_task
    if pending is not None:
        pending.cancel()

    self.close_task = asyncio.create_task(self.close(self.close_delay))
|
| 290 |
+
|
| 291 |
+
async def start_auto_refresh(self) -> None:
    """
    Background loop that periodically rotates cookies to keep the
    session alive; runs until the client stops.
    """
    # Never hammer the rotation endpoint more than once a minute.
    self.refresh_interval = max(self.refresh_interval, 60)

    while self._running:
        await asyncio.sleep(self.refresh_interval)

        if not self._running:
            break

        try:
            async with self._lock:
                # Refresh all cookies in the background to keep the session alive.
                rotated = await rotate_1psidts(self.client, self.verbose)

                if rotated:
                    logger.debug("Cookies refreshed (network update).")
                else:
                    logger.warning(
                        "Rotation response did not contain a new __Secure-1PSIDTS. "
                        "Session might expire soon if this persists."
                    )
        except asyncio.CancelledError:
            # Let task cancellation propagate untouched.
            raise
        except AuthError:
            logger.warning(
                "AuthError: Failed to refresh cookies. Retrying in next interval."
            )
        except Exception:
            logger.warning(
                "Unexpected error while refreshing cookies. Retrying in next interval."
            )
|
| 326 |
+
|
| 327 |
+
async def _init_rpc(self) -> None:
    """
    Send initial RPC calls to set up the session.

    Runs the same warm-up sequence the web client performs after login:
    model list, settings, activity ping, then recent chats.
    NOTE(review): executed sequentially — ordering is assumed to matter
    server-side; confirm before parallelizing.
    """
    await self._fetch_models()
    await self._send_bard_settings()
    await self._send_bard_activity()
    await self._fetch_recent_chats()
|
| 335 |
+
|
| 336 |
+
async def _fetch_models(self) -> None:
    """
    Fetch and parse available models.

    Issues a LIST_MODELS batch-execute call, walks the positional JSON
    payload, and caches the result in `self._available_models`.
    NOTE(review): positional indexes ([2], [15], [0], [10], [12], ...)
    mirror the unofficial wire format observed in responses; they can
    break silently if Google changes the schema.
    """
    response = await self._batch_execute(
        [
            RPCData(
                rpcid=GRPC.LIST_MODELS,
                payload="[]",
            )
        ]
    )

    response_json = extract_json_from_response(response.text)

    available_models = []
    for part in response_json:
        # Index 2 carries the serialized body of each response part.
        part_body_str = get_nested_value(part, [2])
        if not part_body_str:
            continue

        part_body = json.loads(part_body_str)

        # Index 15 appears to hold the model list in LIST_MODELS replies.
        models_list = get_nested_value(part_body, [15])
        if isinstance(models_list, list):
            for model_data in models_list:
                if isinstance(model_data, list) and len(model_data) > 2:
                    model_id = get_nested_value(model_data, [0], "")
                    # Prefer the newer display-name slot, fall back to the old one.
                    name = get_nested_value(model_data, [10]) or get_nested_value(
                        model_data, [1], ""
                    )
                    description = get_nested_value(
                        model_data, [12]
                    ) or get_nested_value(model_data, [2], "")
                    # Map the wire model id back onto our Model enum by matching
                    # it against each enum member's request header value.
                    core_model = Model.UNSPECIFIED
                    code_name = "unspecified"
                    for enum_model in Model:
                        val = enum_model.model_header.get(
                            "x-goog-ext-525001261-jspb", ""
                        )
                        if val and (model_id in val):
                            core_model = enum_model
                            code_name = enum_model.model_name
                            break

                    if model_id and name:
                        available_models.append(
                            AvailableModel(
                                id=code_name,
                                name=name,
                                model=core_model,
                                description=description,
                            )
                        )
            # First part containing a model list wins; ignore the rest.
            break

    self._available_models = available_models
|
| 393 |
+
|
| 394 |
+
async def _fetch_recent_chats(self, recent: int = 13) -> None:
    """
    Fetch and parse recent chats.

    Issues two LIST_CHATS calls (flag 1 then flag 0 — presumably pinned
    vs. unpinned listings; TODO confirm), deduplicates by chat id, and
    caches the merged result in `self._recent_chats`.

    Parameters
    ----------
    recent: `int`, optional
        Maximum number of chats requested per listing call.
    """
    response_chats1 = await self._batch_execute(
        [
            RPCData(
                rpcid=GRPC.LIST_CHATS,
                # orjson.dumps returns bytes, hence the decode.
                payload=json.dumps([recent, None, [1, None, 1]]).decode("utf-8"),
            ),
        ]
    )
    response_chats2 = await self._batch_execute(
        [
            RPCData(
                rpcid=GRPC.LIST_CHATS,
                payload=json.dumps([recent, None, [0, None, 1]]).decode("utf-8"),
            ),
        ]
    )

    recent_chats: list[ChatInfo] = []
    for response_chats in (response_chats1, response_chats2):
        chats_json = extract_json_from_response(response_chats.text)
        for part in chats_json:
            # Index 2 carries the serialized body of each response part.
            part_body_str = get_nested_value(part, [2])
            if not part_body_str:
                continue

            try:
                part_body = json.loads(part_body_str)
            except json.JSONDecodeError:
                # Skip malformed parts rather than failing the whole fetch.
                continue

            # Index 2 of the body appears to hold the chat list.
            chat_list = get_nested_value(part_body, [2])
            if isinstance(chat_list, list):
                for chat_data in chat_list:
                    if isinstance(chat_data, list) and len(chat_data) > 1:
                        cid = get_nested_value(chat_data, [0], "")
                        title = get_nested_value(chat_data, [1], "")
                        is_pinned = bool(get_nested_value(chat_data, [2]))

                        if cid and title:
                            # Deduplicate across the two listing responses.
                            if not any(c.cid == cid for c in recent_chats):
                                recent_chats.append(
                                    ChatInfo(
                                        cid=cid, title=title, is_pinned=is_pinned
                                    )
                                )
                # First part containing a chat list wins for this response.
                break

    self._recent_chats = recent_chats
|
| 446 |
+
|
| 447 |
+
async def _send_bard_settings(self) -> None:
|
| 448 |
+
"""
|
| 449 |
+
Send required setup activity to Gemini.
|
| 450 |
+
"""
|
| 451 |
+
await self._batch_execute(
|
| 452 |
+
[
|
| 453 |
+
RPCData(
|
| 454 |
+
rpcid=GRPC.BARD_SETTINGS,
|
| 455 |
+
payload='[[["adaptive_device_responses_enabled","advanced_mode_theme_override_triggered","advanced_zs_upsell_dismissal_count","advanced_zs_upsell_last_dismissed","ai_transparency_notice_dismissed","audio_overview_discovery_dismissal_count","audio_overview_discovery_last_dismissed","bard_in_chrome_link_sharing_enabled","bard_sticky_mode_disabled_count","canvas_create_discovery_tooltip_seen_count","combined_files_button_tag_seen_count","indigo_banner_explicit_dismissal_count","indigo_banner_impression_count","indigo_banner_last_seen_sec","current_popup_id","deep_research_has_seen_file_upload_tooltip","deep_research_model_update_disclaimer_display_count","default_bot_id","disabled_discovery_card_feature_ids","disabled_model_discovery_tooltip_feature_ids","disabled_mode_disclaimers","disabled_new_model_badge_mode_ids","disabled_settings_discovery_tooltip_feature_ids","disablement_disclaimer_last_dismissed_sec","disable_advanced_beta_dialog","disable_advanced_beta_non_en_banner","disable_advanced_resubscribe_ui","disable_at_mentions_discovery_tooltip","disable_autorun_fact_check_u18","disable_bot_create_tips_card","disable_bot_docs_in_gems_disclaimer","disable_bot_onboarding_dialog","disable_bot_save_reminder_tips_card","disable_bot_send_prompt_tips_card","disable_bot_shared_in_drive_disclaimer","disable_bot_try_create_tips_card","disable_colab_tooltip","disable_collapsed_tool_menu_tooltip","disable_continue_discovery_tooltip","disable_debug_info_moved_tooltip_v2","disable_enterprise_mode_dialog","disable_export_python_tooltip","disable_extensions_discovery_dialog","disable_extension_one_time_badge","disable_fact_check_tooltip_v2","disable_free_file_upload_tips_card","disable_generated_image_download_dialog","disable_get_app_banner","disable_get_app_desktop_dialog","disable_googler_in_enterprise_mode","disable_human_review_disclosure","disable_ice_open_vega_editor_tooltip","disable_image_upload_tooltip","disable_legal_concern_tooltip","disable_llm_history_import_disclaim
er","disable_location_popup","disable_memory_discovery","disable_memory_extraction_discovery","disable_new_conversation_dialog","disable_onboarding_experience","disable_personal_context_tooltip","disable_photos_upload_disclaimer","disable_power_up_intro_tooltip","disable_scheduled_actions_mobile_notification_snackbar","disable_storybook_listen_button_tooltip","disable_streaming_settings_tooltip","disable_take_control_disclaimer","disable_teens_only_english_language_dialog","disable_tier1_rebranding_tooltip","disable_try_advanced_mode_dialog","enable_advanced_beta_mode","enable_advanced_mode","enable_googler_in_enterprise_mode","enable_memory","enable_memory_extraction","enable_personal_context","enable_personal_context_gemini","enable_personal_context_gemini_using_photos","enable_personal_context_gemini_using_workspace","enable_personal_context_search","enable_personal_context_youtube","enable_token_streaming","enforce_default_to_fast_version","mayo_discovery_banner_dismissal_count","mayo_discovery_banner_last_dismissed_sec","gempix_discovery_banner_dismissal_count","gempix_discovery_banner_last_dismissed","get_app_banner_ack_count","get_app_banner_seen_count","get_app_mobile_dialog_ack_count","guided_learning_banner_dismissal_count","guided_learning_banner_last_dismissed","has_accepted_agent_mode_fre_disclaimer","has_received_streaming_response","has_seen_agent_mode_tooltip","has_seen_bespoke_tooltip","has_seen_deepthink_mustard_tooltip","has_seen_deepthink_v2_tooltip","has_seen_deep_think_tooltip","has_seen_first_youtube_video_disclaimer","has_seen_ggo_tooltip","has_seen_image_grams_discovery_banner","has_seen_image_preview_in_input_area_tooltip","has_seen_kallo_discovery_banner","has_seen_kallo_tooltip","has_seen_model_picker_in_input_area_tooltip","has_seen_model_tooltip_in_input_area_for_gempix","has_seen_redo_with_gempix2_tooltip","has_seen_veograms_discovery_banner","has_seen_video_generation_discovery_banner","is_imported_chats_panel_open_by_default","jumpst
art_onboarding_dismissal_count","last_dismissed_deep_research_implicit_invite","last_dismissed_discovery_feature_implicit_invites","last_dismissed_immersives_canvas_implicit_invite","last_dismissed_immersive_share_disclaimer_sec","last_dismissed_strike_timestamp_sec","last_dismissed_zs_student_aip_banner_sec","last_get_app_banner_ack_timestamp_sec","last_get_app_mobile_dialog_ack_timestamp_sec","last_human_review_disclosure_ack","last_selected_mode_id_in_embedded","last_selected_mode_id_on_web","last_two_up_activation_timestamp_sec","last_winter_olympics_interaction_timestamp_sec","memory_extracted_greeting_name","mini_gemini_tos_closed","mode_switcher_soft_badge_disabled_ids","mode_switcher_soft_badge_seen_count","personalization_first_party_onboarding_cross_surface_clicked","personalization_first_party_onboarding_cross_surface_seen_count","personalization_one_p_discovery_card_seen_count","personalization_one_p_discovery_last_consented","personalization_zero_state_card_last_interacted","personalization_zero_state_card_seen_count","popup_zs_visits_cooldown","require_reconsent_setting_for_personalization_banner_seen_count","show_debug_info","side_nav_open_by_default","student_verification_dismissal_count","student_verification_last_dismissed","task_viewer_cc_banner_dismissed_count","task_viewer_cc_banner_dismissed_time_sec","tool_menu_new_badge_disabled_ids","tool_menu_new_badge_impression_counts","tool_menu_soft_badge_disabled_ids","tool_menu_soft_badge_impression_counts","upload_disclaimer_last_consent_time_sec","viewed_student_aip_upsell_campaign_ids","voice_language","voice_name","web_and_app_activity_enabled","wellbeing_nudge_notice_last_dismissed_sec","zs_student_aip_banner_dismissal_count"]]]',
|
| 456 |
+
)
|
| 457 |
+
]
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
async def _send_bard_activity(self) -> None:
    """
    Fire a lightweight settings RPC as a warmup before querying.
    """
    warmup_rpc = RPCData(
        rpcid=GRPC.BARD_SETTINGS,
        payload='[[["bard_activity_enabled"]]]',
    )
    await self._batch_execute([warmup_rpc])
|
| 472 |
+
|
| 473 |
+
def list_models(self) -> list[AvailableModel] | None:
    """
    List all available models for the current account.

    Returns
    -------
    `list[gemini_webapi.types.AvailableModel]`
        List of models with their name and description. Returns `None` if the client holds no session cache.
    """
    # Plain accessor over a cached snapshot — presumably populated during
    # client initialization; no network request is made here. TODO confirm.
    return self._available_models
|
| 483 |
+
|
| 484 |
+
async def generate_content(
    self,
    prompt: str,
    files: list[str | Path | bytes | io.BytesIO] | None = None,
    model: Model | str | dict = Model.UNSPECIFIED,
    gem: Gem | str | None = None,
    chat: Optional["ChatSession"] = None,
    temporary: bool = False,
    **kwargs,
) -> ModelOutput:
    """
    Generates contents with prompt.

    Parameters
    ----------
    prompt: `str`
        Text prompt provided by user.
    files: `list[str | Path | bytes | io.BytesIO]`, optional
        List of file paths or byte streams to be attached.
    model: `Model | str | dict`, optional
        Specify the model to use for generation.
        Pass either a `gemini_webapi.constants.Model` enum or a model name string to use predefined models.
        Pass a dictionary to use custom model header strings ("model_name" and "model_header" keys must be provided).
    gem: `Gem | str`, optional
        Specify a gem to use as system prompt for the chat session.
        Pass either a `gemini_webapi.types.Gem` object or a gem id string.
    chat: `ChatSession`, optional
        Chat data to retrieve conversation history.
        If None, will automatically generate a new chat id when sending post request.
    temporary: `bool`, optional
        If set to `True`, the ongoing conversation will not show up in Gemini history.
    kwargs: `dict`, optional
        Additional arguments which will be passed to the post request.
        Refer to `curl_cffi.requests.AsyncSession.request` for more information.

    Returns
    -------
    :class:`ModelOutput`
        Output data from gemini.google.com.

    Raises
    ------
    `AssertionError`
        If prompt is empty.
    `gemini_webapi.TimeoutError`
        If request timed out.
    `gemini_webapi.GeminiError`
        If no reply candidate found in response.
    `gemini_webapi.APIError`
        - If request failed with status code other than 200.
        - If response structure is invalid and failed to parse.
    """

    if self.auto_close:
        await self.reset_close_task()

    # NOTE: the upload phase runs inside this try block so that the
    # `finally` below closes caller-provided BytesIO streams even when an
    # upload fails (previously a failed upload leaked the streams).
    try:
        file_data = None
        if files:
            # Warmup RPC before uploading attachments.
            await self._send_bard_activity()

            uploaded_urls = await asyncio.gather(
                *(
                    upload_file(file, client=self.client, verbose=self.verbose)
                    for file in files
                )
            )
            file_data = [
                [[url], parse_file_name(file)]
                for url, file in zip(uploaded_urls, files)
            ]

        await self._send_bard_activity()

        # Shared with `_generate` across its internal retries so partially
        # received text/thoughts survive a reconnect.
        session_state = {
            "last_texts": {},
            "last_thoughts": {},
            "last_progress_time": time.time(),
            "is_thinking": False,
            "is_queueing": False,
            "title": None,
        }
        output = None
        # Drain the streaming generator; only the final output matters here.
        async for output in self._generate(
            prompt=prompt,
            req_file_data=file_data,
            model=model,
            gem=gem,
            chat=chat,
            temporary=temporary,
            session_state=session_state,
            **kwargs,
        ):
            pass

        if output is None:
            raise GeminiError(
                "Failed to generate contents. No output data found in response."
            )

        if isinstance(chat, ChatSession):
            output.metadata = chat.metadata
            chat.last_output = output

        return output

    finally:
        # Close caller-provided byte streams regardless of success.
        if files:
            for file in files:
                if isinstance(file, io.BytesIO):
                    file.close()
|
| 595 |
+
|
| 596 |
+
async def generate_content_stream(
    self,
    prompt: str,
    files: list[str | Path | bytes | io.BytesIO] | None = None,
    model: Model | str | dict = Model.UNSPECIFIED,
    gem: Gem | str | None = None,
    chat: Optional["ChatSession"] = None,
    temporary: bool = False,
    **kwargs,
) -> AsyncGenerator[ModelOutput, None]:
    """
    Generates contents with prompt in streaming mode.

    This method sends a request to Gemini and yields partial responses as they arrive.
    It automatically calculates the text delta (new characters) to provide a smooth
    streaming experience. It also continuously updates chat metadata and candidate IDs.

    Parameters
    ----------
    prompt: `str`
        Text prompt provided by user.
    files: `list[str | Path | bytes | io.BytesIO]`, optional
        List of file paths or byte streams to be attached.
    model: `Model | str | dict`, optional
        Specify the model to use for generation.
    gem: `Gem | str`, optional
        Specify a gem to use as system prompt for the chat session.
    chat: `ChatSession`, optional
        Chat data to retrieve conversation history.
    temporary: `bool`, optional
        If set to `True`, the ongoing conversation will not show up in Gemini history.
    kwargs: `dict`, optional
        Additional arguments passed to `curl_cffi.requests.AsyncSession.stream`.

    Yields
    ------
    :class:`ModelOutput`
        Partial output data. The `text_delta` attribute contains only the NEW characters
        received since the last yield.

    Raises
    ------
    `gemini_webapi.APIError`
        If the request fails or response structure is invalid.
    `gemini_webapi.TimeoutError`
        If the stream request times out.
    """

    if self.auto_close:
        await self.reset_close_task()

    # NOTE: the upload phase runs inside this try block so that the
    # `finally` below closes caller-provided BytesIO streams even when an
    # upload fails (previously a failed upload leaked the streams).
    try:
        file_data = None
        if files:
            # Warmup RPC before uploading attachments.
            await self._send_bard_activity()

            uploaded_urls = await asyncio.gather(
                *(
                    upload_file(file, client=self.client, verbose=self.verbose)
                    for file in files
                )
            )
            file_data = [
                [[url], parse_file_name(file)]
                for url, file in zip(uploaded_urls, files)
            ]

        await self._send_bard_activity()

        # Shared with `_generate` across its internal retries so partially
        # received text/thoughts survive a reconnect.
        session_state = {
            "last_texts": {},
            "last_thoughts": {},
            "last_progress_time": time.time(),
            "is_thinking": False,
            "is_queueing": False,
            "title": None,
        }
        output = None
        async for output in self._generate(
            prompt=prompt,
            req_file_data=file_data,
            model=model,
            gem=gem,
            chat=chat,
            temporary=temporary,
            session_state=session_state,
            **kwargs,
        ):
            yield output

        # Attach final chat pointers to the last yielded output.
        if output and isinstance(chat, ChatSession):
            output.metadata = chat.metadata
            chat.last_output = output

    finally:
        # Close caller-provided byte streams regardless of success.
        if files:
            for file in files:
                if isinstance(file, io.BytesIO):
                    file.close()
|
| 695 |
+
|
| 696 |
+
@running(retry=5)
async def _generate(
    self,
    prompt: str,
    req_file_data: list[Any] | None = None,
    model: Model | str | dict = Model.UNSPECIFIED,
    gem: Gem | str | None = None,
    chat: Optional["ChatSession"] = None,
    temporary: bool = False,
    session_state: dict[str, Any] | None = None,
    **kwargs,
) -> AsyncGenerator[ModelOutput, None]:
    """
    Internal method which actually sends content generation requests.

    Streams the batchexecute response, parses it frame by frame and yields
    partial :class:`ModelOutput` objects. Beyond plain parsing it handles:

    - translation of fatal in-stream error codes into typed exceptions;
    - a watchdog that breaks out of an idle socket read to reconnect;
    - post-stream recovery via `read_chat` polling when the stream ends
      before the turn was marked complete.

    `session_state` is shared with the caller (and across `@running` retry
    attempts, which re-invoke this coroutine with the same dict), so
    already-emitted text/thought prefixes are not re-yielded after a
    reconnect.
    """

    assert prompt, "Prompt cannot be empty."

    # Normalize `model` into a Model instance.
    if isinstance(model, str):
        model = Model.from_name(model)
    elif isinstance(model, dict):
        model = Model.from_dict(model)
    elif not isinstance(model, Model):
        raise TypeError(
            f"'model' must be a `gemini_webapi.constants.Model` instance, "
            f"string, or dictionary; got `{type(model).__name__}`"
        )

    # Consume the current request id and advance the shared counter.
    _reqid = self._reqid
    self._reqid += 100000

    gem_id = gem.id if isinstance(gem, Gem) else gem

    # Snapshot chat pointers so they can be restored by the except blocks
    # below if the request fails before any text was generated.
    chat_backup: dict[str, Any] | None = None
    if chat:
        chat_backup = {
            "metadata": (
                list(chat.metadata)
                if getattr(chat, "metadata", None)
                else list(_DEFAULT_METADATA)
            ),
            "cid": getattr(chat, "cid", ""),
            "rid": getattr(chat, "rid", ""),
            "rcid": getattr(chat, "rcid", ""),
        }

    if session_state is None:
        session_state = {
            "last_texts": {},
            "last_thoughts": {},
            "last_progress_time": time.time(),
            "is_thinking": False,
            "is_queueing": False,
            "title": None,
        }
    else:
        # Reset connection-specific states during a retry attempt
        session_state["last_progress_time"] = time.time()
        session_state["is_thinking"] = False
        session_state["is_queueing"] = False

    has_generated_text = False  # True once any output has been yielded
    sleep_time = 10  # seconds between read_chat recovery polls

    # Positional request body: [prompt, 0, None, file_data, None, None, 0].
    message_content = [
        prompt,
        0,
        None,
        req_file_data,
        None,
        None,
        0,
    ]

    params: dict[str, Any] = {"_reqid": _reqid, "rt": "c"}
    if self.build_label:
        params["bl"] = self.build_label
    if self.session_id:
        params["f.sid"] = self.session_id

    while True:
        try:
            # Sparse positional list expected by the endpoint; only a few
            # indices are populated.
            inner_req_list: list[Any] = [None] * 69
            inner_req_list[0] = message_content
            inner_req_list[2] = chat.metadata if chat else list(_DEFAULT_METADATA)
            inner_req_list[STREAMING_FLAG_INDEX] = 1
            if gem_id:
                inner_req_list[GEM_FLAG_INDEX] = gem_id
            if temporary:
                inner_req_list[TEMPORARY_CHAT_FLAG_INDEX] = 1

            # NOTE(review): `.decode("utf-8")` implies `json` here is an
            # orjson-style module whose dumps() returns bytes — confirm the
            # import alias at the top of the file.
            request_data = {
                "at": self.access_token,
                "f.req": json.dumps(
                    [
                        None,
                        json.dumps(inner_req_list).decode("utf-8"),
                    ]
                ).decode("utf-8"),
            }

            async with self.client.stream(
                "POST",
                Endpoint.GENERATE,
                params=params,
                headers=model.model_header,
                data=request_data,
                **kwargs,
            ) as response:
                if self.verbose:
                    logger.debug(
                        f"HTTP Request: POST {Endpoint.GENERATE} [{response.status_code}]"
                    )
                if response.status_code != 200:
                    await self.close()
                    raise APIError(
                        f"Failed to generate contents. Status: {response.status_code}"
                    )

                buffer = ""
                # Incremental decoder: chunk boundaries may split multi-byte
                # UTF-8 sequences; `errors="replace"` avoids hard failures.
                decoder = codecs.getincrementaldecoder("utf-8")(errors="replace")

                # Aliases into the retry-shared session state.
                last_texts: dict[str, str] = session_state["last_texts"]
                last_thoughts: dict[str, str] = session_state["last_thoughts"]
                last_progress_time = session_state["last_progress_time"]

                is_thinking = session_state["is_thinking"]
                is_queueing = session_state["is_queueing"]
                has_candidates = False
                is_completed = False  # Check if this conversation turn has been fully answered.
                is_final_chunk = False  # Check if this turn is saved to history and marked complete or still pending (e.g., video generation).
                cid = chat.cid if chat else ""
                rid = chat.rid if chat else ""

                async def _process_parts(
                    parts: list[Any],
                ) -> AsyncGenerator[ModelOutput, None]:
                    # Parse decoded response frames; yields a ModelOutput per
                    # frame that carries new candidate content. Mutates the
                    # enclosing scope's streaming flags via `nonlocal`.
                    nonlocal is_thinking, is_queueing, has_candidates, is_completed, is_final_chunk, cid, rid
                    for part in parts:
                        # Check for fatal error codes
                        error_code = get_nested_value(part, [5, 2, 0, 1, 0])
                        if error_code:
                            await self.close()
                            match error_code:
                                case ErrorCode.USAGE_LIMIT_EXCEEDED:
                                    raise UsageLimitExceeded(
                                        f"Usage limit exceeded for model '{model.model_name}'. Please wait a few minutes, "
                                        "switch to a different model (e.g., Gemini Flash), or check your account limits on gemini.google.com."
                                    )
                                case ErrorCode.MODEL_INCONSISTENT:
                                    raise ModelInvalid(
                                        "The specified model is inconsistent with the conversation history. "
                                        "Please ensure you are using the same 'model' parameter throughout the entire ChatSession."
                                    )
                                case ErrorCode.MODEL_HEADER_INVALID:
                                    raise ModelInvalid(
                                        f"The model '{model.model_name}' is currently unavailable or the request structure is outdated. "
                                        "Please update 'gemini_webapi' to the latest version or report this on GitHub if the problem persists."
                                    )
                                case ErrorCode.IP_TEMPORARILY_BLOCKED:
                                    raise TemporarilyBlocked(
                                        "Your IP address has been temporarily flagged or blocked by Google. "
                                        "Please try using a proxy, a different network, or wait for a while before retrying."
                                    )
                                case ErrorCode.TEMPORARY_ERROR_1013:
                                    raise APIError(
                                        "Gemini encountered a temporary error (1013). Retrying..."
                                    )
                                case _:
                                    raise APIError(
                                        f"Failed to generate contents (stream). Unknown API error code: {error_code}. "
                                        "This might be a temporary Google service issue."
                                    )

                        # Check for queueing status
                        status = get_nested_value(part, [5])
                        if isinstance(status, list) and status:
                            if not is_thinking:
                                is_queueing = True
                                session_state["is_queueing"] = True
                                if not has_candidates:
                                    logger.debug(
                                        "Model is in a waiting state (queueing)..."
                                    )

                        inner_json_str = get_nested_value(part, [2])
                        if inner_json_str:
                            try:
                                part_json = json.loads(inner_json_str)
                                m_data = get_nested_value(part_json, [1])
                                cid = get_nested_value(m_data, [0], "")
                                rid = get_nested_value(m_data, [1], "")
                                if m_data and isinstance(chat, ChatSession):
                                    chat.metadata = m_data

                                # Check for busy analyzing data
                                tool_name = get_nested_value(part_json, [6, 1, 0])
                                if tool_name == "data_analysis_tool":
                                    is_thinking = True
                                    session_state["is_thinking"] = True
                                    is_queueing = False
                                    session_state["is_queueing"] = False
                                    if not has_candidates:
                                        logger.debug(
                                            f"Model is active (thinking/analyzing)... Raw: {str(part_json)[:500]}"
                                        )

                                # A context string at index 25 marks the turn
                                # as saved to history (final chunk).
                                context_str = get_nested_value(part_json, [25])
                                if isinstance(context_str, str):
                                    is_final_chunk = True
                                    is_thinking = False
                                    session_state["is_thinking"] = False
                                    is_queueing = False
                                    session_state["is_queueing"] = False
                                    if isinstance(chat, ChatSession):
                                        chat.metadata = [None] * 9 + [context_str]

                                title = get_nested_value(part_json, [10, 0])
                                if title:
                                    session_state["title"] = title

                                candidates_list = get_nested_value(
                                    part_json, [4], []
                                )
                                if candidates_list:
                                    output_candidates = []
                                    for i, candidate_data in enumerate(
                                        candidates_list
                                    ):
                                        rcid = get_nested_value(candidate_data, [0])
                                        if not rcid:
                                            continue
                                        if isinstance(chat, ChatSession):
                                            chat.rcid = rcid

                                        (
                                            text,
                                            thoughts,
                                            web_images,
                                            generated_images,
                                            generated_videos,
                                        ) = self._parse_candidate(
                                            candidate_data, cid, rid, rcid
                                        )

                                        # Check if this frame represents the complete state of the message
                                        is_completed = (
                                            get_nested_value(
                                                candidate_data, [8, 0], 1
                                            )
                                            == 2
                                        )

                                        # Save this conversation turn to list_chats whenever it is stored in history.
                                        if is_final_chunk:
                                            cid = get_nested_value(
                                                part_json, [1, 0]
                                            )
                                            if cid and isinstance(
                                                self._recent_chats, list
                                            ):
                                                chat_title = session_state.get(
                                                    "title"
                                                )
                                                if not chat_title:
                                                    # Fall back to the cached title.
                                                    for c in self._recent_chats:
                                                        if c.cid == cid:
                                                            chat_title = c.title
                                                            break

                                                if chat_title:
                                                    is_pinned = False
                                                    for c in self._recent_chats:
                                                        if c.cid == cid:
                                                            is_pinned = c.is_pinned
                                                            break

                                                    # Pinned chats stay at the
                                                    # top; unpinned ones slot
                                                    # in right after the
                                                    # pinned block.
                                                    expected_idx = (
                                                        0
                                                        if is_pinned
                                                        else sum(
                                                            1
                                                            for c in self._recent_chats
                                                            if c.cid != cid
                                                            and c.is_pinned
                                                        )
                                                    )

                                                    # Only reshuffle when the
                                                    # entry is missing, moved
                                                    # or renamed.
                                                    if not (
                                                        len(self._recent_chats)
                                                        > expected_idx
                                                        and self._recent_chats[
                                                            expected_idx
                                                        ].cid
                                                        == cid
                                                        and self._recent_chats[
                                                            expected_idx
                                                        ].title
                                                        == chat_title
                                                    ):
                                                        self._recent_chats = [
                                                            c
                                                            for c in self._recent_chats
                                                            if c.cid != cid
                                                        ]
                                                        self._recent_chats.insert(
                                                            expected_idx,
                                                            ChatInfo(
                                                                cid=cid,
                                                                title=chat_title,
                                                                is_pinned=is_pinned,
                                                            ),
                                                        )

                                        # Delta computation keyed by rcid with
                                        # a positional fallback for frames
                                        # arriving before the rcid stabilizes.
                                        last_sent_text = last_texts.get(
                                            rcid
                                        ) or last_texts.get(f"idx_{i}", "")
                                        text_delta, new_full_text = (
                                            get_delta_by_fp_len(
                                                text,
                                                last_sent_text,
                                                is_final=is_completed,
                                            )
                                        )
                                        last_sent_thought = last_thoughts.get(
                                            rcid
                                        ) or last_thoughts.get(f"idx_{i}", "")
                                        if thoughts:
                                            thoughts_delta, new_full_thought = (
                                                get_delta_by_fp_len(
                                                    thoughts,
                                                    last_sent_thought,
                                                    is_final=is_completed,
                                                )
                                            )
                                        else:
                                            thoughts_delta = ""
                                            new_full_thought = ""

                                        if (
                                            text_delta
                                            or thoughts_delta
                                            or web_images
                                            or generated_images
                                        ):
                                            has_candidates = True
                                            if thoughts_delta:
                                                logger.debug(f"[Thinking]: {thoughts_delta.strip()}")
                                            if text_delta:
                                                logger.debug(f"[Generating]: {text_delta.strip()}")

                                        # Update state with the provider's cleaned state to handle drift
                                        last_texts[rcid] = last_texts[
                                            f"idx_{i}"
                                        ] = new_full_text

                                        last_thoughts[rcid] = last_thoughts[
                                            f"idx_{i}"
                                        ] = new_full_thought

                                        output_candidates.append(
                                            Candidate(
                                                rcid=rcid,
                                                text=text,
                                                text_delta=text_delta,
                                                thoughts=thoughts or None,
                                                thoughts_delta=thoughts_delta,
                                                web_images=web_images,
                                                generated_images=generated_images,
                                                generated_videos=generated_videos,
                                            )
                                        )

                                    if output_candidates:
                                        is_thinking = False
                                        session_state["is_thinking"] = False
                                        is_queueing = False
                                        session_state["is_queueing"] = False
                                        yield ModelOutput(
                                            metadata=get_nested_value(
                                                part_json, [1], []
                                            ),
                                            candidates=output_candidates,
                                        )
                            except json.JSONDecodeError:
                                # Frame payload was not valid JSON; skip it.
                                continue

                chunk_iterator = response.aiter_content().__aiter__()
                while True:
                    try:
                        # While thinking/queueing, allow the full timeout;
                        # otherwise use the shorter watchdog window.
                        stall_threshold = (
                            self.timeout
                            if (is_thinking or is_queueing)
                            else min(self.timeout, self.watchdog_timeout)
                        )
                        chunk = await asyncio.wait_for(
                            chunk_iterator.__anext__(), timeout=stall_threshold + 5
                        )
                    except StopAsyncIteration:
                        break
                    except asyncio.TimeoutError:
                        logger.debug(
                            f"[Watchdog] Socket idle for {stall_threshold + 5}s. Refreshing connection..."
                        )
                        break

                    buffer += decoder.decode(chunk, final=False)
                    # Strip Google's anti-JSON-hijacking prefix.
                    if buffer.startswith(")]}'"):
                        buffer = buffer[4:].lstrip()
                    parsed_parts, buffer = parse_response_by_frame(buffer)

                    got_update = False
                    async for out in _process_parts(parsed_parts):
                        has_generated_text = True
                        yield out
                        got_update = True

                    if got_update:
                        last_progress_time = time.time()
                        session_state["last_progress_time"] = last_progress_time
                    else:
                        stall_threshold = (
                            self.timeout
                            if (is_thinking or is_queueing)
                            else min(self.timeout, self.watchdog_timeout)
                        )
                        if (time.time() - last_progress_time) > stall_threshold:
                            if is_thinking:
                                logger.debug(
                                    f"[Watchdog] Model is taking its time thinking ({int(time.time() - last_progress_time)}s). Reconnecting to poll..."
                                )
                                break
                            else:
                                logger.debug(
                                    f"[Watchdog] Connection idle for {stall_threshold}s (queueing={is_queueing}). "
                                    "Attempting recovery..."
                                )
                                await self.close()
                                break

                # Final flush
                buffer += decoder.decode(b"", final=True)
                if buffer:
                    parsed_parts, _ = parse_response_by_frame(buffer)
                    async for out in _process_parts(parsed_parts):
                        has_generated_text = True
                        yield out

            # Stream ended without the turn being marked complete: try to
            # recover the finished turn by polling conversation history.
            if not is_completed or is_thinking or is_queueing:
                stall_threshold = (
                    self.timeout
                    if (is_thinking or is_queueing)
                    else min(self.timeout, self.watchdog_timeout)
                )
                if (time.time() - last_progress_time) > stall_threshold:
                    if not is_thinking:
                        logger.debug(
                            f"[Watchdog] Stream ended after {stall_threshold}s without completing. Triggering recovery..."
                        )
                    else:
                        logger.debug(
                            "[Watchdog] Stream finished but model is still thinking. Polling again..."
                        )

                if cid:
                    logger.debug(
                        f"Stream incomplete. Checking conversation history for {cid}..."
                    )

                    poll_start_time = time.time()

                    while True:
                        if (time.time() - poll_start_time) > self.timeout:
                            logger.warning(
                                f"[Recovery] Polling for {cid} timed out after {self.timeout}s."
                            )
                            if has_generated_text:
                                raise GeminiError(
                                    "The connection to Gemini was lost while generating the response, and recovery timed out. "
                                    "Please try sending your prompt again."
                                )
                            else:
                                raise APIError(
                                    "read_chat polling timed out waiting for the model to finish. "
                                    "The original request may have been silently aborted by Google."
                                )
                        await self._send_bard_activity()
                        recovered_history = await self.read_chat(cid)
                        if (
                            recovered_history
                            and recovered_history.turns
                            and recovered_history.turns[-1].role == "model"
                        ):
                            recovered = recovered_history.turns[-1].info
                            if (
                                recovered
                                and recovered.candidates
                                and (
                                    recovered.candidates[0].text.strip()
                                    or recovered.candidates[0].generated_images
                                    or recovered.candidates[0].web_images
                                )
                            ):
                                rec_rcid = recovered.candidates[0].rcid
                                prev_rcid = (
                                    chat_backup["rcid"] if chat_backup else ""
                                )
                                current_expected_rcid = (
                                    getattr(chat, "rcid", "") if chat else ""
                                )

                                # Make sure the recovered turn is OUR turn,
                                # not the previous one still in history.
                                is_new_turn = (
                                    rec_rcid == current_expected_rcid
                                    if current_expected_rcid
                                    else rec_rcid != prev_rcid
                                )

                                if is_new_turn:
                                    logger.debug(
                                        f"[Recovery] Successfully recovered response for CID: {cid} (RCID: {rec_rcid})"
                                    )
                                    if chat:
                                        recovered.metadata = chat.metadata
                                        chat.rcid = rec_rcid
                                    yield recovered
                                    break
                                else:
                                    logger.debug(
                                        f"[Recovery] Recovered turn is not the target turn (target: {current_expected_rcid or 'NEW'}, got {rec_rcid}). Waiting..."
                                    )

                        logger.debug(
                            f"[Recovery] Response not ready, waiting {sleep_time}s..."
                        )
                        await asyncio.sleep(sleep_time)
                    break
                else:
                    logger.debug(
                        f"Stream suspended (completed={is_completed}, final_chunk={is_final_chunk}, thinking={is_thinking}, queueing={is_queueing}). "
                        f"No CID found to recover. (Request ID: {_reqid})"
                    )
                    raise APIError(
                        "The original request may have been silently aborted by Google."
                    )

            break

        except ReadTimeout:
            raise TimeoutError(
                "The request timed out while waiting for Gemini to respond. This often happens with very long prompts "
                "or complex file analysis. Try increasing the 'timeout' value when initializing GeminiClient."
            )
        except (GeminiError, APIError):
            # Restore chat pointers only if nothing was emitted yet.
            if not has_generated_text and chat and chat_backup:
                chat.metadata = list(chat_backup["metadata"])  # type: ignore
                chat.cid = chat_backup["cid"]
                chat.rid = chat_backup["rid"]
                chat.rcid = chat_backup["rcid"]
            raise
        except Exception:
            if not has_generated_text and chat and chat_backup:
                chat.metadata = list(chat_backup["metadata"])  # type: ignore
                chat.cid = chat_backup["cid"]
                chat.rid = chat_backup["rid"]
                chat.rcid = chat_backup["rcid"]
            logger.debug(
                "Stream parsing interrupted. Attempting to recover conversation context..."
            )
            raise APIError(
                "Failed to parse response body from Google. This might be a temporary API change or invalid data."
            )
|
| 1267 |
+
|
| 1268 |
+
def start_chat(self, **kwargs) -> "ChatSession":
    """
    Create and return a new `ChatSession` attached to this client.

    Parameters
    ----------
    kwargs: `dict`, optional
        Extra keyword arguments forwarded verbatim to the `ChatSession`
        constructor. Refer to `gemini_webapi.ChatSession` for details.

    Returns
    -------
    :class:`ChatSession`
        Empty chat session object for retrieving conversation history.
    """

    session = ChatSession(geminiclient=self, **kwargs)
    return session
|
| 1285 |
+
|
| 1286 |
+
async def delete_chat(self, cid: str) -> None:
    """
    Delete a specific conversation by chat id.

    Parameters
    ----------
    cid: `str`
        The ID of the chat requiring deletion (e.g. "c_...").
    """

    # Two separate RPC calls are issued, one after the other, mirroring
    # what the web client sends for a deletion.
    first_request = RPCData(
        rpcid=GRPC.DELETE_CHAT,
        payload=json.dumps([cid]).decode("utf-8"),
    )
    await self._batch_execute([first_request])

    second_request = RPCData(
        rpcid=GRPC.DELETE_CHAT_SECOND,
        payload=json.dumps([cid, [1, None, 0, 1]]).decode("utf-8"),
    )
    await self._batch_execute([second_request])
|
| 1312 |
+
|
| 1313 |
+
def list_chats(self) -> list[ChatInfo] | None:
    """
    List all conversations.

    Returns
    -------
    `list[gemini_webapi.types.ChatInfo] | None`
        The cached list of conversations, or `None` if the client holds
        no session cache.
    """

    # Served entirely from the client-side cache; no network request is made.
    return self._recent_chats
|
| 1323 |
+
|
| 1324 |
+
async def read_chat(self, cid: str, limit: int = 10) -> ChatHistory | None:
    """
    Fetch the full conversation history by chat id.

    Parameters
    ----------
    cid: `str`
        The ID of the conversation to read (e.g. "c_...").
    limit: `int`, optional
        The maximum number of turns to fetch, by default 10.

    Returns
    -------
    :class:`ChatHistory` | None
        The conversation history, or None if reading failed.
    """
    try:
        response = await self._batch_execute(
            [
                RPCData(
                    rpcid=GRPC.READ_CHAT,
                    payload=json.dumps(
                        # Positional wire-format payload; the meaning of the
                        # flag slots is reverse-engineered from the web client
                        # and not documented — only cid/limit are known for sure.
                        [cid, limit, None, 1, [1], [4], None, 1]
                    ).decode("utf-8"),
                ),
            ]
        )

        response_json = extract_json_from_response(response.text)

        for part in response_json:
            # Index 2 holds this part's body as a JSON-encoded string.
            part_body_str = get_nested_value(part, [2])
            if not part_body_str:
                continue

            part_body = json.loads(part_body_str)
            # Index 0 of the part body holds the list of conversation turns.
            turns_data = get_nested_value(part_body, [0])
            if not turns_data:
                continue

            chat_turns = []
            for conv_turn in turns_data:
                # User turn
                user_text = get_nested_value(conv_turn, [2, 0, 0], "")
                if user_text:
                    chat_turns.append(ChatTurn(role="user", text=user_text))

                # Model turn
                candidates_list = get_nested_value(conv_turn, [3, 0])
                if candidates_list:
                    output_candidates = []
                    rid = get_nested_value(conv_turn, [1], "")
                    for candidate_data in candidates_list:
                        rcid = get_nested_value(candidate_data, [0], "")
                        (
                            text,
                            thoughts,
                            web_images,
                            generated_images,
                            generated_videos,
                        ) = self._parse_candidate(candidate_data, cid, rid, rcid)
                        output_candidates.append(
                            Candidate(
                                rcid=rcid,
                                text=text,
                                thoughts=thoughts,
                                web_images=web_images,
                                generated_images=generated_images,
                                generated_videos=generated_videos,
                            )
                        )

                    if output_candidates:
                        # The first candidate is treated as the chosen one for
                        # the turn's text and for the rcid in the metadata.
                        model_output = ModelOutput(
                            metadata=[cid, rid, output_candidates[0].rcid],
                            candidates=output_candidates,
                        )
                        chat_turns.append(
                            ChatTurn(
                                role="model",
                                text=output_candidates[0].text,
                                info=model_output,
                            )
                        )

            # Return after the first part that yielded turns; remaining
            # parts are never inspected.
            return ChatHistory(cid=cid, metadata=[cid], turns=chat_turns)

        return None
    except Exception:
        # NOTE(review): a broad catch here maps *any* failure (network,
        # parsing, bad indices) to None with a debug message that assumes
        # the model is still processing — confirm this is intentional.
        logger.debug(
            f"[read_chat] Response data for {cid!r} is still incomplete (model is still processing)..."
        )
        return None
|
| 1417 |
+
|
| 1418 |
+
def _parse_candidate(
    self, candidate_data: list[Any], cid: str, rid: str, rcid: str
) -> tuple[str, str, list[WebImage], list[GeneratedImage], list[GeneratedVideo]]:
    """
    Parses individual candidate data from the Gemini response.

    All numeric indices below address the reverse-engineered positional
    wire format of the web client; they are not documented by Google.

    Args:
        candidate_data (list[Any]): The raw candidate list from the API response.
        cid (str): Conversation ID.
        rid (str): Response ID.
        rcid (str): Response Candidate ID.

    Returns:
        tuple: A tuple containing:
            - text (str): The main response text.
            - thoughts (str): The model's reasoning or internal thoughts.
            - web_images (list[WebImage]): List of images found on the web.
            - generated_images (list[GeneratedImage]): List of images generated by the model.
            - generated_videos (list[GeneratedVideo]): List of videos generated by the model.
    """
    text = get_nested_value(candidate_data, [1, 0], "")
    # When the primary text is a "card content" placeholder, prefer the
    # alternate text at index [22, 0] if present.
    if _CARD_CONTENT_RE.match(text):
        text = get_nested_value(candidate_data, [22, 0]) or text

    # Cleanup googleusercontent artifacts
    text = _ARTIFACTS_RE.sub("", text)

    thoughts = get_nested_value(candidate_data, [37, 0, 0]) or ""

    # Image handling
    web_images = []
    for img_idx, web_img_data in enumerate(
        get_nested_value(candidate_data, [12, 1], [])
    ):
        url = get_nested_value(web_img_data, [0, 0, 0])
        if url:
            web_images.append(
                WebImage(
                    url=url,
                    # Web image titles are 1-based ("[Image 1]", ...).
                    title=f"[Image {img_idx + 1}]",
                    alt=get_nested_value(web_img_data, [0, 4], ""),
                    proxy=self.proxy,
                    client=self.client,
                )
            )

    generated_images = []
    for img_idx, gen_img_data in enumerate(
        get_nested_value(candidate_data, [12, 7, 0], [])
    ):
        url = get_nested_value(gen_img_data, [0, 3, 3])
        if url:
            image_id = get_nested_value(gen_img_data, [1, 0])
            if not image_id:
                # Synthesize a placeholder id when the response omits one.
                image_id = f"http://googleusercontent.com/image_generation_content/{img_idx}"

            generated_images.append(
                GeneratedImage(
                    url=url,
                    # NOTE: unlike web images, generated image titles are
                    # 0-based — presumably intentional; verify if unifying.
                    title=f"[Generated Image {img_idx}]",
                    alt=get_nested_value(gen_img_data, [0, 3, 2], ""),
                    proxy=self.proxy,
                    client=self.client,
                    client_ref=self,
                    cid=cid,
                    rid=rid,
                    rcid=rcid,
                    image_id=image_id,
                )
            )

    # Video handling
    generated_videos = []
    for video_root in get_nested_value(candidate_data, [12, 59, 0], []):
        video_info = get_nested_value(video_root, [0])
        if video_info:
            urls = get_nested_value(video_info, [0, 7], [])
            # Entries with fewer than two URLs are skipped: index 0 is used
            # as the thumbnail and index 1 as the video URL.
            if len(urls) >= 2:
                generated_videos.append(
                    GeneratedVideo(
                        url=urls[1],
                        thumbnail=urls[0],
                        cid=cid,
                        rid=rid,
                        rcid=rcid,
                        client_ref=self,
                        proxy=self.proxy,
                    )
                )

    return text, thoughts, web_images, generated_images, generated_videos
|
| 1509 |
+
|
| 1510 |
+
async def _get_image_full_size(
    self, cid: str, rid: str, rcid: str, image_id: str
) -> str | None:
    """
    Get the full size URL of an image.

    Sends the IMAGE_FULL_SIZE RPC identifying the image by
    (cid, rid, rcid, image_id). Returns the URL string extracted from the
    response, or None if the RPC or parsing fails.
    """
    try:
        # Positional wire-format payload mirrored from the web client;
        # the unnamed None/flag slots are not documented.
        payload = [
            [
                [None, None, None, [None, None, None, None, None, ""]],
                [image_id, 0],
                None,
                [19, ""],
                None,
                None,
                None,
                None,
                None,
                "",
            ],
            [rid, rcid, cid, None, ""],
            1,
            0,
            1,
        ]

        response = await self._batch_execute(
            [
                RPCData(
                    rpcid=GRPC.IMAGE_FULL_SIZE,
                    payload=json.dumps(payload).decode("utf-8"),
                ),
            ]
        )

        response_data = extract_json_from_response(response.text)
        # The URL sits at index 0 of the JSON-encoded body found at [0, 2].
        return get_nested_value(
            json.loads(get_nested_value(response_data, [0, 2], "[]")), [0]
        )
    except Exception:
        # Best-effort helper: any failure degrades to None rather than raising.
        logger.debug(
            "[_get_image_full_size] Could not retrieve full size URL via RPC."
        )
        return None
|
| 1554 |
+
|
| 1555 |
+
@running(retry=2)
async def _batch_execute(self, payloads: list[RPCData], **kwargs) -> Response:
    """
    Execute a batch of requests to Gemini API.

    Parameters
    ----------
    payloads: `list[RPCData]`
        List of `gemini_webapi.types.RPCData` objects to be executed.
    kwargs: `dict`, optional
        Additional arguments which will be passed to the post request.
        Refer to `curl_cffi.requests.AsyncSession.request` for more information.

    Returns
    -------
    :class:`curl_cffi.requests.Response`
        Response object containing the result of the batch execution.

    Raises
    ------
    `TimeoutError`
        If the HTTP request times out.
    `APIError`
        If the response status code is not 200 (the client is closed first).
    """

    # Consume the current request id and advance the counter by 100000,
    # mimicking the increment pattern of the Gemini web frontend.
    _reqid = self._reqid
    self._reqid += 100000

    try:
        params: dict[str, Any] = {
            "rpcids": ",".join([p.rpcid for p in payloads]),
            "_reqid": _reqid,
            "rt": "c",
            "source-path": "/app",
        }
        # Build label and session id are optional; include only when known.
        if self.build_label:
            params["bl"] = self.build_label
        if self.session_id:
            params["f.sid"] = self.session_id

        response = await self.client.post(
            Endpoint.BATCH_EXEC,
            params=params,
            data={
                "at": self.access_token,
                # f.req is a JSON array wrapping the serialized RPC entries.
                "f.req": json.dumps(
                    [[payload.serialize() for payload in payloads]]
                ).decode("utf-8"),
            },
            **kwargs,
        )
        if self.verbose:
            logger.debug(
                f"HTTP Request: POST {Endpoint.BATCH_EXEC} [{response.status_code}]"
            )
    except ReadTimeout:
        raise TimeoutError(
            "The request timed out while waiting for Gemini to respond. This often happens with very long prompts "
            "or complex file analysis. Try increasing the 'timeout' value when initializing GeminiClient."
        )

    if response.status_code != 200:
        # A non-200 likely means the session is stale; close so the
        # @running decorator can re-initialize on retry.
        await self.close()
        raise APIError(
            f"Batch execution failed with status code {response.status_code}"
        )

    return response
|
| 1617 |
+
|
| 1618 |
+
|
| 1619 |
+
class ChatSession:
    """
    Chat data to retrieve conversation history. Only if all 3 ids are provided will the conversation history be retrieved.

    Parameters
    ----------
    geminiclient: `GeminiClient`
        Async requests client interface for gemini.google.com.
    metadata: `list[str]`, optional
        List of chat metadata `[cid, rid, rcid]`, can be shorter than 3 elements, like `[cid, rid]` or `[cid]` only.
    cid: `str`, optional
        Chat id, if provided together with metadata, will override the first value in it.
    rid: `str`, optional
        Reply id, if provided together with metadata, will override the second value in it.
    rcid: `str`, optional
        Reply candidate id, if provided together with metadata, will override the third value in it.
    model: `Model | str | dict`, optional
        Specify the model to use for generation.
        Pass either a `gemini_webapi.constants.Model` enum or a model name string to use predefined models.
        Pass a dictionary to use custom model header strings ("model_name" and "model_header" keys must be provided).
    gem: `Gem | str`, optional
        Specify a gem to use as system prompt for the chat session.
        Pass either a `gemini_webapi.types.Gem` object or a gem id string.
    """

    # "__metadata" is name-mangled to "_ChatSession__metadata", which is the
    # slot actually backing the metadata/cid/rid/rcid properties below.
    __slots__ = [
        "__metadata",
        "geminiclient",
        "last_output",
        "model",
        "gem",
    ]

    def __init__(
        self,
        geminiclient: GeminiClient,
        metadata: list[str | None] | None = None,
        cid: str = "",  # chat id
        rid: str = "",  # reply id
        rcid: str = "",  # reply candidate id
        model: Model | str | dict = Model.UNSPECIFIED,
        gem: Gem | str | None = None,
    ):
        # Copy the default template so each session owns its own mutable list
        # and never aliases the shared module-level default.
        self.__metadata: list[Any] = list(_DEFAULT_METADATA)
        self.geminiclient: GeminiClient = geminiclient
        self.last_output: ModelOutput | None = None
        self.model: Model | str | dict = model
        self.gem: Gem | str | None = gem

        # Explicit ids take precedence over the corresponding positions
        # in `metadata` (applied after it).
        if metadata:
            self.metadata = metadata
        if cid:
            self.cid = cid
        if rid:
            self.rid = rid
        if rcid:
            self.rcid = rcid

    def __str__(self):
        return f"ChatSession(cid='{self.cid}', rid='{self.rid}', rcid='{self.rcid}')"

    __repr__ = __str__

    def __setattr__(self, name: str, value: Any) -> None:
        super().__setattr__(name, value)
        # update conversation history when last output is updated
        if name == "last_output" and isinstance(value, ModelOutput):
            self.metadata = value.metadata
            self.rcid = value.rcid

    async def send_message(
        self,
        prompt: str,
        files: list[str | Path | bytes | io.BytesIO] | None = None,
        temporary: bool = False,
        **kwargs,
    ) -> ModelOutput:
        """
        Generates contents with prompt.
        Use as a shortcut for `GeminiClient.generate_content(prompt, files, self)`.

        Parameters
        ----------
        prompt: `str`
            Text prompt provided by user.
        files: `list[str | Path | bytes | io.BytesIO]`, optional
            List of file paths or byte streams to be attached.
        temporary: `bool`, optional
            If set to `True`, the ongoing conversation will not show up in Gemini history.
            Switching temporary mode within a chat session will clear the previous context
            and create a new chat session under the hood.
        kwargs: `dict`, optional
            Additional arguments which will be passed to the post request.
            Refer to `curl_cffi.requests.AsyncSession.request` for more information.

        Returns
        -------
        :class:`ModelOutput`
            Output data from gemini.google.com.

        Raises
        ------
        `AssertionError`
            If prompt is empty.
        `gemini_webapi.TimeoutError`
            If request timed out.
        `gemini_webapi.GeminiError`
            If no reply candidate found in response.
        `gemini_webapi.APIError`
            - If request failed with status code other than 200.
            - If response structure is invalid and failed to parse.
        """

        return await self.geminiclient.generate_content(
            prompt=prompt,
            files=files,
            model=self.model,
            gem=self.gem,
            chat=self,
            temporary=temporary,
            **kwargs,
        )

    async def send_message_stream(
        self,
        prompt: str,
        files: list[str | Path | bytes | io.BytesIO] | None = None,
        temporary: bool = False,
        **kwargs,
    ) -> AsyncGenerator[ModelOutput, None]:
        """
        Generates contents with prompt in streaming mode within this chat session.

        This is a shortcut for `GeminiClient.generate_content_stream(prompt, files, self)`.
        The session's metadata and conversation history are automatically managed.

        Parameters
        ----------
        prompt: `str`
            Text prompt provided by user.
        files: `list[str | Path | bytes | io.BytesIO]`, optional
            List of file paths or byte streams to be attached.
        temporary: `bool`, optional
            If set to `True`, the ongoing conversation will not show up in Gemini history.
            Switching temporary mode within a chat session will clear the previous context
            and create a new chat session under the hood.
        kwargs: `dict`, optional
            Additional arguments passed to the streaming request.

        Yields
        ------
        :class:`ModelOutput`
            Partial output data containing text deltas.
        """

        async for output in self.geminiclient.generate_content_stream(
            prompt=prompt,
            files=files,
            model=self.model,
            gem=self.gem,
            chat=self,
            temporary=temporary,
            **kwargs,
        ):
            yield output

    def choose_candidate(self, index: int) -> ModelOutput:
        """
        Choose a candidate from the last `ModelOutput` to control the ongoing conversation flow.

        Parameters
        ----------
        index: `int`
            Index of the candidate to choose, starting from 0.

        Returns
        -------
        :class:`ModelOutput`
            Output data of the chosen candidate.

        Raises
        ------
        `ValueError`
            If no previous output data found in this chat session, or if index exceeds the number of candidates in last model output.
        """

        if not self.last_output:
            raise ValueError("No previous output data found in this chat session.")

        if index >= len(self.last_output.candidates):
            raise ValueError(
                f"Index {index} exceeds the number of candidates in last model output."
            )

        self.last_output.chosen = index
        # Keep the session's reply-candidate id in sync with the choice.
        self.rcid = self.last_output.rcid
        return self.last_output

    async def read_history(self, limit: int = 10) -> ChatHistory | None:
        """
        Fetch the conversation history for this session.

        Parameters
        ----------
        limit: `int`, optional
            The maximum number of turns to fetch, by default 10.

        Returns
        -------
        :class:`ChatHistory` | None
            The conversation history, or None if reading failed or cid is missing.
        """
        if not self.cid:
            return None
        return await self.geminiclient.read_chat(self.cid, limit=limit)

    @property
    def metadata(self):
        return self.__metadata

    @metadata.setter
    def metadata(self, value: list[str]):
        # Non-list values are silently ignored rather than raising.
        if not isinstance(value, list):
            return

        # Update only non-None elements to preserve existing CID/RID/RCID/Context
        # NOTE(review): writes are capped at index 10 — assumes _DEFAULT_METADATA
        # has at least that many slots; confirm against its definition.
        for i, val in enumerate(value):
            if i < 10 and val is not None:
                self.__metadata[i] = val

    @property
    def cid(self):
        return self.__metadata[0]

    @cid.setter
    def cid(self, value: str):
        self.__metadata[0] = value

    @property
    def rcid(self):
        return self.__metadata[2]

    @rcid.setter
    def rcid(self, value: str):
        self.__metadata[2] = value

    @property
    def rid(self):
        return self.__metadata[1]

    @rid.setter
    def rid(self, value: str):
        self.__metadata[1] = value
|
gemini_webapi/components/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
|
| 3 |
+
from .gem_mixin import GemMixin
|
gemini_webapi/components/__pycache__/__init__.cpython-313.pyc
ADDED
|
Binary file (254 Bytes). View file
|
|
|
gemini_webapi/components/__pycache__/gem_mixin.cpython-313.pyc
ADDED
|
Binary file (9.29 kB). View file
|
|
|
gemini_webapi/components/gem_mixin.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
|
| 3 |
+
import orjson as json
|
| 4 |
+
|
| 5 |
+
from ..constants import GRPC
|
| 6 |
+
from ..exceptions import APIError
|
| 7 |
+
from ..types import Gem, GemJar, RPCData
|
| 8 |
+
from ..utils import extract_json_from_response, get_nested_value, logger
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class GemMixin:
|
| 12 |
+
"""
|
| 13 |
+
Mixin class providing gem-related functionality for GeminiClient.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
def __init__(self, *args, **kwargs):
    """Initialize the mixin and reset the gem cache."""
    # Cooperate with the rest of the MRO chain first.
    super().__init__(*args, **kwargs)
    # Populated by `fetch_gems()`; `None` means gems were never fetched.
    self._gems: GemJar | None = None
|
| 19 |
+
|
| 20 |
+
@property
def gems(self) -> GemJar:
    """
    Cached gems previously fetched via `GeminiClient.fetch_gems()`.

    Returns
    -------
    :class:`GemJar`
        Refer to `gemini_webapi.types.GemJar`.

    Raises
    ------
    `RuntimeError`
        If `GeminiClient.fetch_gems()` has not been called before accessing this property.
    """

    cached = self._gems
    if cached is None:
        raise RuntimeError(
            "Gems not fetched yet. Call `GeminiClient.fetch_gems()` method to fetch gems from gemini.google.com."
        )
    return cached
|
| 43 |
+
|
| 44 |
+
async def fetch_gems(
    self, include_hidden: bool = False, language: str = "en", **kwargs
) -> GemJar:
    """
    Get a list of available gems from gemini, including system predefined gems and user-created custom gems.

    Note that network request will be sent every time this method is called.
    Once the gems are fetched, they will be cached and accessible via `GeminiClient.gems` property.

    Parameters
    ----------
    include_hidden: `bool`, optional
        There are some predefined gems that by default are not shown to users (and therefore may not work properly).
        Set this parameter to `True` to include them in the fetched gem list.
    language: `str`, optional
        Language code for the gems to fetch. Default is 'en'.

    Returns
    -------
    :class:`GemJar`
        Refer to `gemini_webapi.types.GemJar`.

    Raises
    ------
    `gemini_webapi.APIError`
        If the response cannot be parsed or contains no gems at all.
    """

    # Two LIST_GEMS calls in one batch: the leading integer in the payload
    # selects the listing mode (4 = include hidden predefined gems,
    # 3 = visible predefined gems, 2 = user-created gems). The identifier
    # lets us tell the two response parts apart below.
    response = await self._batch_execute(
        [
            RPCData(
                rpcid=GRPC.LIST_GEMS,
                payload=(
                    f"[4,['{language}'],0]"
                    if include_hidden
                    else f"[3,['{language}'],0]"
                ),
                identifier="system",
            ),
            RPCData(
                rpcid=GRPC.LIST_GEMS,
                payload=f"[2,['{language}'],0]",
                identifier="custom",
            ),
        ],
        **kwargs,
    )

    try:
        response_json = extract_json_from_response(response.text)

        predefined_gems, custom_gems = [], []

        for part in response_json:
            try:
                # The identifier we attached is echoed back as the last element.
                identifier = get_nested_value(part, [-1])
                part_body_str = get_nested_value(part, [2])
                if not part_body_str:
                    continue

                part_body = json.loads(part_body_str)
                # Index 2 of the part body holds the raw gem entries.
                if identifier == "system":
                    predefined_gems = get_nested_value(part_body, [2], [])
                elif identifier == "custom":
                    custom_gems = get_nested_value(part_body, [2], [])
            except json.JSONDecodeError:
                continue

        # No gems in either list means the response shape was unexpected;
        # bail out into the error path below.
        if not predefined_gems and not custom_gems:
            raise Exception
    except Exception:
        await self.close()
        logger.debug(f"Unexpected response data structure: {response.text}")
        raise APIError(
            "Failed to fetch gems. Unexpected response data structure. Client will try to re-initialize on next request."
        )

    # Build (id, Gem) pairs from both lists; positional layout per entry:
    # gem[0] = id, gem[1][0] = name, gem[1][1] = description,
    # gem[2][0] = prompt (gem[2] may be falsy, hence the and/or guard).
    self._gems = GemJar(
        itertools.chain(
            (
                (
                    gem[0],
                    Gem(
                        id=gem[0],
                        name=gem[1][0],
                        description=gem[1][1],
                        prompt=gem[2] and gem[2][0] or None,
                        predefined=True,
                    ),
                )
                for gem in predefined_gems
            ),
            (
                (
                    gem[0],
                    Gem(
                        id=gem[0],
                        name=gem[1][0],
                        description=gem[1][1],
                        prompt=gem[2] and gem[2][0] or None,
                        predefined=False,
                    ),
                )
                for gem in custom_gems
            ),
        )
    )

    return self._gems
|
| 148 |
+
|
| 149 |
+
async def create_gem(self, name: str, prompt: str, description: str = "") -> Gem:
    """
    Create a new custom gem.

    Parameters
    ----------
    name: `str`
        Name of the custom gem.
    prompt: `str`
        System instructions for the custom gem.
    description: `str`, optional
        Description of the custom gem (has no effect on the model's behavior).

    Returns
    -------
    :class:`Gem`
        The created gem.

    Raises
    ------
    `gemini_webapi.APIError`
        If the response cannot be parsed or contains no gem id.
    """

    # Positional wire-format payload mirrored from the web client; the
    # unnamed None/flag slots after prompt are not documented.
    response = await self._batch_execute(
        [
            RPCData(
                rpcid=GRPC.CREATE_GEM,
                payload=json.dumps(
                    [
                        [
                            name,
                            description,
                            prompt,
                            None,
                            None,
                            None,
                            None,
                            None,
                            0,
                            None,
                            1,
                            None,
                            None,
                            None,
                            [],
                        ]
                    ]
                ).decode("utf-8"),
            )
        ]
    )

    try:
        response_json = extract_json_from_response(response.text)
        part_body_str = get_nested_value(response_json, [0, 2], verbose=True)
        if not part_body_str:
            raise Exception

        part_body = json.loads(part_body_str)
        # The new gem's id is the first element of the part body.
        gem_id = get_nested_value(part_body, [0], verbose=True)
        if not gem_id:
            raise Exception
    except Exception:
        await self.close()
        logger.debug(f"Unexpected response data structure: {response.text}")
        raise APIError(
            "Failed to create gem. Unexpected response data structure. Client will try to re-initialize on next request."
        )

    # Echo the caller-supplied fields back; only the id comes from the server.
    return Gem(
        id=gem_id,
        name=name,
        description=description,
        prompt=prompt,
        predefined=False,
    )
|
| 221 |
+
|
| 222 |
+
async def update_gem(
|
| 223 |
+
self, gem: Gem | str, name: str, prompt: str, description: str = ""
|
| 224 |
+
) -> Gem:
|
| 225 |
+
"""
|
| 226 |
+
Update an existing custom gem.
|
| 227 |
+
|
| 228 |
+
Parameters
|
| 229 |
+
----------
|
| 230 |
+
gem: `Gem | str`
|
| 231 |
+
Gem to update, can be either a `gemini_webapi.types.Gem` object or a gem id string.
|
| 232 |
+
name: `str`
|
| 233 |
+
New name for the custom gem.
|
| 234 |
+
prompt: `str`
|
| 235 |
+
New system instructions for the custom gem.
|
| 236 |
+
description: `str`, optional
|
| 237 |
+
New description of the custom gem (has no effect on the model's behavior).
|
| 238 |
+
|
| 239 |
+
Returns
|
| 240 |
+
-------
|
| 241 |
+
:class:`Gem`
|
| 242 |
+
The updated gem.
|
| 243 |
+
"""
|
| 244 |
+
|
| 245 |
+
if isinstance(gem, Gem):
|
| 246 |
+
gem_id = gem.id
|
| 247 |
+
else:
|
| 248 |
+
gem_id = gem
|
| 249 |
+
|
| 250 |
+
await self._batch_execute(
|
| 251 |
+
[
|
| 252 |
+
RPCData(
|
| 253 |
+
rpcid=GRPC.UPDATE_GEM,
|
| 254 |
+
payload=json.dumps(
|
| 255 |
+
[
|
| 256 |
+
gem_id,
|
| 257 |
+
[
|
| 258 |
+
name,
|
| 259 |
+
description,
|
| 260 |
+
prompt,
|
| 261 |
+
None,
|
| 262 |
+
None,
|
| 263 |
+
None,
|
| 264 |
+
None,
|
| 265 |
+
None,
|
| 266 |
+
0,
|
| 267 |
+
None,
|
| 268 |
+
1,
|
| 269 |
+
None,
|
| 270 |
+
None,
|
| 271 |
+
None,
|
| 272 |
+
[],
|
| 273 |
+
0,
|
| 274 |
+
],
|
| 275 |
+
]
|
| 276 |
+
).decode("utf-8"),
|
| 277 |
+
)
|
| 278 |
+
]
|
| 279 |
+
)
|
| 280 |
+
|
| 281 |
+
return Gem(
|
| 282 |
+
id=gem_id,
|
| 283 |
+
name=name,
|
| 284 |
+
description=description,
|
| 285 |
+
prompt=prompt,
|
| 286 |
+
predefined=False,
|
| 287 |
+
)
|
| 288 |
+
|
| 289 |
+
async def delete_gem(self, gem: Gem | str, **kwargs) -> None:
|
| 290 |
+
"""
|
| 291 |
+
Delete a custom gem.
|
| 292 |
+
|
| 293 |
+
Parameters
|
| 294 |
+
----------
|
| 295 |
+
gem: `Gem | str`
|
| 296 |
+
Gem to delete, can be either a `gemini_webapi.types.Gem` object or a gem id string.
|
| 297 |
+
"""
|
| 298 |
+
|
| 299 |
+
if isinstance(gem, Gem):
|
| 300 |
+
gem_id = gem.id
|
| 301 |
+
else:
|
| 302 |
+
gem_id = gem
|
| 303 |
+
|
| 304 |
+
await self._batch_execute(
|
| 305 |
+
[
|
| 306 |
+
RPCData(
|
| 307 |
+
rpcid=GRPC.DELETE_GEM, payload=json.dumps([gem_id]).decode("utf-8")
|
| 308 |
+
)
|
| 309 |
+
],
|
| 310 |
+
**kwargs,
|
| 311 |
+
)
|
gemini_webapi/constants.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from enum import Enum, IntEnum, StrEnum
|
| 2 |
+
|
| 3 |
+
# Positions of feature flags inside the request payload list.
# NOTE(review): exact payload layout is defined in client.py — confirm these
# indexes against the request builder before changing them.
STREAMING_FLAG_INDEX = 7
GEM_FLAG_INDEX = 19
TEMPORARY_CHAT_FLAG_INDEX = 45
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class Endpoint(StrEnum):
    """
    URLs of the Google/Gemini web endpoints used by this package.
    """

    GOOGLE = "https://www.google.com"
    INIT = "https://gemini.google.com/app"
    GENERATE = "https://gemini.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate"
    ROTATE_COOKIES = "https://accounts.google.com/RotateCookies"
    UPLOAD = "https://content-push.googleapis.com/upload"
    BATCH_EXEC = "https://gemini.google.com/_/BardChatUi/data/batchexecute"
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class GRPC(StrEnum):
    """
    Google RPC ids used in Gemini API.

    These opaque ids are sent to the batchexecute endpoint to select a
    server-side method; they are grouped below by feature area.
    """

    # Chat methods
    LIST_CHATS = "MaZiqc"
    READ_CHAT = "hNvQHb"
    DELETE_CHAT = "GzXR5e"
    DELETE_CHAT_SECOND = "qWymEb"

    # Gem methods
    LIST_GEMS = "CNgdBe"
    CREATE_GEM = "oMH3Zd"
    UPDATE_GEM = "kHv0Vd"
    DELETE_GEM = "UXcSJb"

    # Account/settings
    BARD_SETTINGS = "ESY5D"

    # Model listing
    LIST_MODELS = "otAQ7b"

    # Full-size image retrieval
    IMAGE_FULL_SIZE = "c8o8Fe"
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class Headers(Enum):
    """
    Static HTTP header sets for the different request targets.
    """

    GEMINI = {
        "Content-Type": "application/x-www-form-urlencoded;charset=utf-8",
        "Host": "gemini.google.com",
        "Origin": "https://gemini.google.com",
        "Referer": "https://gemini.google.com/",
        "X-Same-Domain": "1",
    }
    ROTATE_COOKIES = {
        "Content-Type": "application/json",
    }
    UPLOAD = {"Push-ID": "feeds/mcudyrk2a4khkz"}
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class Model(Enum):
    """
    Gemini model variations selectable by the client.

    Each member's value is a 3-tuple unpacked by ``__init__`` into
    ``model_name`` (public identifier), ``model_header`` (the request header
    that selects the model server-side) and ``advanced_only`` (whether the
    model requires an advanced/paid account).
    """

    UNSPECIFIED = ("unspecified", {}, False)
    G_3_PRO_AI_FREE = (
        "gemini-3-pro-ai-free",
        {
            "x-goog-ext-525001261-jspb": '[1,null,null,null,"9d8ca3786ebdfbea",null,null,0,[4],null,null,1]'
        },
        False,
    )
    G_3_FLASH_AI_FREE = (
        "gemini-3-flash-ai-free",
        {
            "x-goog-ext-525001261-jspb": '[1,null,null,null,"fbb127bbb056c959",null,null,0,[4],null,null,1]'
        },
        False,
    )
    G_3_FLASH_THINKING_AI_FREE = (
        "gemini-3-flash-thinking-ai-free",
        {
            "x-goog-ext-525001261-jspb": '[1,null,null,null,"5bf011840784117a",null,null,0,[4],null,null,1]'
        },
        False,
    )
    G_3_PRO_AI_PRO = (
        "gemini-3-pro-ai-pro",
        {
            "x-goog-ext-525001261-jspb": '[1,null,null,null,"e6fa609c3fa255c0",null,null,0,[4],null,null,2]'
        },
        True,
    )
    G_3_FLASH_AI_PRO = (
        "gemini-3-flash-ai-pro",
        {
            "x-goog-ext-525001261-jspb": '[1,null,null,null,"56fdd199312815e2",null,null,0,[4],null,null,2]'
        },
        True,
    )
    G_3_FLASH_THINKING_AI_PRO = (
        "gemini-3-flash-thinking-ai-pro",
        {
            "x-goog-ext-525001261-jspb": '[1,null,null,null,"e051ce1aa80aa576",null,null,0,[4],null,null,2]'
        },
        True,
    )

    def __init__(self, name, header, advanced_only):
        # Unpack the member value tuple into named attributes.
        self.model_name = name
        self.model_header = header
        self.advanced_only = advanced_only

    @classmethod
    def from_name(cls, name: str):
        """Return the member whose ``model_name`` equals ``name``; raise ValueError otherwise."""
        for model in cls:
            if model.model_name == name:
                return model

        raise ValueError(
            f"Unknown model name: {name}. Available models: {', '.join([model.model_name for model in cls])}"
        )

    @classmethod
    def from_dict(cls, model_dict: dict):
        """
        Build a custom model from a dict with 'model_name' and 'model_header' keys.

        NOTE(review): this mutates the shared ``UNSPECIFIED`` member in place,
        so a second call overwrites the first and every existing reference to
        ``Model.UNSPECIFIED`` — confirm this aliasing is intentional.
        """
        if "model_name" not in model_dict or "model_header" not in model_dict:
            raise ValueError(
                "When passing a custom model as a dictionary, 'model_name' and 'model_header' keys must be provided."
            )

        if not isinstance(model_dict["model_header"], dict):
            raise ValueError(
                "When passing a custom model as a dictionary, 'model_header' must be a dictionary containing valid header strings."
            )

        custom_model = cls.UNSPECIFIED
        custom_model.model_name = model_dict["model_name"]
        custom_model.model_header = model_dict["model_header"]
        return custom_model
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class ErrorCode(IntEnum):
    """
    Known error codes returned from server.

    NOTE(review): the mapping from these codes to package exceptions lives in
    the client's response handling — confirm there before renaming members.
    """

    TEMPORARY_ERROR_1013 = 1013  # Randomly raised when generating with certain models, but disappears soon after
    USAGE_LIMIT_EXCEEDED = 1037
    MODEL_INCONSISTENT = 1050
    MODEL_HEADER_INVALID = 1052
    IP_TEMPORARILY_BLOCKED = 1060
|
gemini_webapi/exceptions.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class AuthError(Exception):
    """Raised when authentication fails because of invalid credentials/cookies."""
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class APIError(Exception):
    """Raised for package-level errors that need fixing in future development (e.g. validation errors)."""
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class ImageGenerationError(APIError):
    """Raised when parsing generated images from a response fails."""
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class GeminiError(Exception):
    """Raised for errors returned from the Gemini server that the package does not handle."""
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class TimeoutError(GeminiError):
    """Raised on request timeouts.

    NOTE: intentionally shadows the builtin ``TimeoutError`` inside this
    package's namespace; callers importing it should alias accordingly.
    """
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class UsageLimitExceeded(GeminiError):
    """Raised when the model's usage limit has been exceeded."""
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class ModelInvalid(GeminiError):
    """Raised when the model header string is invalid."""
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class TemporarilyBlocked(GeminiError):
    """Raised on 429 Too Many Requests when the IP is temporarily blocked."""
|
gemini_webapi/types/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
|
| 3 |
+
from .candidate import Candidate
|
| 4 |
+
from .gem import Gem, GemJar
|
| 5 |
+
from .grpc import RPCData
|
| 6 |
+
from .image import Image, WebImage, GeneratedImage
|
| 7 |
+
from .video import Video, GeneratedVideo
|
| 8 |
+
from .modeloutput import ModelOutput
|
| 9 |
+
from .availablemodel import AvailableModel
|
| 10 |
+
from .chatinfo import ChatInfo
|
| 11 |
+
from .chathistory import ChatTurn, ChatHistory
|
gemini_webapi/types/__pycache__/__init__.cpython-313.pyc
ADDED
|
Binary file (694 Bytes). View file
|
|
|
gemini_webapi/types/__pycache__/availablemodel.cpython-313.pyc
ADDED
|
Binary file (1.03 kB). View file
|
|
|
gemini_webapi/types/__pycache__/candidate.cpython-313.pyc
ADDED
|
Binary file (3.05 kB). View file
|
|
|
gemini_webapi/types/__pycache__/chathistory.cpython-313.pyc
ADDED
|
Binary file (2.44 kB). View file
|
|
|
gemini_webapi/types/__pycache__/chatinfo.cpython-313.pyc
ADDED
|
Binary file (795 Bytes). View file
|
|
|
gemini_webapi/types/__pycache__/gem.cpython-313.pyc
ADDED
|
Binary file (4.71 kB). View file
|
|
|
gemini_webapi/types/__pycache__/grpc.cpython-313.pyc
ADDED
|
Binary file (1.67 kB). View file
|
|
|
gemini_webapi/types/__pycache__/image.cpython-313.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
gemini_webapi/types/__pycache__/modeloutput.cpython-313.pyc
ADDED
|
Binary file (3.57 kB). View file
|
|
|
gemini_webapi/types/__pycache__/video.cpython-313.pyc
ADDED
|
Binary file (8.57 kB). View file
|
|
|
gemini_webapi/types/availablemodel.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel
|
| 2 |
+
from ..constants import Model
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class AvailableModel(BaseModel):
    """
    Available model configuration for the current account.

    Parameters
    ----------
    id: `str`
        The explicit internal code name of the model.
    name: `str`
        The display name of the model on the web UI.
    model: `gemini_webapi.constants.Model`
        The core model variation enum (carries the request headers used to select this model).
    description: `str`
        A brief description of the model's capabilities.
    """

    id: str
    name: str
    model: Model
    description: str
|
gemini_webapi/types/candidate.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import html
|
| 2 |
+
import reprlib
|
| 3 |
+
|
| 4 |
+
from pydantic import BaseModel, field_validator
|
| 5 |
+
|
| 6 |
+
from .image import Image, WebImage, GeneratedImage
|
| 7 |
+
from .video import GeneratedVideo
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Candidate(BaseModel):
    """
    A single reply candidate object in the model output. A full response from Gemini usually contains multiple reply candidates.

    Parameters
    ----------
    rcid: `str`
        Reply candidate ID to build the metadata
    text: `str`
        Text output
    text_delta: `str`, optional
        Latest incremental chunk of `text` (presumably set only while streaming — confirm against the client's stream handler)
    thoughts: `str`, optional
        Model's thought process, can be empty. Only populated with `-thinking` models
    thoughts_delta: `str`, optional
        Latest incremental chunk of `thoughts` (presumably streaming-only — confirm against the client's stream handler)
    web_images: `list[WebImage]`, optional
        List of web images in reply, can be empty.
    generated_images: `list[GeneratedImage]`, optional
        List of generated images in reply, can be empty
    generated_videos: `list[GeneratedVideo]`, optional
        List of generated videos in reply, can be empty
    """

    rcid: str
    text: str
    text_delta: str | None = None
    thoughts: str | None = None
    thoughts_delta: str | None = None
    # Pydantic copies mutable field defaults per instance, so these empty
    # lists are not shared between Candidate objects.
    web_images: list[WebImage] = []
    generated_images: list[GeneratedImage] = []
    generated_videos: list[GeneratedVideo] = []

    def __str__(self):
        return self.text

    def __repr__(self):
        return f"Candidate(rcid='{self.rcid}', text='{reprlib.repr(self.text)}', images={self.images}, videos={self.generated_videos})"

    @field_validator("text", "thoughts")
    @classmethod
    def decode_html(cls, value: str) -> str:
        """
        Auto unescape HTML entities in text/thoughts if any.
        """

        if value:
            value = html.unescape(value)
        return value

    @property
    def images(self) -> list[Image]:
        # Combined view of both image kinds; web images first.
        return list(self.web_images) + list(self.generated_images)  # type: ignore
|
gemini_webapi/types/chathistory.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import reprlib
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
|
| 4 |
+
from pydantic import BaseModel
|
| 5 |
+
|
| 6 |
+
from .modeloutput import ModelOutput
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class ChatTurn(BaseModel):
    """
    Represents a single turn (message) in a chat conversation.

    Parameters
    ----------
    role: `str`
        The role of the message sender, either "user" or "model".
    text: `str`
        The text content of the message.
    info: `ModelOutput`, optional
        The full model output if the role is "model". This contains candidates, images, and metadata.
    """

    role: str
    text: str
    info: Optional[ModelOutput] = None

    def __str__(self):
        # e.g. "USER: hello" / "MODEL: hi there"
        return f"{self.role.upper()}: {self.text}"

    def __repr__(self):
        return f"ChatTurn(role='{self.role}', text='{reprlib.repr(self.text)}')"
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class ChatHistory(BaseModel):
    """
    Represents the full history of a chat conversation.

    Parameters
    ----------
    cid: `str`
        The chat ID.
    metadata: `list[str]`
        The chat metadata.
    turns: `list[ChatTurn]`
        The list of messages in the conversation, in chronological order.
    """

    cid: str
    metadata: List[str]
    turns: List[ChatTurn]

    def __repr__(self):
        # Keep the repr short: turn count instead of full message bodies.
        return f"ChatHistory(cid='{self.cid}', turns={len(self.turns)})"
|
gemini_webapi/types/chatinfo.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class ChatInfo(BaseModel):
    """
    Chat information from the user's account.

    Parameters
    ----------
    cid: `str`
        The ID of the chat conversation (cid).
    title: `str`
        The display title of the chat conversation.
    is_pinned: `bool`, optional
        Whether the chat is pinned. Defaults to False.
    """

    cid: str
    title: str
    is_pinned: bool = False
|
gemini_webapi/types/gem.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class Gem(BaseModel):
    """
    Reusable Gemini Gem object working as a system prompt, providing additional context to the model.
    Gemini provides a set of predefined gems, and users can create custom gems as well.

    Parameters
    ----------
    id: `str`
        Unique identifier for the gem.
    name: `str`
        User-friendly name of the gem.
    description: `str`, optional
        Brief description of the gem's purpose or content.
    prompt: `str`, optional
        The system prompt text that the gem provides to the model.
    predefined: `bool`
        Indicates whether the gem is predefined by Gemini or created by the user.
    """

    id: str
    name: str
    description: str | None = None
    prompt: str | None = None
    predefined: bool

    def __str__(self) -> str:
        # Includes the full prompt text, so this can be long for custom gems.
        return (
            f"Gem(id='{self.id}', name='{self.name}', description='{self.description}', "
            f"prompt='{self.prompt}', predefined={self.predefined})"
        )
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class GemJar(dict[str, Gem]):
    """
    Collection of `Gem` objects stored by their id.

    Extends `dict` so the usual mapping operations work, while adding
    value-iteration and extra lookup/filtering helpers.
    """

    def __iter__(self):
        """Iterate over the stored gems (the dict's values) rather than their ids."""
        return iter(self.values())

    def get(
        self, id: str | None = None, name: str | None = None, default: Gem | None = None
    ) -> Gem | None:
        """
        Look up a gem by id and/or name.

        When both are given, the gem must match both. An id alone is a direct
        dict lookup; a name alone scans the stored gems.

        Parameters
        ----------
        id: `str`, optional
            The unique identifier of the gem to retrieve.
        name: `str`, optional
            The user-friendly name of the gem to retrieve.
        default: `Gem`, optional
            Value returned when no matching gem is found.

        Returns
        -------
        `Gem` | None
            The matching gem, or `default` when there is no match.

        Raises
        ------
        `AssertionError`
            If neither id nor name is provided.
        """

        assert not (
            id is None and name is None
        ), "At least one of gem id or name must be provided."

        if id is not None:
            candidate = super().get(id)
            if not candidate:
                return default
            if name is None or candidate.name == name:
                return candidate
            return default

        # id was not given, so name must be (enforced by the assertion above).
        for gem_obj in self.values():
            if gem_obj.name == name:
                return gem_obj
        return default

    def filter(
        self, predefined: bool | None = None, name: str | None = None
    ) -> "GemJar":
        """
        Return a new `GemJar` with the gems matching the given filters.

        Parameters
        ----------
        predefined: `bool`, optional
            Keep only predefined (True) or user-created (False) gems.
        name: `str`, optional
            Keep only gems whose name matches exactly.

        Returns
        -------
        `GemJar`
            The filtered collection; may be empty.
        """

        return GemJar(
            (gem_id, gem)
            for gem_id, gem in self.items()
            if (predefined is None or gem.predefined == predefined)
            and (name is None or gem.name == name)
        )
|
gemini_webapi/types/grpc.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel
|
| 2 |
+
|
| 3 |
+
from ..constants import GRPC
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class RPCData(BaseModel):
    """
    Helper class containing necessary data for Google RPC calls.

    Parameters
    ----------
    rpcid : GRPC
        Google RPC ID.
    payload : str
        Payload for the RPC call.
    identifier : str, optional
        Identifier/order for the RPC call, defaults to "generic".
        Makes sense if there are multiple RPC calls in a batch, where this identifier
        can be used to distinguish between responses.
    """

    rpcid: GRPC
    payload: str
    identifier: str = "generic"

    def __repr__(self):
        return f"GRPC(rpcid='{self.rpcid}', payload='{self.payload}', identifier='{self.identifier}')"

    def serialize(self) -> list:
        """
        Serializes object into formatted payload ready for RPC call.

        Returns the 4-element list ``[rpcid, payload, None, identifier]``.
        """

        return [self.rpcid, self.payload, None, self.identifier]
|
gemini_webapi/types/image.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import hashlib
|
| 2 |
+
import mimetypes
|
| 3 |
+
import reprlib
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from typing import Any
|
| 7 |
+
|
| 8 |
+
from curl_cffi.requests import AsyncSession
|
| 9 |
+
from curl_cffi.requests.exceptions import HTTPError
|
| 10 |
+
from pydantic import BaseModel
|
| 11 |
+
|
| 12 |
+
from ..utils import logger
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class Image(BaseModel):
    """A single image object returned from Gemini.

    Attributes:
        url (str): URL of the image.
        title (str, optional): Title of the image. Defaults to "[Image]".
        alt (str, optional): Optional description of the image.
        proxy (str, optional): Proxy used when saving image.
        client (Any, optional): Reference to the client object.
    """

    url: str
    title: str = "[Image]"
    alt: str = ""
    proxy: str | None = None
    client: Any = None

    def __str__(self):
        return f"Image(title='{self.title}', alt='{self.alt}', url='{reprlib.repr(self.url)}')"

    async def save(
        self,
        path: str = "temp",
        filename: str | None = None,
        verbose: bool = False,
        skip_invalid_filename: bool = False,
        client: AsyncSession | None = None,
    ) -> str | None:
        """Saves the image to disk.

        Args:
            path (str, optional): Directory to save the image into. Defaults to "temp".
            filename (str | None, optional): File name to save the image. When omitted
                or extension-less, a unique timestamp/hash-based name is generated and
                the extension is inferred from the response Content-Type.
            verbose (bool, optional): If True, logs the request and the saved path.
                Defaults to False.
            skip_invalid_filename (bool, optional): Accepted for interface
                compatibility. NOTE(review): currently unused in this implementation —
                confirm whether filename validation should be reinstated.
            client (AsyncSession | None, optional): Client used for requests.

        Returns:
            str | None: Absolute path of the saved image if successful.

        Raises:
            curl_cffi.requests.exceptions.HTTPError: If the network request failed.
        """

        # Build a unique base name when none (or an extension-less one) was given,
        # so concurrent saves into the same directory cannot collide.
        if not filename or not Path(filename).suffix:
            timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
            url_hash = hashlib.sha256(self.url.encode()).hexdigest()[:10]
            base_name = Path(filename).stem if filename else "image"
            filename = f"{timestamp}_{url_hash}_{base_name}"

        close_client = False
        req_client = client or self.client
        if not req_client:
            # Fall back to a throwaway session, reusing the client's cookies if any.
            client_ref = getattr(self, "client_ref", None)
            cookies = getattr(client_ref, "cookies", None) if client_ref else None
            req_client = AsyncSession(
                impersonate="chrome",
                allow_redirects=True,
                cookies=cookies,
                proxy=self.proxy,
            )
            close_client = True

        try:
            response = await req_client.get(self.url)
            if verbose:
                logger.debug(f"HTTP Request: GET {self.url} [{response.status_code}]")

            if response.status_code == 200:
                path_obj_file = Path(filename)
                if not path_obj_file.suffix:
                    # Infer the extension from the response Content-Type.
                    content_type = (
                        response.headers.get("content-type", "")
                        .split(";")[0]
                        .strip()
                        .lower()
                    )
                    ext = mimetypes.guess_extension(content_type) or ".png"
                    # BUGFIX: keep the unique generated name instead of collapsing
                    # every extension-less save to the literal "(unknown)<ext>",
                    # which silently overwrote previously saved images.
                    filename = f"{path_obj_file.name}{ext}"

                path_obj = Path(path)
                path_obj.mkdir(parents=True, exist_ok=True)

                dest = path_obj / filename
                dest.write_bytes(response.content)

                if verbose:
                    logger.info(f"Image saved as {dest.resolve()}")

                return str(dest.resolve())
            else:
                raise HTTPError(
                    f"Error downloading image: {response.status_code} {response.reason}"
                )
        finally:
            if close_client:
                await req_client.close()
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
class WebImage(Image):
    """Image retrieved from the web.

    Returned when asking Gemini to "SEND an image of [something]".
    """
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
class GeneratedImage(Image):
    """Image generated by Google's AI image generator.

    Returned when asking Gemini to "GENERATE an image of [something]".

    Attributes:
        client_ref (Any, optional): Reference to the GeminiClient instance.
        cid (str, optional): Chat ID.
        rid (str, optional): Response ID.
        rcid (str, optional): Response candidate ID.
        image_id (str, optional): Image ID generated.
    """

    client_ref: Any = None
    cid: str = ""
    rid: str = ""
    rcid: str = ""
    image_id: str = ""

    # @override
    async def save(
        self,
        path: str = "temp",
        filename: str | None = None,
        verbose: bool = False,
        skip_invalid_filename: bool = False,
        client: AsyncSession | None = None,
        full_size: bool = True,
    ) -> str | None:
        """Saves the generated image to disk.

        When ``full_size`` is True, first tries to resolve the original
        full-resolution URL via the client's private RPC; if that fails for any
        reason, falls back to rewriting the preview URL's size suffix
        (``=s1024-rj`` -> ``=s2048-rj``). The actual download is delegated to
        ``Image.save``.

        Args:
            path (str, optional): Path to save the image. Defaults to "temp".
            filename (str | None, optional): Filename to save the image.
            verbose (bool, optional): Prints status. Defaults to False.
            skip_invalid_filename (bool, optional): Skip if invalid. Defaults to False.
            client (AsyncSession | None, optional): An existing AsyncSession client.
            full_size (bool, optional): Modifies preview URLs to fetch full-size images if True. Defaults to True.

        Returns:
            str | None: Absolute path of the saved image if successfully saved, None otherwise.
        """
        if full_size:
            # The RPC route needs every piece of chat/image metadata; otherwise
            # fall straight through to the URL-suffix fallback below.
            if all([self.client_ref, self.cid, self.rid, self.rcid, self.image_id]):
                try:
                    original_url = await self.client_ref._get_image_full_size(
                        cid=self.cid,
                        rid=self.rid,
                        rcid=self.rcid,
                        image_id=self.image_id,
                    )
                    if original_url:
                        # "=d-I?alr=yes" asks the endpoint for a direct-download
                        # redirect chain rather than the image bytes themselves.
                        req_url = f"{original_url}=d-I?alr=yes"

                        req_client = client or self.client
                        close_client = False

                        if not req_client:
                            # No session supplied: build a throwaway one that we
                            # are responsible for closing in the finally below.
                            req_client = AsyncSession(
                                impersonate="chrome",
                                allow_redirects=True,
                                cookies=getattr(self.client_ref, "cookies", None),
                                proxy=self.proxy,
                            )
                            close_client = True

                        try:
                            # Two-hop resolution: each response body is the next
                            # URL in the chain, the last being the real image URL.
                            response = await req_client.get(req_url)
                            response.raise_for_status()
                            url_text = response.text

                            response = await req_client.get(url_text)
                            response.raise_for_status()
                            self.url = response.text

                            return await super().save(
                                path=path,
                                filename=filename,
                                verbose=verbose,
                                skip_invalid_filename=skip_invalid_filename,
                                client=req_client,
                            )
                        finally:
                            if close_client and req_client:
                                await req_client.close()

                except Exception as e:
                    # Any RPC failure is non-fatal; fall back to suffix rewriting.
                    logger.debug(
                        f"Failed to fetch full size image URL via RPC: {e}, falling back to default URL suffix."
                    )

            # Fallback: bump the preview size parameter to the larger variant.
            if "=s1024-rj" in self.url:
                self.url = self.url.replace("=s1024-rj", "=s2048-rj")
            elif "=s2048-rj" not in self.url:
                self.url += "=s2048-rj"
        else:
            # Caller wants the smaller preview; normalize toward =s1024-rj.
            if "=s2048-rj" in self.url:
                self.url = self.url.replace("=s2048-rj", "=s1024-rj")
            elif "=s1024-rj" not in self.url:
                self.url += "=s1024-rj"

        return await super().save(
            path=path,
            filename=filename,
            verbose=verbose,
            skip_invalid_filename=skip_invalid_filename,
            client=client,
        )
|
gemini_webapi/types/modeloutput.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel
|
| 2 |
+
|
| 3 |
+
from .image import Image
|
| 4 |
+
from .candidate import Candidate
|
| 5 |
+
from .video import GeneratedVideo
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ModelOutput(BaseModel):
    """
    Classified output from gemini.google.com

    Parameters
    ----------
    metadata: `list[str]`
        List of chat metadata `[cid, rid, rcid]`, can be shorter than 3 elements, like `[cid, rid]` or `[cid]` only
    candidates: `list[Candidate]`
        List of all candidates returned from gemini
    chosen: `int`, optional
        Index of the chosen candidate, by default will choose the first one
    """

    metadata: list[str]
    candidates: list[Candidate]
    chosen: int = 0

    def __str__(self):
        return self.text

    def __repr__(self):
        return f"ModelOutput(metadata={self.metadata}, chosen={self.chosen}, candidates={self.candidates})"

    @property
    def _selected(self) -> Candidate:
        # The candidate picked out by the `chosen` index; every public
        # accessor below simply forwards to it.
        return self.candidates[self.chosen]

    @property
    def text(self) -> str:
        return self._selected.text

    @property
    def text_delta(self) -> str:
        return self._selected.text_delta or ""

    @property
    def thoughts(self) -> str | None:
        return self._selected.thoughts

    @property
    def thoughts_delta(self) -> str:
        return self._selected.thoughts_delta or ""

    @property
    def images(self) -> list[Image]:
        return self._selected.images

    @property
    def videos(self) -> list[GeneratedVideo]:
        return self._selected.generated_videos

    @property
    def rcid(self) -> str:
        return self._selected.rcid
|
gemini_webapi/types/video.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import hashlib
|
| 3 |
+
import mimetypes
|
| 4 |
+
import reprlib
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import Any
|
| 8 |
+
|
| 9 |
+
from curl_cffi.requests import AsyncSession
|
| 10 |
+
from curl_cffi.requests.exceptions import HTTPError
|
| 11 |
+
from pydantic import BaseModel
|
| 12 |
+
|
| 13 |
+
from ..utils import logger
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Video(BaseModel):
    """A single video object returned from Gemini.

    Attributes:
        url (str): URL of the video.
        title (str, optional): Title of the video. Defaults to "[Video]".
        proxy (str, optional): Proxy used when saving video.
        client (Any, optional): Reference to the client object.
    """

    url: str
    title: str = "[Video]"
    proxy: str | None = None
    client: Any = None

    def __str__(self):
        return f"Video(title='{self.title}', url='{reprlib.repr(self.url)}')"

    async def save(
        self,
        path: str = "temp",
        filename: str | None = None,
        verbose: bool = False,
        skip_invalid_filename: bool = False,
        client: AsyncSession | None = None,
    ) -> tuple[str | None, str | None]:
        """Saves the video to disk.

        Args:
            path (str, optional): Path to save the video. Defaults to "./temp".
            filename (str | None, optional): File name to save the video. Defaults to
                a unique generated name.
            verbose (bool, optional): If True, will print the path of the saved file.
            skip_invalid_filename (bool, optional): If True, will only save the video
                if the file name has a valid extension. Defaults to False.
            client (AsyncSession | None, optional): Client used for requests.

        Returns:
            tuple[str | None, str | None]: (Absolute path of the saved video, str | None path of thumbnail)
        """

        # No filename, or one without an extension: synthesize a unique base
        # name; the extension is determined later from the response headers.
        if not filename or not Path(filename).suffix:
            timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
            url_hash = hashlib.sha256(self.url.encode()).hexdigest()[:10]
            base_name = Path(filename).stem if filename else "video"
            filename = f"{timestamp}_{url_hash}_{base_name}"

        close_client = False
        req_client = client or self.client
        if not req_client:
            # Only the GeneratedVideo subclass defines client_ref, so read it
            # defensively here on the base class.
            client_ref = getattr(self, "client_ref", None)
            cookies = getattr(client_ref, "cookies", None) if client_ref else None
            req_client = AsyncSession(
                impersonate="chrome",
                allow_redirects=True,
                cookies=cookies,
                proxy=self.proxy,
            )
            close_client = True

        try:
            path_obj = Path(path)
            path_obj.mkdir(parents=True, exist_ok=True)
            # _perform_save is overridden by GeneratedVideo to add thumbnail
            # handling and 206-polling.
            return await self._perform_save(req_client, path_obj, filename, verbose)
        finally:
            # Close only sessions created here; caller-owned sessions stay open.
            if close_client:
                await req_client.close()

    @staticmethod
    async def _download_file(
        req_client: AsyncSession,
        url: str,
        path_obj: Path,
        filename: str,
        default_ext: str = ".mp4",
        verbose: bool = False,
    ) -> str | None:
        """Internal helper to download a file and determine its extension.

        Returns the absolute saved path on HTTP 200, the sentinel string "206"
        when the server answers 206 (used by GeneratedVideo as a
        still-generating marker), and raises HTTPError on any other status.
        """
        response = await req_client.get(url)
        if verbose:
            logger.debug(f"HTTP Request: GET {url} [{response.status_code}]")

        if response.status_code == 200:
            path_obj_file = Path(filename)
            if not path_obj_file.suffix:
                # Derive the extension from the Content-Type header, ignoring
                # any ";charset=..." parameters.
                content_type = (
                    response.headers.get("content-type", "")
                    .split(";")[0]
                    .strip()
                    .lower()
                )
                ext = mimetypes.guess_extension(content_type) or default_ext
                filename = f"(unknown){ext}"

            dest = path_obj / filename
            dest.write_bytes(response.content)

            if verbose:
                logger.info(f"File saved as {dest.resolve()}")

            return str(dest.resolve())
        elif response.status_code == 206:
            # Sentinel: partial content means the asset is not ready yet.
            return "206"
        else:
            raise HTTPError(
                f"Error downloading file: {response.status_code} {response.reason}"
            )

    async def _perform_save(
        self, req_client: AsyncSession, path_obj: Path, filename: str, verbose: bool
    ) -> tuple[str | None, str | None]:
        """Base implementation: simple download."""
        path = await self._download_file(
            req_client, self.url, path_obj, filename, ".mp4", verbose
        )
        # Base videos never have a thumbnail, hence None for the second slot.
        return path, None
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
class GeneratedVideo(Video):
    """Video generated by Google's AI Veo.

    Attributes:
        client_ref (Any, optional): Reference to the GeminiClient instance.
        thumbnail (str, optional): URL of the video thumbnail.
        cid (str, optional): Chat ID.
        rid (str, optional): Response ID.
        rcid (str, optional): Response candidate ID.
    """

    client_ref: Any = None
    thumbnail: str = ""
    cid: str = ""
    rid: str = ""
    rcid: str = ""

    # @override
    async def _perform_save(
        self, req_client: AsyncSession, path_obj: Path, filename: str, verbose: bool
    ) -> tuple[str | None, str | None]:
        """Internal method for GeneratedVideo, handling thumbnails and polling."""
        saved_thumb_path = None
        if self.thumbnail:
            # Save the thumbnail under the same base name; its extension is
            # derived from the response headers (falling back to .jpg).
            thumb_base = Path(filename).stem
            try:
                saved_thumb_path = await self._download_file(
                    req_client, self.thumbnail, path_obj, thumb_base, ".jpg", verbose
                )
            except Exception as e:
                # Thumbnail failures are non-fatal; the video is still saved.
                if verbose:
                    logger.warning(f"Failed to save thumbnail: {e}")

        # Poll until the server stops answering 206 (the "still generating"
        # sentinel from _download_file).
        # NOTE(review): this loop has no upper bound on retries, so a video
        # that never finishes would poll forever — confirm whether a timeout
        # is intended upstream.
        while True:
            path_or_marker = await self._download_file(
                req_client, self.url, path_obj, filename, ".mp4", verbose
            )

            if path_or_marker == "206":
                if verbose:
                    logger.info("Video still generating (206), retrying in 10s...")
                await asyncio.sleep(10)
            else:
                return path_or_marker, saved_thumb_path
|
gemini_webapi/utils/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
|
| 3 |
+
from .decorators import running
|
| 4 |
+
from .get_access_token import get_access_token
|
| 5 |
+
from .load_browser_cookies import load_browser_cookies
|
| 6 |
+
from .logger import logger, set_log_level
|
| 7 |
+
from .parsing import (
|
| 8 |
+
extract_json_from_response,
|
| 9 |
+
get_delta_by_fp_len,
|
| 10 |
+
get_nested_value,
|
| 11 |
+
parse_response_by_frame,
|
| 12 |
+
)
|
| 13 |
+
from .rotate_1psidts import rotate_1psidts
|
| 14 |
+
from .upload_file import upload_file, parse_file_name
|
gemini_webapi/utils/__pycache__/__init__.cpython-313.pyc
ADDED
|
Binary file (648 Bytes). View file
|
|
|
gemini_webapi/utils/__pycache__/decorators.cpython-313.pyc
ADDED
|
Binary file (3.95 kB). View file
|
|
|
gemini_webapi/utils/__pycache__/get_access_token.cpython-313.pyc
ADDED
|
Binary file (9.66 kB). View file
|
|
|
gemini_webapi/utils/__pycache__/load_browser_cookies.cpython-313.pyc
ADDED
|
Binary file (3.69 kB). View file
|
|
|
gemini_webapi/utils/__pycache__/logger.cpython-313.pyc
ADDED
|
Binary file (1.48 kB). View file
|
|
|
gemini_webapi/utils/__pycache__/parsing.cpython-313.pyc
ADDED
|
Binary file (9.78 kB). View file
|
|
|
gemini_webapi/utils/__pycache__/rotate_1psidts.cpython-313.pyc
ADDED
|
Binary file (4.15 kB). View file
|
|
|
gemini_webapi/utils/__pycache__/upload_file.cpython-313.pyc
ADDED
|
Binary file (4.7 kB). View file
|
|
|
gemini_webapi/utils/decorators.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import functools
|
| 3 |
+
import inspect
|
| 4 |
+
from collections.abc import Callable
|
| 5 |
+
|
| 6 |
+
from ..exceptions import APIError
|
| 7 |
+
|
| 8 |
+
# Multiplier (in seconds) for the linear retry back-off used by `running`.
DELAY_FACTOR = 5


def running(retry: int = 0) -> Callable:
    """
    Decorator to check if GeminiClient is running before making a request.
    Supports both regular async functions and async generators.

    Parameters
    ----------
    retry: `int`, optional
        Max number of retries when `gemini_webapi.APIError` is raised.
    """

    def decorator(func):
        async def _ensure_ready(client):
            """Lazily (re-)initialize the client, raising APIError on failure.

            Shared by both wrapper variants below so the init/validation logic
            is not duplicated.
            """
            if not client._running:
                await client.init(
                    timeout=client.timeout,
                    auto_close=client.auto_close,
                    close_delay=client.close_delay,
                    auto_refresh=client.auto_refresh,
                    refresh_interval=client.refresh_interval,
                    verbose=client.verbose,
                    watchdog_timeout=client.watchdog_timeout,
                )

            if not client._running:
                raise APIError(
                    f"Invalid function call: GeminiClient.{func.__name__}. Client initialization failed."
                )

        if inspect.isasyncgenfunction(func):

            @functools.wraps(func)
            async def wrapper(client, *args, current_retry=None, **kwargs):
                # current_retry tracks remaining attempts across recursive calls.
                if current_retry is None:
                    current_retry = retry

                try:
                    await _ensure_ready(client)
                    async for item in func(client, *args, **kwargs):
                        yield item
                except APIError:
                    if current_retry > 0:
                        # Linear back-off: later attempts wait longer.
                        delay = (retry - current_retry + 1) * DELAY_FACTOR
                        await asyncio.sleep(delay)
                        async for item in wrapper(
                            client, *args, current_retry=current_retry - 1, **kwargs
                        ):
                            yield item
                    else:
                        raise

            return wrapper
        else:

            @functools.wraps(func)
            async def wrapper(client, *args, current_retry=None, **kwargs):
                if current_retry is None:
                    current_retry = retry

                try:
                    await _ensure_ready(client)
                    return await func(client, *args, **kwargs)
                except APIError:
                    if current_retry > 0:
                        delay = (retry - current_retry + 1) * DELAY_FACTOR
                        await asyncio.sleep(delay)
                        return await wrapper(
                            client, *args, current_retry=current_retry - 1, **kwargs
                        )

                    raise

            return wrapper

    return decorator
|
gemini_webapi/utils/get_access_token.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import re
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
from curl_cffi.requests import AsyncSession, Cookies, Response
|
| 6 |
+
|
| 7 |
+
from .load_browser_cookies import HAS_BC3, load_browser_cookies
|
| 8 |
+
from .logger import logger
|
| 9 |
+
from .rotate_1psidts import _extract_cookie_value
|
| 10 |
+
from ..constants import Endpoint, Headers
|
| 11 |
+
from ..exceptions import AuthError
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
async def send_request(
    client: AsyncSession, cookies: dict | Cookies, verbose: bool = False
) -> Response:
    """
    Send http request with provided cookies using a shared session.

    The shared session's cookie jar is cleared and replaced with ``cookies``
    before each attempt, so successive calls do not leak state between
    candidate cookie groups. Raises on non-2xx responses.
    """
    # Drop cookies left over from a previous attempt on this shared session.
    client.cookies.clear()
    if isinstance(cookies, Cookies):
        client.cookies.update(cookies)
    else:
        # Plain dict: pin each cookie to the .google.com domain explicitly.
        for k, v in cookies.items():
            client.cookies.set(k, v, domain=".google.com")

    response = await client.get(Endpoint.INIT, headers=Headers.GEMINI.value)
    if verbose:
        logger.debug(f"HTTP Request: GET {Endpoint.INIT} [{response.status_code}]")
    response.raise_for_status()
    return response
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
async def get_access_token(
    base_cookies: dict | Cookies,
    proxy: str | None = None,
    verbose: bool = False,
    verify: bool = True,
) -> tuple[str | None, str | None, str | None, AsyncSession]:
    """
    Send a get request to gemini.google.com for each group of available cookies and return
    the value of "SNlM0e" as access token on the first successful request.

    Returns the **live** AsyncSession that succeeded so the caller can reuse
    the same TLS connection for subsequent requests.

    Cookie groups are attempted in priority order: cached rotated 1PSIDTS
    values, then the caller-supplied base cookies, then cookies scraped from
    local browsers. Raises AuthError if every group fails.
    """

    client = AsyncSession(
        impersonate="chrome", proxy=proxy, allow_redirects=True, verify=verify
    )

    try:
        # Preflight hit on google.com collects baseline cookies (e.g. NID)
        # that are merged into every candidate jar below.
        response = await client.get(Endpoint.GOOGLE)
        if verbose:
            logger.debug(
                f"HTTP Request: GET {Endpoint.GOOGLE} [{response.status_code}]"
            )
        preflight_cookies = Cookies(client.cookies)
    except Exception:
        # The session is only returned on success; close it on early failure.
        await client.close()
        raise

    extra_cookies = Cookies()
    if response.status_code == 200:
        extra_cookies = preflight_cookies

    # Phase 1: Prepare Cache
    # tried_psid_ts de-duplicates (PSID, PSIDTS) pairs across all phases.
    cookie_jars_to_test = []
    tried_psid_ts = set()

    if isinstance(base_cookies, Cookies):
        base_psid = _extract_cookie_value(base_cookies, "__Secure-1PSID")
        base_psidts = _extract_cookie_value(base_cookies, "__Secure-1PSIDTS")
    else:
        base_psid = base_cookies.get("__Secure-1PSID")
        base_psidts = base_cookies.get("__Secure-1PSIDTS")

    gemini_cookie_path = os.getenv("GEMINI_COOKIE_PATH")
    if gemini_cookie_path:
        cache_dir = Path(gemini_cookie_path)
    else:
        cache_dir = Path(__file__).parent / "temp"

    if base_psid:
        # Rotated 1PSIDTS values are cached per-PSID by rotate_1psidts.
        filename = f".cached_1psidts_{base_psid}.txt"
        cache_file = cache_dir / filename
        if cache_file.is_file():
            cached_1psidts = cache_file.read_text().strip()
            if cached_1psidts:
                jar = Cookies(extra_cookies)
                jar.update(base_cookies)
                jar.set("__Secure-1PSIDTS", cached_1psidts, domain=".google.com")
                cookie_jars_to_test.append((jar, "Cache"))
                tried_psid_ts.add((base_psid, cached_1psidts))
            elif verbose:
                logger.debug("Skipping loading cached cookies. Cache file is empty.")
        elif verbose:
            logger.debug("Skipping loading cached cookies. Cache file not found.")

    if not base_psid:
        # No PSID supplied: try every cached PSID we have on disk.
        for cache_file in cache_dir.glob(".cached_1psidts_*.txt"):
            # stem is ".cached_1psidts_<PSID>"; the prefix is 16 chars long.
            psid = cache_file.stem[16:]
            cached_1psidts = cache_file.read_text().strip()
            if cached_1psidts:
                jar = Cookies(extra_cookies)
                jar.set("__Secure-1PSID", psid, domain=".google.com")
                jar.set("__Secure-1PSIDTS", cached_1psidts, domain=".google.com")
                cookie_jars_to_test.append((jar, "Cache"))
                tried_psid_ts.add((psid, cached_1psidts))

    # Phase 2: Base Cookies
    if base_psid and base_psidts:
        if (base_psid, base_psidts) not in tried_psid_ts:
            jar = Cookies(extra_cookies)
            jar.update(base_cookies)
            cookie_jars_to_test.append((jar, "Base Cookies"))
            tried_psid_ts.add((base_psid, base_psidts))
        elif verbose:
            logger.debug("Skipping base cookies as they match cached cookies.")
    elif verbose and not cookie_jars_to_test:
        logger.debug(
            "Skipping loading base cookies. Either __Secure-1PSID or __Secure-1PSIDTS is not provided."
        )

    # Phase 3: Browser Cookies
    try:
        browser_cookies = load_browser_cookies(
            domain_name="google.com", verbose=verbose
        )
        if browser_cookies:
            for browser, cookies in browser_cookies.items():
                if secure_1psid := cookies.get("__Secure-1PSID"):
                    # Browser cookies for a different account than the one the
                    # caller asked for are ignored.
                    if base_psid and base_psid != secure_1psid:
                        if verbose:
                            logger.debug(
                                f"Skipping loading local browser cookies from {browser}. "
                                "__Secure-1PSID does not match the one provided."
                            )
                        continue

                    secure_1psidts = cookies.get("__Secure-1PSIDTS")
                    if (secure_1psid, secure_1psidts or "") in tried_psid_ts:
                        continue

                    local_cookies = {"__Secure-1PSID": secure_1psid}
                    if secure_1psidts:
                        local_cookies["__Secure-1PSIDTS"] = secure_1psidts
                    if nid := cookies.get("NID"):
                        local_cookies["NID"] = nid

                    jar = Cookies(extra_cookies)
                    for k, v in local_cookies.items():
                        jar.set(k, v, domain=".google.com")

                    cookie_jars_to_test.append((jar, f"Browser ({browser})"))
                    tried_psid_ts.add((secure_1psid, secure_1psidts or ""))
                    if verbose:
                        logger.debug(f"Prepared local browser cookies from {browser}")

        if (
            HAS_BC3
            and not any(group.startswith("Browser") for _, group in cookie_jars_to_test)
            and verbose
        ):
            logger.debug(
                "Skipping loading local browser cookies. Login to gemini.google.com in your browser first."
            )
    except Exception:
        # Browser cookie loading is best-effort; any failure is non-fatal.
        if verbose:
            logger.debug(
                "Skipping loading local browser cookies (Not available or no permission)."
            )

    # Try each prepared jar in order; first one that yields any of the init
    # tokens (SNlM0e / cfb2h / FdrFJe) wins and the live session is returned.
    current_attempt = 0
    for jar, group_name in cookie_jars_to_test:
        current_attempt += 1
        try:
            res = await send_request(client, jar, verbose=verbose)
            snlm0e = re.search(r'"SNlM0e":\s*"(.*?)"', res.text)
            cfb2h = re.search(r'"cfb2h":\s*"(.*?)"', res.text)
            fdrfje = re.search(r'"FdrFJe":\s*"(.*?)"', res.text)
            if snlm0e or cfb2h or fdrfje:
                if verbose:
                    logger.debug(
                        f"Init attempt ({current_attempt}) from {group_name} succeeded."
                    )
                return (
                    snlm0e.group(1) if snlm0e else None,
                    cfb2h.group(1) if cfb2h else None,
                    fdrfje.group(1) if fdrfje else None,
                    client,
                )
        except Exception:
            if verbose:
                logger.debug(
                    f"Init attempt ({current_attempt}) from {group_name} failed."
                )

    await client.close()
    raise AuthError(
        f"Failed to initialize client after {current_attempt} attempts. SECURE_1PSIDTS could get expired frequently, please make sure cookie values are up to date."
    )
|
gemini_webapi/utils/load_browser_cookies.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 2 |
+
from http.cookiejar import CookieJar
|
| 3 |
+
|
| 4 |
+
from .logger import logger
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
import browser_cookie3 as bc3
|
| 8 |
+
|
| 9 |
+
HAS_BC3 = True
|
| 10 |
+
except ImportError:
|
| 11 |
+
bc3 = None
|
| 12 |
+
HAS_BC3 = False
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def load_browser_cookies(domain_name: str = "", verbose: bool = False) -> dict:
    """
    Try to load cookies from all supported browsers and return combined cookiejar.
    Optionally pass in a domain name to only load cookies from the specified domain.

    Parameters
    ----------
    domain_name : str, optional
        Domain name to filter cookies by, by default will load all cookies without filtering.
    verbose : bool, optional
        If `True`, will print more infomation in logs.

    Returns
    -------
    `dict[str, dict]`
        Dictionary with browser as keys and their cookies for the specified domain as values.
        Only browsers that have cookies for the specified domain will be included.
    """
    if not HAS_BC3 or bc3 is None:
        if verbose:
            logger.debug(
                "Optional dependency 'browser-cookie3' not found. Skipping browser cookie loading."
            )
        return {}

    loaders = [
        bc3.chrome,
        bc3.chromium,
        bc3.opera,
        bc3.opera_gx,
        bc3.brave,
        bc3.edge,
        bc3.vivaldi,
        bc3.firefox,
        bc3.librewolf,
        bc3.safari,
    ]

    def _collect(loader):
        """Load one browser's jar; return (name, dict) or None on failure."""
        try:
            jar: CookieJar = loader(domain_name=domain_name)
        except bc3.BrowserCookieError:
            return None
        except PermissionError:
            if verbose:
                logger.warning(
                    f"Permission denied while trying to load cookies from {loader.__name__}."
                )
            return None
        except Exception:
            if verbose:
                logger.debug(
                    f"Failed to load cookies from {loader.__name__} (may not be installed)."
                )
            return None
        if jar:
            return loader.__name__, {cookie.name: cookie.value for cookie in jar}
        return None

    # Query every browser concurrently; each loader hits the local disk and
    # may block, so a thread per browser keeps the slowest from serializing.
    results: dict = {}
    with ThreadPoolExecutor(max_workers=len(loaders)) as pool:
        pending = [pool.submit(_collect, fn) for fn in loaders]
        for fut in as_completed(pending):
            outcome = fut.result()
            if outcome:
                browser_name, cookie_map = outcome
                results[browser_name] = cookie_map

    return results
|
gemini_webapi/utils/logger.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys

from loguru import logger as _logger

# Id of the sink installed by `set_log_level`; None until the first call.
_handler_id = None


def _is_gemini_record(record) -> bool:
    """Filter: keep only records emitted through this package's bound logger."""
    return record["extra"].get("name") == "gemini_webapi"


def set_log_level(level: str | int) -> None:
    """
    Set the log level for gemini_webapi. The default log level is "INFO".

    Note: calling this function for the first time will globally remove all existing loguru
    handlers. To avoid this, you may want to set logging behaviors directly with loguru.

    Parameters
    ----------
    level : `str | int`
        Log level: "TRACE", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"

    Examples
    --------
    >>> from gemini_webapi import set_log_level
    >>> set_log_level("DEBUG")  # Show debug messages
    >>> set_log_level("ERROR")  # Only show errors
    """

    global _handler_id

    # On the first call `_handler_id` is None and `loguru.remove(None)` drops
    # every existing handler; on later calls only our own sink is replaced.
    _logger.remove(_handler_id)
    _handler_id = _logger.add(sys.stderr, level=level, filter=_is_gemini_record)


# Package-scoped logger; the bound extra "name" is what the filter above matches.
logger = _logger.bind(name="gemini_webapi")
|
gemini_webapi/utils/parsing.py
ADDED
|
@@ -0,0 +1,265 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import difflib
|
| 2 |
+
import re
|
| 3 |
+
import reprlib
|
| 4 |
+
from typing import Any
|
| 5 |
+
|
| 6 |
+
import orjson as json
|
| 7 |
+
|
| 8 |
+
from .logger import logger
|
| 9 |
+
|
| 10 |
+
# Frame header of Google's length-prefixed streaming protocol: "<digits>\n".
_LENGTH_MARKER_PATTERN = re.compile(r"(\d+)\n")
# A trailing run of backslashes escaping a Markdown marker (`, *, _, ~) and
# everything after it on the final line — transient flicker in streamed text.
_FLICKER_ESC_RE = re.compile(r"\\+[`*_~].*$")


def get_clean_text(s: str) -> str:
    """
    Clean Gemini text by removing trailing code block artifacts and temporary escapes of Markdown markers.
    """

    if not s:
        return ""

    # Drop an unterminated trailing code fence, then any flickering escape run.
    return _FLICKER_ESC_RE.sub("", s.removesuffix("\n```"))
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def get_delta_by_fp_len(
|
| 29 |
+
new_raw: str, last_sent_clean: str, is_final: bool
|
| 30 |
+
) -> tuple[str, str]:
|
| 31 |
+
"""
|
| 32 |
+
Calculate text delta by aligning stable content and matching volatile symbols.
|
| 33 |
+
Handles temporary flicker at ends and permanent escaping drift during code block transitions.
|
| 34 |
+
Uses SequenceMatcher to robustly handle middle-string modifications.
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
new_c = get_clean_text(new_raw) if not is_final else new_raw
|
| 38 |
+
|
| 39 |
+
if new_c.startswith(last_sent_clean):
|
| 40 |
+
return new_c[len(last_sent_clean) :], new_c
|
| 41 |
+
|
| 42 |
+
# Find the matching suffix to handle differences gracefully
|
| 43 |
+
search_len = min(3000, max(1000, len(last_sent_clean)))
|
| 44 |
+
search_len = min(search_len, len(last_sent_clean), len(new_c))
|
| 45 |
+
|
| 46 |
+
if search_len == 0:
|
| 47 |
+
return new_c, new_c
|
| 48 |
+
|
| 49 |
+
tail_last = last_sent_clean[-search_len:]
|
| 50 |
+
tail_new = new_c[-search_len:]
|
| 51 |
+
|
| 52 |
+
sm = difflib.SequenceMatcher(None, tail_last, tail_new)
|
| 53 |
+
blocks = [b for b in sm.get_matching_blocks() if b.size > 0]
|
| 54 |
+
|
| 55 |
+
if blocks:
|
| 56 |
+
last_match = blocks[-1]
|
| 57 |
+
match_end = last_match.b + last_match.size
|
| 58 |
+
return tail_new[match_end:], new_c
|
| 59 |
+
|
| 60 |
+
# Fallback to full string if tail didn't match at all
|
| 61 |
+
sm = difflib.SequenceMatcher(None, last_sent_clean, new_c)
|
| 62 |
+
blocks = [b for b in sm.get_matching_blocks() if b.size > 0]
|
| 63 |
+
|
| 64 |
+
if blocks:
|
| 65 |
+
last_match = blocks[-1]
|
| 66 |
+
match_end = last_match.b + last_match.size
|
| 67 |
+
return new_c[match_end:], new_c
|
| 68 |
+
|
| 69 |
+
return new_c, new_c
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def _get_char_count_for_utf16_units(
|
| 73 |
+
s: str, start_idx: int, utf16_units: int
|
| 74 |
+
) -> tuple[int, int]:
|
| 75 |
+
"""
|
| 76 |
+
Calculate the number of Python characters (code points) and actual UTF-16
|
| 77 |
+
units found.
|
| 78 |
+
"""
|
| 79 |
+
|
| 80 |
+
count = 0
|
| 81 |
+
units = 0
|
| 82 |
+
limit = len(s)
|
| 83 |
+
|
| 84 |
+
while units < utf16_units and (start_idx + count) < limit:
|
| 85 |
+
char = s[start_idx + count]
|
| 86 |
+
u = 2 if ord(char) > 0xFFFF else 1
|
| 87 |
+
if units + u > utf16_units:
|
| 88 |
+
break
|
| 89 |
+
units += u
|
| 90 |
+
count += 1
|
| 91 |
+
|
| 92 |
+
return count, units
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def get_nested_value(
|
| 96 |
+
data: Any, path: list[int | str], default: Any = None, verbose: bool = False
|
| 97 |
+
) -> Any:
|
| 98 |
+
"""
|
| 99 |
+
Safely navigate through a nested structure (list or dict) using a sequence of keys/indices.
|
| 100 |
+
|
| 101 |
+
Parameters
|
| 102 |
+
----------
|
| 103 |
+
data: `Any`
|
| 104 |
+
The nested structure to traverse.
|
| 105 |
+
path: `list[int | str]`
|
| 106 |
+
A list of indices or keys representing the path.
|
| 107 |
+
default: `Any`
|
| 108 |
+
Value to return if the path is invalid.
|
| 109 |
+
verbose: `bool`
|
| 110 |
+
If True, log debug information when the path cannot be fully traversed.
|
| 111 |
+
"""
|
| 112 |
+
|
| 113 |
+
current = data
|
| 114 |
+
|
| 115 |
+
for i, key in enumerate(path):
|
| 116 |
+
found = False
|
| 117 |
+
if isinstance(key, int):
|
| 118 |
+
if isinstance(current, list) and -len(current) <= key < len(current):
|
| 119 |
+
current = current[key]
|
| 120 |
+
found = True
|
| 121 |
+
elif isinstance(key, str):
|
| 122 |
+
if isinstance(current, dict) and key in current:
|
| 123 |
+
current = current[key]
|
| 124 |
+
found = True
|
| 125 |
+
|
| 126 |
+
if not found:
|
| 127 |
+
if verbose:
|
| 128 |
+
logger.debug(
|
| 129 |
+
f"Safe navigation: path {path} ended at index {i} (key '{key}'), "
|
| 130 |
+
f"returning default. Context: {reprlib.repr(current)}"
|
| 131 |
+
)
|
| 132 |
+
return default
|
| 133 |
+
|
| 134 |
+
return current if current is not None else default
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def parse_response_by_frame(content: str) -> tuple[list[Any], str]:
    """
    Core parser for Google's length-prefixed framing protocol.
    Parse as many JSON frames as possible from an accumulated buffer received from streaming responses.

    This function implements Google's length-prefixed framing protocol. Each frame starts
    with a length marker (number of characters) followed by a newline and the JSON content.
    If a frame is partially received, it stays in the buffer for the next call.

    Each frame has the format: `[length]\n[json_payload]\n`,
    The length value includes the newline after the number and the newline after the JSON.

    Parameters
    ----------
    content: `str`
        The accumulated string buffer containing raw streaming data from the API.

    Returns
    -------
    `tuple[list[Any], str]`
        A tuple containing:
        - A list of parsed JSON objects (envelopes) extracted from the buffer.
        - The remaining unparsed part of the buffer (incomplete frames).
    """

    consumed_pos = 0
    total_len = len(content)
    parsed_frames = []

    while consumed_pos < total_len:
        # Skip inter-frame whitespace (e.g. the newline terminating the previous frame).
        while consumed_pos < total_len and content[consumed_pos].isspace():
            consumed_pos += 1

        if consumed_pos >= total_len:
            break

        # A frame must begin with "<digits>\n"; if the marker itself hasn't
        # fully arrived yet, leave the rest of the buffer for the next call.
        match = _LENGTH_MARKER_PATTERN.match(content, pos=consumed_pos)
        if not match:
            break

        length_val = match.group(1)
        length = int(length_val)

        # Content starts immediately after the digits.
        # Google uses UTF-16 code units (JavaScript `String.length`) for the length marker.
        start_content = match.start() + len(length_val)
        char_count, units_found = _get_char_count_for_utf16_units(
            content, start_content, length
        )

        # Fewer units than announced: the frame is truncated mid-stream.
        # consumed_pos is not advanced, so the partial frame stays buffered.
        if units_found < length:
            logger.debug(
                f"Incomplete frame at position {consumed_pos}: expected {length} UTF-16 units, "
                f"but received {units_found}. Waiting for additional data..."
            )
            break

        end_pos = start_content + char_count
        chunk = content[start_content:end_pos].strip()
        consumed_pos = end_pos

        if not chunk:
            continue

        try:
            parsed = json.loads(chunk)
            # Envelopes may arrive as a bare object or a batch list; flatten batches.
            if isinstance(parsed, list):
                parsed_frames.extend(parsed)
            else:
                parsed_frames.append(parsed)
        except json.JSONDecodeError:
            # Malformed frame: the position was already consumed above, so one
            # bad frame is logged and skipped without stalling the stream.
            logger.debug(
                f"Failed to parse chunk at pos {start_content} with length {length}. "
                f"Frame content: {reprlib.repr(chunk)}"
            )

    return parsed_frames, content[consumed_pos:]
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def extract_json_from_response(text: str) -> list:
    """
    Extract and normalize JSON content from a Google API response.

    Tries, in order: the length-prefixed framing protocol, a single JSON
    document, and newline-delimited JSON. Raises ValueError if none match.
    """

    if not isinstance(text, str):
        raise TypeError(
            f"Input text is expected to be a string, got {type(text).__name__} instead."
        )

    # Drop Google's anti-XSSI guard prefix, then leading whitespace.
    content = text.removeprefix(")]}'").lstrip()

    # Strategy 1: length-prefixed framing — the most structured format.
    frames, _ = parse_response_by_frame(content)
    if frames:
        return frames

    stripped = content.strip()

    # Strategy 2: the whole payload is a single JSON document.
    try:
        document = json.loads(stripped)
    except json.JSONDecodeError:
        pass
    else:
        return document if isinstance(document, list) else [document]

    # Strategy 3: newline-delimited JSON (NDJSON); unparseable lines are skipped.
    envelopes: list = []
    for raw_line in stripped.splitlines():
        raw_line = raw_line.strip()
        if not raw_line:
            continue

        try:
            item = json.loads(raw_line)
        except json.JSONDecodeError:
            continue

        if isinstance(item, list):
            envelopes.extend(item)
        elif isinstance(item, dict):
            envelopes.append(item)

    if envelopes:
        return envelopes

    raise ValueError("Could not find a valid JSON object or array in the response.")
|
gemini_webapi/utils/rotate_1psidts.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
from curl_cffi.requests import AsyncSession, Cookies
|
| 6 |
+
|
| 7 |
+
from ..constants import Endpoint, Headers
|
| 8 |
+
from ..exceptions import AuthError
|
| 9 |
+
from .logger import logger
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _extract_cookie_value(cookies: Cookies, name: str) -> str | None:
    """
    Extract a cookie value from a curl_cffi Cookies jar, trying domain-specific
    lookups first to avoid CookieConflict, then falling back to iteration.
    """

    # Probing explicit domains sidesteps curl_cffi's CookieConflict when the
    # same cookie name exists under several Google domains.
    candidate_domains = (
        ".google.com",
        "google.com",
        ".accounts.google.com",
        "accounts.google.com",
    )
    for candidate in candidate_domains:
        found = cookies.get(name, domain=candidate)
        if found:
            return found

    # Last resort: scan the raw jar regardless of domain.
    return next((c.value for c in cookies.jar if c.name == name), None)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
async def rotate_1psidts(client: AsyncSession, verbose: bool = False) -> str | None:
    """
    Refresh the __Secure-1PSIDTS cookie and store the refreshed cookie value in cache file.

    Parameters
    ----------
    client : `curl_cffi.requests.AsyncSession`
        The shared async session to use for the request.
    verbose: `bool`, optional
        If `True`, will print more information in logs.

    Returns
    -------
    `str | None`
        New value of the __Secure-1PSIDTS cookie if rotation was successful.

    Raises
    ------
    `gemini_webapi.AuthError`
        If request failed with 401 Unauthorized.
    `curl_cffi.requests.exceptions.HTTPError`
        If request failed with other status codes.
    """

    # Cache directory: $GEMINI_COOKIE_PATH when set, else ./temp next to this module.
    path = (
        (GEMINI_COOKIE_PATH := os.getenv("GEMINI_COOKIE_PATH"))
        and Path(GEMINI_COOKIE_PATH)
        or (Path(__file__).parent / "temp")
    )
    path.mkdir(parents=True, exist_ok=True)

    # Safely get __Secure-1PSID value for filename
    secure_1psid = _extract_cookie_value(client.cookies, "__Secure-1PSID")

    # Without a __Secure-1PSID there is nothing to rotate (and no cache key).
    if not secure_1psid:
        return None

    filename = f".cached_1psidts_{secure_1psid}.txt"
    path = path / filename

    # Check if the cache file was modified in the last minute to avoid 429 Too Many Requests
    if path.is_file() and time.time() - os.path.getmtime(path) <= 60:
        return path.read_text()

    # NOTE(review): fixed placeholder body — presumably what the rotation
    # endpoint expects; confirm against Endpoint.ROTATE_COOKIES requirements.
    # A successful response sets a fresh __Secure-1PSIDTS on the session's jar.
    response = await client.post(
        url=Endpoint.ROTATE_COOKIES,
        headers=Headers.ROTATE_COOKIES.value,
        data='[000,"-0000000000000000000"]',
    )
    if verbose:
        logger.debug(
            f"HTTP Request: POST {Endpoint.ROTATE_COOKIES} [{response.status_code}]"
        )
    if response.status_code == 401:
        raise AuthError
    response.raise_for_status()

    new_1psidts = _extract_cookie_value(client.cookies, "__Secure-1PSIDTS")

    if new_1psidts:
        # Persist the rotated value so concurrent/near-future calls hit the cache.
        path.write_text(new_1psidts)
        logger.debug(
            f"Rotated __Secure-1PSIDTS successfully (length={len(new_1psidts)})."
        )
        return new_1psidts

    # No new cookie in the response: log what the jar does contain to aid debugging.
    cookie_names = [c.name for c in client.cookies.jar]
    logger.debug(f"Rotation response cookies: {cookie_names}")
    return None
|
gemini_webapi/utils/temp/.cached_1psidts_g.a0007Qjc5GP_JJ8G6lqxKsBvwooBDG0kaQQpdrq1eVMavCuae6YHM71QR0oHtpOONkPxs87_PQACgYKAZcSARISFQHGX2MiBLscUC-RI65KuaeNsGHqgxoVAUF8yKrfh50pYTc-6ectdvp0W-we0076.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
sidts-CjEBBj1CYnSuFNTjwnWYGX5D3D4GPsxqjY4Kk2cO6xqlsGk7UJNFrK2f4SIr3Gq_jJ6rEAA
|
gemini_webapi/utils/upload_file.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import io
|
| 2 |
+
import mimetypes
|
| 3 |
+
import random
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
import curl_cffi
|
| 7 |
+
from curl_cffi.requests import AsyncSession
|
| 8 |
+
from pydantic import ConfigDict, validate_call
|
| 9 |
+
|
| 10 |
+
from ..constants import Endpoint, Headers
|
| 11 |
+
from .logger import logger
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _generate_random_name(extension: str = ".txt") -> str:
|
| 15 |
+
"""
|
| 16 |
+
Generate a random filename using a large integer for better performance.
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
return f"input_{random.randint(1000000, 9999999)}{extension}"
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@validate_call(config=ConfigDict(arbitrary_types_allowed=True))
async def upload_file(
    file: str | Path | bytes | io.BytesIO,
    client: AsyncSession,
    filename: str | None = None,
    verbose: bool = False,
) -> str:
    """
    Upload a file to Google's server and return its identifier.

    Parameters
    ----------
    file : `str` | `Path` | `bytes` | `io.BytesIO`
        Path to the file or file content to be uploaded.
    client: `curl_cffi.requests.AsyncSession`
        Shared async session to use for upload.
    filename: `str`, optional
        Name of the file to be uploaded. Required if file is bytes or BytesIO.
    verbose: `bool`, optional
        If `True`, will print more information in logs.

    Returns
    -------
    `str`
        Identifier of the uploaded file.
        E.g. "/contrib_service/ttl_1d/1709764705i7wdlyx3mdzndme3a767pluckv4flj"

    Raises
    ------
    `curl_cffi.requests.exceptions.HTTPError`
        If the upload request failed.
    """

    # Normalize every accepted input form to (file_content: bytes, filename: str).
    if isinstance(file, (str, Path)):
        file_path = Path(file)
        if not file_path.is_file():
            raise ValueError(f"{file_path} is not a valid file.")
        if not filename:
            filename = file_path.name
        file_content = file_path.read_bytes()
    elif isinstance(file, io.BytesIO):
        file_content = file.getvalue()
        if not filename:
            # In-memory data has no intrinsic name; synthesize one.
            filename = _generate_random_name()
    elif isinstance(file, bytes):
        file_content = file
        if not filename:
            filename = _generate_random_name()
    else:
        # Unreachable in practice: @validate_call already enforces the union type.
        raise ValueError(f"Unsupported file type: {type(file)}")

    # MIME type is guessed from the (possibly synthesized) filename extension.
    content_type = mimetypes.guess_type(filename)[0] or "application/octet-stream"

    mp = curl_cffi.CurlMime()
    mp.addpart(
        name="file",
        content_type=content_type,
        filename=filename,
        data=file_content,
    )

    # The CurlMime handle owns native resources; close it even if the POST fails.
    try:
        response = await client.post(
            url=Endpoint.UPLOAD,
            headers=Headers.UPLOAD.value,
            multipart=mp,
            allow_redirects=True,
        )
        if verbose:
            logger.debug(
                f"HTTP Request: POST {Endpoint.UPLOAD} [{response.status_code}]"
            )
        response.raise_for_status()
        # The response body is the uploaded file's identifier string.
        return response.text
    finally:
        mp.close()
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def parse_file_name(file: str | Path | bytes | io.BytesIO) -> str:
|
| 101 |
+
"""
|
| 102 |
+
Parse the file name from the given path or generate a random one for in-memory data.
|
| 103 |
+
|
| 104 |
+
Parameters
|
| 105 |
+
----------
|
| 106 |
+
file : `str` | `Path` | `bytes` | `io.BytesIO`
|
| 107 |
+
Path to the file or file content.
|
| 108 |
+
|
| 109 |
+
Returns
|
| 110 |
+
-------
|
| 111 |
+
`str`
|
| 112 |
+
File name with extension.
|
| 113 |
+
"""
|
| 114 |
+
|
| 115 |
+
if isinstance(file, (str, Path)):
|
| 116 |
+
file = Path(file)
|
| 117 |
+
if not file.is_file():
|
| 118 |
+
raise ValueError(f"{file} is not a valid file.")
|
| 119 |
+
return file.name
|
| 120 |
+
|
| 121 |
+
return _generate_random_name()
|