Spaces:
Paused
feat(providers): ✨ add antigravity provider and auth base
Add a new Antigravity provider and authentication base to integrate with the Antigravity (internal Google) API.
- Add providers/antigravity_auth_base.py: OAuth2 token management with env/file loading, atomic saves, refresh logic, backoff/queue tracking, interactive and headless browser auth flow, and helper utilities.
- Add providers/antigravity_provider.py: request/response transformations (OpenAI → Gemini CLI → Antigravity), model aliasing, thinking/reasoning config mapping, tool response grouping, streaming & non-streaming handling, and base-URL fallback.
- Update provider_factory.py and providers/__init__.py to register the new provider.
- Bump project metadata in pyproject.toml (package name and version).
BREAKING CHANGE: project packaging metadata updated — package name changed to "rotator_library" and version bumped to 0.95. Update any dependency or packaging references that relied on the previous name/version.
|
@@ -3,11 +3,13 @@
|
|
| 3 |
from .providers.gemini_auth_base import GeminiAuthBase
|
| 4 |
from .providers.qwen_auth_base import QwenAuthBase
|
| 5 |
from .providers.iflow_auth_base import IFlowAuthBase
|
|
|
|
| 6 |
|
| 7 |
PROVIDER_MAP = {
|
| 8 |
"gemini_cli": GeminiAuthBase,
|
| 9 |
"qwen_code": QwenAuthBase,
|
| 10 |
"iflow": IFlowAuthBase,
|
|
|
|
| 11 |
}
|
| 12 |
|
| 13 |
def get_provider_auth_class(provider_name: str):
|
|
|
|
| 3 |
from .providers.gemini_auth_base import GeminiAuthBase
|
| 4 |
from .providers.qwen_auth_base import QwenAuthBase
|
| 5 |
from .providers.iflow_auth_base import IFlowAuthBase
|
| 6 |
+
from .providers.antigravity_auth_base import AntigravityAuthBase
|
| 7 |
|
| 8 |
# Maps provider-name strings (as used in configuration) to the auth base
# class that manages OAuth credentials for that provider.
PROVIDER_MAP = {
    "gemini_cli": GeminiAuthBase,
    "qwen_code": QwenAuthBase,
    "iflow": IFlowAuthBase,
    "antigravity": AntigravityAuthBase,  # Antigravity (internal Google) API
}
|
| 14 |
|
| 15 |
def get_provider_auth_class(provider_name: str):
|
|
@@ -112,6 +112,8 @@ def _register_providers():
|
|
| 112 |
"chutes",
|
| 113 |
"iflow",
|
| 114 |
"qwen_code",
|
|
|
|
|
|
|
| 115 |
]:
|
| 116 |
continue
|
| 117 |
|
|
|
|
| 112 |
"chutes",
|
| 113 |
"iflow",
|
| 114 |
"qwen_code",
|
| 115 |
+
"gemini_cli",
|
| 116 |
+
"antigravity",
|
| 117 |
]:
|
| 118 |
continue
|
| 119 |
|
|
@@ -0,0 +1,466 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# src/rotator_library/providers/antigravity_auth_base.py
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import webbrowser
|
| 5 |
+
from typing import Union, Optional
|
| 6 |
+
import json
|
| 7 |
+
import time
|
| 8 |
+
import asyncio
|
| 9 |
+
import logging
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import Dict, Any
|
| 12 |
+
import tempfile
|
| 13 |
+
import shutil
|
| 14 |
+
|
| 15 |
+
import httpx
|
| 16 |
+
from rich.console import Console
|
| 17 |
+
from rich.panel import Panel
|
| 18 |
+
from rich.text import Text
|
| 19 |
+
|
| 20 |
+
from ..utils.headless_detection import is_headless_environment
|
| 21 |
+
|
| 22 |
+
# Shared library logger; handlers are configured by the host application.
lib_logger = logging.getLogger('rotator_library')

# Antigravity OAuth credentials from CLIProxyAPI
CLIENT_ID = "1071006060591-tmhssin2h21lcre235vtolojh4g403ep.apps.googleusercontent.com"
CLIENT_SECRET = "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"
# Google OAuth2 token-exchange and user-info endpoints.
TOKEN_URI = "https://oauth2.googleapis.com/token"
USER_INFO_URI = "https://www.googleapis.com/oauth2/v1/userinfo"
REFRESH_EXPIRY_BUFFER_SECONDS = 30 * 60  # 30 minutes buffer before expiry

# Antigravity requires additional scopes
OAUTH_SCOPES = [
    "https://www.googleapis.com/auth/cloud-platform",
    "https://www.googleapis.com/auth/userinfo.email",
    "https://www.googleapis.com/auth/userinfo.profile",
    "https://www.googleapis.com/auth/cclog",  # Antigravity-specific
    "https://www.googleapis.com/auth/experimentsandconfigs"  # Antigravity-specific
]

# Rich console used for the interactive OAuth setup UI.
console = Console()
|
| 41 |
+
|
| 42 |
+
class AntigravityAuthBase:
|
| 43 |
+
"""
|
| 44 |
+
Base authentication class for Antigravity provider.
|
| 45 |
+
Handles OAuth2 flow, token management, and refresh logic.
|
| 46 |
+
|
| 47 |
+
Based on GeminiAuthBase but uses Antigravity-specific OAuth credentials and scopes.
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
def __init__(self):
    """Initialize per-credential caches, locks, and refresh bookkeeping.

    All dictionaries/sets are keyed by the credential path string (a file
    path or the special marker "env").
    """
    # Parsed credential dicts, keyed by credential path.
    self._credentials_cache: Dict[str, Dict[str, Any]] = {}
    # One lock per credential so refreshes for the same path serialize.
    self._refresh_locks: Dict[str, asyncio.Lock] = {}
    self._locks_lock = asyncio.Lock()  # Protects the locks dict from race conditions
    # [BACKOFF TRACKING] Track consecutive failures per credential
    self._refresh_failures: Dict[str, int] = {}  # Track consecutive failures per credential
    self._next_refresh_after: Dict[str, float] = {}  # Track backoff timers (Unix timestamp)

    # [QUEUE SYSTEM] Sequential refresh processing
    self._refresh_queue: asyncio.Queue = asyncio.Queue()
    self._queued_credentials: set = set()  # Track credentials already in queue
    self._unavailable_credentials: set = set()  # Mark credentials unavailable during re-auth
    self._queue_tracking_lock = asyncio.Lock()  # Protects queue sets
    self._queue_processor_task: Optional[asyncio.Task] = None  # Background worker task
|
| 64 |
+
|
| 65 |
+
def _load_from_env(self) -> Optional[Dict[str, Any]]:
    """Build a credential dict from ANTIGRAVITY_* environment variables.

    Intended for stateless deployments with no credential file.
    ANTIGRAVITY_ACCESS_TOKEN and ANTIGRAVITY_REFRESH_TOKEN are required;
    every other variable falls back to a library default:

    - ANTIGRAVITY_EXPIRY_DATE (defaults to 0)
    - ANTIGRAVITY_CLIENT_ID / ANTIGRAVITY_CLIENT_SECRET
    - ANTIGRAVITY_TOKEN_URI
    - ANTIGRAVITY_UNIVERSE_DOMAIN (defaults to googleapis.com)
    - ANTIGRAVITY_EMAIL (defaults to "env-user")

    Returns:
        A credential dict when both required variables are set, else None.
    """
    access_token = os.getenv("ANTIGRAVITY_ACCESS_TOKEN")
    refresh_token = os.getenv("ANTIGRAVITY_REFRESH_TOKEN")

    # Both access and refresh tokens are required.
    if not access_token or not refresh_token:
        return None

    lib_logger.debug("Loading Antigravity credentials from environment variables")

    # Expiry is a numeric timestamp; fall back to 0 (i.e. "expired") on junk.
    raw_expiry = os.getenv("ANTIGRAVITY_EXPIRY_DATE", "0")
    try:
        expiry_date = float(raw_expiry)
    except ValueError:
        lib_logger.warning(f"Invalid ANTIGRAVITY_EXPIRY_DATE value: {raw_expiry}, using 0")
        expiry_date = 0

    return {
        "access_token": access_token,
        "refresh_token": refresh_token,
        "expiry_date": expiry_date,
        "client_id": os.getenv("ANTIGRAVITY_CLIENT_ID", CLIENT_ID),
        "client_secret": os.getenv("ANTIGRAVITY_CLIENT_SECRET", CLIENT_SECRET),
        "token_uri": os.getenv("ANTIGRAVITY_TOKEN_URI", TOKEN_URI),
        "universe_domain": os.getenv("ANTIGRAVITY_UNIVERSE_DOMAIN", "googleapis.com"),
        "_proxy_metadata": {
            "email": os.getenv("ANTIGRAVITY_EMAIL", "env-user"),
            "last_check_timestamp": time.time(),
            "loaded_from_env": True,  # Flag to indicate env-based credentials
        },
    }
|
| 115 |
+
|
| 116 |
+
async def _load_credentials(self, path: str) -> Dict[str, Any]:
    """Resolve credentials for *path* from cache, disk, or environment.

    Resolution order: the special "env" marker reads environment variables
    only; otherwise a cached entry wins, then the JSON file on disk, then
    the environment as a last-resort fallback.

    Args:
        path: Credential file path, or the special marker "env".

    Returns:
        Dict containing the credentials.

    Raises:
        ValueError: If no source yields credentials, or the file contains
            invalid JSON.
    """
    # Special marker: environment-only load, no file involved.
    if path == "env":
        from_env = self._load_from_env()
        if from_env:
            lib_logger.debug("Using Antigravity credentials from environment variables")
            return from_env
        raise ValueError("ANTIGRAVITY_ACCESS_TOKEN and ANTIGRAVITY_REFRESH_TOKEN environment variables not set")

    # In-memory cache beats re-reading the file.
    if path in self._credentials_cache:
        lib_logger.debug(f"Using cached Antigravity credentials for: {Path(path).name}")
        return self._credentials_cache[path]

    try:
        with open(path, 'r') as handle:
            loaded = json.load(handle)
    except FileNotFoundError:
        # No file on disk — fall back to environment variables.
        lib_logger.debug(f"Credential file not found: {path}, attempting environment variables")
        from_env = self._load_from_env()
        if from_env:
            lib_logger.debug("Using Antigravity credentials from environment variables as fallback")
            # Cache under the requested path so later lookups hit the cache.
            self._credentials_cache[path] = from_env
            return from_env
        raise ValueError(f"Credential file not found: {path} and environment variables not set")
    except json.JSONDecodeError as e:
        raise ValueError(f"Invalid JSON in credential file {path}: {e}")

    self._credentials_cache[path] = loaded
    lib_logger.debug(f"Loaded Antigravity credentials from file: {Path(path).name}")
    return loaded
|
| 163 |
+
|
| 164 |
+
async def _save_credentials(self, path: str, creds: Dict[str, Any]) -> None:
    """Persist credentials to disk with an atomic write.

    No-op for environment-backed credentials (nothing to persist) and for
    the special "env" path marker. Write failures are logged as warnings,
    never raised to the caller.

    Args:
        path: File path to save credentials to.
        creds: Credentials dictionary to save.
    """
    # Environment-sourced credentials are never written back to disk.
    if creds.get("_proxy_metadata", {}).get("loaded_from_env"):
        lib_logger.debug("Skipping credential save (loaded from environment)")
        return

    # The "env" marker has no backing file.
    if path == "env":
        return

    try:
        target = Path(path)
        target.parent.mkdir(parents=True, exist_ok=True)

        # Atomic write: dump into a temp file in the same directory,
        # then rename over the destination.
        fd, tmp_name = tempfile.mkstemp(
            dir=target.parent,
            prefix='.tmp_',
            suffix='.json'
        )
        try:
            with os.fdopen(fd, 'w') as handle:
                json.dump(creds, handle, indent=2)
            shutil.move(tmp_name, path)
            lib_logger.debug(f"Saved Antigravity credentials to: {target.name}")
        except Exception:
            # Best-effort cleanup of the orphaned temp file.
            try:
                os.unlink(tmp_name)
            except Exception:
                pass
            raise
    except Exception as e:
        lib_logger.warning(f"Failed to save Antigravity credentials to {path}: {e}")
|
| 205 |
+
|
| 206 |
+
def _is_token_expired(self, creds: Dict[str, Any]) -> bool:
    """Report whether the access token is expired or close to expiring.

    Args:
        creds: Credentials dict; 'expiry_date' is a millisecond timestamp.

    Returns:
        True when no expiry is recorded, or when expiry falls within
        REFRESH_EXPIRY_BUFFER_SECONDS of the current time.
    """
    # Missing expiry is treated as already expired.
    if 'expiry_date' not in creds:
        return True

    # expiry_date is stored in milliseconds; convert before comparing.
    seconds_remaining = creds['expiry_date'] / 1000.0 - time.time()
    return seconds_remaining <= REFRESH_EXPIRY_BUFFER_SECONDS
|
| 225 |
+
|
| 226 |
+
async def _refresh_token(self, path: str, creds: Dict[str, Any]) -> Dict[str, Any]:
    """
    Refresh an expired OAuth token using the refresh token.

    Args:
        path: Credential file path (for saving updated credentials)
        creds: Current credentials dict with refresh_token

    Returns:
        Updated credentials dict with fresh access token

    Raises:
        ValueError: If refresh fails
    """
    if 'refresh_token' not in creds:
        raise ValueError("No refresh token available")

    lib_logger.debug(f"Refreshing Antigravity OAuth token for: {Path(path).name if path != 'env' else 'env'}")

    # Per-credential overrides fall back to the library-wide defaults.
    client_id = creds.get('client_id', CLIENT_ID)
    client_secret = creds.get('client_secret', CLIENT_SECRET)
    token_uri = creds.get('token_uri', TOKEN_URI)

    async with httpx.AsyncClient() as client:
        try:
            # Standard OAuth2 refresh-token grant against the token endpoint.
            response = await client.post(
                token_uri,
                data={
                    'client_id': client_id,
                    'client_secret': client_secret,
                    'refresh_token': creds['refresh_token'],
                    'grant_type': 'refresh_token'
                },
                timeout=30.0
            )
            response.raise_for_status()
            token_data = response.json()

            # Update credentials with new token
            creds['access_token'] = token_data['access_token']
            # expiry_date is stored in milliseconds ('expires_in' is seconds).
            creds['expiry_date'] = (time.time() + token_data['expires_in']) * 1000

            # Update metadata
            if '_proxy_metadata' not in creds:
                creds['_proxy_metadata'] = {}
            creds['_proxy_metadata']['last_check_timestamp'] = time.time()

            # Save updated credentials
            await self._save_credentials(path, creds)

            # Update cache
            self._credentials_cache[path] = creds

            # Reset failure count on success
            self._refresh_failures[path] = 0

            lib_logger.info(f"Successfully refreshed Antigravity OAuth token for: {Path(path).name if path != 'env' else 'env'}")
            return creds

        except httpx.HTTPStatusError as e:
            # Track failures for backoff
            self._refresh_failures[path] = self._refresh_failures.get(path, 0) + 1
            raise ValueError(f"Failed to refresh Antigravity token (HTTP {e.response.status_code}): {e.response.text}")
        except Exception as e:
            # Network errors, JSON errors, missing keys, etc. all count as
            # one failure toward the backoff tracker.
            self._refresh_failures[path] = self._refresh_failures.get(path, 0) + 1
            raise ValueError(f"Failed to refresh Antigravity token: {e}")
|
| 292 |
+
|
| 293 |
+
async def initialize_token(self, creds_or_path: Union[Dict[str, Any], str]) -> Dict[str, Any]:
    """
    Initialize or refresh an OAuth token. Handles the complete OAuth flow if needed.

    If the stored token is missing a refresh token or is expired, this first
    attempts a silent refresh; failing that, it runs an interactive browser
    OAuth flow with a local callback server on port 8085.

    Args:
        creds_or_path: Either a credentials dict or a file path string

    Returns:
        Valid credentials dict with fresh access token

    Raises:
        ValueError: If the token could not be initialized by any means.
    """
    path = creds_or_path if isinstance(creds_or_path, str) else None

    # Human-readable name used only in logs and console output.
    if isinstance(creds_or_path, dict):
        display_name = creds_or_path.get("_proxy_metadata", {}).get("display_name", "in-memory object")
    else:
        display_name = Path(path).name if path and path != "env" else "env"

    lib_logger.debug(f"Initializing Antigravity token for '{display_name}'...")

    try:
        creds = await self._load_credentials(creds_or_path) if path else creds_or_path
        reason = ""
        if not creds.get("refresh_token"):
            reason = "refresh token is missing"
        elif self._is_token_expired(creds):
            reason = "token is expired"

        if reason:
            # Silent refresh first; only fall through to interactive login on failure.
            if reason == "token is expired" and creds.get("refresh_token"):
                try:
                    return await self._refresh_token(path, creds)
                except Exception as e:
                    lib_logger.warning(f"Automatic token refresh for '{display_name}' failed: {e}. Proceeding to interactive login.")

            lib_logger.warning(f"Antigravity OAuth token for '{display_name}' needs setup: {reason}.")

            is_headless = is_headless_environment()

            # The local callback server resolves this future with the auth code.
            auth_code_future = asyncio.get_event_loop().create_future()
            server = None

            async def handle_callback(reader, writer):
                # Minimal HTTP handler for the single OAuth redirect request.
                try:
                    request_line_bytes = await reader.readline()
                    if not request_line_bytes:
                        return
                    path_str = request_line_bytes.decode('utf-8').strip().split(' ')[1]
                    # Consume headers
                    while await reader.readline() != b'\r\n':
                        pass

                    from urllib.parse import urlparse, parse_qs
                    query_params = parse_qs(urlparse(path_str).query)

                    writer.write(b"HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n")
                    if 'code' in query_params:
                        if not auth_code_future.done():
                            auth_code_future.set_result(query_params['code'][0])
                        writer.write(b"<html><body><h1>Authentication successful!</h1><p>You can close this window.</p></body></html>")
                    else:
                        error = query_params.get('error', ['Unknown error'])[0]
                        if not auth_code_future.done():
                            auth_code_future.set_exception(Exception(f"OAuth failed: {error}"))
                        writer.write(f"<html><body><h1>Authentication Failed</h1><p>Error: {error}. Please try again.</p></body></html>".encode())
                    await writer.drain()
                except Exception as e:
                    lib_logger.error(f"Error in OAuth callback handler: {e}")
                finally:
                    writer.close()

            try:
                # Fixed port: must match the registered redirect_uri below.
                server = await asyncio.start_server(handle_callback, '127.0.0.1', 8085)

                from urllib.parse import urlencode
                auth_url = "https://accounts.google.com/o/oauth2/v2/auth?" + urlencode({
                    "client_id": CLIENT_ID,
                    "redirect_uri": "http://localhost:8085/oauth2callback",
                    "scope": " ".join(OAUTH_SCOPES),
                    "access_type": "offline",
                    "response_type": "code",
                    "prompt": "consent"
                })

                if is_headless:
                    auth_panel_text = Text.from_markup(
                        "Running in headless environment (no GUI detected).\n"
                        "Please open the URL below in a browser on another machine to authorize:\n"
                    )
                else:
                    auth_panel_text = Text.from_markup(
                        "1. Your browser will now open to log in and authorize the application.\n"
                        "2. If it doesn't open automatically, please open the URL below manually."
                    )

                console.print(Panel(auth_panel_text, title=f"Antigravity OAuth Setup for [bold yellow]{display_name}[/bold yellow]", style="bold blue"))
                console.print(f"[bold]URL:[/bold] [link={auth_url}]{auth_url}[/link]\n")

                if not is_headless:
                    try:
                        webbrowser.open(auth_url)
                        lib_logger.info("Browser opened successfully for OAuth flow")
                    except Exception as e:
                        lib_logger.warning(f"Failed to open browser automatically: {e}. Please open the URL manually.")

                # Block (with spinner) until the redirect delivers a code, max 5 minutes.
                with console.status("[bold green]Waiting for you to complete authentication in the browser...[/bold green]", spinner="dots"):
                    auth_code = await asyncio.wait_for(auth_code_future, timeout=300)
            except asyncio.TimeoutError:
                raise Exception("OAuth flow timed out. Please try again.")
            finally:
                if server:
                    server.close()
                    await server.wait_closed()

            lib_logger.info(f"Attempting to exchange authorization code for tokens...")
            async with httpx.AsyncClient() as client:
                # Authorization-code grant: exchange the code for tokens.
                response = await client.post(TOKEN_URI, data={
                    "code": auth_code.strip(),
                    "client_id": CLIENT_ID,
                    "client_secret": CLIENT_SECRET,
                    "redirect_uri": "http://localhost:8085/oauth2callback",
                    "grant_type": "authorization_code"
                })
                response.raise_for_status()
                token_data = response.json()

                creds = token_data.copy()
                # expiry_date is stored in milliseconds ('expires_in' is seconds).
                creds["expiry_date"] = (time.time() + creds.pop("expires_in")) * 1000
                creds["client_id"] = CLIENT_ID
                creds["client_secret"] = CLIENT_SECRET
                creds["token_uri"] = TOKEN_URI
                creds["universe_domain"] = "googleapis.com"

                # Fetch user info
                user_info_response = await client.get(
                    USER_INFO_URI,
                    headers={"Authorization": f"Bearer {creds['access_token']}"}
                )
                user_info_response.raise_for_status()
                user_info = user_info_response.json()

                creds["_proxy_metadata"] = {
                    "email": user_info.get("email"),
                    "last_check_timestamp": time.time()
                }

            if path:
                await self._save_credentials(path, creds)

            lib_logger.info(f"Antigravity OAuth initialized successfully for '{display_name}'.")
            return creds

        lib_logger.info(f"Antigravity OAuth token at '{display_name}' is valid.")
        return creds
    except Exception as e:
        raise ValueError(f"Failed to initialize Antigravity OAuth for '{display_name}': {e}")
|
| 448 |
+
|
| 449 |
+
async def get_valid_token(self, credential_path: str) -> str:
    """Return a valid access token for the given credential source.

    Delegates to initialize_token, which refreshes or (re)authorizes as
    needed, then extracts the bearer token string.

    Args:
        credential_path: Path to credential file or "env" for environment variables.

    Returns:
        Valid access token string.

    Raises:
        ValueError: If token cannot be obtained.
    """
    try:
        refreshed = await self.initialize_token(credential_path)
        return refreshed['access_token']
    except Exception as exc:
        raise ValueError(f"Failed to get valid Antigravity token: {exc}")
|
|
@@ -0,0 +1,869 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# src/rotator_library/providers/antigravity_provider.py
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
import httpx
|
| 5 |
+
import logging
|
| 6 |
+
import time
|
| 7 |
+
import asyncio
|
| 8 |
+
import random
|
| 9 |
+
import uuid
|
| 10 |
+
import copy
|
| 11 |
+
from typing import List, Dict, Any, AsyncGenerator, Union, Optional, Tuple
|
| 12 |
+
from .provider_interface import ProviderInterface
|
| 13 |
+
from .antigravity_auth_base import AntigravityAuthBase
|
| 14 |
+
from ..model_definitions import ModelDefinitions
|
| 15 |
+
import litellm
|
| 16 |
+
from litellm.exceptions import RateLimitError
|
| 17 |
+
from litellm.llms.vertex_ai.common_utils import _build_vertex_schema
|
| 18 |
+
|
| 19 |
+
# Shared logger for the whole rotator library; handlers are configured by the host app.
lib_logger = logging.getLogger('rotator_library')

# Antigravity base URLs with fallback order
# Priority: daily (sandbox) → autopush (sandbox) → production
BASE_URLS = [
    "https://daily-cloudcode-pa.sandbox.googleapis.com/v1internal",
    "https://autopush-cloudcode-pa.sandbox.googleapis.com/v1internal",
    "https://cloudcode-pa.googleapis.com/v1internal"  # Production fallback
]

# Hardcoded models available via Antigravity.
# Used as a static fallback when model discovery fails or returns nothing
# (see get_models). Names here are PUBLIC aliases, not internal names.
HARDCODED_MODELS = [
    "gemini-2.5-pro",
    "gemini-2.5-flash",
    "gemini-2.5-flash-lite",
    "gemini-3-pro-preview",
    "gemini-3-pro-image-preview",
    "gemini-2.5-computer-use-preview-10-2025"
]
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class AntigravityProvider(AntigravityAuthBase, ProviderInterface):
|
| 41 |
+
"""
|
| 42 |
+
Antigravity provider implementation for Gemini models.
|
| 43 |
+
|
| 44 |
+
Antigravity is an experimental internal Google API that provides access to Gemini models
|
| 45 |
+
including Gemini 3 with thinking/reasoning capabilities. It wraps standard Gemini API
|
| 46 |
+
requests with additional metadata and uses sandbox endpoints.
|
| 47 |
+
|
| 48 |
+
Key features:
|
| 49 |
+
- Model aliasing (gemini-3-pro-high ↔ gemini-3-pro-preview)
|
| 50 |
+
- Gemini 3 thinkingLevel support
|
| 51 |
+
- Thinking signature preservation for multi-turn conversations
|
| 52 |
+
- Sophisticated tool response grouping
|
| 53 |
+
- Base URL fallback (sandbox → production)
|
| 54 |
+
"""
|
| 55 |
+
skip_cost_calculation = True
|
| 56 |
+
|
| 57 |
+
def __init__(self):
    """Initialize provider state: model definitions plus the base-URL fallback cursor."""
    super().__init__()
    self.model_definitions = ModelDefinitions()
    # The fallback cursor starts at index 0 (the daily sandbox endpoint).
    self._base_url_index = 0
    self._current_base_url = BASE_URLS[self._base_url_index]
|
| 62 |
+
|
| 63 |
+
# ============================================================================
|
| 64 |
+
# MODEL ALIAS SYSTEM
|
| 65 |
+
# ============================================================================
|
| 66 |
+
|
| 67 |
+
def _model_name_to_alias(self, model_name: str) -> str:
|
| 68 |
+
"""
|
| 69 |
+
Convert internal Antigravity model names to public aliases.
|
| 70 |
+
|
| 71 |
+
Args:
|
| 72 |
+
model_name: Internal model name
|
| 73 |
+
|
| 74 |
+
Returns:
|
| 75 |
+
Public alias name, or empty string if model should be excluded
|
| 76 |
+
"""
|
| 77 |
+
alias_map = {
|
| 78 |
+
"rev19-uic3-1p": "gemini-2.5-computer-use-preview-10-2025",
|
| 79 |
+
"gemini-3-pro-image": "gemini-3-pro-image-preview",
|
| 80 |
+
"gemini-3-pro-high": "gemini-3-pro-preview",
|
| 81 |
+
"claude-sonnet-4-5": "gemini-claude-sonnet-4-5",
|
| 82 |
+
"claude-sonnet-4-5-thinking": "gemini-claude-sonnet-4-5-thinking",
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
# Filter out excluded models (return empty string to skip)
|
| 86 |
+
excluded = [
|
| 87 |
+
"chat_20706", "chat_23310", "gemini-2.5-flash-thinking",
|
| 88 |
+
"gemini-3-pro-low", "gemini-2.5-pro"
|
| 89 |
+
]
|
| 90 |
+
if model_name in excluded:
|
| 91 |
+
return ""
|
| 92 |
+
|
| 93 |
+
return alias_map.get(model_name, model_name)
|
| 94 |
+
|
| 95 |
+
def _alias_to_model_name(self, alias: str) -> str:
|
| 96 |
+
"""
|
| 97 |
+
Convert public aliases to internal Antigravity model names.
|
| 98 |
+
|
| 99 |
+
Args:
|
| 100 |
+
alias: Public alias name
|
| 101 |
+
|
| 102 |
+
Returns:
|
| 103 |
+
Internal model name
|
| 104 |
+
"""
|
| 105 |
+
reverse_map = {
|
| 106 |
+
"gemini-2.5-computer-use-preview-10-2025": "rev19-uic3-1p",
|
| 107 |
+
"gemini-3-pro-image-preview": "gemini-3-pro-image",
|
| 108 |
+
"gemini-3-pro-preview": "gemini-3-pro-high",
|
| 109 |
+
"gemini-claude-sonnet-4-5": "claude-sonnet-4-5",
|
| 110 |
+
"gemini-claude-sonnet-4-5-thinking": "claude-sonnet-4-5-thinking",
|
| 111 |
+
}
|
| 112 |
+
return reverse_map.get(alias, alias)
|
| 113 |
+
|
| 114 |
+
# ============================================================================
|
| 115 |
+
# RANDOM ID GENERATION
|
| 116 |
+
# ============================================================================
|
| 117 |
+
|
| 118 |
+
@staticmethod
|
| 119 |
+
def generate_request_id() -> str:
|
| 120 |
+
"""Generate Antigravity request ID: agent-{uuid}"""
|
| 121 |
+
return f"agent-{uuid.uuid4()}"
|
| 122 |
+
|
| 123 |
+
@staticmethod
|
| 124 |
+
def generate_session_id() -> str:
|
| 125 |
+
"""Generate Antigravity session ID: -{random_number}"""
|
| 126 |
+
# Generate random 19-digit number
|
| 127 |
+
n = random.randint(1_000_000_000_000_000_000, 9_999_999_999_999_999_999)
|
| 128 |
+
return f"-{n}"
|
| 129 |
+
|
| 130 |
+
@staticmethod
|
| 131 |
+
def generate_project_id() -> str:
|
| 132 |
+
"""Generate fake project ID: {adj}-{noun}-{random}"""
|
| 133 |
+
adjectives = ["useful", "bright", "swift", "calm", "bold"]
|
| 134 |
+
nouns = ["fuze", "wave", "spark", "flow", "core"]
|
| 135 |
+
adj = random.choice(adjectives)
|
| 136 |
+
noun = random.choice(nouns)
|
| 137 |
+
random_part = str(uuid.uuid4())[:5].lower()
|
| 138 |
+
return f"{adj}-{noun}-{random_part}"
|
| 139 |
+
|
| 140 |
+
# ============================================================================
|
| 141 |
+
# MESSAGE TRANSFORMATION (OpenAI → Gemini CLI format)
|
| 142 |
+
# ============================================================================
|
| 143 |
+
|
| 144 |
+
def _transform_messages(self, messages: List[Dict[str, Any]]) -> Tuple[Optional[Dict[str, Any]], List[Dict[str, Any]]]:
|
| 145 |
+
"""
|
| 146 |
+
Transform OpenAI messages to Gemini CLI format.
|
| 147 |
+
Reused from GeminiCliProvider with modifications for Antigravity.
|
| 148 |
+
|
| 149 |
+
Returns:
|
| 150 |
+
Tuple of (system_instruction, gemini_contents)
|
| 151 |
+
"""
|
| 152 |
+
system_instruction = None
|
| 153 |
+
gemini_contents = []
|
| 154 |
+
|
| 155 |
+
# Make a copy to avoid modifying original
|
| 156 |
+
messages = copy.deepcopy(messages)
|
| 157 |
+
|
| 158 |
+
# Separate system prompt from other messages
|
| 159 |
+
if messages and messages[0].get('role') == 'system':
|
| 160 |
+
system_prompt_content = messages.pop(0).get('content', '')
|
| 161 |
+
if system_prompt_content:
|
| 162 |
+
system_instruction = {
|
| 163 |
+
"role": "user",
|
| 164 |
+
"parts": [{"text": system_prompt_content}]
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
# Build tool call ID to name mapping
|
| 168 |
+
tool_call_id_to_name = {}
|
| 169 |
+
for msg in messages:
|
| 170 |
+
if msg.get("role") == "assistant" and msg.get("tool_calls"):
|
| 171 |
+
for tool_call in msg["tool_calls"]:
|
| 172 |
+
if tool_call.get("type") == "function":
|
| 173 |
+
tool_call_id_to_name[tool_call["id"]] = tool_call["function"]["name"]
|
| 174 |
+
|
| 175 |
+
# Convert each message
|
| 176 |
+
for msg in messages:
|
| 177 |
+
role = msg.get("role")
|
| 178 |
+
content = msg.get("content")
|
| 179 |
+
parts = []
|
| 180 |
+
gemini_role = "model" if role == "assistant" else "tool" if role == "tool" else "user"
|
| 181 |
+
|
| 182 |
+
if role == "user":
|
| 183 |
+
if isinstance(content, str):
|
| 184 |
+
# Simple text content
|
| 185 |
+
if content:
|
| 186 |
+
parts.append({"text": content})
|
| 187 |
+
elif isinstance(content, list):
|
| 188 |
+
# Multi-part content (text, images, etc.)
|
| 189 |
+
for item in content:
|
| 190 |
+
if item.get("type") == "text":
|
| 191 |
+
text = item.get("text", "")
|
| 192 |
+
if text:
|
| 193 |
+
parts.append({"text": text})
|
| 194 |
+
elif item.get("type") == "image_url":
|
| 195 |
+
# Handle image data URLs
|
| 196 |
+
image_url = item.get("image_url", {}).get("url", "")
|
| 197 |
+
if image_url.startswith("data:"):
|
| 198 |
+
try:
|
| 199 |
+
# Parse: data:image/png;base64,iVBORw0KG...
|
| 200 |
+
header, data = image_url.split(",", 1)
|
| 201 |
+
mime_type = header.split(":")[1].split(";")[0]
|
| 202 |
+
parts.append({
|
| 203 |
+
"inlineData": {
|
| 204 |
+
"mimeType": mime_type,
|
| 205 |
+
"data": data
|
| 206 |
+
}
|
| 207 |
+
})
|
| 208 |
+
except Exception as e:
|
| 209 |
+
lib_logger.warning(f"Failed to parse image data URL: {e}")
|
| 210 |
+
|
| 211 |
+
elif role == "assistant":
|
| 212 |
+
if isinstance(content, str) and content:
|
| 213 |
+
parts.append({"text": content})
|
| 214 |
+
if msg.get("tool_calls"):
|
| 215 |
+
for tool_call in msg["tool_calls"]:
|
| 216 |
+
if tool_call.get("type") == "function":
|
| 217 |
+
try:
|
| 218 |
+
args_dict = json.loads(tool_call["function"]["arguments"])
|
| 219 |
+
except (json.JSONDecodeError, TypeError):
|
| 220 |
+
args_dict = {}
|
| 221 |
+
|
| 222 |
+
# Add function call part with thoughtSignature
|
| 223 |
+
func_call_part = {
|
| 224 |
+
"functionCall": {
|
| 225 |
+
"name": tool_call["function"]["name"],
|
| 226 |
+
"args": args_dict
|
| 227 |
+
},
|
| 228 |
+
"thoughtSignature": "skip_thought_signature_validator"
|
| 229 |
+
}
|
| 230 |
+
parts.append(func_call_part)
|
| 231 |
+
|
| 232 |
+
elif role == "tool":
|
| 233 |
+
tool_call_id = msg.get("tool_call_id")
|
| 234 |
+
function_name = tool_call_id_to_name.get(tool_call_id)
|
| 235 |
+
if function_name:
|
| 236 |
+
# Wrap the tool response in a 'result' object
|
| 237 |
+
response_content = {"result": content}
|
| 238 |
+
parts.append({"functionResponse": {"name": function_name, "response": response_content}})
|
| 239 |
+
|
| 240 |
+
if parts:
|
| 241 |
+
gemini_contents.append({"role": gemini_role, "parts": parts})
|
| 242 |
+
|
| 243 |
+
# Ensure first message is from user
|
| 244 |
+
if not gemini_contents or gemini_contents[0]['role'] != 'user':
|
| 245 |
+
gemini_contents.insert(0, {"role": "user", "parts": [{"text": ""}]})
|
| 246 |
+
|
| 247 |
+
return system_instruction, gemini_contents
|
| 248 |
+
|
| 249 |
+
# ============================================================================
|
| 250 |
+
# THINKING/REASONING CONFIGURATION
|
| 251 |
+
# ============================================================================
|
| 252 |
+
|
| 253 |
+
def _map_reasoning_effort_to_thinking_config(
|
| 254 |
+
self,
|
| 255 |
+
reasoning_effort: Optional[str],
|
| 256 |
+
model: str
|
| 257 |
+
) -> Optional[Dict[str, Any]]:
|
| 258 |
+
"""
|
| 259 |
+
Map OpenAI reasoning_effort to Gemini thinking configuration.
|
| 260 |
+
Handles Gemini 3 thinkingLevel vs other models thinkingBudget.
|
| 261 |
+
|
| 262 |
+
Args:
|
| 263 |
+
reasoning_effort: OpenAI reasoning_effort value
|
| 264 |
+
model: Model name (public alias)
|
| 265 |
+
|
| 266 |
+
Returns:
|
| 267 |
+
Dictionary with thinkingConfig or None
|
| 268 |
+
"""
|
| 269 |
+
internal_model = self._alias_to_model_name(model)
|
| 270 |
+
is_gemini_3 = internal_model.startswith("gemini-3-")
|
| 271 |
+
|
| 272 |
+
# Default for gemini-3-pro-preview when no reasoning_effort specified
|
| 273 |
+
if not reasoning_effort:
|
| 274 |
+
if model == "gemini-3-pro-preview" or internal_model == "gemini-3-pro-high":
|
| 275 |
+
return {
|
| 276 |
+
"thinkingBudget": -1,
|
| 277 |
+
"include_thoughts": True
|
| 278 |
+
}
|
| 279 |
+
return None
|
| 280 |
+
|
| 281 |
+
if reasoning_effort == "none":
|
| 282 |
+
return {
|
| 283 |
+
"thinkingBudget": 0,
|
| 284 |
+
"include_thoughts": False
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
if reasoning_effort == "auto":
|
| 288 |
+
# Auto always uses thinkingBudget=-1, even for Gemini 3
|
| 289 |
+
return {
|
| 290 |
+
"thinkingBudget": -1,
|
| 291 |
+
"include_thoughts": True
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
if is_gemini_3:
|
| 295 |
+
# Gemini 3: Use thinkingLevel
|
| 296 |
+
level_map = {
|
| 297 |
+
"low": "low",
|
| 298 |
+
"medium": "high", # Medium not released yet, map to high
|
| 299 |
+
"high": "high"
|
| 300 |
+
}
|
| 301 |
+
level = level_map.get(reasoning_effort, "high")
|
| 302 |
+
return {
|
| 303 |
+
"thinkingLevel": level,
|
| 304 |
+
"include_thoughts": True
|
| 305 |
+
}
|
| 306 |
+
else:
|
| 307 |
+
# Non-Gemini-3: Use thinkingBudget with normalization
|
| 308 |
+
budget_map = {
|
| 309 |
+
"low": 1024,
|
| 310 |
+
"medium": 8192,
|
| 311 |
+
"high": 32768
|
| 312 |
+
}
|
| 313 |
+
budget = budget_map.get(reasoning_effort, -1)
|
| 314 |
+
# TODO: Add model-specific normalization via model registry
|
| 315 |
+
return {
|
| 316 |
+
"thinkingBudget": budget,
|
| 317 |
+
"include_thoughts": True
|
| 318 |
+
}
|
| 319 |
+
|
| 320 |
+
# ============================================================================
|
| 321 |
+
# TOOL RESPONSE GROUPING
|
| 322 |
+
# ============================================================================
|
| 323 |
+
|
| 324 |
+
def _fix_tool_response_grouping(self, contents: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Group function calls with their responses for Antigravity compatibility.

    Converts the linear history (model call, tool response, model call,
    tool response, ...) into the grouped form Antigravity expects: a model
    turn containing N functionCall parts followed by ONE "function"-role
    turn carrying all N functionResponse parts.

    Args:
        contents: List of Gemini content objects (as produced by
            _transform_messages).

    Returns:
        List of regrouped content objects. Contents whose parts contain
        functionResponse entries are consumed and re-emitted merged; all
        other contents pass through in order.

    NOTE(review): if the collected responses never reach a pending group's
    ``responses_needed`` count, those leftover responses are silently
    dropped at the end — confirm this is intended for truncated histories.
    """
    new_contents = []
    pending_groups = []  # Groups awaiting responses
    collected_responses = []  # Standalone responses to match

    for content in contents:
        role = content.get("role")
        parts = content.get("parts", [])

        # Check if this content has function responses
        response_parts = [p for p in parts if "functionResponse" in p]

        if response_parts:
            # Collect responses
            collected_responses.extend(response_parts)

            # Try to satisfy pending groups. The reverse scan with an
            # immediate break satisfies at most ONE group per incoming
            # response content, preferring the most recent group.
            for i in range(len(pending_groups) - 1, -1, -1):
                group = pending_groups[i]
                if len(collected_responses) >= group["responses_needed"]:
                    # Take needed responses (FIFO from the collected pool)
                    group_responses = collected_responses[:group["responses_needed"]]
                    collected_responses = collected_responses[group["responses_needed"]:]

                    # Create merged function response content
                    function_response_content = {
                        "parts": group_responses,
                        "role": "function"  # Changed from tool
                    }
                    new_contents.append(function_response_content)

                    # Remove satisfied group
                    pending_groups.pop(i)
                    break

            continue  # Skip adding this content; it was merged (or pooled)

        # If this is model content with function calls, create a group
        if role == "model":
            function_calls = [p for p in parts if "functionCall" in p]

            if function_calls:
                # Add model content first
                new_contents.append(content)

                # Create pending group
                pending_groups.append({
                    "model_content": content,
                    "function_calls": function_calls,
                    "responses_needed": len(function_calls)
                })
            else:
                # Regular model content without function calls
                new_contents.append(content)
        else:
            # Non-model content (user, etc.)
            new_contents.append(content)

    # Handle remaining pending groups: flush any that can still be
    # satisfied from the leftover response pool, in original order.
    for group in pending_groups:
        if len(collected_responses) >= group["responses_needed"]:
            group_responses = collected_responses[:group["responses_needed"]]
            collected_responses = collected_responses[group["responses_needed"]:]

            function_response_content = {
                "parts": group_responses,
                "role": "function"
            }
            new_contents.append(function_response_content)

    return new_contents
|
| 407 |
+
|
| 408 |
+
# ============================================================================
|
| 409 |
+
# ANTIGRAVITY REQUEST TRANSFORMATION
|
| 410 |
+
# ============================================================================
|
| 411 |
+
|
| 412 |
+
def _transform_to_antigravity_format(
|
| 413 |
+
self,
|
| 414 |
+
gemini_cli_payload: Dict[str, Any],
|
| 415 |
+
model: str
|
| 416 |
+
) -> Dict[str, Any]:
|
| 417 |
+
"""
|
| 418 |
+
Transform Gemini CLI format to complete Antigravity format.
|
| 419 |
+
|
| 420 |
+
Args:
|
| 421 |
+
gemini_cli_payload: Request in Gemini CLI format
|
| 422 |
+
model: Model name (public alias)
|
| 423 |
+
|
| 424 |
+
Returns:
|
| 425 |
+
Complete Antigravity request payload
|
| 426 |
+
"""
|
| 427 |
+
internal_model = self._alias_to_model_name(model)
|
| 428 |
+
|
| 429 |
+
# 1. Wrap in Antigravity envelope
|
| 430 |
+
antigravity_payload = {
|
| 431 |
+
"project": self.generate_project_id(),
|
| 432 |
+
"userAgent": "antigravity",
|
| 433 |
+
"requestId": self.generate_request_id(),
|
| 434 |
+
"model": internal_model, # Use internal name
|
| 435 |
+
"request": copy.deepcopy(gemini_cli_payload)
|
| 436 |
+
}
|
| 437 |
+
|
| 438 |
+
# 2. Add session ID
|
| 439 |
+
antigravity_payload["request"]["sessionId"] = self.generate_session_id()
|
| 440 |
+
|
| 441 |
+
# 3. Remove fields that Antigravity doesn't support
|
| 442 |
+
antigravity_payload["request"].pop("safetySettings", None)
|
| 443 |
+
if "generationConfig" in antigravity_payload["request"]:
|
| 444 |
+
antigravity_payload["request"]["generationConfig"].pop("maxOutputTokens", None)
|
| 445 |
+
|
| 446 |
+
# 4. Set toolConfig mode
|
| 447 |
+
if "toolConfig" not in antigravity_payload["request"]:
|
| 448 |
+
antigravity_payload["request"]["toolConfig"] = {}
|
| 449 |
+
if "functionCallingConfig" not in antigravity_payload["request"]["toolConfig"]:
|
| 450 |
+
antigravity_payload["request"]["toolConfig"]["functionCallingConfig"] = {}
|
| 451 |
+
antigravity_payload["request"]["toolConfig"]["functionCallingConfig"]["mode"] = "VALIDATED"
|
| 452 |
+
|
| 453 |
+
# 5. Handle Gemini 3 specific thinking logic
|
| 454 |
+
# For non-Gemini-3 models, convert thinkingLevel to thinkingBudget
|
| 455 |
+
if not internal_model.startswith("gemini-3-"):
|
| 456 |
+
gen_config = antigravity_payload["request"].get("generationConfig", {})
|
| 457 |
+
thinking_config = gen_config.get("thinkingConfig", {})
|
| 458 |
+
if "thinkingLevel" in thinking_config:
|
| 459 |
+
# Remove thinkingLevel for non-Gemini-3 models
|
| 460 |
+
del thinking_config["thinkingLevel"]
|
| 461 |
+
# Set thinkingBudget to -1 (auto/dynamic)
|
| 462 |
+
thinking_config["thinkingBudget"] = -1
|
| 463 |
+
|
| 464 |
+
# 6. Preserve/add thoughtSignature to ALL function calls in model role content
|
| 465 |
+
for content in antigravity_payload["request"].get("contents", []):
|
| 466 |
+
if content.get("role") == "model":
|
| 467 |
+
for part in content.get("parts", []):
|
| 468 |
+
# Add signature to function calls OR preserve if already exists
|
| 469 |
+
if "functionCall" in part and "thoughtSignature" not in part:
|
| 470 |
+
part["thoughtSignature"] = "skip_thought_signature_validator"
|
| 471 |
+
# If thoughtSignature already exists, preserve it (important for Gemini 3)
|
| 472 |
+
|
| 473 |
+
# 7. Handle Claude models (special tool schema conversion)
|
| 474 |
+
if internal_model.startswith("claude-sonnet-"):
|
| 475 |
+
# For Claude models, convert parametersJsonSchema back to parameters
|
| 476 |
+
for tool in antigravity_payload["request"].get("tools", []):
|
| 477 |
+
for func_decl in tool.get("functionDeclarations", []):
|
| 478 |
+
if "parametersJsonSchema" in func_decl:
|
| 479 |
+
func_decl["parameters"] = func_decl.pop("parametersJsonSchema")
|
| 480 |
+
# Remove $schema if present
|
| 481 |
+
if "parameters" in func_decl and "$schema" in func_decl["parameters"]:
|
| 482 |
+
del func_decl["parameters"]["$schema"]
|
| 483 |
+
|
| 484 |
+
return antigravity_payload
|
| 485 |
+
|
| 486 |
+
#============================================================================
|
| 487 |
+
# BASE URL FALLBACK LOGIC
|
| 488 |
+
# ============================================================================
|
| 489 |
+
|
| 490 |
+
def _get_current_base_url(self) -> str:
|
| 491 |
+
"""Get the current base URL from the fallback list."""
|
| 492 |
+
return self._current_base_url
|
| 493 |
+
|
| 494 |
+
def _try_next_base_url(self) -> bool:
    """
    Advance the fallback cursor to the next base URL.

    Returns:
        True when another URL was available and selected, False when the
        fallback list is exhausted.
    """
    next_index = self._base_url_index + 1
    if next_index >= len(BASE_URLS):
        return False
    self._base_url_index = next_index
    self._current_base_url = BASE_URLS[next_index]
    lib_logger.info(f"Switching to fallback Antigravity base URL: {self._current_base_url}")
    return True
|
| 507 |
+
|
| 508 |
+
def _reset_base_url(self):
    """Point the fallback cursor back at the primary (daily sandbox) URL."""
    self._current_base_url = BASE_URLS[0]
    self._base_url_index = 0
|
| 512 |
+
|
| 513 |
+
# ============================================================================
|
| 514 |
+
# RESPONSE TRANSFORMATION (Antigravity → OpenAI)
|
| 515 |
+
# ============================================================================
|
| 516 |
+
|
| 517 |
+
def _unwrap_antigravity_response(self, antigravity_response: Dict[str, Any]) -> Dict[str, Any]:
|
| 518 |
+
"""
|
| 519 |
+
Extract Gemini response from Antigravity envelope.
|
| 520 |
+
|
| 521 |
+
Args:
|
| 522 |
+
antigravity_response: Response from Antigravity API
|
| 523 |
+
|
| 524 |
+
Returns:
|
| 525 |
+
Gemini response (unwrapped)
|
| 526 |
+
"""
|
| 527 |
+
# For both streaming and non-streaming, response is in 'response' field
|
| 528 |
+
return antigravity_response.get("response", antigravity_response)
|
| 529 |
+
|
| 530 |
+
def _gemini_to_openai_chunk(self, gemini_chunk: Dict[str, Any], model: str) -> litellm.ModelResponse:
    """
    Convert a single Gemini response chunk to OpenAI format.

    Args:
        gemini_chunk: Gemini response chunk (already unwrapped from the
            Antigravity envelope).
        model: Model name reported back to the caller.

    Returns:
        OpenAI-format ModelResponse carrying a streaming-style delta choice
        and, when the chunk includes usageMetadata, a usage dict.
    """
    candidates = gemini_chunk.get("candidates", [])
    if not candidates:
        # Keep-alive / metadata-only chunk: emit a minimal empty response.
        return litellm.ModelResponse(
            id=f"chatcmpl-{uuid.uuid4()}",
            created=int(time.time()),
            model=model,
            choices=[]
        )

    candidate = candidates[0]
    content_parts = candidate.get("content", {}).get("parts", [])

    # Extract text and tool calls from the candidate's parts.
    text_content = ""
    tool_calls = []

    for part in content_parts:
        if "text" in part:
            text_content += part["text"]

        if "functionCall" in part:
            func_call = part["functionCall"]
            tool_calls.append({
                # FIX: OpenAI streaming deltas require an index per tool call
                # so clients can accumulate arguments across chunks.
                "index": len(tool_calls),
                "id": f"call_{uuid.uuid4().hex[:24]}",
                "type": "function",
                "function": {
                    "name": func_call.get("name", ""),
                    "arguments": json.dumps(func_call.get("args", {}))
                }
            })

    delta = {}
    if text_content:
        delta["content"] = text_content
    if tool_calls:
        delta["tool_calls"] = tool_calls

    # Map Gemini finish reasons onto OpenAI's vocabulary.
    # FIX: removed the redundant "stop" -> "stop" branch and added the
    # SAFETY -> content_filter mapping; unknown reasons pass through
    # lower-cased.
    raw_reason = candidate.get("finishReason")
    finish_reason = raw_reason.lower() if raw_reason else None
    finish_reason = {
        "max_tokens": "length",
        "safety": "content_filter",
    }.get(finish_reason, finish_reason)

    choice = {
        "index": 0,
        "delta": delta,
        "finish_reason": finish_reason
    }

    # Token usage is only present on the final chunk of a stream.
    usage_metadata = gemini_chunk.get("usageMetadata", {})
    usage = None
    if usage_metadata:
        usage = {
            "prompt_tokens": usage_metadata.get("promptTokenCount", 0),
            "completion_tokens": usage_metadata.get("candidatesTokenCount", 0),
            "total_tokens": usage_metadata.get("totalTokenCount", 0)
        }

    return litellm.ModelResponse(
        id=f"chatcmpl-{uuid.uuid4()}",
        created=int(time.time()),
        model=model,
        choices=[choice],
        usage=usage
    )
|
| 615 |
+
|
| 616 |
+
# ============================================================================
|
| 617 |
+
# PROVIDER INTERFACE IMPLEMENTATION
|
| 618 |
+
# ============================================================================
|
| 619 |
+
|
| 620 |
+
def has_custom_logic(self) -> bool:
    """Antigravity bypasses the generic provider path with custom translation."""
    return True
|
| 623 |
+
|
| 624 |
+
async def get_auth_header(self, credential_identifier: str) -> Dict[str, str]:
    """
    Build the OAuth Bearer authorization header for Antigravity.

    Args:
        credential_identifier: Credential file path or "env".

    Returns:
        Dict with the Authorization header.
    """
    token = await self.get_valid_token(credential_identifier)
    return {"Authorization": f"Bearer {token}"}
|
| 636 |
+
|
| 637 |
+
async def get_models(self, api_key: str, client: httpx.AsyncClient) -> List[str]:
    """
    Fetch available models from Antigravity.

    Calls the fetchAvailableModels endpoint on the currently selected base
    URL and applies alias mapping to convert internal names to public
    names. Any failure (auth, network, JSON) degrades to the static
    HARDCODED_MODELS list rather than raising.

    Args:
        api_key: Credential path (not a traditional API key)
        client: HTTP client

    Returns:
        List of public model names
    """
    credential_path = api_key  # For OAuth providers, this is the credential path

    try:
        access_token = await self.get_valid_token(credential_path)
        base_url = self._get_current_base_url()

        # Generate required IDs (Antigravity wants them even for discovery)
        project_id = self.generate_project_id()
        request_id = self.generate_request_id()

        # Fetch models endpoint
        url = f"{base_url}/fetchAvailableModels"

        headers = {
            "Authorization": f"Bearer {access_token}",
            "Content-Type": "application/json"
        }

        payload = {
            "project": project_id,
            "requestId": request_id,
            "userAgent": "antigravity"
        }

        lib_logger.debug(f"Fetching Antigravity models from: {url}")

        response = await client.post(url, json=payload, headers=headers, timeout=30.0)
        response.raise_for_status()

        data = response.json()

        # Extract model names and apply aliasing.
        # NOTE(review): assumes the API returns resource names like
        # "models/<name>" — the prefix is stripped before aliasing; verify
        # against the live endpoint.
        models = []
        if "models" in data:
            for model_info in data["models"]:
                internal_name = model_info.get("name", "").replace("models/", "")
                if internal_name:
                    public_name = self._model_name_to_alias(internal_name)
                    if public_name:  # Skip excluded models (empty string)
                        models.append(public_name)

        if models:
            lib_logger.info(f"Discovered {len(models)} Antigravity models")
            return models
        else:
            lib_logger.warning("No models returned from Antigravity, using hardcoded list")
            return HARDCODED_MODELS

    except Exception as e:
        # Deliberate best-effort: discovery failure must never block startup.
        lib_logger.warning(f"Failed to fetch Antigravity models: {e}, using hardcoded list")
        return HARDCODED_MODELS
|
| 702 |
+
|
| 703 |
+
async def acompletion(
    self,
    client: httpx.AsyncClient,
    **kwargs
) -> Union[litellm.ModelResponse, AsyncGenerator[litellm.ModelResponse, None]]:
    """
    Handle completion requests for Antigravity.

    This is the main entry point that:
    1. Extracts the model and credential path
    2. Transforms OpenAI request → Gemini CLI → Antigravity format
    3. Makes the API call with base-URL fallback logic
    4. Transforms Antigravity response → Gemini → OpenAI format
       (delegated to _handle_streaming / _handle_non_streaming, defined
       elsewhere in this file)

    Args:
        client: HTTP client
        **kwargs: LiteLLM completion parameters

    Returns:
        ModelResponse (non-streaming) or AsyncGenerator (streaming)
    """
    # Extract key parameters
    model = kwargs.get("model", "gemini-2.5-pro")
    messages = kwargs.get("messages", [])
    stream = kwargs.get("stream", False)
    # credential_identifier is popped so it never leaks into the payload;
    # api_key is the fallback for callers using the generic interface.
    credential_path = kwargs.pop("credential_identifier", kwargs.get("api_key", ""))
    tools = kwargs.get("tools")
    reasoning_effort = kwargs.get("reasoning_effort")
    temperature = kwargs.get("temperature")
    top_p = kwargs.get("top_p")
    # NOTE(review): max_tokens is extracted but never used — Antigravity
    # strips maxOutputTokens anyway (see _transform_to_antigravity_format);
    # confirm whether it should be honored.
    max_tokens = kwargs.get("max_tokens")

    lib_logger.info(f"Antigravity completion: model={model}, stream={stream}, messages={len(messages)}")

    # Step 1: Transform messages (OpenAI → Gemini CLI)
    system_instruction, gemini_contents = self._transform_messages(messages)

    # Apply tool response grouping (call/response pairing Antigravity expects)
    gemini_contents = self._fix_tool_response_grouping(gemini_contents)

    # Step 2: Build Gemini CLI payload
    gemini_cli_payload = {
        "contents": gemini_contents
    }

    if system_instruction:
        gemini_cli_payload["system_instruction"] = system_instruction

    # Add generation config
    generation_config = {}
    if temperature is not None:
        generation_config["temperature"] = temperature
    if top_p is not None:
        generation_config["topP"] = top_p

    # Handle thinking config (thinkingLevel vs thinkingBudget per model)
    thinking_config = self._map_reasoning_effort_to_thinking_config(reasoning_effort, model)
    if thinking_config:
        generation_config.setdefault("thinkingConfig", {}).update(thinking_config)

    if generation_config:
        gemini_cli_payload["generationConfig"] = generation_config

    # Add tools, converting each OpenAI function schema to a Vertex schema
    if tools:
        gemini_tools = []
        for tool in tools:
            if tool.get("type") == "function":
                func = tool.get("function", {})
                schema = _build_vertex_schema(parameters=func.get("parameters", {}))
                gemini_tools.append({
                    "functionDeclarations": [{
                        "name": func.get("name", ""),
                        "description": func.get("description", ""),
                        "parametersJsonSchema": schema
                    }]
                })
        if gemini_tools:
            gemini_cli_payload["tools"] = gemini_tools

    # Step 3: Transform to Antigravity format (envelope, IDs, model aliasing)
    antigravity_payload = self._transform_to_antigravity_format(gemini_cli_payload, model)

    # Step 4: Make API call
    access_token = await self.get_valid_token(credential_path)
    base_url = self._get_current_base_url()

    endpoint = ":streamGenerateContent" if stream else ":generateContent"
    url = f"{base_url}{endpoint}"

    headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json"
    }

    lib_logger.debug(f"Antigravity request to: {url}")

    try:
        if stream:
            # NOTE(review): returning the generator means exceptions raised
            # while the stream is consumed are NOT caught by the fallback
            # below — only errors raised before iteration begins are.
            return self._handle_streaming(client, url, headers, antigravity_payload, model)
        else:
            return await self._handle_non_streaming(client, url, headers, antigravity_payload, model)
    except Exception as e:
        # Try fallback URL if available. The fallback cursor is instance
        # state, so a switch here affects subsequent requests too.
        if self._try_next_base_url():
            lib_logger.warning(f"Retrying Antigravity request with fallback URL: {e}")
            base_url = self._get_current_base_url()
            url = f"{base_url}{endpoint}"

            if stream:
                return self._handle_streaming(client, url, headers, antigravity_payload, model)
            else:
                return await self._handle_non_streaming(client, url, headers, antigravity_payload, model)
        else:
            raise
|
| 818 |
+
|
| 819 |
+
async def _handle_non_streaming(
    self,
    client: httpx.AsyncClient,
    url: str,
    headers: Dict[str, str],
    payload: Dict[str, Any],
    model: str
) -> litellm.ModelResponse:
    """Execute one non-streaming Antigravity call and translate the reply.

    Posts *payload* to *url* with the supplied auth *headers*, strips the
    Antigravity response envelope, and converts the inner Gemini-style body
    into an OpenAI-compatible ``litellm.ModelResponse``.

    Raises:
        httpx.HTTPStatusError: if the server answers with a 4xx/5xx status.
    """
    http_response = await client.post(url, headers=headers, json=payload, timeout=120.0)
    http_response.raise_for_status()

    # Two explicit translation steps: envelope -> Gemini body -> OpenAI shape.
    gemini_body = self._unwrap_antigravity_response(http_response.json())
    return self._gemini_to_openai_chunk(gemini_body, model)
|
| 838 |
+
|
| 839 |
+
async def _handle_streaming(
    self,
    client: httpx.AsyncClient,
    url: str,
    headers: Dict[str, str],
    payload: Dict[str, Any],
    model: str
) -> AsyncGenerator[litellm.ModelResponse, None]:
    """Stream an Antigravity completion as OpenAI-style chunks.

    Opens an SSE connection to *url*; for every ``data:`` event, unwraps
    the Antigravity envelope and yields the chunk converted to OpenAI
    format. Stops at the ``[DONE]`` sentinel. Events that fail to parse
    as JSON are logged and skipped rather than aborting the stream.

    Raises:
        httpx.HTTPStatusError: if the server answers with a 4xx/5xx status.
    """
    async with client.stream("POST", url, headers=headers, json=payload, timeout=120.0) as response:
        response.raise_for_status()

        async for raw_line in response.aiter_lines():
            # Only SSE data events carry payloads; ignore everything else.
            if not raw_line.startswith("data: "):
                continue

            data_str = raw_line[len("data: "):]
            if data_str == "[DONE]":
                break

            try:
                parsed_chunk = json.loads(data_str)
            except json.JSONDecodeError:
                lib_logger.warning(f"Failed to parse Antigravity chunk: {data_str[:100]}")
                continue

            # Envelope -> Gemini chunk -> OpenAI chunk.
            gemini_chunk = self._unwrap_antigravity_response(parsed_chunk)
            yield self._gemini_to_openai_chunk(gemini_chunk, model)
|
|
@@ -3,8 +3,8 @@ requires = ["setuptools>=61.0"]
|
|
| 3 |
build-backend = "setuptools.build_meta"
|
| 4 |
|
| 5 |
[project]
|
| 6 |
-
name = "
|
| 7 |
-
version = "0.
|
| 8 |
authors = [
|
| 9 |
{ name="Mirrowel", email="nuh@uh.com" },
|
| 10 |
]
|
|
|
|
| 3 |
build-backend = "setuptools.build_meta"
|
| 4 |
|
| 5 |
[project]
|
| 6 |
+
name = "rotator_library"
|
| 7 |
+
version = "0.95"
|
| 8 |
authors = [
|
| 9 |
{ name="Mirrowel", email="nuh@uh.com" },
|
| 10 |
]
|