diff --git a/__pycache__/init_pool.cpython-311.pyc b/__pycache__/init_pool.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a859612318a44ee3e5a0e447dbdd4151ea1f494
Binary files /dev/null and b/__pycache__/init_pool.cpython-311.pyc differ
diff --git a/__pycache__/init_pool.cpython-312.pyc b/__pycache__/init_pool.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4068b13e88916914a3ad8611c9526fc4af03a86a
Binary files /dev/null and b/__pycache__/init_pool.cpython-312.pyc differ
diff --git a/api/__pycache__/telemetry.cpython-311.pyc b/api/__pycache__/telemetry.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e7c14beccff4312275bf8bc2e7b5c692a5602deb
Binary files /dev/null and b/api/__pycache__/telemetry.cpython-311.pyc differ
diff --git a/api/__pycache__/telemetry.cpython-312.pyc b/api/__pycache__/telemetry.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9798b970101d357351a43249f4882b6fe2b34a8b
Binary files /dev/null and b/api/__pycache__/telemetry.cpython-312.pyc differ
diff --git a/api/chat/__init__.py b/api/chat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..56bbb94d3e45b5ac06413a595391188c58e3380e
--- /dev/null
+++ b/api/chat/__init__.py
@@ -0,0 +1,4 @@
+from .chat_config import ChatConfig
+from .chat_api import ChatAPI
+
+__all__ = ['ChatConfig', 'ChatAPI']
diff --git a/api/chat/__pycache__/__init__.cpython-311.pyc b/api/chat/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a113af0a86396c41815fade783b611043fa8956
Binary files /dev/null and b/api/chat/__pycache__/__init__.cpython-311.pyc differ
diff --git a/api/chat/__pycache__/__init__.cpython-312.pyc b/api/chat/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..758064e2e4ee8043dec0704863331192656d3eca
Binary files /dev/null and b/api/chat/__pycache__/__init__.cpython-312.pyc differ
diff --git a/api/chat/__pycache__/chat_api.cpython-311.pyc b/api/chat/__pycache__/chat_api.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dfc76fa96f0268f8c4897fc00323d5f1ee504025
Binary files /dev/null and b/api/chat/__pycache__/chat_api.cpython-311.pyc differ
diff --git a/api/chat/__pycache__/chat_api.cpython-312.pyc b/api/chat/__pycache__/chat_api.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c28b677d805c3e81a82b0c98fdd16603fb88d4a
Binary files /dev/null and b/api/chat/__pycache__/chat_api.cpython-312.pyc differ
diff --git a/api/chat/__pycache__/chat_config.cpython-311.pyc b/api/chat/__pycache__/chat_config.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7b93ce3829f0266af6f8762ae141f501cdc3c76
Binary files /dev/null and b/api/chat/__pycache__/chat_config.cpython-311.pyc differ
diff --git a/api/chat/__pycache__/chat_config.cpython-312.pyc b/api/chat/__pycache__/chat_config.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e0388a1369072b0aa8d92dc2326afbb2c9baf38
Binary files /dev/null and b/api/chat/__pycache__/chat_config.cpython-312.pyc differ
diff --git a/api/chat/chat_api.py b/api/chat/chat_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..799b031dca67eb1fc5f92f79d2cbad9c6a85bd35
--- /dev/null
+++ b/api/chat/chat_api.py
@@ -0,0 +1,276 @@
+import binascii
+import json
+from typing import List, Optional, AsyncGenerator
+from time import time
+
+# Remove aioredis import
+# from aioredis import Redis
+
+# import tiktoken
+from api.chat import ChatConfig
+from api.telemetry import TelemetryAPI
+from auth.jwt_handler import JWTHandler
+from config.constants import ENCRYPTION_KEY
+from utils.encrypt import encrypt
+from utils.http import HTTPClient
+from config.constants import (
+ APP_LANGUAGE,
+ APP_NAME,
+ APP_VERSION,
+ DISPLAY_NAME,
+ HADWARE_INFO,
+ INFERENCE_URL,
+ SYSTEM_INFO,
+)
+from utils.compression import decompress_chunks
+
+from protos import request_pb2, response_pb2
+
+
+class ChatAPI:
+ def __init__(self, api_key: str, http_client: HTTPClient = HTTPClient()):
+ self.api_key = api_key
+ self.jwt_token = None
+ self.jwt_token_timestamp = 0
+ self.http_client = http_client
+
+ async def renew_jwt_token(self):
+ """Renew JWT token asynchronously if it's expired or missing"""
+ current_time = time()
+
+ # Check if token is still valid (within 2500 seconds)
+ if self.jwt_token and current_time - self.jwt_token_timestamp < 2500:
+ return
+
+ jwt_handler = JWTHandler(api_key=self.api_key, http_client=self.http_client)
+ jwt_token = await jwt_handler.get_jwt_token()
+ tele = TelemetryAPI(api_key=self.api_key)
+ await tele.do_telemetry()
+
+ if jwt_token:
+ self.jwt_token = jwt_token
+ self.jwt_token_timestamp = current_time
+
+ async def _create_chat_request(
+ self,
+ messages: List[dict],
+ config: ChatConfig,
+ system_prompt: str = "You are a helpful assistant.",
+ ) -> request_pb2.ChatRequestMessage:
+ try:
+ await self.renew_jwt_token()
+ except Exception as e:
+ print(e)
+ ...
+
+ msg = request_pb2.ChatRequestMessage()
+
+ # Set client info
+ self._set_client_info(msg)
+
+ # Set system prompt and model config
+ msg.system_prompt = system_prompt
+ msg.model_id = config.model_id.value
+ msg.idk13.idk13nn = 1
+ msg.idk_id = 5
+ self._set_model_config(msg, config)
+
+ # Set tool config
+ # self._set_tool_config(msg)
+
+ # Convert messages
+ self._add_messages(msg, messages)
+
+ return msg
+
+ def _set_client_info(self, msg: request_pb2.ChatRequestMessage) -> None:
+ """Set client information in the request message"""
+ msg.client_info.api_key = self.api_key
+ msg.client_info.user_jwt = self.jwt_token
+ msg.client_info.locale = APP_LANGUAGE
+ msg.client_info.extension_name = APP_NAME
+ msg.client_info.ide_name = APP_NAME
+ msg.client_info.extension_version = APP_VERSION
+ msg.client_info.os = SYSTEM_INFO
+ msg.client_info.ide_version = DISPLAY_NAME
+ msg.client_info.hardware = HADWARE_INFO
+
+ def _set_model_config(
+ self, msg: request_pb2.ChatRequestMessage, config: ChatConfig
+ ) -> None:
+ """Set model configuration"""
+ msg.model_config.parallel_stream = 1
+ msg.model_config.max_tokens = config.max_tokens
+ msg.model_config.temperature = config.temperature
+ msg.model_config.top_k = config.top_k
+ msg.model_config.top_P = config.top_p
+
+ def _set_special_tokens(self, msg: request_pb2.ChatRequestMessage) -> None:
+ msg.model_config.special_tokens.extend(
+ [
+ "<|user|>",
+ "<|bot|>",
+ "<|context_request|>",
+ "<|endoftext|>",
+ "<|end_of_turn|>",
+ ]
+ )
+
+ def _set_tool_config(self, msg: request_pb2.ChatRequestMessage) -> None:
+ """Set tool configuration"""
+ msg.tool_use.mode = "auto"
+ msg.tool_config.tool_name = "do_not_call"
+ msg.tool_config.description = "Do not call this tool."
+ msg.tool_config.schema = '{"$schema":"https://json-schema.org/draft/2020-12/schema","properties":{},"additionalProperties":false,"type":"object"}'
+
+ def _add_messages(
+ self, msg: request_pb2.ChatRequestMessage, messages: List[dict]
+ ) -> None:
+ """Add chat messages to the request"""
+ role_map = {"user": 1, "assistant": 2, "system": 3}
+
+ for chat_msg in messages:
+ role = role_map.get(chat_msg["role"], 1)
+ content = chat_msg["content"]
+
+ # Override system prompt
+ if role == 3:
+ if isinstance(content, str):
+ msg.system_prompt = content
+
+ elif isinstance(content, list):
+ for item in content:
+ if item.get("type", "") == "text" and "text" in item and isinstance(item["text"], str):
+ msg.system_prompt = item["text"]
+ break
+ continue
+
+ if isinstance(content, list):
+ pb_msg = self._create_multipart_message(role, content)
+ else:
+ pb_msg = self._create_text_message(role, content)
+
+ msg.chat_messages.append(pb_msg)
+
+ def _create_multipart_message(
+ self, role: int, content: List[dict]
+ ) -> request_pb2.ChatMessage:
+ """Create a message with multiple parts (text and images)"""
+ text_parts = []
+ image_parts = []
+
+ for item in content:
+ if item["type"] == "text":
+ text_parts.append(item["text"])
+ elif item["type"] == "image_url":
+ image_url = item["image_url"]["url"]
+ if image_url.startswith("data:image/") and "base64," in image_url:
+ prefix, image_data = image_url.split("base64,", 1)
+ mime_type = prefix.split("data:")[1].split(";")[0]
+ image_parts.append(
+ request_pb2.ImagePart(
+ image_data=image_data, image_mime_type=mime_type
+ )
+ )
+
+ return self._create_message(role, " ".join(text_parts), image_parts)
+
+ def _create_text_message(self, role: int, content: str) -> request_pb2.ChatMessage:
+ """Create a simple text message"""
+ return self._create_message(role, content)
+
+ def _create_message(
+ self, role: int, content: str, image_parts: List[request_pb2.ImagePart] = None
+ ) -> request_pb2.ChatMessage:
+ """Create a chat message with common attributes"""
+ pb_msg = request_pb2.ChatMessage(
+ role=role, content=content
+ )
+ if role == 1: pb_msg.idk2 = 1
+ # pb_msg.cache_control.prompt_caching = 1
+
+ if image_parts:
+ pb_msg.image_parts.extend(image_parts)
+ return pb_msg
+
+ async def _process_chat_response(self, type: int, data: bytes) -> tuple[str, int]:
+ """Process a single chat response chunk and return (message, count)"""
+ if type == 3: # end of message
+ try:
+ response = json.loads(data)
+ return (encrypt(str(response), ENCRYPTION_KEY), 0) if response else ("", 0)
+ except Exception as e:
+ raise e
+
+ try:
+ search_response = response_pb2.ChatResponse()
+ search_response.ParseFromString(data)
+ return (search_response.message, search_response.count) if search_response.message else ("", 0)
+ except:
+ return ("", 0)
+
+ async def _handle_stream_response(self, chunk_iterator) -> AsyncGenerator[tuple[str, int], None]:
+ """Handle streaming response chunks"""
+ async for chunk in chunk_iterator:
+ for type, data in decompress_chunks(chunk):
+ result = await self._process_chat_response(type, data)
+ yield result
+
+ async def _handle_response(self, chunk) -> AsyncGenerator[tuple[str, int], None]:
+ """Handle non-streaming response chunks"""
+ for type, data in decompress_chunks(chunk):
+ result = await self._process_chat_response(type, data)
+ yield result
+
+ async def send_message(
+ self,
+ messages: List[dict],
+ config: Optional[ChatConfig] = None,
+ system_prompt: str = "You are a helpful assistant.",
+ stream: bool = False,
+ ) -> AsyncGenerator[tuple[str, int], None]:
+ """Send chat messages and yield response chunks"""
+ if config is None:
+ config = ChatConfig()
+
+ request = await self._create_chat_request(messages, config, system_prompt)
+
+ headers = {
+ "User-Agent": "connect-go/1.16.2 (go1.23.2 X:nocoverageredesign)",
+ "Connect-Accept-Encoding": "gzip",
+ "Connect-Content-Encoding": "gzip",
+ "Connect-Protocol-Version": "1",
+ "Content-Type": "application/connect+proto",
+ }
+
+ url = f"{INFERENCE_URL}/exa.api_server_pb.ApiServerService/GetChatMessage"
+ request_data = request.SerializeToString()
+
+ if stream:
+ stream_iterator = self.http_client.stream_post(
+ url=url,
+ data=request_data,
+ headers=headers,
+ compress=True,
+ )
+ async for result in self._handle_stream_response(stream_iterator):
+ yield result
+ else:
+ response = await self.http_client.post(
+ url=url,
+ data=request_data,
+ headers=headers,
+ compress=True,
+ )
+
+ if response.status_code != 200:
+ raise Exception(f"Chat request failed: {response.status_code}")
+
+ if response.headers.get("connect-content-encoding") == "gzip":
+ async for result in self._handle_response(response.content):
+ yield result
+ else:
+ search_response = response_pb2.ChatResponse()
+ search_response.ParseFromString(response.content)
+ if search_response.message:
+ yield (search_response.message, search_response.count)
diff --git a/api/chat/chat_config.py b/api/chat/chat_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..38db8248c820a7c414285b0d2a7436b6fcd5ad15
--- /dev/null
+++ b/api/chat/chat_config.py
@@ -0,0 +1,21 @@
+
+from dataclasses import dataclass
+
+from config.models import ModelID
+
+
+@dataclass
+class ChatConfig:
+ def __init__(
+ self,
+ model_id: ModelID = ModelID.MODEL_CLAUDE_3_5_HAIKU_20241022,
+ temperature: float = 0.7,
+ max_tokens: int = 4096,
+ top_p: float = 1.0,
+ top_k: int = 50
+ ):
+ self.model_id = model_id
+ self.temperature = temperature
+ self.max_tokens = max_tokens
+ self.top_p = top_p
+ self.top_k = top_k
\ No newline at end of file
diff --git a/api/telemetry.py b/api/telemetry.py
new file mode 100644
index 0000000000000000000000000000000000000000..4309bbcf64efe7db0cd6263403fa0557d5732678
--- /dev/null
+++ b/api/telemetry.py
@@ -0,0 +1,92 @@
+import json
+import httpx
+import uuid
+from config.constants import (
+ APP_LANGUAGE,
+ BASE_URL,
+ APP_NAME,
+ APP_VERSION,
+ DISPLAY_NAME,
+ HADWARE_INFO,
+ SYSTEM_INFO,
+)
+from protos import telemetry_pb2
+
+
+class TelemetryAPI:
+ def __init__(self, api_key: str):
+ self.api_key = api_key
+ self.client = httpx.AsyncClient(verify=False, timeout=3)
+
+ async def do_telemetry(self):
+ await self.send_telemetry()
+ await self.send_ping()
+ await self.send_unleash_request()
+
+ async def send_telemetry(self):
+ telemetry = telemetry_pb2.TelemetryData()
+
+ # System info
+ system_info = telemetry.system_info
+ system_info.api_key = self.api_key
+ system_info.locale = APP_LANGUAGE
+ system_info.extension_name = APP_NAME
+ system_info.ide_name = APP_NAME
+ system_info.extension_version = APP_VERSION
+ system_info.os = SYSTEM_INFO
+ system_info.ide_version = DISPLAY_NAME
+ system_info.hardware = HADWARE_INFO
+
+ system_info.session_id = str(uuid.uuid4())
+
+ response = await self.client.post(
+ url=f"{BASE_URL}/exa.api_server_pb.ApiServerService/RecordAsyncTelemetry",
+ headers={
+ "Content-Type": "application/proto",
+ },
+ content=telemetry.SerializeToString(),
+ )
+
+ return response.status_code == 200
+
+ async def send_ping(self):
+ url = f"{BASE_URL}/exa.api_server_pb.ApiServerService/Ping"
+
+ headers = {
+ 'User-Agent': 'connect-go/1.16.2 (go1.23.2 X:nocoverageredesign)',
+ 'Accept-Encoding': 'gzip',
+ 'Connect-Protocol-Version': '1',
+ 'Content-Type': 'application/proto'
+ }
+
+ try:
+ response = await self.client.post(url, headers=headers)
+ print(f"Ping Status Code: {response.status_code}")
+ except httpx.RequestError as e:
+ print(f"请求发生错误: {e}")
+
+ async def send_unleash_request(self):
+ url = f"{BASE_URL}/exa.api_server_pb.ApiServerService/GetUnleashContextFields"
+
+ headers = {
+ 'User-Agent': 'connect-go/1.16.2 (go1.23.2 X:nocoverageredesign)',
+ 'Accept-Encoding': 'gzip',
+ 'Connect-Protocol-Version': '1',
+ 'Content-Type': 'application/proto'
+ }
+
+ try:
+ response = await self.client.post(url, headers=headers)
+
+ print(f"GetUnleashContextFields Status Code: {response.status_code}")
+ print(f"地区:")
+ print(f"{response.text}")
+
+ except httpx.RequestError as e:
+ print(f"请求发生错误: {e}")
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ await self.client.aclose()
\ No newline at end of file
diff --git a/auth/__pycache__/jwt_handler.cpython-311.pyc b/auth/__pycache__/jwt_handler.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f9dae63e707c48ed91cb7ec812f83524e9d7676e
Binary files /dev/null and b/auth/__pycache__/jwt_handler.cpython-311.pyc differ
diff --git a/auth/__pycache__/jwt_handler.cpython-312.pyc b/auth/__pycache__/jwt_handler.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb682a8c1ffabd460a9b2d856cc83c61881a5fd1
Binary files /dev/null and b/auth/__pycache__/jwt_handler.cpython-312.pyc differ
diff --git a/auth/__pycache__/login.cpython-312.pyc b/auth/__pycache__/login.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb698f63169b17400f52093993003116e3b933e5
Binary files /dev/null and b/auth/__pycache__/login.cpython-312.pyc differ
diff --git a/auth/__pycache__/register.cpython-312.pyc b/auth/__pycache__/register.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2c01fe6fb6eb534f66ba64b719e8d0bd26da115
Binary files /dev/null and b/auth/__pycache__/register.cpython-312.pyc differ
diff --git a/auth/jwt_handler.py b/auth/jwt_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bc65b03075086be6e7a5a3821edc6c32d1dabb8
--- /dev/null
+++ b/auth/jwt_handler.py
@@ -0,0 +1,41 @@
+from utils.http import HTTPClient
+from config.constants import APP_LANGUAGE, BASE_URL, APP_NAME, APP_VERSION, DISPLAY_NAME
+from protos import jwt_pb2
+
+
+class JWTHandler:
+ def __init__(self, api_key: str, server_url: str = BASE_URL, http_client: HTTPClient = HTTPClient()):
+ self.server_url = server_url
+ self.api_key = api_key
+ self.jwt_token = None
+ self.http_client = http_client
+
+ async def get_jwt_token(self) -> str:
+ """Get JWT token from server"""
+ request = jwt_pb2.jwt_request(
+ surfwind_jwt_request=jwt_pb2.surfwind_jwt_request(
+ app_name=APP_NAME,
+ version=APP_VERSION,
+ api_key=self.api_key,
+ language=APP_LANGUAGE,
+ display_name=DISPLAY_NAME,
+ app_identifier=APP_NAME,
+ )
+ )
+
+ response = await self.http_client.post(
+ url=f"{self.server_url}/exa.auth_pb.AuthService/GetUserJwt",
+ headers={
+ "Content-Type": "application/proto",
+ },
+ data=request.SerializeToString(),
+ )
+
+ if response.status_code == 200:
+ jwt_response = jwt_pb2.jwt_response()
+ jwt_response.ParseFromString(response.content)
+ self.jwt_token = jwt_response.jwt_token
+ return self.jwt_token
+
+ print(response.content)
+ raise Exception(f"Failed to get JWT token: {response.status_code}")
diff --git a/auth/login.py b/auth/login.py
new file mode 100644
index 0000000000000000000000000000000000000000..af6791f7e19dc57ac81c83a33279a3940a192bee
--- /dev/null
+++ b/auth/login.py
@@ -0,0 +1,27 @@
+import uuid
+import webbrowser
+from config.constants import WINDSURF_ID
+
+
+def open_auth_url() -> str:
+ """Open authentication URL in browser"""
+ state = str(uuid.uuid4())
+ base_url = "https://www.codeium.com/windsurf/signin"
+
+ auth_url = (
+ f"{base_url}?"
+ f"response_type=token&"
+ f"client_id={WINDSURF_ID}&"
+ f"redirect_uri=show-auth-token&"
+ f"state={state}&"
+ f"prompt=login&"
+ f"redirect_parameters_type=query&"
+ f"workflow="
+ )
+
+ try:
+ webbrowser.open(auth_url)
+ return state
+ except Exception as e:
+ print(f"Failed to open browser: {e}")
+ return None
diff --git a/auth/register.py b/auth/register.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec4c86a227b21e56f8d400d619aa946c11ca7c85
--- /dev/null
+++ b/auth/register.py
@@ -0,0 +1,33 @@
+from utils.http import HTTPClient
+from config.constants import API_URL
+
+
+class Registration:
+ @staticmethod
+ async def register_user(firebase_token: str) -> str:
+ """
+ Register user with firebase token and return API key
+ """
+ headers = {
+ "User-Agent": "Go-http-client/1.1",
+ "Content-Type": "application/json",
+ "Accept-Encoding": "gzip",
+ }
+
+ data = {"firebase_id_token": firebase_token}
+
+ response = await HTTPClient().post(
+ f"{API_URL}/register_user/", headers=headers, default_headers=False, json=data, verify=False
+ )
+
+ if response.status_code != 200:
+ print(response.text)
+ raise Exception(f"Registration failed: {response.status_code}")
+
+ response_json = response.json()
+ api_key = response_json.get("api_key")
+
+ if not api_key:
+ raise Exception("No API key in response")
+
+ return api_key
diff --git a/boot.sh b/boot.sh
new file mode 100644
index 0000000000000000000000000000000000000000..221b01c670dd4144f46154a8b4f0be7c3b583bdf
--- /dev/null
+++ b/boot.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# boot.sh
+
+# Initialize the chat queue
+# pypy/bin/python init_pool.py
+
+# Start the Gunicorn server
+
+pypy/bin/gunicorn service.server:app \
+ --worker-class uvicorn.workers.UvicornWorker \
+ --workers 16 \
+ --bind 0.0.0.0:8000 \
+ --timeout 30 \
+ --keep-alive 5 \
+ --access-logfile - \
+ --error-logfile -
\ No newline at end of file
diff --git a/config/__pycache__/api_keys.cpython-311.pyc b/config/__pycache__/api_keys.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c8b2ac50f094697c0b21c34c8c3b523efcf6d811
Binary files /dev/null and b/config/__pycache__/api_keys.cpython-311.pyc differ
diff --git a/config/__pycache__/api_keys.cpython-312.pyc b/config/__pycache__/api_keys.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8aaf5029ce96cf577e9d0648a4dfb882f1f0dfa
Binary files /dev/null and b/config/__pycache__/api_keys.cpython-312.pyc differ
diff --git a/config/__pycache__/constants.cpython-311.pyc b/config/__pycache__/constants.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c86abdb375cf8039f4793f0b4e24fa0176bd4a4
Binary files /dev/null and b/config/__pycache__/constants.cpython-311.pyc differ
diff --git a/config/__pycache__/constants.cpython-312.pyc b/config/__pycache__/constants.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7358e1959b97c6fb2804d8c1ea98c53a1c0d91ec
Binary files /dev/null and b/config/__pycache__/constants.cpython-312.pyc differ
diff --git a/config/__pycache__/models.cpython-311.pyc b/config/__pycache__/models.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9774eb4659ddc8c1f95879c6c7301f9a46ae976b
Binary files /dev/null and b/config/__pycache__/models.cpython-311.pyc differ
diff --git a/config/__pycache__/models.cpython-312.pyc b/config/__pycache__/models.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1288de7c94bd94022032ce2f04b4bb9b776ced9
Binary files /dev/null and b/config/__pycache__/models.cpython-312.pyc differ
diff --git a/config/api_keys.py b/config/api_keys.py
new file mode 100644
index 0000000000000000000000000000000000000000..eef93ce24fef1c0de430ee867377ef4bde3dc16e
--- /dev/null
+++ b/config/api_keys.py
@@ -0,0 +1,62 @@
+import xml.etree.ElementTree as ET
+from pathlib import Path
+from typing import Optional, List
+
+
+class APIKeyManager:
+ def __init__(self, config_path: str = "config/api_keys.xml"):
+ self.config_path = Path(config_path)
+ self._api_keys: List[str] = []
+ self._load_keys()
+
+ def _load_keys(self):
+ """Load API keys from XML file"""
+ if not self.config_path.exists():
+ self._create_default_config()
+
+ tree = ET.parse(self.config_path)
+ root = tree.getroot()
+
+ self._api_keys = [key.text for key in root.findall("key")]
+
+ def _create_default_config(self):
+ """Create default XML config file"""
+ self.config_path.parent.mkdir(parents=True, exist_ok=True)
+
+ root = ET.Element("api_keys")
+ tree = ET.ElementTree(root)
+ tree.write(self.config_path, encoding="utf-8", xml_declaration=True)
+
+ def get_key(self, index: int = 0) -> Optional[str]:
+ """Get API key by index"""
+ try:
+ return self._api_keys[index]
+ except IndexError:
+ return None
+
+ def add_key(self, api_key: str):
+ """Add new API key"""
+ root = ET.parse(self.config_path).getroot()
+ new_key = ET.SubElement(root, "key")
+ new_key.text = api_key
+
+ tree = ET.ElementTree(root)
+ tree.write(self.config_path, encoding="utf-8", xml_declaration=True)
+
+ self._api_keys.append(api_key)
+
+ def remove_key(self, index: int):
+ """Remove API key by index"""
+ root = ET.parse(self.config_path).getroot()
+ keys = root.findall("key")
+
+ if 0 <= index < len(keys):
+ root.remove(keys[index])
+ tree = ET.ElementTree(root)
+ tree.write(self.config_path, encoding="utf-8", xml_declaration=True)
+
+ self._api_keys.pop(index)
+
+ def list_keys(self) -> List[str]:
+ """List all API keys"""
+ return self._api_keys.copy()
diff --git a/config/api_keys.xml b/config/api_keys.xml
new file mode 100644
index 0000000000000000000000000000000000000000..a988625832521ba890a627b805cf5c4b5f3f24eb
--- /dev/null
+++ b/config/api_keys.xml
@@ -0,0 +1,2 @@
+
+33cbbd76-bdb4-4d28-bc65-38c56dca37687e5ab17a-ec08-4037-8b30-186e86d8595a637b9327-cfef-4808-bdeb-3d83e8ecca1b82651d96-5d2c-4387-886f-70d501865994
\ No newline at end of file
diff --git a/config/constants.py b/config/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..86a0f81bfcdc280088e016bdaefa8d25bbd549da
--- /dev/null
+++ b/config/constants.py
@@ -0,0 +1,96 @@
+# API endpoints
+import json
+
+from config.models import ModelID
+
+DEFAULT_API_ACCOUNT_EXPIRATION_SECONDS = 4 * 24 * 60 * 60
+
+ENCRYPTION_KEY = "1923a821"
+BASE_URL = "https://server.codeium.com"
+API_URL = "https://api.codeium.com"
+INFERENCE_URL = "https://inference.codeium.com"
+
+# API keys and identifiers
+WINDSURF_ID = "3GUryQ7ldAeKEuD2obYnppsnmj58eP5u"
+
+# Application info
+APP_NAME = "windsurf"
+APP_VERSION = "1.30.0"
+DISPLAY_NAME = "Windsurf 1.94.0"
+APP_LANGUAGE = "en"
+
+HADWARE_INFO = json.dumps(
+ {
+ "NumSockets": 1,
+ "NumCores": 8,
+ "NumThreads": 16,
+ "VendorID": "GenuineIntel",
+ "Family": "6",
+ "Model": "158",
+ "ModelName": "Intel(R) Core(TM) i9-12900K CPU @ 3.60GHz",
+ "Memory": 34359738368,
+ }
+)
+SYSTEM_INFO = json.dumps(
+ {
+ "Os": "windows",
+ "Arch": "amd64",
+ "Version": "11",
+ "ProductName": "Windows 11 Pro",
+ "MajorVersionNumber": 11,
+ "MinorVersionNumber": 0,
+ "Build": "26100",
+ }
+)
+
+# HTTP headers
+DEFAULT_HEADERS = {
+ "User-Agent": "connect-go/1.16.2 (go1.23.2 X:nocoverageredesign)",
+ "Connect-Protocol-Version": "1",
+ "Accept-Encoding": "identity",
+ "Connection": "keep-alive",
+ "Keep-Alive": "timeout=120, max=10000"
+}
+
+REDIS_URL = "redis://localhost:6379"
+REDIS_PASSWORD = "$546a"
+JWT_SECRET_INFO = "0982d83c-969a-4a53-8243-9d215a2fe7a2"
+ALLOWED_HOSTS = ["*"]
+BLOCK_DURATION = 3600
+OPTIONS_DURATION = 600
+THREE_MIN_LIMIT = 90
+HOURLY_LIMIT = 550
+BLOCK_LIMIT = 600
+MAX_UPLOAD_SIZE = 1048576 * 100 # 100 MB
+MAX_REQUEST_SIZE = 1048576 * 8 # 8 MB
+
+# Model mappings can also be placed here if applicable
+MODEL_MAPPING = {
+ # OpenAI Models
+ "gpt-3.5-turbo": ModelID.MODEL_CHAT_3_5_TURBO,
+ "gpt-4": ModelID.MODEL_CHAT_GPT_4,
+ "gpt-4o": ModelID.MODEL_CHAT_GPT_4O_2024_08_06,
+ "gpt-4o-2024-08-06": ModelID.MODEL_CHAT_GPT_4O_2024_08_06,
+ "gpt-4o-mini-2024-07-18": ModelID.MODEL_CHAT_GPT_4O_MINI_2024_07_18,
+ "gpt-4o-mini": ModelID.MODEL_CHAT_GPT_4O_MINI_2024_07_18,
+ "gpt-4-turbo-preview": ModelID.MODEL_CHAT_GPT_4_1106_PREVIEW,
+ # Claude Models
+ "claude-3-opus-20240229": ModelID.MODEL_CLAUDE_3_OPUS_20240229,
+ "claude-3-opus": ModelID.MODEL_CLAUDE_3_OPUS_20240229,
+ # "claude-3-sonnet-20240229": ModelID.MODEL_CLAUDE_3_SONNET_20240229,
+ "claude-3.5-sonnet-20240620": ModelID.MODEL_CLAUDE_3_5_SONNET_20240620,
+ "claude-3.5-sonnet-20241022": ModelID.MODEL_CLAUDE_3_5_SONNET_20241022,
+ "claude-3.5-sonnet": ModelID.MODEL_CLAUDE_3_5_SONNET_20241022,
+ "claude-3.5-haiku": ModelID.MODEL_CLAUDE_3_5_HAIKU_20241022,
+ "claude-3-haiku": ModelID.MODEL_CLAUDE_3_HAIKU_20240307,
+ "claude": ModelID.MODEL_CLAUDE_3_5_SONNET_20241022,
+ # O1 Models
+ "o1-mini": ModelID.MODEL_CHAT_O1_MINI,
+ "o1-preview": ModelID.MODEL_CHAT_O1_PREVIEW,
+ # "o1": ModelID.MODEL_CHAT_O1,
+}
+
+# Authorization keys for API access
+AUTH_KEYS = [
+ "sk-upuyyds"
+]
\ No newline at end of file
diff --git a/config/models.py b/config/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..42e50f05a4ac606b740393e37173cb38289e3ec0
--- /dev/null
+++ b/config/models.py
@@ -0,0 +1,93 @@
+from enum import Enum
+
+
+class ModelID(Enum):
+ MODEL_UNSPECIFIED = 0
+ MODEL_EMBED_6591 = 20
+ MODEL_8341 = 33
+ MODEL_8528 = 42
+ MODEL_9024 = 41
+ MODEL_14602 = 112
+ MODEL_15133 = 115
+ MODEL_15302 = 119
+ MODEL_15335 = 121
+ MODEL_15336 = 122
+ MODEL_15931 = 167
+ MODEL_QUERY_9905 = 48
+ MODEL_QUERY_11791 = 66
+ MODEL_CHAT_11120 = 57
+ MODEL_CHAT_11121 = 58
+ MODEL_CHAT_12119 = 70
+ MODEL_CHAT_12121 = 69
+ MODEL_CHAT_12437 = 74
+ MODEL_CHAT_12491 = 76
+ MODEL_CHAT_12623 = 78
+ MODEL_CHAT_12950 = 79
+ MODEL_CHAT_12968 = 101
+ MODEL_CHAT_13404 = 102
+ MODEL_CHAT_13566 = 103
+ MODEL_CHAT_13930 = 108
+ MODEL_CHAT_14255 = 110
+ MODEL_CHAT_14256 = 111
+ MODEL_CHAT_14942 = 114
+ MODEL_CHAT_15305 = 120
+ MODEL_CHAT_15600 = 123
+ MODEL_CHAT_16718 = 175
+ MODEL_CHAT_15729 = 168
+ MODEL_CHAT_16579 = 173
+ MODEL_CHAT_16579_CRUSOE = 174
+ MODEL_DRAFT_11408 = 65
+ MODEL_DRAFT_CHAT_11883 = 67
+ MODEL_DRAFT_CHAT_12196 = 72
+ MODEL_DRAFT_CHAT_12413 = 73
+ MODEL_DRAFT_CHAT_13175 = 104
+ MODEL_CHAT_3_5_TURBO = 28
+ MODEL_CHAT_GPT_4 = 30
+ MODEL_CHAT_GPT_4_1106_PREVIEW = 37
+ MODEL_TEXT_EMBEDDING_OPENAI_ADA = 91
+ MODEL_TEXT_EMBEDDING_OPENAI_3_SMALL = 163
+ MODEL_TEXT_EMBEDDING_OPENAI_3_LARGE = 164
+ MODEL_CHAT_GPT_4O_2024_05_13 = 71
+ MODEL_CHAT_GPT_4O_2024_08_06 = 109
+ MODEL_CHAT_GPT_4O_MINI_2024_07_18 = 113
+ MODEL_CHAT_O1_PREVIEW = 117
+ MODEL_CHAT_O1_MINI = 118
+ MODEL_CHAT_O1 = 170
+ MODEL_GOOGLE_GEMINI_1_0_PRO = 61
+ MODEL_GOOGLE_GEMINI_1_5_PRO = 62
+ MODEL_CLAUDE_3_OPUS_20240229 = 63
+ MODEL_CLAUDE_3_SONNET_20240229 = 64
+ MODEL_CLAUDE_3_5_SONNET_20240620 = 80
+ MODEL_CLAUDE_3_5_SONNET_20241022 = 166
+ MODEL_CLAUDE_3_5_HAIKU_20241022 = 171
+ MODEL_CLAUDE_3_HAIKU_20240307 = 172
+ MODEL_TOGETHERAI_TEXT_EMBEDDING_M2_BERT = 81
+ MODEL_TOGETHERAI_LLAMA_3_1_8B_INSTRUCT = 165
+ MODEL_HUGGING_FACE_TEXT_EMBEDDING_M2_BERT = 82
+ MODEL_HUGGING_FACE_TEXT_EMBEDDING_UAE_CODE = 83
+ MODEL_HUGGING_FACE_TEXT_EMBEDDING_BGE = 84
+ MODEL_HUGGING_FACE_TEXT_EMBEDDING_BLADE = 85
+ MODEL_HUGGING_FACE_TEXT_EMBEDDING_ARCTIC_LARGE = 86
+ MODEL_HUGGING_FACE_TEXT_EMBEDDING_E5_BASE = 87
+ MODEL_HUGGING_FACE_TEXT_EMBEDDING_MXBAI = 88
+ MODEL_LLAMA_3_1_8B_INSTRUCT = 106
+ MODEL_LLAMA_3_1_70B_INSTRUCT = 107
+ MODEL_LLAMA_3_1_405B_INSTRUCT = 105
+ MODEL_LLAMA_3_1_70B_INSTRUCT_LONG_CONTEXT = 116
+ MODEL_NOMIC_TEXT_EMBEDDING_V1 = 89
+ MODEL_NOMIC_TEXT_EMBEDDING_V1_5 = 90
+ MODEL_MISTRAL_7B = 77
+ MODEL_SALESFORCE_EMBEDDING_2R = 99
+ MODEL_TEI_BGE_M3 = 92
+ MODEL_TEI_NOMIC_EMBED_TEXT_V1 = 93
+ MODEL_TEI_INTFLOAT_E5_LARGE_INSTRUCT = 94
+ MODEL_TEI_SNOWFLAKE_ARCTIC_EMBED_L = 95
+ MODEL_TEI_UAE_CODE_LARGE_V1 = 96
+ MODEL_TEI_B1ADE = 97
+ MODEL_TEI_WHEREISAI_UAE_LARGE_V1 = 98
+ MODEL_TEI_WHEREISAI_UAE_CODE_LARGE_V1 = 100
+ MODEL_OPENAI_COMPATIBLE = 200
+ MODEL_ANTHROPIC_COMPATIBLE = 201
+ MODEL_VERTEX_COMPATIBLE = 202
+ MODEL_BEDROCK_COMPATIBLE = 203
+ MODEL_AZURE_COMPATIBLE = 204
diff --git a/docker.txt b/docker.txt
new file mode 100644
index 0000000000000000000000000000000000000000..08a2d20b5a549141933f096272dfb89d304df6bf
--- /dev/null
+++ b/docker.txt
@@ -0,0 +1,9 @@
+docker run -d \
+ -p 127.0.0.1:9000:8080 \
+ -v open-webui:/app/backend/data \
+ -e OPENAI_API_BASE_URLS="http://host.docker.internal:8000/v1" \
+ -e OPENAI_API_KEYS="Ga_test_key1" \
+ --add-host=host.docker.internal:host-gateway \
+ --name open-webui \
+ --restart unless-stopped \
+ ghcr.io/open-webui/open-webui:main
\ No newline at end of file
diff --git a/gen_account.py b/gen_account.py
new file mode 100644
index 0000000000000000000000000000000000000000..a23058f1c4ff03a0d2e62c6dd3fc0dac3c833423
--- /dev/null
+++ b/gen_account.py
@@ -0,0 +1,232 @@
+from time import time
+import httpx
+import asyncio
+from faker import Faker
+import random
+import string
+import base64
+import json
+from datetime import datetime
+from rich.console import Console
+from rich.panel import Panel
+from rich.table import Table
+from rich import print as rprint
+from typing import List
+
+from auth.register import Registration
+from config.api_keys import APIKeyManager
+from protos import request_pb2, response_pb2
+
+console = Console()
+
async def generate_firebase_client():
    """Build the ``X-Firebase-Client`` header value.

    The header is the base64 encoding of a Firebase heartbeat JSON payload
    stamped with today's date.
    """
    today = datetime.now().strftime("%Y-%m-%d")
    agent = (
        "fire-core/0.10.2 fire-core-esm2017/0.10.2 fire-js/ "
        "fire-auth/1.7.2 fire-auth-esm2017/1.7.2 fire-js-all-app/10.11.1"
    )
    payload = json.dumps({"version": 2, "heartbeats": [{"agent": agent, "dates": [today]}]})
    return base64.b64encode(payload.encode()).decode()
+
async def generate_account() -> dict:
    """Create one account end-to-end and return its credentials.

    Flow: Firebase email/password sign-up, then three seat-management RPCs
    (GetCurrentUser, GetPreapprovalForUser, UpdateName) with protobuf request
    bodies. Returns a dict with the email, password, Firebase id_token and
    the HTTP status code of the final name update.
    """
    faker = Faker()

    # Generate random email and password
    email = faker.email(safe=True, domain='outlook.com')
    password = ''.join(random.choices(string.ascii_letters + string.digits, k=random.randint(9, 15)))

    firebase_client = await generate_firebase_client()

    async with httpx.AsyncClient() as client:
        # Step 1: Firebase Sign Up
        # NOTE(review): the Firebase web API key is hard-coded here; confirm
        # it is intended to live in the repository.
        signup_response = await client.post(
            'https://identitytoolkit.googleapis.com/v1/accounts:signUp',
            params={'key': 'AIzaSyDsOl-1XpT5err0Tcnx8FFod1H8gVGIycY'},
            headers={
                'X-Firebase-Client': firebase_client,
                'Content-Type': 'application/json',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.6723.70 Safari/537.36'
            },
            json={
                'returnSecureToken': True,
                'email': email,
                'password': password,
                'clientType': 'CLIENT_TYPE_WEB'
            }
        )

        # NOTE(review): no status check here — a failed sign-up (e.g. duplicate
        # email) surfaces as a KeyError on 'idToken' rather than a clear error.
        id_token = signup_response.json()['idToken']

        # Step 2: Get Current User
        # f1/f2 semantics are opaque from here — presumably request flags; TODO confirm.
        current_user = request_pb2.CurrentUser()
        current_user.jwt = id_token
        current_user.f1 = 1
        current_user.f2 = 1

        # Browser-like headers to match the web client's requests.
        current_user_response = await client.post(
            'https://web-backend.codeium.com/exa.seat_management_pb.SeatManagementService/GetCurrentUser',
            headers={
                'Content-Type': 'application/proto',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.6723.70 Safari/537.36',
                'Accept-Language': 'en',
                'Sec-Ch-Ua-Platform': '"Windows"',
                'Sec-Ch-Ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
                'Sec-Ch-Ua-Mobile': '?0',
                'Connect-Protocol-Version': '1',
                'Accept': '*/*',
                'Origin': 'https://codeium.com',
                'Sec-Fetch-Site': 'same-site',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Dest': 'empty',
                'Referer': 'https://codeium.com/',
                "X-Auth-Token": id_token,
                'Accept-Encoding': 'gzip, deflate, br',
            },
            content=current_user.SerializeToString()
        )
        assert current_user_response.status_code == 200
        response_proto = response_pb2.CurrentUserResponse()
        response_proto.ParseFromString(current_user_response.content)
        console.print(Panel(f"[bold green]User Created[/bold green]\nName: {response_proto.user.name}\nStatus: {response_proto.status.status}"))

        # Step 3: Get Preapproval For User
        get_preapproval_for_user = request_pb2.GetPreapprovalForUser()
        get_preapproval_for_user.jwt = id_token
        get_preapproval_for_user_response = await client.post(
            'https://web-backend.codeium.com/exa.seat_management_pb.SeatManagementService/GetPreapprovalForUser',
            headers={
                'Content-Type': 'application/proto',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.6723.70 Safari/537.36',
                'Accept-Language': 'en',
                'Sec-Ch-Ua-Platform': '"Windows"',
                'Sec-Ch-Ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
                'Sec-Ch-Ua-Mobile': '?0',
                'Connect-Protocol-Version': '1',
                'Accept': '*/*',
                'Origin': 'https://codeium.com',
                'Sec-Fetch-Site': 'same-site',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Dest': 'empty',
                'Referer': 'https://codeium.com/',
                'Accept-Encoding': 'gzip, deflate, br',
                "X-Auth-Token": id_token,
                'Priority': 'u=1, i'
            },
            content=get_preapproval_for_user.SerializeToString()
        )
        assert get_preapproval_for_user_response.status_code == 200

        # Step 4: Update Name using Protobuf
        # NOTE(review): unlike steps 2-3 this request sends no X-Auth-Token
        # header — the JWT travels only inside the protobuf body; confirm intended.
        change_name = request_pb2.ChangeName()
        change_name.jwt = id_token
        change_name.name = faker.name()

        name_response = await client.post(
            'https://web-backend.codeium.com/exa.seat_management_pb.SeatManagementService/UpdateName',
            headers={
                'Content-Type': 'application/proto',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.6723.70 Safari/537.36',
                'Accept-Language': 'en',
                'Sec-Ch-Ua-Platform': '"Windows"',
                'Sec-Ch-Ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
                'Sec-Ch-Ua-Mobile': '?0',
                'Connect-Protocol-Version': '1',
                'Accept': '*/*',
                'Origin': 'https://codeium.com',
                'Sec-Fetch-Site': 'same-site',
                'Sec-Fetch-Mode': 'cors',
                'Sec-Fetch-Dest': 'empty',
                'Referer': 'https://codeium.com/',
                'Accept-Encoding': 'gzip, deflate, br',
                'Priority': 'u=1, i'
            },
            content=change_name.SerializeToString()
        )

        # Caller inspects 'name_response' for 200 before registering an API key.
        return {
            'email': email,
            'password': password,
            'id_token': id_token,
            'name_response': name_response.status_code
        }
+
async def generate_multiple_accounts(num_accounts: int):
    """Create *num_accounts* accounts concurrently and return their result dicts."""
    coros = [generate_account() for _ in range(num_accounts)]
    return await asyncio.gather(*coros)
+
async def main():
    """Generate accounts, register each for an API key, and print the results."""
    # Number of accounts to create. NOTE(review): the original comment claimed
    # this was read from the command line, but it is hard-coded.
    num_accounts = 3

    with console.status(f"[bold green]Generating {num_accounts} accounts...", spinner="dots"):
        results = await generate_multiple_accounts(num_accounts)

    for result in results:
        id_token = result['id_token']
        email = result['email']
        name_response = result['name_response']

        # Skip accounts whose final UpdateName call failed.
        if name_response != 200:
            console.print(f"[bold red]Failed to update name. Status code: {name_response}[/bold red]")
            continue

        key_manager = APIKeyManager()

        with console.status("[bold green]Registering user...", spinner="dots"):
            api_key = await Registration.register_user(id_token.strip())

        # Add GetCurrentUser call after registration
        async with httpx.AsyncClient() as client:
            current_user = request_pb2.CurrentUser()
            current_user.jwt = id_token
            current_user.f1 = 1
            current_user.f2 = 1

            current_user_response = await client.post(
                'https://web-backend.codeium.com/exa.seat_management_pb.SeatManagementService/GetCurrentUser',
                headers={
                    'Content-Type': 'application/proto',
                    'X-Auth-Token': id_token,
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.6723.70 Safari/537.36',
                    'Accept-Language': 'en',
                    'Sec-Ch-Ua-Platform': '"Windows"',
                    'Sec-Ch-Ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
                    'Sec-Ch-Ua-Mobile': '?0',
                    'Connect-Protocol-Version': '1',
                    'Accept': '*/*',
                    'Origin': 'https://codeium.com',
                    'Sec-Fetch-Site': 'same-site',
                    'Sec-Fetch-Mode': 'cors',
                    'Sec-Fetch-Dest': 'empty',
                    'Referer': 'https://codeium.com/',
                    'Accept-Encoding': 'gzip, deflate, br',
                },
                content=current_user.SerializeToString()
            )

            # NOTE(review): no status check before parsing the protobuf response.
            response_proto = response_pb2.CurrentUserResponse()
            response_proto.ParseFromString(current_user_response.content)

        # Persist the freshly registered key.
        key_manager.add_key(api_key)

        # Update table to include password
        table = Table(title=f"Registration Results - {email}")
        table.add_column("Field", style="cyan")
        table.add_column("Value", style="green")

        table.add_row("Email", email)
        table.add_row("Password", result['password'])
        table.add_row("API Key", api_key)
        table.add_row("Status", str(response_proto.status.status))

        console.print(table)
        console.print("\n")  # Add a blank line between tables

if __name__ == "__main__":
    asyncio.run(main())
diff --git a/init_pool.py b/init_pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..100345d014c9bc9528e93beca5c32960da0f266e
--- /dev/null
+++ b/init_pool.py
@@ -0,0 +1,21 @@
+from managers.global_chat_manager import GlobalChatManager
+import uvicorn
+import signal
+import sys
+import multiprocessing
+import asyncio
+from rich.console import Console
+
+console = Console()
+
async def init_chat_queue():
    """Create the global chat-manager singleton and fill its key queue."""
    manager = GlobalChatManager(console=console, queue_type="deque")
    await manager.initialize_queue()


if __name__ == "__main__":
    asyncio.run(init_chat_queue())
\ No newline at end of file
diff --git a/main.py b/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..4584a5acb354f79ab9d614343bde480c4349b7f6
--- /dev/null
+++ b/main.py
@@ -0,0 +1,119 @@
+from managers.chat_manager import ChatManager
+from api.telemetry import TelemetryAPI
+from config.api_keys import APIKeyManager
+from config.models import ModelID
+from auth.register import Registration
+from auth.login import open_auth_url
+from api.chat.chat_api import ChatAPI, ChatConfig
+
+import urllib3
+import asyncio
+
+urllib3.disable_warnings()
+
+from rich.console import Console
+from rich.progress import Progress, SpinnerColumn, TextColumn
+
+console = Console()
+
+
def select_api_key(key_manager: APIKeyManager) -> str | None:
    """Let the user pick one of the stored API keys.

    Returns the chosen key, the first key when the user just presses Enter
    (the old shortcut behaviour), or None (choice 0 / no keys stored) to
    trigger a fresh registration.
    """
    keys = key_manager.list_keys()
    if not keys:
        return None

    console.print("\n[bold cyan]Available API keys:[/]")
    for i, key in enumerate(keys):
        console.print(f"[green]{i + 1}[/]. {key}")

    while True:
        raw = input("\nSelect API key (number) or 0 for new login: ").strip()
        if not raw:
            # Empty input defaults to the first key, as before.
            return keys[0]
        try:
            choice = int(raw)
        except ValueError:
            # BUG FIX: previously any non-numeric input silently selected the
            # first key; re-prompt instead so a typo cannot pick the wrong key.
            print("Invalid selection. Please try again.")
            continue
        if choice == 0:
            return None
        if 1 <= choice <= len(keys):
            return keys[choice - 1]
        print("Invalid selection. Please try again.")
+
+
async def main():
    """Select/register an API key, warm up the chat pool, and stream one reply."""
    key_manager = APIKeyManager()
    chat_manager = ChatManager()
    api_key = select_api_key(key_manager)

    # If no API key selected or available, do registration
    if api_key is None:
        _ = open_auth_url()
        firebase_token = input("Please paste your firebase token: ")
        api_key = await Registration.register_user(firebase_token.strip())
        key_manager.add_key(api_key)
        print(f"Registered successfully. API key: {api_key}")

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
    ) as progress:
        progress.add_task("Initializing Chat API...", total=None)
        # Fire the telemetry handshake before enqueueing the key.
        tele = TelemetryAPI(api_key=api_key)
        await tele.do_telemetry()

        await chat_manager.add_chat(api_key)

        progress.stop()

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
    ) as progress:
        _ = progress.add_task("Sending message...", total=None)

        config = ChatConfig(
            model_id=ModelID.MODEL_CLAUDE_3_OPUS_20240229,
            temperature=0.7,
            max_tokens=8192,
        )

        # Ten filler turns of "1234567890" — presumably padding to exercise a
        # longer conversation history; confirm before relying on this shape.
        message = [
            {"role": "user", "content": "1234567890"},
            {"role": "assistant", "content": "1234567890"},
            {"role": "user", "content": "1234567890"},
            {"role": "assistant", "content": "1234567890"},
            {"role": "user", "content": "1234567890"},
            {"role": "assistant", "content": "1234567890"},
            {"role": "user", "content": "1234567890"},
            {"role": "assistant", "content": "1234567890"},
            {"role": "user", "content": "1234567890"},
            {"role": "assistant", "content": "1234567890"},
            {"role": "user", "content": "tell me a joke"},
        ]
        first_chunk = True
        chat = await chat_manager.get_chat()

        tokens = 0

        # Stream the reply; stop the spinner on the first received chunk.
        async for chunk, token_count in chat.send_message(
            messages=message,
            config=config,
            system_prompt="You are a helpful assistant.",
            stream=True,
        ):
            if first_chunk:
                progress.stop()
                first_chunk = False
                console.print("=" * 20)
            if chunk:
                console.print(
                    chunk, end="", markup=True, highlight=True, emoji=True
                )
            tokens += token_count

        console.print(f"\n[bold green]Tokens: {tokens}[/]")


if __name__ == "__main__":
    asyncio.run(main())
diff --git a/managers/__pycache__/chat_manager.cpython-311.pyc b/managers/__pycache__/chat_manager.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fdbc6788c607eda174f95352f63613c96e1b928d
Binary files /dev/null and b/managers/__pycache__/chat_manager.cpython-311.pyc differ
diff --git a/managers/__pycache__/chat_manager.cpython-312.pyc b/managers/__pycache__/chat_manager.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d4361432a1437536ca490c3c9acbdb6cc91da8cf
Binary files /dev/null and b/managers/__pycache__/chat_manager.cpython-312.pyc differ
diff --git a/managers/__pycache__/global_chat_manager.cpython-311.pyc b/managers/__pycache__/global_chat_manager.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04312b07ed6036e52a2221b3a50a5212834d60d2
Binary files /dev/null and b/managers/__pycache__/global_chat_manager.cpython-311.pyc differ
diff --git a/managers/__pycache__/global_chat_manager.cpython-312.pyc b/managers/__pycache__/global_chat_manager.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e4f91531814052ca2310bb804b0131c71a592879
Binary files /dev/null and b/managers/__pycache__/global_chat_manager.cpython-312.pyc differ
diff --git a/managers/chat_manager.py b/managers/chat_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..75daf9316327b048d375c7dd1ee5a11426745493
--- /dev/null
+++ b/managers/chat_manager.py
@@ -0,0 +1,32 @@
+from typing import Optional
+from managers.queue.base import ChatQueueBase
+from managers.queue.deque_queue import DequeQueue
+
+from api.chat.chat_api import ChatAPI
+
+
class ChatManager:
    """Thin facade over a pluggable queue of ChatAPI instances."""

    # Registry of supported queue backends, keyed by queue_type string.
    _QUEUE_TYPES = {
        "deque": DequeQueue,
    }

    def __init__(self, queue_type: str = "deque", **kwargs):
        self.queue: ChatQueueBase = self._create_queue(queue_type, **kwargs)

    def _create_queue(self, queue_type: str, **kwargs) -> ChatQueueBase:
        """Instantiate the backend registered under *queue_type*."""
        if queue_type not in self._QUEUE_TYPES:
            raise ValueError(f"Unsupported queue type: {queue_type}")
        return self._QUEUE_TYPES[queue_type](**kwargs)

    async def add_chat(self, api_key: str):
        """Add a new chat instance (built from *api_key*) to the queue."""
        await self.queue.add(api_key)

    async def get_chat(self) -> Optional[ChatAPI]:
        """Return the next chat instance from the queue."""
        return await self.queue.get()

    async def length(self) -> int:
        """Return the current queue length."""
        return await self.queue.length()
diff --git a/managers/global_chat_manager.py b/managers/global_chat_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fd15b049bae6a3d70ba45e3112bd1cfce40ebea
--- /dev/null
+++ b/managers/global_chat_manager.py
@@ -0,0 +1,106 @@
+from typing import Optional, List
+from rich.console import Console
+from rich.progress import Progress, SpinnerColumn, TextColumn
+
+from managers.chat_manager import ChatManager
+from api.telemetry import TelemetryAPI
+from config.api_keys import APIKeyManager
+
+
class GlobalChatManager:
    """
    Process-wide singleton that fills the chat queue with API keys.

    NOTE(review): the original docstring described Redis/RabbitMQ distributed
    queues, but __init__ explicitly rejects every queue_type except the
    in-memory "deque" backend.
    """

    # Singleton storage; set on first construction.
    _instance: Optional["GlobalChatManager"] = None

    def __new__(cls, *args, **kwargs):
        # Classic singleton: every construction returns the same instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, console: Console, queue_type: str = "deque", **kwargs):
        # Guard against re-running __init__ on the shared singleton instance.
        if hasattr(self, "initialized"):
            return

        self.console = console
        self.queue_type = queue_type

        if queue_type != "deque":
            raise ValueError("GlobalChatManager only supports 'deque' queue type")

        self.chat_manager = ChatManager(queue_type=queue_type, **kwargs)
        self.key_manager = APIKeyManager()
        self.initialized = True

    async def _register_single_key(self, api_key: str) -> None:
        """Register a single API key into the queue, logging the outcome."""
        try:
            await self.chat_manager.add_chat(api_key)
            await self._send_registration_signals(api_key)
            self.console.log(
                f"[bold green]✓[/] API key registered successfully: {api_key[:8]}..."
            )
        except Exception as e:
            self.console.log(
                f"[bold red]✗[/] Failed to register API key {api_key[:8]}\n"
                f"    Error: {str(e)}"
            )
            raise
        finally:
            # Visual separator between per-key log entries.
            self.console.log("─" * 80)

    async def _send_registration_signals(self, api_key: str) -> None:
        """Fire the telemetry handshake associated with a key registration."""
        self.console.log(
            f"[bold yellow]⚡[/] Sending registration signals for {api_key[:8]}..."
        )
        await TelemetryAPI(api_key=api_key).do_telemetry()

    async def initialize_queue(self, api_keys: Optional[List[str]] = None) -> None:
        """Populate the queue with *api_keys* (or all stored keys, deduplicated)."""
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            console=self.console,
        ) as progress:
            task = progress.add_task(
                f"[bold blue]Initializing {self.queue_type.upper()} queue...[/]",
                total=None
            )

            # Fall back to the keys stored in configuration when none are given.
            if not api_keys:
                api_keys = self.key_manager.list_keys()
                # De-duplicate while preserving order.
                api_keys = list(dict.fromkeys(api_keys))

            if not api_keys:
                raise ValueError("No API keys provided or found in configuration")

            # Register every key, one at a time.
            for api_key in api_keys:
                await self._register_single_key(api_key)
                progress.update(task, advance=1)

            # Sanity-check that at least one key made it into the queue.
            queue_size = await self.chat_manager.length()
            if queue_size == 0:
                raise RuntimeError(f"Failed to initialize {self.queue_type} queue")

            self.console.log(f"号池已初始化,当前容量: {queue_size} 个API密钥")

            progress.update(
                task,
                description=f"{self.queue_type.upper()} queue initialized with {queue_size} keys",
            )

    async def add_api_key(self, api_key: str) -> None:
        """Register one additional API key into the queue."""
        await self._register_single_key(api_key)

    async def get_queue_size(self) -> int:
        """Return the current queue size."""
        return await self.chat_manager.length()
diff --git a/managers/queue/__pycache__/base.cpython-311.pyc b/managers/queue/__pycache__/base.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..13873b638b243c4cbaec1aab4d2df456f47a8e7f
Binary files /dev/null and b/managers/queue/__pycache__/base.cpython-311.pyc differ
diff --git a/managers/queue/__pycache__/base.cpython-312.pyc b/managers/queue/__pycache__/base.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aad5f8ef4b0e0c96acb31f5916487496d00e1882
Binary files /dev/null and b/managers/queue/__pycache__/base.cpython-312.pyc differ
diff --git a/managers/queue/__pycache__/deque_queue.cpython-311.pyc b/managers/queue/__pycache__/deque_queue.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82a34597585b747faafb8ffc3cc5ec348864de55
Binary files /dev/null and b/managers/queue/__pycache__/deque_queue.cpython-311.pyc differ
diff --git a/managers/queue/__pycache__/deque_queue.cpython-312.pyc b/managers/queue/__pycache__/deque_queue.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..069ef9a3ddb7f7002c6098bd5b469c145efaeb6d
Binary files /dev/null and b/managers/queue/__pycache__/deque_queue.cpython-312.pyc differ
diff --git a/managers/queue/__pycache__/rabbitmq_queue.cpython-312.pyc b/managers/queue/__pycache__/rabbitmq_queue.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d637a209d63573791dcca079abecbf11177ad408
Binary files /dev/null and b/managers/queue/__pycache__/rabbitmq_queue.cpython-312.pyc differ
diff --git a/managers/queue/__pycache__/redis_queue.cpython-312.pyc b/managers/queue/__pycache__/redis_queue.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b42bde60e1b4ae1cdb388e90413ab6649fa64628
Binary files /dev/null and b/managers/queue/__pycache__/redis_queue.cpython-312.pyc differ
diff --git a/managers/queue/base.py b/managers/queue/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7cc1cfcaf1ee96e21b198f324535638ef0d9c33
--- /dev/null
+++ b/managers/queue/base.py
@@ -0,0 +1,16 @@
+from abc import ABC, abstractmethod
+from typing import Optional
+from api.chat.chat_api import ChatAPI
+
class ChatQueueBase(ABC):
    """Interface for API-key queues that hand out ChatAPI instances."""

    @abstractmethod
    async def add(self, api_key: str) -> None:
        """Enqueue an entry built from *api_key*."""

    @abstractmethod
    async def get(self) -> Optional[ChatAPI]:
        """Return the next ChatAPI, or None when the queue is empty."""

    @abstractmethod
    async def length(self) -> int:
        """Return the number of queued entries."""
\ No newline at end of file
diff --git a/managers/queue/deque_queue.py b/managers/queue/deque_queue.py
new file mode 100644
index 0000000000000000000000000000000000000000..517c4cc64e9a252a34bd0569652bb0539e8073d0
--- /dev/null
+++ b/managers/queue/deque_queue.py
@@ -0,0 +1,21 @@
+from collections import deque
+from typing import Optional
+from api.chat.chat_api import ChatAPI
+from .base import ChatQueueBase
+
class DequeQueue(ChatQueueBase):
    """In-memory round-robin queue: get() rotates the front entry to the back."""

    def __init__(self):
        self.queue = deque()

    async def add(self, api_key: str) -> None:
        """Wrap *api_key* in a ChatAPI and append it to the queue."""
        self.queue.append(ChatAPI(api_key=api_key))

    async def get(self) -> Optional[ChatAPI]:
        """Return the front entry after moving it to the back (round-robin)."""
        if not self.queue:
            return None
        self.queue.rotate(-1)
        return self.queue[-1]

    async def length(self) -> int:
        """Return the number of queued chat instances."""
        return len(self.queue)
\ No newline at end of file
diff --git a/managers/queue/rabbitmq_queue.py b/managers/queue/rabbitmq_queue.py
new file mode 100644
index 0000000000000000000000000000000000000000..27ac94593d2a24032b8e47b35d2d6de40895086a
--- /dev/null
+++ b/managers/queue/rabbitmq_queue.py
@@ -0,0 +1,41 @@
+import aio_pika
+from typing import Optional
+from api.chat.chat_api import ChatAPI
+from .base import ChatQueueBase
+
class RabbitMQQueue(ChatQueueBase):
    """API-key queue backed by RabbitMQ via aio_pika, rotated round-robin by get()."""

    def __init__(self, url: str):
        self.url = url
        self.connection = None
        self.channel = None
        self.queue = None  # aio_pika.Queue, bound on first connect()
        self.queue_name = "chat_queue"

    async def connect(self):
        """Lazily open a robust connection and channel, and declare the queue."""
        if not self.connection:
            self.connection = await aio_pika.connect_robust(self.url)
            self.channel = await self.connection.channel()
            self.queue = await self.channel.declare_queue(self.queue_name)

    async def add(self, api_key: str) -> None:
        """Publish *api_key* onto the queue via the default exchange."""
        await self.connect()
        message = aio_pika.Message(body=api_key.encode())
        await self.channel.default_exchange.publish(
            message,
            routing_key=self.queue_name
        )

    async def get(self) -> Optional[ChatAPI]:
        """Pop the next key, re-enqueue it (round-robin), and return a ChatAPI."""
        await self.connect()
        # BUG FIX: aio_pika exposes single-message retrieval on the Queue
        # object, not on the channel; fail=False yields None instead of
        # raising QueueEmpty when there is nothing to consume.
        message = await self.queue.get(no_ack=True, fail=False)
        if message is None:
            return None
        api_key = message.body.decode()
        chat = ChatAPI(api_key=api_key)
        # Re-enqueue so keys rotate round-robin.
        await self.add(api_key)
        return chat

    async def length(self) -> int:
        """Return the broker's current message count for the queue."""
        await self.connect()
        # Re-declaring an existing queue is idempotent and refreshes message_count.
        queue = await self.channel.declare_queue(self.queue_name)
        return queue.declaration_result.message_count
\ No newline at end of file
diff --git a/managers/queue/redis_queue.py b/managers/queue/redis_queue.py
new file mode 100644
index 0000000000000000000000000000000000000000..4491f1661a7b530ea2e23e2b45abc8588c07656f
--- /dev/null
+++ b/managers/queue/redis_queue.py
@@ -0,0 +1,30 @@
+from typing import Optional
+from aioredis import Redis
+from api.chat.chat_api import ChatAPI
+from .base import ChatQueueBase
+
class RedisQueue(ChatQueueBase):
    """API-key queue stored in a Redis list, rotated round-robin by get()."""

    def __init__(self, redis: Redis):
        self.redis = redis
        self.queue_key = "chat_queue"

    async def add(self, api_key: str) -> None:
        """Append *api_key* unless it is already queued (LPOS dedup)."""
        if await self.redis.lpos(self.queue_key, api_key) is None:
            await self.redis.rpush(self.queue_key, api_key)

    async def get(self) -> Optional[ChatAPI]:
        """Pop the head key, re-append it to the tail, and return a ChatAPI."""
        raw = await self.redis.lpop(self.queue_key)
        if not raw:
            return None
        # Redis clients may return bytes or str depending on decode settings.
        key = raw.decode() if isinstance(raw, bytes) else raw
        chat = ChatAPI(api_key=key)
        await self.add(key)  # rotate to the back of the list
        return chat

    async def length(self) -> int:
        """Return the length of the backing Redis list."""
        return await self.redis.llen(self.queue_key)
\ No newline at end of file
diff --git a/pack.sh b/pack.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0cdba673fe27c7ce9f6fc6b3eb8a9cbbaacac4dc
--- /dev/null
+++ b/pack.sh
@@ -0,0 +1,23 @@
#!/bin/bash
# Build two tarballs of the project tree:
#   archive.tar.gz - everything except VCS/build artifacts
#   source.tar.gz  - same, additionally stripping web assets and *.xml
# Abort on the first failed command instead of silently producing a
# partial second archive.
set -euo pipefail

tar --exclude='.venv' \
    --exclude='pypy' \
    --exclude='node_modules' \
    --exclude='__pycache__' \
    --exclude='*.pyc' \
    --exclude='*.gz' \
    --exclude='.next' \
    --exclude='.git' \
    -czf archive.tar.gz .

tar --exclude='.venv' \
    --exclude='pypy' \
    --exclude='node_modules' \
    --exclude='__pycache__' \
    --exclude='*.pyc' \
    --exclude='*.gz' \
    --exclude='*.xml' \
    --exclude='web' \
    --exclude='.next' \
    --exclude='.git' \
    -czf source.tar.gz .
\ No newline at end of file
diff --git a/protos/__init__.py b/protos/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebbeee7bfd2ecf7e4487dcc9f75a8cea1eb3fc6c
--- /dev/null
+++ b/protos/__init__.py
@@ -0,0 +1,8 @@
from os.path import dirname, join
import sys
# protoc-generated *_pb2 modules import their siblings by bare module name
# (e.g. "import clientinfo_pb2"), so this package directory must be on
# sys.path for them to resolve when imported as a package.
if dirname(__file__) not in sys.path:
    sys.path.append(dirname(__file__))
from . import request_pb2
from . import clientinfo_pb2
from .request_pb2 import ChatRequestMessage, ChatMessage, ModelConfig, ToolConfig
from .clientinfo_pb2 import ClientInfo
\ No newline at end of file
diff --git a/protos/__pycache__/__init__.cpython-311.pyc b/protos/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..892354869e5b4549db83417f09d3b7b8f321869b
Binary files /dev/null and b/protos/__pycache__/__init__.cpython-311.pyc differ
diff --git a/protos/__pycache__/__init__.cpython-312.pyc b/protos/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c4ebf37317c3e517e467ac9a06c276df083e0c5
Binary files /dev/null and b/protos/__pycache__/__init__.cpython-312.pyc differ
diff --git a/protos/__pycache__/clientinfo_pb2.cpython-311.pyc b/protos/__pycache__/clientinfo_pb2.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff6879bd331d9b074c360d029dac2343325a6775
Binary files /dev/null and b/protos/__pycache__/clientinfo_pb2.cpython-311.pyc differ
diff --git a/protos/__pycache__/clientinfo_pb2.cpython-312.pyc b/protos/__pycache__/clientinfo_pb2.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e05fde79d54299e437fe307819cbe77864baca42
Binary files /dev/null and b/protos/__pycache__/clientinfo_pb2.cpython-312.pyc differ
diff --git a/protos/__pycache__/jwt_pb2.cpython-311.pyc b/protos/__pycache__/jwt_pb2.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..607ffdf3329069aa3ade2984ce0e0510182b2556
Binary files /dev/null and b/protos/__pycache__/jwt_pb2.cpython-311.pyc differ
diff --git a/protos/__pycache__/jwt_pb2.cpython-312.pyc b/protos/__pycache__/jwt_pb2.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8837a06c0fcaf877580b151246fce3b2ba4231ae
Binary files /dev/null and b/protos/__pycache__/jwt_pb2.cpython-312.pyc differ
diff --git a/protos/__pycache__/request_pb2.cpython-311.pyc b/protos/__pycache__/request_pb2.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ad6d2385b5048d8df57699b4885f2299b9fa63e0
Binary files /dev/null and b/protos/__pycache__/request_pb2.cpython-311.pyc differ
diff --git a/protos/__pycache__/request_pb2.cpython-312.pyc b/protos/__pycache__/request_pb2.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1702d4c432ddc8dd1ee53acb16456034afc9f32b
Binary files /dev/null and b/protos/__pycache__/request_pb2.cpython-312.pyc differ
diff --git a/protos/__pycache__/response_pb2.cpython-311.pyc b/protos/__pycache__/response_pb2.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5e5d30140bd644faf4bbbe9efdc7be92707b040
Binary files /dev/null and b/protos/__pycache__/response_pb2.cpython-311.pyc differ
diff --git a/protos/__pycache__/response_pb2.cpython-312.pyc b/protos/__pycache__/response_pb2.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d623f2d8b3e7c3dc2323b66ddaef133f42f27c2
Binary files /dev/null and b/protos/__pycache__/response_pb2.cpython-312.pyc differ
diff --git a/protos/__pycache__/telemetry_pb2.cpython-311.pyc b/protos/__pycache__/telemetry_pb2.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1563e0befab16cf5d12d0b72a69c7f7f8ef84e2a
Binary files /dev/null and b/protos/__pycache__/telemetry_pb2.cpython-311.pyc differ
diff --git a/protos/__pycache__/telemetry_pb2.cpython-312.pyc b/protos/__pycache__/telemetry_pb2.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a64dc274c94870f22d981454fa8079344a35459
Binary files /dev/null and b/protos/__pycache__/telemetry_pb2.cpython-312.pyc differ
diff --git a/protos/clientinfo.proto b/protos/clientinfo.proto
new file mode 100644
index 0000000000000000000000000000000000000000..a7bf7786b30f58ad645dd6c4c36bdca4f0d770ea
--- /dev/null
+++ b/protos/clientinfo.proto
@@ -0,0 +1,27 @@
+syntax = "proto3";
+
+package windsurf;
+
+// Client information
+message ClientInfo {
+ string ide_name = 1;
+ string extension_version = 2;
+ string api_key = 3;
+ string locale = 4;
+ string os = 5;
+ bool disable_telemetry = 6;
+ string ide_version = 7;
+ string hardware = 8;
+ int64 request_id = 9;
+ string session_id = 10;
+ string source_address = 11;
+ string extension_name = 12;
+ // string user_agent = 13;
+ // string url = 14;
+ // ? auth_source = 15;
+ // TS ls_timestamp = 16;
+ string extension_path =17;
+ string user_id = 20;
+ string user_jwt = 21;
+ string force_team_id = 22;
+ }
\ No newline at end of file
diff --git a/protos/clientinfo_pb2.py b/protos/clientinfo_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9558e2ef03fd7b2d0d32d81f14a32c1c517659a
--- /dev/null
+++ b/protos/clientinfo_pb2.py
@@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: clientinfo.proto
# Protobuf Python Version: 4.25.1
# Regenerate with protos/compile.sh after changing clientinfo.proto.
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()




DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x63lientinfo.proto\x12\x08windsurf\"\xd2\x02\n\nClientInfo\x12\x10\n\x08ide_name\x18\x01 \x01(\t\x12\x19\n\x11\x65xtension_version\x18\x02 \x01(\t\x12\x0f\n\x07\x61pi_key\x18\x03 \x01(\t\x12\x0e\n\x06locale\x18\x04 \x01(\t\x12\n\n\x02os\x18\x05 \x01(\t\x12\x19\n\x11\x64isable_telemetry\x18\x06 \x01(\x08\x12\x13\n\x0bide_version\x18\x07 \x01(\t\x12\x10\n\x08hardware\x18\x08 \x01(\t\x12\x12\n\nrequest_id\x18\t \x01(\x03\x12\x12\n\nsession_id\x18\n \x01(\t\x12\x16\n\x0esource_address\x18\x0b \x01(\t\x12\x16\n\x0e\x65xtension_name\x18\x0c \x01(\t\x12\x16\n\x0e\x65xtension_path\x18\x11 \x01(\t\x12\x0f\n\x07user_id\x18\x14 \x01(\t\x12\x10\n\x08user_jwt\x18\x15 \x01(\t\x12\x15\n\rforce_team_id\x18\x16 \x01(\tb\x06proto3')

_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'clientinfo_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS == False:
  DESCRIPTOR._options = None
  _globals['_CLIENTINFO']._serialized_start=31
  _globals['_CLIENTINFO']._serialized_end=369
# @@protoc_insertion_point(module_scope)
diff --git a/protos/compile.sh b/protos/compile.sh
new file mode 100644
index 0000000000000000000000000000000000000000..04fa3a066d397bb498e6c2c506fa1401bf5c1e82
--- /dev/null
+++ b/protos/compile.sh
@@ -0,0 +1 @@
#!/bin/bash
# Regenerate the *_pb2.py modules from every .proto file in this directory.
set -e
protoc --python_out=. *.proto
\ No newline at end of file
diff --git a/protos/jwt.proto b/protos/jwt.proto
new file mode 100644
index 0000000000000000000000000000000000000000..c98e2440a13b0d63ae31735c9b5f6e48620afb4a
--- /dev/null
+++ b/protos/jwt.proto
@@ -0,0 +1,21 @@
+syntax = "proto3";
+
+message jwt_request {
+ surfwind_jwt_request surfwind_jwt_request = 1;
+}
+
+message surfwind_jwt_request {
+ string app_name = 1; // "windsurf"
+ string version = 2; // "1.28.0"
+ string api_key = 3; // "208ce300-3829-42f4-9a00-5f88fe6c9c3c"
+ string language = 4; // "en"
+ string display_name = 7; // "Windsurf 1.94.0"
+ uint32 value = 9; // 905
+ string uuid_2 = 10; // "9e80c651-0fa0-4fe8-bd30-a4e6de909028"
+ string app_identifier = 12; // "windsurf"
+ string resource_path = 17; // "d:\Windsurf\resources\app\extensions\windsurf"
+}
+
+message jwt_response {
+ string jwt_token = 1;
+}
diff --git a/protos/jwt_pb2.py b/protos/jwt_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..f03b72159b28e6a15b2d2e6d40a5be3d7a17a7bc
--- /dev/null
+++ b/protos/jwt_pb2.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: jwt.proto
+# Protobuf Python Version: 4.25.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\tjwt.proto\"B\n\x0bjwt_request\x12\x33\n\x14surfwind_jwt_request\x18\x01 \x01(\x0b\x32\x15.surfwind_jwt_request\"\xc0\x01\n\x14surfwind_jwt_request\x12\x10\n\x08\x61pp_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x0f\n\x07\x61pi_key\x18\x03 \x01(\t\x12\x10\n\x08language\x18\x04 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x07 \x01(\t\x12\r\n\x05value\x18\t \x01(\r\x12\x0e\n\x06uuid_2\x18\n \x01(\t\x12\x16\n\x0e\x61pp_identifier\x18\x0c \x01(\t\x12\x15\n\rresource_path\x18\x11 \x01(\t\"!\n\x0cjwt_response\x12\x11\n\tjwt_token\x18\x01 \x01(\tb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'jwt_pb2', _globals)
+if _descriptor._USE_C_DESCRIPTORS == False:
+ DESCRIPTOR._options = None
+ _globals['_JWT_REQUEST']._serialized_start=13
+ _globals['_JWT_REQUEST']._serialized_end=79
+ _globals['_SURFWIND_JWT_REQUEST']._serialized_start=82
+ _globals['_SURFWIND_JWT_REQUEST']._serialized_end=274
+ _globals['_JWT_RESPONSE']._serialized_start=276
+ _globals['_JWT_RESPONSE']._serialized_end=309
+# @@protoc_insertion_point(module_scope)
diff --git a/protos/request.proto b/protos/request.proto
new file mode 100644
index 0000000000000000000000000000000000000000..c4a3d2095a52c7e6ea3bc4cde7f383c68fac7fa3
--- /dev/null
+++ b/protos/request.proto
@@ -0,0 +1,65 @@
+syntax = "proto3";
+
+package windsurf;
+
+import "clientinfo.proto";
+
+// Main message structure
+message ChatRequestMessage {
+ ClientInfo client_info = 1;
+ string system_prompt = 2;
+ repeated ChatMessage chat_messages = 3;
+ uint32 model_id = 6; // 170 for gpt-4o, 171 for claude
+ uint32 idk_id= 7; // 5
+ ModelConfig model_config = 8;
+ ToolConfig tool_config = 10;
+ ToolUse tool_use = 12; // "auto"
+ IDK13n idk13 = 13; // 1
+}
+
+message ToolUse {
+ string mode = 1;
+}
+
+message IDK13n {
+ uint32 idk13nn = 1;
+}
+
+// Chat message structure
+message ChatMessage {
+ uint32 role = 2; // 1 for user, 2 for assistant
+ string content = 3;
+ sint32 tokens = 4;
+ uint32 idk2 = 5; // 1 ?
+ msgIDK idk3 = 8; // 1 ?
+ repeated ImagePart image_parts = 10;
+}
+
+message msgIDK {
+ uint32 idk4 = 1; // 1 ?
+}
+
+message ImagePart {
+ string image_data = 1; // 1 string /9j/4AAQSkZJRg....
+ string image_mime_type = 2; // "image/png"
+}
+
+// Model configuration
+message ModelConfig {
+ int32 parallel_stream = 1;
+ int32 max_tokens = 2;
+// int32 idk9 = 3; // 200
+ double temperature = 5;
+// double idk2 = 6; // 1.0
+ int32 top_k = 7;
+ double top_P = 8;
+ repeated string special_tokens = 9;
+// double idk4 = 11; // 1.0
+}
+
+// Tool configuration
+message ToolConfig {
+ string tool_name = 1;
+ string description = 2;
+ string schema = 3;
+}
diff --git a/protos/request_pb2.py b/protos/request_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0cbab97d3606d14319c75070d7ba92ec879b9e8
--- /dev/null
+++ b/protos/request_pb2.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: request.proto
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+import clientinfo_pb2 as clientinfo__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\rrequest.proto\x12\x08windsurf\x1a\x10\x63lientinfo.proto\"\xc4\x02\n\x12\x43hatRequestMessage\x12)\n\x0b\x63lient_info\x18\x01 \x01(\x0b\x32\x14.windsurf.ClientInfo\x12\x15\n\rsystem_prompt\x18\x02 \x01(\t\x12,\n\rchat_messages\x18\x03 \x03(\x0b\x32\x15.windsurf.ChatMessage\x12\x10\n\x08model_id\x18\x06 \x01(\r\x12\x0e\n\x06idk_id\x18\x07 \x01(\r\x12+\n\x0cmodel_config\x18\x08 \x01(\x0b\x32\x15.windsurf.ModelConfig\x12)\n\x0btool_config\x18\n \x01(\x0b\x32\x14.windsurf.ToolConfig\x12#\n\x08tool_use\x18\x0c \x01(\x0b\x32\x11.windsurf.ToolUse\x12\x1f\n\x05idk13\x18\r \x01(\x0b\x32\x10.windsurf.IDK13n\"\x17\n\x07ToolUse\x12\x0c\n\x04mode\x18\x01 \x01(\t\"\x19\n\x06IDK13n\x12\x0f\n\x07idk13nn\x18\x01 \x01(\r\"\x94\x01\n\x0b\x43hatMessage\x12\x0c\n\x04role\x18\x02 \x01(\r\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\x0e\n\x06tokens\x18\x04 \x01(\x11\x12\x0c\n\x04idk2\x18\x05 \x01(\r\x12\x1e\n\x04idk3\x18\x08 \x01(\x0b\x32\x10.windsurf.msgIDK\x12(\n\x0bimage_parts\x18\n \x03(\x0b\x32\x13.windsurf.ImagePart\"\x16\n\x06msgIDK\x12\x0c\n\x04idk4\x18\x01 \x01(\r\"8\n\tImagePart\x12\x12\n\nimage_data\x18\x01 \x01(\t\x12\x17\n\x0fimage_mime_type\x18\x02 \x01(\t\"\x85\x01\n\x0bModelConfig\x12\x17\n\x0fparallel_stream\x18\x01 \x01(\x05\x12\x12\n\nmax_tokens\x18\x02 \x01(\x05\x12\x13\n\x0btemperature\x18\x05 \x01(\x01\x12\r\n\x05top_k\x18\x07 \x01(\x05\x12\r\n\x05top_P\x18\x08 \x01(\x01\x12\x16\n\x0especial_tokens\x18\t \x03(\t\"D\n\nToolConfig\x12\x11\n\ttool_name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0e\n\x06schema\x18\x03 \x01(\tb\x06proto3')
+
+
+
+_CHATREQUESTMESSAGE = DESCRIPTOR.message_types_by_name['ChatRequestMessage']
+_TOOLUSE = DESCRIPTOR.message_types_by_name['ToolUse']
+_IDK13N = DESCRIPTOR.message_types_by_name['IDK13n']
+_CHATMESSAGE = DESCRIPTOR.message_types_by_name['ChatMessage']
+_MSGIDK = DESCRIPTOR.message_types_by_name['msgIDK']
+_IMAGEPART = DESCRIPTOR.message_types_by_name['ImagePart']
+_MODELCONFIG = DESCRIPTOR.message_types_by_name['ModelConfig']
+_TOOLCONFIG = DESCRIPTOR.message_types_by_name['ToolConfig']
+ChatRequestMessage = _reflection.GeneratedProtocolMessageType('ChatRequestMessage', (_message.Message,), {
+ 'DESCRIPTOR' : _CHATREQUESTMESSAGE,
+ '__module__' : 'request_pb2'
+ # @@protoc_insertion_point(class_scope:windsurf.ChatRequestMessage)
+ })
+_sym_db.RegisterMessage(ChatRequestMessage)
+
+ToolUse = _reflection.GeneratedProtocolMessageType('ToolUse', (_message.Message,), {
+ 'DESCRIPTOR' : _TOOLUSE,
+ '__module__' : 'request_pb2'
+ # @@protoc_insertion_point(class_scope:windsurf.ToolUse)
+ })
+_sym_db.RegisterMessage(ToolUse)
+
+IDK13n = _reflection.GeneratedProtocolMessageType('IDK13n', (_message.Message,), {
+ 'DESCRIPTOR' : _IDK13N,
+ '__module__' : 'request_pb2'
+ # @@protoc_insertion_point(class_scope:windsurf.IDK13n)
+ })
+_sym_db.RegisterMessage(IDK13n)
+
+ChatMessage = _reflection.GeneratedProtocolMessageType('ChatMessage', (_message.Message,), {
+ 'DESCRIPTOR' : _CHATMESSAGE,
+ '__module__' : 'request_pb2'
+ # @@protoc_insertion_point(class_scope:windsurf.ChatMessage)
+ })
+_sym_db.RegisterMessage(ChatMessage)
+
+msgIDK = _reflection.GeneratedProtocolMessageType('msgIDK', (_message.Message,), {
+ 'DESCRIPTOR' : _MSGIDK,
+ '__module__' : 'request_pb2'
+ # @@protoc_insertion_point(class_scope:windsurf.msgIDK)
+ })
+_sym_db.RegisterMessage(msgIDK)
+
+ImagePart = _reflection.GeneratedProtocolMessageType('ImagePart', (_message.Message,), {
+ 'DESCRIPTOR' : _IMAGEPART,
+ '__module__' : 'request_pb2'
+ # @@protoc_insertion_point(class_scope:windsurf.ImagePart)
+ })
+_sym_db.RegisterMessage(ImagePart)
+
+ModelConfig = _reflection.GeneratedProtocolMessageType('ModelConfig', (_message.Message,), {
+ 'DESCRIPTOR' : _MODELCONFIG,
+ '__module__' : 'request_pb2'
+ # @@protoc_insertion_point(class_scope:windsurf.ModelConfig)
+ })
+_sym_db.RegisterMessage(ModelConfig)
+
+ToolConfig = _reflection.GeneratedProtocolMessageType('ToolConfig', (_message.Message,), {
+ 'DESCRIPTOR' : _TOOLCONFIG,
+ '__module__' : 'request_pb2'
+ # @@protoc_insertion_point(class_scope:windsurf.ToolConfig)
+ })
+_sym_db.RegisterMessage(ToolConfig)
+
+if _descriptor._USE_C_DESCRIPTORS == False:
+
+ DESCRIPTOR._options = None
+ _CHATREQUESTMESSAGE._serialized_start=46
+ _CHATREQUESTMESSAGE._serialized_end=370
+ _TOOLUSE._serialized_start=372
+ _TOOLUSE._serialized_end=395
+ _IDK13N._serialized_start=397
+ _IDK13N._serialized_end=422
+ _CHATMESSAGE._serialized_start=425
+ _CHATMESSAGE._serialized_end=573
+ _MSGIDK._serialized_start=575
+ _MSGIDK._serialized_end=597
+ _IMAGEPART._serialized_start=599
+ _IMAGEPART._serialized_end=655
+ _MODELCONFIG._serialized_start=658
+ _MODELCONFIG._serialized_end=791
+ _TOOLCONFIG._serialized_start=793
+ _TOOLCONFIG._serialized_end=861
+# @@protoc_insertion_point(module_scope)
diff --git a/protos/response.proto b/protos/response.proto
new file mode 100644
index 0000000000000000000000000000000000000000..fbd8f5c3eeb2ee41a60aff3156e6baeef05e2a3f
--- /dev/null
+++ b/protos/response.proto
@@ -0,0 +1,84 @@
+syntax = "proto3";
+
+package windsurf;
+
+/**
+ * ChatResponse represents a chat reply from the server: the session id,
+ * timing info, and the message text (field names partially reverse-engineered).
+ */
+message ChatResponse {
+ string session_id = 1;
+
+ Time time = 2;
+
+ string message = 3;
+
+ uint32 count = 4;
+
+ uint32 idk2 = 5;
+
+ IDK idk3 = 7;
+}
+
+message Time {
+ uint32 time1 = 1;
+ uint32 time2 = 2;
+}
+
+message IDK {
+ uint32 a = 1;
+ uint32 b = 2;
+ uint32 c = 3;
+}
+
+
+message CurrentUserResponse {
+ // Field 1: User Information
+ message User {
+ optional string id = 1; // e.g., "b34a6c86-8276-4cd2-ac11-bdcfa3f5554f"
+ optional string email = 3; // User's email address
+ // Nested message within User (Field 4)
+ message NestedInfo {
+ optional uint32 field1 = 1; // e.g., 1732966121
+ optional uint32 field2 = 2; // e.g., 199904000
+ }
+ optional NestedInfo nested_info = 4;
+
+ optional string code = 6; // e.g., "OvhqWJG9v3ShQFlyBZzItgPOrEh2"
+ optional string name = 9; // e.g., "extremely-poetic-chipmunk-21427"
+ optional uint32 field11 = 11; // e.g., 1
+ optional uint32 field19 = 19; // e.g., 1
+ optional string additional_info = 22; // e.g., "Additional Info"
+ }
+ optional User user = 1;
+
+ // Field 4: Placeholder for string or bytes (Empty content)
+ optional bytes field4 = 4;
+
+ // Field 5: Additional Information
+ message AdditionalInfo {
+ optional uint32 field1 = 1; // e.g., 1
+ optional uint32 field2 = 2; // e.g., 1
+ optional string info = 5; // e.g., "Additional Info"
+ }
+ optional AdditionalInfo additional_info = 5;
+
+ // Field 6: Status Information
+ message Status {
+ optional string status = 2; // e.g., "Free"
+ optional uint32 field7 = 7; // e.g., 4096
+ optional uint32 field8 = 8; // e.g., 100
+ optional uint32 field9 = 9; // e.g., 2
+ optional uint32 field10 = 10; // e.g., 5000
+ }
+ optional Status status = 6;
+
+ // Field 7: Role Information
+ message RoleInfo {
+ optional string id = 1; // e.g., "b34a6c86-8276-4cd2-ac11-bdcfa3f5554f"
+ optional string role = 4; // e.g., "User"
+ }
+ optional RoleInfo role_info = 7;
+}
+
+
diff --git a/protos/response_pb2.py b/protos/response_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..797ba96aab5dfd2e82fa134ce3ec2fcfce009569
--- /dev/null
+++ b/protos/response_pb2.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: response.proto
+# Protobuf Python Version: 4.25.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0eresponse.proto\x12\x08windsurf\"\x8b\x01\n\x0c\x43hatResponse\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x1c\n\x04time\x18\x02 \x01(\x0b\x32\x0e.windsurf.Time\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\r\n\x05\x63ount\x18\x04 \x01(\r\x12\x0c\n\x04idk2\x18\x05 \x01(\r\x12\x1b\n\x04idk3\x18\x07 \x01(\x0b\x32\r.windsurf.IDK\"$\n\x04Time\x12\r\n\x05time1\x18\x01 \x01(\r\x12\r\n\x05time2\x18\x02 \x01(\r\"&\n\x03IDK\x12\t\n\x01\x61\x18\x01 \x01(\r\x12\t\n\x01\x62\x18\x02 \x01(\r\x12\t\n\x01\x63\x18\x03 \x01(\r\"\xd8\x08\n\x13\x43urrentUserResponse\x12\x35\n\x04user\x18\x01 \x01(\x0b\x32\".windsurf.CurrentUserResponse.UserH\x00\x88\x01\x01\x12\x13\n\x06\x66ield4\x18\x04 \x01(\x0cH\x01\x88\x01\x01\x12J\n\x0f\x61\x64\x64itional_info\x18\x05 \x01(\x0b\x32,.windsurf.CurrentUserResponse.AdditionalInfoH\x02\x88\x01\x01\x12\x39\n\x06status\x18\x06 \x01(\x0b\x32$.windsurf.CurrentUserResponse.StatusH\x03\x88\x01\x01\x12>\n\trole_info\x18\x07 \x01(\x0b\x32&.windsurf.CurrentUserResponse.RoleInfoH\x04\x88\x01\x01\x1a\x91\x03\n\x04User\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05\x65mail\x18\x03 \x01(\tH\x01\x88\x01\x01\x12G\n\x0bnested_info\x18\x04 \x01(\x0b\x32-.windsurf.CurrentUserResponse.User.NestedInfoH\x02\x88\x01\x01\x12\x11\n\x04\x63ode\x18\x06 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04name\x18\t \x01(\tH\x04\x88\x01\x01\x12\x14\n\x07\x66ield11\x18\x0b \x01(\rH\x05\x88\x01\x01\x12\x14\n\x07\x66ield19\x18\x13 \x01(\rH\x06\x88\x01\x01\x12\x1c\n\x0f\x61\x64\x64itional_info\x18\x16 \x01(\tH\x07\x88\x01\x01\x1aL\n\nNestedInfo\x12\x13\n\x06\x66ield1\x18\x01 \x01(\rH\x00\x88\x01\x01\x12\x13\n\x06\x66ield2\x18\x02 \x01(\rH\x01\x88\x01\x01\x42\t\n\x07_field1B\t\n\x07_field2B\x05\n\x03_idB\x08\n\x06_emailB\x0e\n\x0c_nested_infoB\x07\n\x05_codeB\x07\n\x05_nameB\n\n\x08_field11B\n\n\x08_field19B\x12\n\x10_additional_info\x1al\n\x0e\x41\x64\x64itionalInfo\x12\x13\n\x06\x66ield1\x18\x01 
\x01(\rH\x00\x88\x01\x01\x12\x13\n\x06\x66ield2\x18\x02 \x01(\rH\x01\x88\x01\x01\x12\x11\n\x04info\x18\x05 \x01(\tH\x02\x88\x01\x01\x42\t\n\x07_field1B\t\n\x07_field2B\x07\n\x05_info\x1a\xaa\x01\n\x06Status\x12\x13\n\x06status\x18\x02 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06\x66ield7\x18\x07 \x01(\rH\x01\x88\x01\x01\x12\x13\n\x06\x66ield8\x18\x08 \x01(\rH\x02\x88\x01\x01\x12\x13\n\x06\x66ield9\x18\t \x01(\rH\x03\x88\x01\x01\x12\x14\n\x07\x66ield10\x18\n \x01(\rH\x04\x88\x01\x01\x42\t\n\x07_statusB\t\n\x07_field7B\t\n\x07_field8B\t\n\x07_field9B\n\n\x08_field10\x1a>\n\x08RoleInfo\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x11\n\x04role\x18\x04 \x01(\tH\x01\x88\x01\x01\x42\x05\n\x03_idB\x07\n\x05_roleB\x07\n\x05_userB\t\n\x07_field4B\x12\n\x10_additional_infoB\t\n\x07_statusB\x0c\n\n_role_infob\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'response_pb2', _globals)
+if _descriptor._USE_C_DESCRIPTORS == False:
+ DESCRIPTOR._options = None
+ _globals['_CHATRESPONSE']._serialized_start=29
+ _globals['_CHATRESPONSE']._serialized_end=168
+ _globals['_TIME']._serialized_start=170
+ _globals['_TIME']._serialized_end=206
+ _globals['_IDK']._serialized_start=208
+ _globals['_IDK']._serialized_end=246
+ _globals['_CURRENTUSERRESPONSE']._serialized_start=249
+ _globals['_CURRENTUSERRESPONSE']._serialized_end=1361
+ _globals['_CURRENTUSERRESPONSE_USER']._serialized_start=548
+ _globals['_CURRENTUSERRESPONSE_USER']._serialized_end=949
+ _globals['_CURRENTUSERRESPONSE_USER_NESTEDINFO']._serialized_start=778
+ _globals['_CURRENTUSERRESPONSE_USER_NESTEDINFO']._serialized_end=854
+ _globals['_CURRENTUSERRESPONSE_ADDITIONALINFO']._serialized_start=951
+ _globals['_CURRENTUSERRESPONSE_ADDITIONALINFO']._serialized_end=1059
+ _globals['_CURRENTUSERRESPONSE_STATUS']._serialized_start=1062
+ _globals['_CURRENTUSERRESPONSE_STATUS']._serialized_end=1232
+ _globals['_CURRENTUSERRESPONSE_ROLEINFO']._serialized_start=1234
+ _globals['_CURRENTUSERRESPONSE_ROLEINFO']._serialized_end=1296
+# @@protoc_insertion_point(module_scope)
diff --git a/protos/telemetry.proto b/protos/telemetry.proto
new file mode 100644
index 0000000000000000000000000000000000000000..2d3be189c9afa71e3ce89559b6b110c925a4a32e
--- /dev/null
+++ b/protos/telemetry.proto
@@ -0,0 +1,10 @@
+syntax = "proto3";
+
+package windsurf;
+
+import "clientinfo.proto";
+
+message TelemetryData {
+ ClientInfo system_info = 1;
+ string runtime_metrics = 5;
+}
diff --git a/protos/telemetry_pb2.py b/protos/telemetry_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..427d5365694af7ffa5cf0f40be93de6962e773b2
--- /dev/null
+++ b/protos/telemetry_pb2.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: telemetry.proto
+# Protobuf Python Version: 4.25.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+import clientinfo_pb2 as clientinfo__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0ftelemetry.proto\x12\x08windsurf\x1a\x10\x63lientinfo.proto\"S\n\rTelemetryData\x12)\n\x0bsystem_info\x18\x01 \x01(\x0b\x32\x14.windsurf.ClientInfo\x12\x17\n\x0fruntime_metrics\x18\x05 \x01(\tb\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'telemetry_pb2', _globals)
+if _descriptor._USE_C_DESCRIPTORS == False:
+ DESCRIPTOR._options = None
+ _globals['_TELEMETRYDATA']._serialized_start=47
+ _globals['_TELEMETRYDATA']._serialized_end=130
+# @@protoc_insertion_point(module_scope)
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b664cbb3d4886dbaad75d39e8e229aebb9b40bdb
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,13 @@
+requests>=2.31.0
+protobuf>=4.25.1
+uvicorn
+gunicorn
+fastapi
+httpx[http2]
+pycryptodome
+cryptography
+Faker
+aioredis
+aio_pika
+rich
+pyjwt
\ No newline at end of file
diff --git a/service/__init__.py b/service/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..783c0bc3460a2de10f4de75fa29f3ac3cbfccc33
--- /dev/null
+++ b/service/__init__.py
@@ -0,0 +1,3 @@
+from .server import *
+
+__all__ = ['server']
diff --git a/service/__pycache__/__init__.cpython-311.pyc b/service/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5acaefabf72434cdeb82a41b17b9bf0b3a42fa71
Binary files /dev/null and b/service/__pycache__/__init__.cpython-311.pyc differ
diff --git a/service/__pycache__/__init__.cpython-312.pyc b/service/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..896687b222ce1fa14473b9d80a6285cbbe3aa9ba
Binary files /dev/null and b/service/__pycache__/__init__.cpython-312.pyc differ
diff --git a/service/__pycache__/server.cpython-311.pyc b/service/__pycache__/server.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..feb75e6e0dc492444fd3ce9bd1d55ef7b571ffb2
Binary files /dev/null and b/service/__pycache__/server.cpython-311.pyc differ
diff --git a/service/__pycache__/server.cpython-312.pyc b/service/__pycache__/server.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53e72a487de21f882baa728cd9eaf2e0c066a75c
Binary files /dev/null and b/service/__pycache__/server.cpython-312.pyc differ
diff --git a/service/handlers/__pycache__/response.cpython-311.pyc b/service/handlers/__pycache__/response.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2947688564d3aece2bebd0e9e90bd33e3d4b9f47
Binary files /dev/null and b/service/handlers/__pycache__/response.cpython-311.pyc differ
diff --git a/service/handlers/__pycache__/response.cpython-312.pyc b/service/handlers/__pycache__/response.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd08717fb4bed5dd2a242f2174c0e29e0b6ffbf9
Binary files /dev/null and b/service/handlers/__pycache__/response.cpython-312.pyc differ
diff --git a/service/handlers/response.py b/service/handlers/response.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce5466ed1953c23f2eaeec713475391ac4ee03a0
--- /dev/null
+++ b/service/handlers/response.py
@@ -0,0 +1,90 @@
+# service/handlers/response.py
+
+from fastapi.responses import JSONResponse, StreamingResponse
+from pydantic import BaseModel
+import time
+import json
+from typing import List, Optional
+
+class ChatCompletionRequest(BaseModel):
+ model: str
+ messages: List[dict]
+ system_prompt: Optional[str] = None
+ temperature: Optional[float] = 0.7
+ max_tokens: Optional[int] = 4096
+ top_p: Optional[float] = 1.0
+ top_k: Optional[int] = 50
+ stream: Optional[bool] = False
+
+class ChatCompletionResponse(BaseModel):
+ id: str
+ object: str = "chat.completion"
+ created: int
+ model: str
+ choices: List[dict]
+ usage: dict
+
+class ResponseHandler:
+ @staticmethod
+ def create_error_response(status_code: int, detail: str) -> JSONResponse:
+ return JSONResponse(
+ status_code=status_code,
+ content={"detail": detail},
+ headers={"Access-Control-Allow-Origin": "*"}
+ )
+
+ @staticmethod
+ async def generate_stream(chunk_id: str, chat, messages, config, model: str) -> StreamingResponse:
+ async def stream_generator():
+ async for chunk, token_count in chat.send_message(
+ messages=messages,
+ config=config,
+ stream=True,
+ ):
+ if chunk:
+ chunk_data = {
+ "id": chunk_id,
+ "object": "chat.completion.chunk",
+ "created": int(time.time()),
+ "model": model,
+ "choices": [
+ {
+ "index": 0,
+ "delta": {"content": chunk},
+ "finish_reason": None,
+ }
+ ],
+ }
+ yield f"data: {json.dumps(chunk_data)}\n\n"
+
+ data = json.dumps(
+ {
+ "id": chunk_id,
+ "object": "chat.completion.chunk",
+ "created": int(time.time()),
+ "model": model,
+ "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
+ }
+ )
+ yield f"data: {data}\n\n"
+ yield "data: [DONE]\n\n"
+
+ return StreamingResponse(stream_generator(), media_type="text/event-stream")
+
+ @staticmethod
+ def build_chat_response(chunk_id: str, model: str, response_text: str, prompt_tokens: int, tokens: int) -> ChatCompletionResponse:
+ assistant_message = {"role": "assistant", "content": response_text}
+
+ return ChatCompletionResponse(
+ id=chunk_id,
+ created=int(time.time()),
+ model=model,
+ choices=[
+ {"index": 0, "message": assistant_message, "finish_reason": "stop"}
+ ],
+ usage={
+ "prompt_tokens": prompt_tokens,
+ "completion_tokens": tokens,
+ "total_tokens": prompt_tokens + tokens,
+ },
+ )
\ No newline at end of file
diff --git a/service/middleware/__pycache__/rate_limiter.cpython-311.pyc b/service/middleware/__pycache__/rate_limiter.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..339d2b241d90eaa4e6bf36dfa29320482d05ab9a
Binary files /dev/null and b/service/middleware/__pycache__/rate_limiter.cpython-311.pyc differ
diff --git a/service/middleware/__pycache__/rate_limiter.cpython-312.pyc b/service/middleware/__pycache__/rate_limiter.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ed5bd8087435f2abbf59cdae95082edc9941870
Binary files /dev/null and b/service/middleware/__pycache__/rate_limiter.cpython-312.pyc differ
diff --git a/service/middleware/rate_limiter.py b/service/middleware/rate_limiter.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebfa57ee1dfa82d6a6629286b554249c8fdef514
--- /dev/null
+++ b/service/middleware/rate_limiter.py
@@ -0,0 +1,58 @@
+from typing import Tuple
+from fastapi import Request
+from collections import defaultdict
+import time
+from config.constants import AUTH_KEYS
+
+class RateLimiter:
+ def __init__(self):
+ self.request_counts = defaultdict(lambda: {"three_min": 0, "hourly": 0})
+ self.last_reset = defaultdict(lambda: {"three_min": 0, "hourly": 0})
+
+ async def authenticate_request(self, request: Request) -> Tuple[bool, str, int]:
+ """
+ Authenticate and rate limit requests using in-memory storage
+ Returns: (is_allowed, error_message, status_code)
+ """
+ # Check Authorization header
+ auth_header = request.headers.get("Authorization")
+ if not auth_header:
+ return False, "Missing Authorization header", 401
+
+ # Extract the token from "Bearer "
+ try:
+ auth_type, auth_token = auth_header.split()
+ if auth_type.lower() != "bearer":
+ return False, "Invalid authorization type", 401
+ except ValueError:
+ return False, "Invalid Authorization header format", 401
+
+ # Validate the token
+ if auth_token not in AUTH_KEYS:
+ return False, "Invalid API key", 401
+
+ # Rate limiting logic
+ client_ip = request.client.host
+ current_time = time.time()
+
+ # Reset counters if needed
+ if current_time - self.last_reset[client_ip]["three_min"] > 180: # 3 minutes
+ self.request_counts[client_ip]["three_min"] = 0
+ self.last_reset[client_ip]["three_min"] = current_time
+
+ if current_time - self.last_reset[client_ip]["hourly"] > 3600: # 1 hour
+ self.request_counts[client_ip]["hourly"] = 0
+ self.last_reset[client_ip]["hourly"] = current_time
+
+ # Increment counters
+ self.request_counts[client_ip]["three_min"] += 1
+ self.request_counts[client_ip]["hourly"] += 1
+
+ # Check limits
+ if self.request_counts[client_ip]["three_min"] > 45: # THREE_MIN_LIMIT
+ return False, "Rate limit exceeded", 429
+
+ if self.request_counts[client_ip]["hourly"] > 150: # HOURLY_LIMIT
+ return False, "Too many requests. Please try again later", 429
+
+ return True, "", 200
diff --git a/service/server.py b/service/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c154abff09fcb13e2f6d06770c02b2dc6583e4f
--- /dev/null
+++ b/service/server.py
@@ -0,0 +1,201 @@
+import uuid
+from contextlib import asynccontextmanager
+from typing import List, Optional
+
+import urllib3
+from fastapi import FastAPI, HTTPException, Request
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.middleware.trustedhost import TrustedHostMiddleware
+from fastapi.responses import JSONResponse, StreamingResponse
+from pydantic import BaseModel
+from rich.console import Console
+from rich.progress import Progress, SpinnerColumn, TextColumn
+
+from service.handlers.response import ResponseHandler, ChatCompletionRequest
+from service.middleware.rate_limiter import RateLimiter
+from config.constants import (
+ MODEL_MAPPING,
+ ALLOWED_HOSTS,
+ ENCRYPTION_KEY,
+)
+from managers.chat_manager import ChatManager
+from api.chat.chat_api import ChatAPI, ChatConfig
+from config.api_keys import APIKeyManager
+from config.models import ModelID
+from utils.encrypt import encrypt
+from utils.http import HTTPClient
+from service.utils.validation import validate_message_format
+
+import traceback
+
+urllib3.disable_warnings()
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ # Initialize global variables
+ global http_client, console, chat_manager, rate_limiter
+ console = Console()
+ http_client = HTTPClient()
+ chat_manager = ChatManager(queue_type="deque") # Use deque instead of redis
+ rate_limiter = RateLimiter() # Initialize rate limiter
+
+ # Initialize chat queue with API keys from config
+ key_manager = APIKeyManager()
+ api_keys = key_manager.list_keys()
+ if not api_keys:
+ console.print("[bold red]Warning: No API keys found in configuration[/]")
+ else:
+ for api_key in api_keys:
+ await chat_manager.add_chat(api_key)
+ console.print(f"[bold green]Initialized chat queue with {len(api_keys)} API keys[/]")
+
+ yield
+ # No cleanup needed for memory queue
+
+
+app = FastAPI(lifespan=lifespan, docs_url=None, redoc_url=None)
+app.add_middleware(
+ CORSMiddleware,
+ allow_credentials=True,
+ allow_origins=["*"],
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+app.add_middleware(TrustedHostMiddleware, allowed_hosts=ALLOWED_HOSTS)
+
+class ChatCompletionRequest(BaseModel):
+ model: str
+ messages: List[dict]
+ system_prompt: Optional[str] = None
+ temperature: Optional[float] = 0.7
+ max_tokens: Optional[int] = 4096
+ top_p: Optional[float] = 1.0
+ top_k: Optional[int] = 50
+ stream: Optional[bool] = False
+
+
+class ChatCompletionResponse(BaseModel):
+ id: str
+ object: str = "chat.completion"
+ created: int
+ model: str
+ choices: List[dict]
+ usage: dict
+
+
+def map_model_name_to_id(model_name: str) -> ModelID:
+ """Map standard model names to internal ModelID enum"""
+
+ if model_name not in MODEL_MAPPING:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Unsupported model: {model_name}. Available models: {', '.join(MODEL_MAPPING.keys())}",
+ )
+
+ return MODEL_MAPPING[model_name]
+
+
+@app.get("/")
+async def root():
+ return ResponseHandler.create_error_response(403, "forbidden")
+
+
+@app.middleware("http")
+async def check_request(request: Request, call_next):
+ is_allowed, error_msg, status_code = await rate_limiter.authenticate_request(
+ request
+ )
+ if not is_allowed:
+ return ResponseHandler.create_error_response(status_code, error_msg)
+
+ return await call_next(request)
+
+
+@app.post("/v1/chat/completions")
+async def create_chat_completion(request: ChatCompletionRequest, req: Request):
+ model_id = map_model_name_to_id(request.model)
+ messages = request.messages
+ user_name = getattr(req.state, "user_name", None)
+
+ validate_message_format(messages)
+
+ try:
+ config = ChatConfig(
+ model_id=model_id,
+ temperature=request.temperature,
+ max_tokens=request.max_tokens,
+ top_p=request.top_p,
+ top_k=request.top_k,
+ )
+
+ # Get chat instance from queue
+ chat = await chat_manager.get_chat()
+ if chat is None:
+ raise HTTPException(
+ status_code=503,
+ detail="No available chat instances. Please try again later."
+ )
+
+ if request.stream:
+ return await ResponseHandler.generate_stream(
+ f"chatcmpl-{uuid.uuid4()}", chat, messages, config, request.model
+ )
+ else:
+ response_text = ""
+ tokens = 0
+ async for chunk, token_count in chat.send_message(
+ messages=messages,
+ config=config,
+ ):
+ if chunk:
+ response_text += chunk
+ tokens += token_count
+
+ return ResponseHandler.build_chat_response(
+ chunk_id=f"chatcmpl-{uuid.uuid4()}",
+ model=request.model,
+ response_text=response_text,
+ prompt_tokens=len(str(messages)),
+ tokens=tokens,
+ )
+
+ except Exception as e:
+ print(e)
+ traceback.print_exc()
+ encrypted_message = encrypt(str(e), ENCRYPTION_KEY)
+ error_response = {
+ "error": {
+ "encrypted_message": encrypted_message,
+ "type": "internal_server_error",
+ "code": 500,
+ }
+ }
+ return JSONResponse(status_code=500, content=error_response)
+
+MODEL_METADATA = {
+ model_name: {
+ "id": model_name,
+ "object": "model",
+ "created": 220899661,
+ "owned_by": "Delamain"
+ }
+ for model_name in MODEL_MAPPING.keys()
+}
+
+@app.get("/v1/models")
+async def list_models():
+ models = list(MODEL_METADATA.values())
+ return {
+ "object": "list",
+ "data": models
+ }
+
+@app.get("/v1/models/{model_id}")
+async def get_model(model_id: str):
+ model = MODEL_METADATA.get(model_id)
+ if not model:
+ raise HTTPException(
+ status_code=404,
+ detail=f"Model not found. Available models: {', '.join(MODEL_METADATA.keys())}"
+ )
+ return model
\ No newline at end of file
diff --git a/service/utils/__pycache__/validation.cpython-311.pyc b/service/utils/__pycache__/validation.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cff1d38e740e68b12c42844733233545c6ac9834
Binary files /dev/null and b/service/utils/__pycache__/validation.cpython-311.pyc differ
diff --git a/service/utils/__pycache__/validation.cpython-312.pyc b/service/utils/__pycache__/validation.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..65355639377bc7d2745898fd769f26773055579f
Binary files /dev/null and b/service/utils/__pycache__/validation.cpython-312.pyc differ
diff --git a/service/utils/validation.py b/service/utils/validation.py
new file mode 100644
index 0000000000000000000000000000000000000000..48e25f26b2346fdf634baa94e8ef7ed6e3ee7f35
--- /dev/null
+++ b/service/utils/validation.py
@@ -0,0 +1,32 @@
+import json
+from fastapi import HTTPException
+
+
def validate_message_format(messages):
    """Validate an OpenAI-style chat message list.

    Checks that the payload is JSON-serializable and that every message is
    a dict with a known ``role`` and a non-null ``content`` that is a
    string or a list.

    Args:
        messages: Sequence of chat message dicts to validate.

    Raises:
        HTTPException: 400 when the payload is not JSON-serializable, a
            message is not a dict or lacks required fields, or role/content
            has an invalid value.
    """
    try:
        json.dumps(messages)
    except TypeError as exc:
        raise HTTPException(
            status_code=400,
            detail="Invalid message format - messages must be valid JSON",
        ) from exc

    for msg in messages:
        # Guard against non-dict items: without the isinstance check,
        # `"role" not in msg` raises TypeError (a 500) instead of a clean 400.
        if not isinstance(msg, dict) or "role" not in msg or "content" not in msg:
            raise HTTPException(
                status_code=400,
                detail="Messages must contain 'role' and 'content' fields",
            )
        # isinstance(..., (str, list)) already rejects None, so no separate
        # None check is needed; a non-str role simply fails the membership test.
        if msg["role"] not in ("user", "assistant", "system") or not isinstance(
            msg["content"], (str, list)
        ):
            raise HTTPException(
                status_code=400,
                detail="Invalid role or content - role must be 'user', 'assistant', or 'system', and content must be a string or list",
            )
diff --git a/utils/__pycache__/compression.cpython-311.pyc b/utils/__pycache__/compression.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1a3b36d8e6387697a17f3256669a7f69a3b149f
Binary files /dev/null and b/utils/__pycache__/compression.cpython-311.pyc differ
diff --git a/utils/__pycache__/compression.cpython-312.pyc b/utils/__pycache__/compression.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b33e1eb6d402b34d0944451ccd5f126a477fa6c
Binary files /dev/null and b/utils/__pycache__/compression.cpython-312.pyc differ
diff --git a/utils/__pycache__/encrypt.cpython-311.pyc b/utils/__pycache__/encrypt.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b9e4a084c0055760807041280099d9b6107e5b1
Binary files /dev/null and b/utils/__pycache__/encrypt.cpython-311.pyc differ
diff --git a/utils/__pycache__/encrypt.cpython-312.pyc b/utils/__pycache__/encrypt.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..295d53b206350c737b9cef608fec9b16a9d2784b
Binary files /dev/null and b/utils/__pycache__/encrypt.cpython-312.pyc differ
diff --git a/utils/__pycache__/http.cpython-311.pyc b/utils/__pycache__/http.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..280252714e3d41f97901a1730031efa58d39520f
Binary files /dev/null and b/utils/__pycache__/http.cpython-311.pyc differ
diff --git a/utils/__pycache__/http.cpython-312.pyc b/utils/__pycache__/http.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36dee06851e4030a6aa8a10428fdebaa23ca5dcd
Binary files /dev/null and b/utils/__pycache__/http.cpython-312.pyc differ
diff --git a/utils/compression.py b/utils/compression.py
new file mode 100644
index 0000000000000000000000000000000000000000..991a457741a96dfe9c79b1e9d85efedf613bd51a
--- /dev/null
+++ b/utils/compression.py
@@ -0,0 +1,41 @@
+import gzip
+from typing import Union, List, Generator
+from io import BytesIO
+
+
def compress_data(data: bytes, compresslevel: int = 9) -> bytes:
    """Gzip-compress *data* and prepend the wire-format header.

    The header is one status byte (0x01) followed by the compressed
    payload length as a 4-byte big-endian integer.

    Args:
        data: Raw bytes to compress.
        compresslevel: gzip level 0-9; default 9 matches the default of
            ``gzip.compress``, so existing callers see identical output.

    Returns:
        header + gzip-compressed payload.
    """
    compressed = gzip.compress(data, compresslevel=compresslevel)
    header = bytes([0x01]) + len(compressed).to_bytes(4, byteorder="big")
    return header + compressed
+
+
def decompress_chunks(data: bytes) -> Generator[tuple[int, bytes], None, None]:
    """Lazily decompress a stream of framed gzip chunks.

    Each frame is: 1 status byte, a 4-byte big-endian compressed length,
    then the gzip payload. Yields ``(status_byte, decompressed_bytes)``
    per frame — indexing bytes gives an int, hence ``tuple[int, bytes]``.
    Frames that fail to decompress are logged and skipped (best-effort).

    Args:
        data: Concatenated frames as produced by ``compress_data``.

    Yields:
        (status byte, decompressed payload) for each well-formed frame.
    """
    index = 0
    status_code_len = 1
    magic_gzip = b"\x1F\x8B"

    while index < len(data):
        chunk_len_pos = index + status_code_len
        gzip_pos = chunk_len_pos + 4

        # Not enough bytes left to hold the full 4-byte length field.
        if chunk_len_pos + 3 >= len(data):
            break

        chunk_length = int.from_bytes(data[chunk_len_pos:gzip_pos], byteorder="big")

        # Resync: payload does not start with the gzip magic number,
        # advance past it and keep scanning.
        if data[gzip_pos : gzip_pos + 2] != magic_gzip:
            index = gzip_pos + 2
            continue

        chunk_end = gzip_pos + chunk_length
        chunk_data = data[gzip_pos:chunk_end]

        try:
            with gzip.GzipFile(fileobj=BytesIO(chunk_data)) as gz:
                # data[index] is the frame's status byte (an int).
                yield (data[index], gz.read())
        except Exception as e:
            # Corrupt frame: skip it rather than abort the whole stream.
            print(f"Failed to decompress chunk: {e}")

        index = chunk_end
diff --git a/utils/encrypt.py b/utils/encrypt.py
new file mode 100644
index 0000000000000000000000000000000000000000..0906897cf6eedd1ff47cbb3e8f736b65e890d985
--- /dev/null
+++ b/utils/encrypt.py
@@ -0,0 +1,44 @@
+from Crypto.Cipher import AES
+from Crypto.Util.Padding import pad, unpad
+import base64
+
+
def encrypt(message: str, key: str) -> str:
    """Encrypt *message* with AES-128-CBC and return base64(iv + ciphertext).

    The key string is truncated/NUL-padded to exactly 16 bytes (128 bits).
    A random IV is generated by the cipher and prepended to the ciphertext
    so decrypt() can recover it.
    """
    key_bytes = key.encode('utf-8')[:16].ljust(16, b'\0')
    cipher = AES.new(key_bytes, AES.MODE_CBC)
    ct_bytes = cipher.encrypt(pad(message.encode('utf-8'), AES.block_size))
    # b64encode, not b16encode: the surrounding comments and the sample
    # ciphertext in __main__ are base64 ('/', '+', '=' padding), which
    # b16decode would reject with binascii.Error.
    return base64.b64encode(cipher.iv + ct_bytes).decode('utf-8')

def decrypt(encrypted: str, key: str) -> str:
    """Decrypt a base64(iv + ciphertext) string produced by encrypt().

    Raises:
        ValueError: if the padding is invalid (wrong key or corrupt data).
    """
    # Same 16-byte key derivation as encrypt().
    key_bytes = key.encode('utf-8')[:16].ljust(16, b'\0')

    # Decode the base64 message (the sample below is base64, not hex).
    enc_bytes = base64.b64decode(encrypted)

    # First 16 bytes are the IV, the rest is the ciphertext.
    iv = enc_bytes[:16]
    ct_bytes = enc_bytes[16:]

    cipher = AES.new(key_bytes, AES.MODE_CBC, iv)
    pt_bytes = unpad(cipher.decrypt(ct_bytes), AES.block_size)
    return pt_bytes.decode('utf-8')

if __name__ == "__main__":
    decrypted = decrypt("8HF8LyqjywlLR6DglBkvMxG2qdcAGiC2WHga8P982exv6fp/EXmrcwp6vYiZ1lVS/6nv+xJ72Nd6xV9cya6w3yjBr70U0ziiOtsyKFWAzLNa/swvZn9Ses2ZIbq/nZfkRv+Va19GqwPxV6mDpbuZp0Tl4VQtxJth+RzVX74i71G4mpp5luW8GTZSlnU+KPI8m6BbNKWXYou6ttS41IJubA==", "1923a821")
    print(decrypted)
diff --git a/utils/http.py b/utils/http.py
new file mode 100644
index 0000000000000000000000000000000000000000..9303eff37f9443294dbf7b2cfe8750d31b6e68ab
--- /dev/null
+++ b/utils/http.py
@@ -0,0 +1,80 @@
+import httpx
+from typing import Dict, Any, AsyncGenerator
+from config.constants import DEFAULT_HEADERS
+
+
class HTTPClient:
    """Async HTTP helper around a shared httpx.AsyncClient.

    Supports plain and streaming POSTs with optional gzip-compressed
    request bodies framed by utils.compression.compress_data.
    """

    def __init__(self):
        # NOTE(review): verify=False disables TLS certificate validation —
        # confirm the upstream endpoint genuinely requires this.
        self.client = httpx.AsyncClient(verify=False, http2=True)

    @staticmethod
    def _prepare(data, headers, compress, default_headers, kwargs):
        """Build the (body, headers) pair shared by post() and stream_post().

        Also strips from *kwargs* any parameter that is passed explicitly
        to httpx below, so it cannot be supplied twice.
        """
        if default_headers:
            request_headers = {**DEFAULT_HEADERS, **(headers or {})}
        else:
            request_headers = headers or {}

        if compress:
            from .compression import compress_data

            data = compress_data(data)
            request_headers["Content-Encoding"] = "gzip"
            request_headers["Connect-Content-Encoding"] = "gzip"
            request_headers["Content-Length"] = str(len(data))

        # Remove explicitly specified params from kwargs
        for param in ['url', 'data', 'stream', 'headers', 'verify', 'timeout']:
            kwargs.pop(param, None)

        return data, request_headers

    async def post(
        self, url: str, data: Any = None, headers: Dict = None, compress: bool = False, default_headers: bool = True, **kwargs
    ) -> httpx.Response:
        """
        Send POST request with optional compression.

        Returns the full httpx.Response once the body has arrived.
        """
        data, request_headers = self._prepare(data, headers, compress, default_headers, kwargs)
        response = await self.client.post(
            url=url,
            content=data,  # httpx takes a raw body via `content`, not `data`
            headers=request_headers,
            timeout=60,
            **kwargs
        )
        return response

    async def stream_post(
        self, url: str, data: Any = None, headers: Dict = None, compress: bool = False, default_headers: bool = True, **kwargs
    ) -> AsyncGenerator[bytes, None]:
        """
        Send POST request with streaming response.

        Yields chunks of response data as they arrive.
        """
        data, request_headers = self._prepare(data, headers, compress, default_headers, kwargs)
        async with self.client.stream(
            method="POST",
            url=url,
            content=data,
            headers=request_headers,
            timeout=60,
            **kwargs
        ) as response:
            async for chunk in response.aiter_bytes():
                yield chunk

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Release the underlying connection pool on context exit.
        await self.client.aclose()
diff --git a/uvicorn_boot.py b/uvicorn_boot.py
new file mode 100644
index 0000000000000000000000000000000000000000..c28d204a30e276db21f92a1d501556af9c6f7936
--- /dev/null
+++ b/uvicorn_boot.py
@@ -0,0 +1,27 @@
+
+import sys
+import signal
+import asyncio
+import uvicorn
+
+from init_pool import init_chat_queue
+
if __name__ == "__main__":

    def _handle_sigint(sig, frame):
        # Print a shutdown notice and exit cleanly.
        print("\n正在关闭服务器...")
        sys.exit(0)

    # Register the SIGINT handler (Ctrl+C).
    signal.signal(signal.SIGINT, _handle_sigint)

    # Warm the chat pool before the server starts accepting requests.
    asyncio.run(init_chat_queue())

    uvicorn.run(
        "service.server:app",
        host="0.0.0.0",
        port=8000,
        reload=True,
        timeout_keep_alive=5,
        access_log=False,
    )
\ No newline at end of file