Upload 28 files
Browse files- .dockerignore +7 -0
- .env.example +14 -0
- .github/workflows/build_docker.yml +50 -0
- .gitignore +6 -0
- Dockerfile +13 -0
- api/files.py +144 -0
- api/models.py +28 -0
- api/tokens.py +86 -0
- app.py +9 -0
- chat2api.py +133 -0
- chatgpt/ChatService.py +490 -0
- chatgpt/authorization.py +55 -0
- chatgpt/chatFormat.py +436 -0
- chatgpt/chatLimit.py +35 -0
- chatgpt/globals.py +54 -0
- chatgpt/proofofWork.py +512 -0
- chatgpt/refreshToken.py +58 -0
- chatgpt/reverseProxy.py +167 -0
- chatgpt/turnstile.py +268 -0
- chatgpt/wssClient.py +36 -0
- docs/capsolver.png +0 -0
- docs/tokens.png +0 -0
- requirements.txt +11 -0
- templates/tokens.html +82 -0
- utils/Client.py +59 -0
- utils/Logger.py +24 -0
- utils/config.py +74 -0
- utils/retry.py +32 -0
.dockerignore
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.env
|
| 2 |
+
*.pyc
|
| 3 |
+
/.git/
|
| 4 |
+
/.idea/
|
| 5 |
+
/docs/
|
| 6 |
+
/tmp/
|
| 7 |
+
/data/
|
.env.example
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
API_PREFIX=your_prefix
|
| 2 |
+
CHATGPT_BASE_URL=https://chatgpt.com
|
| 3 |
+
HISTORY_DISABLED=true
|
| 4 |
+
PROXY_URL=your_first_proxy, your_second_proxy
|
| 5 |
+
EXPORT_PROXY_URL=your_export_proxy
|
| 6 |
+
ARK0SE_TOKEN_URL=https://ark0se.example.com/token
|
| 7 |
+
POW_DIFFICULTY=000032
|
| 8 |
+
RETRY_TIMES=3
|
| 9 |
+
ENABLE_GATEWAY=true
|
| 10 |
+
CONVERSATION_ONLY=false
|
| 11 |
+
ENABLE_LIMIT=true
|
| 12 |
+
UPLOAD_BY_URL=false
|
| 13 |
+
SCHEDULED_REFRESH=false
|
| 14 |
+
USER_AGENTS=["ua1", "ua2"]
|
.github/workflows/build_docker.yml
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Build Docker Image

on:
  push:
    branches:
      - main
    paths-ignore:
      - 'README.md'
      - 'docker-compose.yml'
      - 'docs/**'
      - '.github/workflows/build_docker.yml'
  workflow_dispatch:

jobs:
  main:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        # checkout@v2 runs on the deprecated Node 12 runtime; v4 is the
        # currently supported release and is a drop-in replacement here.
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: lanqian528/chat2api
          tags: |
            type=raw,value=latest,enable={{is_default_branch}}
            type=raw,value=v1.4.16

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          file: Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
|
.gitignore
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.env
|
| 2 |
+
*.pyc
|
| 3 |
+
/.git/
|
| 4 |
+
/.idea/
|
| 5 |
+
/tmp/
|
| 6 |
+
/data/
|
Dockerfile
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Slim Python base keeps the image small; app only needs pip-installable deps.
FROM python:3.11-slim

WORKDIR /app

# Copy the whole project in before installing so requirements.txt is present.
COPY . /app

# NOTE(review): hard-coded env value baked into the image. Looks like a demo
# placeholder, but if anything reads DEMO_SECRET as a real credential it should
# be supplied at runtime (docker run -e / compose env_file) — confirm and remove.
ENV DEMO_SECRET=123

RUN pip install --no-cache-dir -r requirements.txt

# app.py serves uvicorn on this port (see app.py: port=5005).
EXPOSE 5005

CMD ["python", "app.py"]
|
api/files.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import io
|
| 2 |
+
|
| 3 |
+
import pybase64
|
| 4 |
+
from PIL import Image
|
| 5 |
+
|
| 6 |
+
from utils.Client import Client
|
| 7 |
+
from utils.config import export_proxy_url, cf_file_url
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
async def get_file_content(url):
    """Fetch the raw bytes and MIME type for *url*.

    Handles two cases:
      * ``data:`` URIs — decoded locally, no network access;
      * remote URLs — downloaded via the shared HTTP client, optionally
        routed through ``cf_file_url`` when that proxy endpoint is configured.

    Returns ``(content_bytes, mime_type)``, or ``(None, None)`` when a remote
    fetch does not answer 200.
    """
    if url.startswith("data:"):
        # data:<mime>[;base64],<payload> — parse mime and payload separately.
        mime = url.split(';')[0].split(':')[1]
        payload = url.split(',')[1]
        return pybase64.b64decode(payload), mime

    http = Client()
    try:
        if cf_file_url:
            # Delegate the download to the configured fetcher endpoint.
            resp = await http.post(cf_file_url, timeout=60, json={"file_url": url})
        else:
            resp = await http.get(url, proxy=export_proxy_url, timeout=60)
        if resp.status_code != 200:
            return None, None
        # Strip any "; charset=..." suffix from the Content-Type header.
        content_type = resp.headers.get('Content-Type', '').split(';')[0].strip()
        return resp.content, content_type
    finally:
        await http.close()
        del http
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
async def determine_file_use_case(mime_type):
    """Classify *mime_type* into the upstream upload use case.

    Returns one of:
      * ``"multimodal"`` — image types the model can look at directly;
      * ``"my_files"``   — document/code types handled by the files tool;
      * ``"ace_upload"`` — everything else (generic binary upload).
    """
    multimodal_types = ["image/jpeg", "image/webp", "image/png", "image/gif"]
    my_files_types = ["text/x-php", "application/msword", "text/x-c", "text/html",
                      "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
                      "application/json", "text/javascript", "application/pdf",
                      "text/x-java", "text/x-tex", "text/x-typescript", "text/x-sh",
                      "text/x-csharp", "application/vnd.openxmlformats-officedocument.presentationml.presentation",
                      # Fixed typo: was "application/x-latext", which could never
                      # match; get_file_extension uses "application/x-latex".
                      "text/x-c++", "application/x-latex", "text/markdown", "text/plain",
                      "text/x-ruby", "text/x-script.python"]

    if mime_type in multimodal_types:
        return "multimodal"
    elif mime_type in my_files_types:
        return "my_files"
    else:
        return "ace_upload"
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
async def get_image_size(file_content):
    """Return ``(width, height)`` of the image encoded in *file_content* bytes."""
    with Image.open(io.BytesIO(file_content)) as img:
        # PIL's .size is exactly the (width, height) tuple.
        return img.size
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
async def get_file_extension(mime_type):
    """Map a MIME type to a conventional filename extension (with leading dot).

    Returns ``""`` for unknown MIME types so callers can safely concatenate
    the result onto a generated filename.
    """
    # Static MIME -> extension lookup; images first, then documents/code,
    # archives, audio/video, and assorted Office/web formats.
    extension_mapping = {
        "image/jpeg": ".jpg",
        "image/png": ".png",
        "image/gif": ".gif",
        "image/webp": ".webp",
        "text/x-php": ".php",
        "application/msword": ".doc",
        "text/x-c": ".c",
        "text/html": ".html",
        "application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
        "application/json": ".json",
        "text/javascript": ".js",
        "application/pdf": ".pdf",
        "text/x-java": ".java",
        "text/x-tex": ".tex",
        "text/x-typescript": ".ts",
        "text/x-sh": ".sh",
        "text/x-csharp": ".cs",
        "application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
        "text/x-c++": ".cpp",
        "application/x-latex": ".latex",
        "text/markdown": ".md",
        "text/plain": ".txt",
        "text/x-ruby": ".rb",
        "text/x-script.python": ".py",
        "application/zip": ".zip",
        "application/x-zip-compressed": ".zip",
        "application/x-tar": ".tar",
        "application/x-compressed-tar": ".tar.gz",
        "application/vnd.rar": ".rar",
        "application/x-rar-compressed": ".rar",
        "application/x-7z-compressed": ".7z",
        "application/octet-stream": ".bin",
        "audio/mpeg": ".mp3",
        "audio/wav": ".wav",
        "audio/ogg": ".ogg",
        "audio/aac": ".aac",
        "video/mp4": ".mp4",
        "video/x-msvideo": ".avi",
        "video/x-matroska": ".mkv",
        "video/webm": ".webm",
        "application/rtf": ".rtf",
        "application/vnd.ms-excel": ".xls",
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
        "text/css": ".css",
        "text/xml": ".xml",
        "application/xml": ".xml",
        "application/vnd.android.package-archive": ".apk",
        "application/vnd.apple.installer+xml": ".mpkg",
        "application/x-bzip": ".bz",
        "application/x-bzip2": ".bz2",
        "application/x-csh": ".csh",
        "application/x-debian-package": ".deb",
        "application/x-dvi": ".dvi",
        "application/java-archive": ".jar",
        "application/x-java-jnlp-file": ".jnlp",
        "application/vnd.mozilla.xul+xml": ".xul",
        "application/vnd.ms-fontobject": ".eot",
        "application/ogg": ".ogx",
        "application/x-font-ttf": ".ttf",
        "application/font-woff": ".woff",
        "application/x-shockwave-flash": ".swf",
        "application/vnd.visio": ".vsd",
        "application/xhtml+xml": ".xhtml",
        "application/vnd.ms-powerpoint": ".ppt",
        "application/vnd.oasis.opendocument.text": ".odt",
        "application/vnd.oasis.opendocument.spreadsheet": ".ods",
        "application/x-xpinstall": ".xpi",
        "application/vnd.google-earth.kml+xml": ".kml",
        "application/vnd.google-earth.kmz": ".kmz",
        "application/x-font-otf": ".otf",
        "application/vnd.ms-excel.addin.macroEnabled.12": ".xlam",
        "application/vnd.ms-excel.sheet.binary.macroEnabled.12": ".xlsb",
        "application/vnd.ms-excel.template.macroEnabled.12": ".xltm",
        "application/vnd.ms-powerpoint.addin.macroEnabled.12": ".ppam",
        "application/vnd.ms-powerpoint.presentation.macroEnabled.12": ".pptm",
        "application/vnd.ms-powerpoint.slideshow.macroEnabled.12": ".ppsm",
        "application/vnd.ms-powerpoint.template.macroEnabled.12": ".potm",
        "application/vnd.ms-word.document.macroEnabled.12": ".docm",
        "application/vnd.ms-word.template.macroEnabled.12": ".dotm",
        "application/x-ms-application": ".application",
        "application/x-ms-wmd": ".wmd",
        "application/x-ms-wmz": ".wmz",
        "application/x-ms-xbap": ".xbap",
        "application/vnd.ms-xpsdocument": ".xps",
        "application/x-silverlight-app": ".xap"
    }
    # Unknown types fall back to an empty extension rather than raising.
    return extension_mapping.get(mime_type, "")
|
api/models.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Maps the short model alias a client may send to the concrete dated model
# name reported back in responses (alias -> snapshot name).
model_proxy = {
    "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
    "gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
    "gpt-4": "gpt-4-0613",
    "gpt-4-32k": "gpt-4-32k-0613",
    "gpt-4-turbo-preview": "gpt-4-0125-preview",
    "gpt-4-vision-preview": "gpt-4-1106-vision-preview",
    "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
    "gpt-4o": "gpt-4o-2024-05-13",
    "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
    "o1-preview": "o1-preview-2024-09-12",
    "o1-mini": "o1-mini-2024-09-12",
    "claude-3-opus": "claude-3-opus-20240229",
    "claude-3-sonnet": "claude-3-sonnet-20240229",
    "claude-3-haiku": "claude-3-haiku-20240307",
}

# Known system_fingerprint values per snapshot model; a value is presumably
# picked from the list when fabricating an OpenAI-style response.
model_system_fingerprint = {
    "gpt-3.5-turbo-0125": ["fp_b28b39ffa8"],
    "gpt-3.5-turbo-1106": ["fp_592ef5907d"],
    "gpt-4-0125-preview": ["fp_f38f4d6482", "fp_2f57f81c11", "fp_a7daf7c51e", "fp_a865e8ede4", "fp_13c70b9f70",
                           "fp_b77cb481ed"],
    "gpt-4-1106-preview": ["fp_e467c31c3d", "fp_d986a8d1ba", "fp_99a5a401bb", "fp_123d5a9f90", "fp_0d1affc7a6",
                           "fp_5c95a4634e"],
    "gpt-4-turbo-2024-04-09": ["fp_d1bac968b4"],
    "gpt-4o-2024-05-13": ["fp_3aa7262c27"],
    "gpt-4o-mini-2024-07-18": ["fp_c9aa9c0491"]
}
|
api/tokens.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
import tiktoken
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
async def calculate_image_tokens(width, height, detail):
    """Estimate vision-input token cost for an image of *width* x *height*.

    "low" detail is a flat 85 tokens. Otherwise the image is shrunk to fit
    a 2048px bounding box, its short side is then capped at 768px, and the
    cost is 170 tokens per 512x512 tile plus an 85-token base.
    """
    if detail == "low":
        return 85

    w, h = width, height

    # First pass: fit the long side inside 2048px.
    longest = max(w, h)
    if longest > 2048:
        shrink = 2048 / longest
        w, h = int(w * shrink), int(h * shrink)

    # Second pass: cap the short side at 768px.
    shortest = min(w, h)
    if shortest > 768:
        shrink = 768 / shortest
        w, h = int(w * shrink), int(h * shrink)

    # Count 512x512 tiles needed to cover the scaled image.
    tiles = math.ceil(w / 512) * math.ceil(h / 512)
    return tiles * 170 + 85
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
async def num_tokens_from_messages(messages, model=''):
    """Approximate the prompt token count for a chat *messages* list.

    Falls back to the cl100k_base encoding when *model* is unknown to
    tiktoken. Image parts are not counted here (priced separately).
    """
    try:
        enc = tiktoken.encoding_for_model(model)
    except KeyError:
        enc = tiktoken.get_encoding("cl100k_base")
    # gpt-3.5-turbo-0301 carries one extra priming token per message.
    per_message = 4 if model == "gpt-3.5-turbo-0301" else 3
    total = 0
    for message in messages:
        total += per_message
        for value in message.values():
            if isinstance(value, list):
                # Multimodal content: count only the text parts.
                for part in value:
                    if part.get("type") == "text":
                        total += len(enc.encode(part.get("text")))
                    if part.get("type") == "image_url":
                        pass
            else:
                total += len(enc.encode(value))
    # Every reply is primed with a fixed 3-token assistant prefix.
    total += 3
    return total
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
async def num_tokens_from_content(content, model=None):
    """Count tiktoken tokens in *content*, using cl100k_base for unknown models."""
    try:
        enc = tiktoken.encoding_for_model(model)
    except KeyError:
        enc = tiktoken.get_encoding("cl100k_base")
    return len(enc.encode(content))
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
async def split_tokens_from_content(content, max_tokens, model=None):
    """Truncate *content* to at most *max_tokens* tokens.

    Returns ``(text, token_count, finish_reason)`` where finish_reason is
    "length" when truncation happened and "stop" otherwise.
    """
    try:
        enc = tiktoken.encoding_for_model(model)
    except KeyError:
        enc = tiktoken.get_encoding("cl100k_base")
    tokens = enc.encode(content)
    if len(tokens) >= max_tokens:
        # Decode only the kept prefix back into text.
        return enc.decode(tokens[:max_tokens]), max_tokens, "length"
    return content, len(tokens), "stop"
|
app.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import uvicorn

# Reformat uvicorn's default and access log lines before starting the server.
# uvicorn.run uses uvicorn.config.LOGGING_CONFIG by default, so mutating it
# in place here is sufficient — no log_config argument is needed below.
log_config = uvicorn.config.LOGGING_CONFIG
default_format = "%(asctime)s | %(levelname)s | %(message)s"
access_format = r'%(asctime)s | %(levelname)s | %(client_addr)s: %(request_line)s %(status_code)s'
log_config["formatters"]["default"]["fmt"] = default_format
log_config["formatters"]["access"]["fmt"] = access_format

# Serve the FastAPI app defined in chat2api.py on all interfaces (see
# Dockerfile EXPOSE 5005).
uvicorn.run("chat2api:app", host="0.0.0.0", port=5005)
|
chat2api.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import types
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
from apscheduler.schedulers.asyncio import AsyncIOScheduler
|
| 6 |
+
from fastapi import FastAPI, Request, Depends, HTTPException, Form
|
| 7 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 8 |
+
from fastapi.responses import HTMLResponse
|
| 9 |
+
from fastapi.responses import StreamingResponse, JSONResponse
|
| 10 |
+
from fastapi.security import OAuth2PasswordBearer
|
| 11 |
+
from fastapi.templating import Jinja2Templates
|
| 12 |
+
from starlette.background import BackgroundTask
|
| 13 |
+
|
| 14 |
+
from chatgpt.ChatService import ChatService
|
| 15 |
+
from chatgpt.authorization import refresh_all_tokens
|
| 16 |
+
import chatgpt.globals as globals
|
| 17 |
+
from chatgpt.reverseProxy import chatgpt_reverse_proxy
|
| 18 |
+
from utils.Logger import logger
|
| 19 |
+
from utils.config import api_prefix, scheduled_refresh
|
| 20 |
+
from utils.retry import async_retry
|
| 21 |
+
|
| 22 |
+
warnings.filterwarnings("ignore")
|
| 23 |
+
|
| 24 |
+
app = FastAPI()
|
| 25 |
+
scheduler = AsyncIOScheduler()
|
| 26 |
+
templates = Jinja2Templates(directory="templates")
|
| 27 |
+
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)
|
| 28 |
+
|
| 29 |
+
app.add_middleware(
|
| 30 |
+
CORSMiddleware,
|
| 31 |
+
allow_origins=["*"],
|
| 32 |
+
allow_credentials=True,
|
| 33 |
+
allow_methods=["*"],
|
| 34 |
+
allow_headers=["*"],
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@app.on_event("startup")
async def app_start():
    """On startup, optionally schedule periodic token refreshes and kick off
    an immediate (non-forced) refresh pass in the background."""
    if scheduled_refresh:
        # Cron trigger: 03:00 on every 4th day of the month, forcing a refresh.
        scheduler.add_job(id='refresh', func=refresh_all_tokens, trigger='cron', hour=3, minute=0, day='*/4', kwargs={'force_refresh': True})
        scheduler.start()
        # Defer task creation until after startup completes; call_later(0)
        # schedules the lambda on the running loop's next iteration.
        asyncio.get_event_loop().call_later(0, lambda: asyncio.create_task(refresh_all_tokens(force_refresh=False)))
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
async def to_send_conversation(request_data, req_token):
    """Build a ChatService for this request and fetch its chat requirements.

    On any failure the underlying HTTP client is closed before the error is
    surfaced as an HTTPException (500 for unexpected errors).
    """
    service = ChatService(req_token)
    try:
        await service.set_dynamic_data(request_data)
        await service.get_chat_requirements()
    except HTTPException as e:
        await service.close_client()
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        await service.close_client()
        logger.error(f"Server error, {str(e)}")
        raise HTTPException(status_code=500, detail="Server error")
    return service
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
async def process(request_data, req_token):
    """Run one conversation round-trip and return ``(service, response)``."""
    service = await to_send_conversation(request_data, req_token)
    await service.prepare_send_conversation()
    response = await service.send_conversation()
    return service, response
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@app.post(f"/{api_prefix}/v1/chat/completions" if api_prefix else "/v1/chat/completions")
async def send_conversation(request: Request, req_token: str = Depends(oauth2_scheme)):
    """OpenAI-compatible chat completions endpoint.

    Parses the JSON body, runs the conversation through ``process`` (with
    retries), and returns either a streaming SSE response or a plain JSON
    response. The ChatService client is closed via a BackgroundTask after
    the response has been sent, or immediately on error.
    """
    try:
        request_data = await request.json()
    except Exception:
        raise HTTPException(status_code=400, detail={"error": "Invalid JSON body"})
    # async_retry re-invokes process() on transient failures before giving up.
    chat_service, res = await async_retry(process, request_data, req_token)
    try:
        if isinstance(res, types.AsyncGeneratorType):
            # Streaming: close the upstream client only after the stream ends.
            background = BackgroundTask(chat_service.close_client)
            return StreamingResponse(res, media_type="text/event-stream", background=background)
        else:
            background = BackgroundTask(chat_service.close_client)
            return JSONResponse(res, media_type="application/json", background=background)
    except HTTPException as e:
        await chat_service.close_client()
        if e.status_code == 500:
            logger.error(f"Server error, {str(e)}")
            raise HTTPException(status_code=500, detail="Server error")
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        await chat_service.close_client()
        logger.error(f"Server error, {str(e)}")
        raise HTTPException(status_code=500, detail="Server error")
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@app.get(f"/{api_prefix}/tokens" if api_prefix else "/tokens", response_class=HTMLResponse)
async def upload_html(request: Request):
    """Render the token-management page showing how many tokens are usable."""
    usable = set(globals.token_list) - set(globals.error_token_list)
    context = {"request": request, "api_prefix": api_prefix, "tokens_count": len(usable)}
    return templates.TemplateResponse("tokens.html", context)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
@app.post(f"/{api_prefix}/tokens/upload" if api_prefix else "/tokens/upload")
async def upload_post(text: str = Form(...)):
    """Add tokens from a newline-separated form field.

    Blank lines and lines starting with '#' are ignored. Accepted tokens are
    appended both to the in-memory list and to data/token.txt.
    """
    new_tokens = [line.strip() for line in text.split("\n")
                  if line.strip() and not line.startswith("#")]
    if new_tokens:
        globals.token_list.extend(new_tokens)
        # Open the file once for the whole batch instead of once per token
        # (the original reopened data/token.txt inside the loop).
        with open("data/token.txt", "a", encoding="utf-8") as f:
            f.write("\n".join(new_tokens) + "\n")
    logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
    tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
    return {"status": "success", "tokens_count": tokens_count}
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
@app.post(f"/{api_prefix}/tokens/clear" if api_prefix else "/tokens/clear")
async def clear_tokens():
    """Drop all stored tokens and truncate the on-disk token file.

    Renamed from ``upload_post``: the original reused the upload handler's
    function name, shadowing it at module scope. The HTTP route is unchanged.
    """
    globals.token_list.clear()
    globals.error_token_list.clear()
    # Opening in "w" mode truncates data/token.txt; nothing is written.
    with open("data/token.txt", "w", encoding="utf-8"):
        pass
    logger.info(f"Token count: {len(globals.token_list)}, Error token count: {len(globals.error_token_list)}")
    tokens_count = len(set(globals.token_list) - set(globals.error_token_list))
    return {"status": "success", "tokens_count": tokens_count}
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
@app.post(f"/{api_prefix}/tokens/error" if api_prefix else "/tokens/error")
async def error_tokens():
    """Return the de-duplicated list of tokens that have failed verification."""
    return {"status": "success", "error_tokens": list(set(globals.error_token_list))}
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
# Catch-all route: any path not matched by the handlers above is forwarded
# to the upstream ChatGPT reverse proxy, for every HTTP method.
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH", "TRACE"])
async def reverse_proxy(request: Request, path: str):
    return await chatgpt_reverse_proxy(request, path)
|
chatgpt/ChatService.py
ADDED
|
@@ -0,0 +1,490 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import json
|
| 3 |
+
import random
|
| 4 |
+
import uuid
|
| 5 |
+
|
| 6 |
+
from fastapi import HTTPException
|
| 7 |
+
from starlette.concurrency import run_in_threadpool
|
| 8 |
+
|
| 9 |
+
from api.files import get_image_size, get_file_extension, determine_file_use_case
|
| 10 |
+
from api.models import model_proxy
|
| 11 |
+
from chatgpt.authorization import get_req_token, verify_token
|
| 12 |
+
from chatgpt.chatFormat import api_messages_to_chat, stream_response, format_not_stream_response, head_process_response
|
| 13 |
+
from chatgpt.chatLimit import check_is_limit, handle_request_limit
|
| 14 |
+
from chatgpt.proofofWork import get_config, get_dpl, get_answer_token, get_requirements_token
|
| 15 |
+
|
| 16 |
+
from utils.Client import Client
|
| 17 |
+
from utils.Logger import logger
|
| 18 |
+
from utils.config import proxy_url_list, chatgpt_base_url_list, ark0se_token_url_list, history_disabled, pow_difficulty, \
|
| 19 |
+
conversation_only, enable_limit, upload_by_url, check_model, auth_key, user_agents_list, turnstile_solver_url
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class ChatService:
|
| 23 |
+
    def __init__(self, origin_token=None):
        """Bind this service instance to a request token (or anonymous access).

        Heavy setup (headers, HTTP client, model selection) happens later in
        set_dynamic_data; here only per-instance identity is chosen.
        """
        # One User-Agent per instance; falls back to a fixed Chrome UA when no
        # pool is configured.
        self.user_agent = random.choice(user_agents_list) if user_agents_list else "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
        self.req_token = get_req_token(origin_token)
        # Placeholder sentinel; presumably replaced by a real sentinel token
        # later in the flow — confirm in get_chat_requirements.
        self.chat_token = "gAAAAAB"
        self.s = None   # HTTP client, created in set_dynamic_data
        self.ws = None  # websocket handle, unset until needed
|
| 29 |
+
|
| 30 |
+
    async def set_dynamic_data(self, data):
        """Resolve credentials and per-request settings from the request body.

        Verifies the request token (a bare token, or "token,account_id"),
        enforces the optional rate limit, selects proxy/host endpoints, and
        builds the base headers and base URL used by all later calls.
        Raises HTTPException(429) when the rate limit is hit.
        """
        if self.req_token:
            logger.info(f"Request token: {self.req_token}")
            # "access_token" or "access_token,account_id" — split decides which.
            req_len = len(self.req_token.split(","))
            if req_len == 1:
                self.access_token = await verify_token(self.req_token)
                self.account_id = None
            else:
                self.access_token = await verify_token(self.req_token.split(",")[0])
                self.account_id = self.req_token.split(",")[1]
        else:
            # No token at all: fall back to the anonymous (no-auth) backend.
            logger.info("Request token is empty, use no-auth 3.5")
            self.access_token = None
            self.account_id = None

        self.data = data
        await self.set_model()
        if enable_limit and self.req_token:
            limit_response = await handle_request_limit(self.req_token, self.req_model)
            if limit_response:
                raise HTTPException(status_code=429, detail=limit_response)

        # Body may override the account id parsed from the token above.
        self.account_id = self.data.get('Chatgpt-Account-Id', self.account_id)
        self.parent_message_id = self.data.get('parent_message_id')
        self.conversation_id = self.data.get('conversation_id')
        self.history_disabled = self.data.get('history_disabled', history_disabled)

        self.api_messages = self.data.get("messages", [])
        self.prompt_tokens = 0
        # Default max_tokens is effectively "unlimited" (INT32_MAX); non-int
        # values from the client are coerced back to that default.
        self.max_tokens = self.data.get("max_tokens", 2147483647)
        if not isinstance(self.max_tokens, int):
            self.max_tokens = 2147483647

        # Randomly spread requests across configured proxies/hosts/token URLs.
        self.proxy_url = random.choice(proxy_url_list) if proxy_url_list else None
        self.host_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
        self.ark0se_token_url = random.choice(ark0se_token_url_list) if ark0se_token_url_list else None

        self.s = Client(proxy=self.proxy_url)

        # Fresh device id per request; anti-bot artifacts start unset.
        self.oai_device_id = str(uuid.uuid4())
        self.persona = None
        self.ark0se_token = None
        self.proof_token = None
        self.turnstile_token = None

        self.chat_headers = None
        self.chat_request = None

        # Browser-like header set matching the chosen User-Agent.
        self.base_headers = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br, zstd',
            'Accept-Language': 'en-US,en;q=0.9',
            'Content-Type': 'application/json',
            'Oai-Device-Id': self.oai_device_id,
            'Oai-Language': 'en-US',
            'Origin': self.host_url,
            'Priority': 'u=1, i',
            'Referer': f'{self.host_url}/',
            'Sec-Ch-Ua': '"Chromium";v="124", "Microsoft Edge";v="124", "Not-A.Brand";v="99"',
            'Sec-Ch-Ua-Mobile': '?0',
            'Sec-Ch-Ua-Platform': '"Windows"',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': self.user_agent
        }
        if self.access_token:
            # Authenticated traffic uses /backend-api with a bearer token.
            self.base_url = self.host_url + "/backend-api"
            self.base_headers['Authorization'] = f'Bearer {self.access_token}'
            if self.account_id:
                self.base_headers['Chatgpt-Account-Id'] = self.account_id
        else:
            # Anonymous traffic uses the unauthenticated backend.
            self.base_url = self.host_url + "/backend-anon"

        if auth_key:
            self.base_headers['authkey'] = auth_key

        await get_dpl(self)
|
| 108 |
+
|
| 109 |
+
async def set_model(self):
    """Resolve the client-requested model name into the upstream slug.

    Sets:
        origin_model -- raw model string from the request body.
        resp_model   -- name echoed back in responses (after model_proxy mapping).
        req_model    -- slug sent to the ChatGPT backend.
    """
    self.origin_model = self.data.get("model", "gpt-3.5-turbo-0125")
    self.resp_model = model_proxy.get(self.origin_model, self.origin_model)
    # Order matters: more specific markers must precede their prefixes
    # (e.g. "o1-mini" before "o1", "gpt-4o-mini" before "gpt-4o").
    slug_table = (
        ("o1-preview", "o1-preview"),
        ("o1-mini", "o1-mini"),
        ("o1", "o1"),
        ("gpt-4.5o", "gpt-4.5o"),
        ("gpt-4o-canmore", "gpt-4o-canmore"),
        ("gpt-4o-mini", "gpt-4o-mini"),
        ("gpt-4o", "gpt-4o"),
        ("gpt-4-mobile", "gpt-4-mobile"),
        ("gpt-4-gizmo", "gpt-4o"),  # gizmos ride on the gpt-4o slug
        ("gpt-4", "gpt-4"),
        ("gpt-3.5", "text-davinci-002-render-sha"),
        ("auto", "auto"),
    )
    self.req_model = next(
        (slug for marker, slug in slug_table if marker in self.origin_model),
        "auto",  # unknown models fall back to auto, as before
    )
|
| 138 |
+
|
| 139 |
+
async def get_chat_requirements(self):
    """Negotiate the sentinel "chat requirements" with the backend.

    Posts a requirements token, then — depending on what the backend asks
    for — solves turnstile (best effort), ark0se (via external service),
    and proof-of-work challenges.  On success stores the resulting tokens
    on ``self`` and returns the chat token; every failure path raises
    ``HTTPException``.
    """
    # With CONVERSATION_ONLY the sentinel handshake is skipped entirely.
    if conversation_only:
        return None
    url = f'{self.base_url}/sentinel/chat-requirements'
    headers = self.base_headers.copy()
    try:
        config = get_config(self.user_agent)
        # 'p' is the client-side requirements token derived from the config.
        p = get_requirements_token(config)
        data = {'p': p}
        r = await self.s.post(url, headers=headers, json=data, timeout=5)
        if r.status_code == 200:
            resp = r.json()

            if check_model:
                # Ask the backend which models this account can use and
                # verify the requested slug is among them.
                r = await self.s.get(f'{self.base_url}/models', headers=headers, timeout=5)
                if r.status_code == 200:
                    models = r.json().get('models')
                    if not any(self.req_model in model.get("slug", "") for model in models):
                        logger.error(f"Model {self.req_model} not support.")
                        raise HTTPException(status_code=404, detail={
                            "message": f"The model `{self.origin_model}` does not exist or you do not have access to it.",
                            "type": "invalid_request_error",
                            "param": None,
                            "code": "model_not_found"
                        })
                else:
                    raise HTTPException(status_code=404, detail="Failed to get models")
            else:
                # Cheaper heuristic: infer access from the persona field.
                # Non-paid personas cannot use the plain gpt-4 slug.
                self.persona = resp.get("persona")
                if self.persona != "chatgpt-paid":
                    if self.req_model == "gpt-4":
                        logger.error(f"Model {self.resp_model} not support for {self.persona}")
                        raise HTTPException(status_code=404, detail={
                            "message": f"The model `{self.origin_model}` does not exist or you do not have access to it.",
                            "type": "invalid_request_error",
                            "param": None,
                            "code": "model_not_found"
                        })

            # --- Turnstile challenge (best effort; failure is tolerated) ---
            turnstile = resp.get('turnstile', {})
            turnstile_required = turnstile.get('required')
            if turnstile_required:
                turnstile_dx = turnstile.get("dx")
                try:
                    if turnstile_solver_url:
                        res = await self.s.post(turnstile_solver_url, json={"url": "https://chatgpt.com", "p": p, "dx": turnstile_dx})
                        self.turnstile_token = res.json().get("t")
                except Exception as e:
                    # Deliberately non-fatal: proceed without a turnstile token.
                    logger.info(f"Turnstile ignored: {e}")
                    # raise HTTPException(status_code=403, detail="Turnstile required")

            # --- Ark0se challenge (requires an external solver service) ---
            ark0se = resp.get('ark' + 'ose', {})
            ark0se_required = ark0se.get('required')
            if ark0se_required:
                # Free accounts use a different solver method than paid ones.
                if self.persona == "chatgpt-freeaccount":
                    ark0se_method = "chat35"
                else:
                    ark0se_method = "chat4"
                if not self.ark0se_token_url:
                    raise HTTPException(status_code=403, detail="Ark0se service required")
                ark0se_dx = ark0se.get("dx")
                # Use a dedicated client (no proxy) for the solver service.
                ark0se_client = Client()
                try:
                    r2 = await ark0se_client.post(
                        url=self.ark0se_token_url,
                        json={"blob": ark0se_dx, "method": ark0se_method},
                        timeout=15
                    )
                    r2esp = r2.json()
                    logger.info(f"ark0se_token: {r2esp}")
                    # NOTE: default True means a response without a 'solved'
                    # key is treated as success.
                    if r2esp.get('solved', True):
                        self.ark0se_token = r2esp.get('token')
                    else:
                        raise HTTPException(status_code=403, detail="Failed to get Ark0se token")
                except Exception:
                    raise HTTPException(status_code=403, detail="Failed to get Ark0se token")
                finally:
                    await ark0se_client.close()

            # --- Proof-of-work challenge (CPU bound, run off the event loop) ---
            proofofwork = resp.get('proofofwork', {})
            proofofwork_required = proofofwork.get('required')
            if proofofwork_required:
                proofofwork_diff = proofofwork.get("difficulty")
                # Lexicographically smaller difficulty string == harder puzzle;
                # refuse anything at or below the configured threshold.
                if proofofwork_diff <= pow_difficulty:
                    raise HTTPException(status_code=403,
                                        detail=f"Proof of work difficulty too high: {proofofwork_diff}")
                proofofwork_seed = proofofwork.get("seed")
                self.proof_token, solved = await run_in_threadpool(get_answer_token, proofofwork_seed,
                                                                   proofofwork_diff, config)
                if not solved:
                    raise HTTPException(status_code=403, detail="Failed to solve proof of work")

            self.chat_token = resp.get('token')
            if not self.chat_token:
                raise HTTPException(status_code=403, detail=f"Failed to get chat token: {r.text}")
            return self.chat_token
        else:
            # Non-200: surface the most useful detail we can extract.
            if "application/json" == r.headers.get("Content-Type", ""):
                detail = r.json().get("detail", r.json())
            else:
                detail = r.text
            if "cf-spinner-please-wait" in detail:
                raise HTTPException(status_code=r.status_code, detail="cf-spinner-please-wait")
            if r.status_code == 429:
                raise HTTPException(status_code=r.status_code, detail="rate-limit")
            raise HTTPException(status_code=r.status_code, detail=detail)
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        # Anything unexpected becomes a generic 500 toward the caller.
        raise HTTPException(status_code=500, detail=str(e))
|
| 249 |
+
|
| 250 |
+
async def prepare_send_conversation(self):
    """Build the headers and JSON body for the /conversation request.

    Converts the OpenAI-style ``api_messages`` to the ChatGPT message
    format (uploading attachments as needed), attaches all sentinel
    tokens gathered by ``get_chat_requirements``, and assembles the
    conversation payload.  Returns the request dict (also kept on
    ``self.chat_request``).  Raises ``HTTPException(400)`` when the
    incoming messages cannot be converted.
    """
    try:
        chat_messages, self.prompt_tokens = await api_messages_to_chat(self, self.api_messages, upload_by_url)
    except Exception as e:
        logger.error(f"Failed to format messages: {str(e)}")
        raise HTTPException(status_code=400, detail="Failed to format messages.")
    self.chat_headers = self.base_headers.copy()
    self.chat_headers.update({
        'Accept': 'text/event-stream',
        'Openai-Sentinel-Chat-Requirements-Token': self.chat_token,
        'Openai-Sentinel-Proof-Token': self.proof_token,
    })
    # Optional sentinel tokens: only sent when actually obtained.
    if self.ark0se_token:
        self.chat_headers['Openai-Sentinel-Ark' + 'ose-Token'] = self.ark0se_token

    if self.turnstile_token:
        self.chat_headers['Openai-Sentinel-Turnstile-Token'] = self.turnstile_token

    # CONVERSATION_ONLY mode skips the sentinel handshake, so strip
    # every sentinel header again before sending.
    if conversation_only:
        self.chat_headers.pop('Openai-Sentinel-Chat-Requirements-Token', None)
        self.chat_headers.pop('Openai-Sentinel-Proof-Token', None)
        self.chat_headers.pop('Openai-Sentinel-Ark' + 'ose-Token', None)
        self.chat_headers.pop('Openai-Sentinel-Turnstile-Token', None)

    # Gizmo (custom GPT) models carry their gizmo id as a model suffix.
    if "gpt-4-gizmo" in self.origin_model:
        gizmo_id = self.origin_model.split("gpt-4-gizmo-")[-1]
        conversation_mode = {"kind": "gizmo_interaction", "gizmo_id": gizmo_id}
    else:
        conversation_mode = {"kind": "primary_assistant"}

    logger.info(f"Model mapping: {self.origin_model} -> {self.req_model}")
    self.chat_request = {
        "action": "next",
        "conversation_mode": conversation_mode,
        "force_nulligen": False,
        "force_paragen": False,
        "force_paragen_model_slug": "",
        "force_rate_limit": False,
        "force_use_sse": True,
        "history_and_training_disabled": self.history_disabled,
        "messages": chat_messages,
        "model": self.req_model,
        # A fresh UUID starts a new thread when no parent is supplied.
        "parent_message_id": self.parent_message_id if self.parent_message_id else f"{uuid.uuid4()}",
        "reset_rate_limits": False,
        "suggestions": [],
        "timezone_offset_min": -480,
        "variant_purpose": "comparison_implicit",
        "websocket_request_id": f"{uuid.uuid4()}"
    }
    if self.conversation_id:
        self.chat_request['conversation_id'] = self.conversation_id
    return self.chat_request
|
| 302 |
+
|
| 303 |
+
async def send_conversation(self):
    """POST the prepared request to /conversation and shape the reply.

    Streams the backend SSE response.  Returns either an async generator
    of OpenAI-style chunks (``stream=True``) or a fully assembled
    chat.completion dict.  All failures are surfaced as ``HTTPException``.
    """
    try:
        url = f'{self.base_url}/conversation'
        stream = self.data.get("stream", False)
        r = await self.s.post_stream(url, headers=self.chat_headers, json=self.chat_request, timeout=10,
                                     stream=True)
        if r.status_code != 200:
            # The body must be read through atext() on a streaming response.
            rtext = await r.atext()
            if "application/json" == r.headers.get("Content-Type", ""):
                # BUGFIX: parse once instead of calling json.loads twice.
                resp_json = json.loads(rtext)
                detail = resp_json.get("detail", resp_json)
                if r.status_code == 429:
                    # May raise and mark the token as rate-limited.
                    check_is_limit(detail, token=self.req_token, model=self.req_model)
            else:
                if "cf-spinner-please-wait" in rtext:
                    raise HTTPException(status_code=r.status_code, detail="cf-spinner-please-wait")
                if r.status_code == 429:
                    raise HTTPException(status_code=r.status_code, detail="rate-limit")
                # BUGFIX: the original used r.text on a stream response; use
                # the body already fetched via atext() above.
                raise HTTPException(status_code=r.status_code, detail=rtext[:100])

        content_type = r.headers.get("Content-Type", "")
        if "text/event-stream" in content_type:
            # Peek at the stream head to confirm generation actually started.
            res, start = await head_process_response(r.aiter_lines())
            if not start:
                raise HTTPException(status_code=403, detail="Our systems have detected unusual activity coming from your system. Please try again later.")
            if stream:
                return stream_response(self, res, self.resp_model, self.max_tokens)
            else:
                return await format_not_stream_response(
                    stream_response(self, res, self.resp_model, self.max_tokens), self.prompt_tokens,
                    self.max_tokens, self.resp_model)
        elif "application/json" in content_type:
            rtext = await r.atext()
            resp = json.loads(rtext)
            raise HTTPException(status_code=r.status_code, detail=resp)
        else:
            rtext = await r.atext()
            raise HTTPException(status_code=r.status_code, detail=rtext)
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
| 348 |
+
|
| 349 |
+
async def get_download_url(self, file_id):
    """Fetch the signed download URL for an uploaded file.

    Returns the URL string, or "" on any non-200 status or HTTP error.
    """
    request_url = f"{self.base_url}/files/{file_id}/download"
    request_headers = self.base_headers.copy()
    try:
        response = await self.s.get(request_url, headers=request_headers, timeout=5)
        if response.status_code != 200:
            return ""
        return response.json().get('download_url')
    except HTTPException:
        return ""
|
| 361 |
+
|
| 362 |
+
async def get_download_url_from_upload(self, file_id):
    """Mark an upload as completed and get its signed download URL.

    Returns the URL string, or "" on any non-200 status or HTTP error.
    """
    confirm_url = f"{self.base_url}/files/{file_id}/uploaded"
    confirm_headers = self.base_headers.copy()
    try:
        response = await self.s.post(confirm_url, headers=confirm_headers, json={}, timeout=5)
        if response.status_code != 200:
            return ""
        return response.json().get('download_url')
    except HTTPException:
        return ""
|
| 374 |
+
|
| 375 |
+
async def get_upload_url(self, file_name, file_size, use_case="multimodal"):
    """Register a pending upload with the backend.

    Returns (file_id, upload_url) on success, ("", "") on failure.
    """
    payload = {
        "file_name": file_name,
        "file_size": file_size,
        "timezone_offset_min": -480,
        "use_case": use_case
    }
    register_headers = self.base_headers.copy()
    try:
        response = await self.s.post(f'{self.base_url}/files', headers=register_headers,
                                     json=payload, timeout=5)
        if response.status_code != 200:
            return "", ""
        body = response.json()
        file_id = body.get('file_id')
        upload_url = body.get('upload_url')
        logger.info(f"file_id: {file_id}, upload_url: {upload_url}")
        return file_id, upload_url
    except HTTPException:
        return "", ""
|
| 395 |
+
|
| 396 |
+
async def upload(self, upload_url, file_content, mime_type):
    """PUT raw file bytes to the pre-signed blob upload URL.

    Returns True on HTTP 201, False on any other status or exception.
    """
    put_headers = self.base_headers.copy()
    put_headers.update({
        'Accept': 'application/json, text/plain, */*',
        'Content-Type': mime_type,
        'X-Ms-Blob-Type': 'BlockBlob',
        'X-Ms-Version': '2020-04-08'
    })
    # The URL is pre-signed; the ChatGPT bearer token must not leak to
    # the blob storage host.
    put_headers.pop('Authorization', None)
    try:
        response = await self.s.put(upload_url, headers=put_headers, data=file_content)
        return response.status_code == 201
    except Exception:
        return False
|
| 412 |
+
|
| 413 |
+
async def upload_file(self, file_content, mime_type):
    """Upload a file and return its metadata dict, or None on failure.

    Images get width/height probed; unreadable images are demoted to
    text/plain.  Each failed step logs an error and yields None.
    """
    if not file_content or not mime_type:
        return None

    width = height = None
    if mime_type.startswith("image/"):
        try:
            width, height = await get_image_size(file_content)
        except Exception as e:
            logger.error(f"Error image mime_type, change to text/plain: {e}")
            mime_type = 'text/plain'
    file_size = len(file_content)
    file_extension = await get_file_extension(mime_type)
    file_name = f"{uuid.uuid4()}{file_extension}"
    use_case = await determine_file_use_case(mime_type)

    file_id, upload_url = await self.get_upload_url(file_name, file_size, use_case)
    # Guard-clause chain: register -> PUT bytes -> confirm, bailing out
    # (with a log line) at the first failed step.
    if not (file_id and upload_url):
        logger.error("Failed to get upload url")
        return None
    if not await self.upload(upload_url, file_content, mime_type):
        logger.error("Failed to upload file")
        return None
    download_url = await self.get_download_url_from_upload(file_id)
    if not download_url:
        logger.error("Failed to get download url")
        return None

    file_meta = {
        "file_id": file_id,
        "file_name": file_name,
        "size_bytes": file_size,
        "mime_type": mime_type,
        "width": width,
        "height": height,
        "use_case": use_case
    }
    logger.info(f"File_meta: {file_meta}")
    return file_meta
|
| 451 |
+
|
| 452 |
+
async def check_upload(self, file_id):
    """Poll the file status until its retrieval index reports success.

    Polls at most 30 times, one second apart.  Returns True when the
    loop finishes (even if indexing never succeeded — best effort),
    False only on an HTTP error.
    """
    status_url = f'{self.base_url}/files/{file_id}'
    poll_headers = self.base_headers.copy()
    try:
        for _ in range(30):
            response = await self.s.get(status_url, headers=poll_headers, timeout=5)
            if (response.status_code == 200
                    and response.json().get('retrieval_index_status', '') == "success"):
                break
            await asyncio.sleep(1)
        return True
    except HTTPException:
        return False
|
| 467 |
+
|
| 468 |
+
async def get_response_file_url(self, conversation_id, message_id, sandbox_path):
    """Resolve a code-interpreter sandbox path to a download URL.

    Returns the URL string, or None on any failure.
    """
    try:
        download_endpoint = f"{self.base_url}/conversation/{conversation_id}/interpreter/download"
        query = {
            "message_id": message_id,
            "sandbox_path": sandbox_path
        }
        request_headers = self.base_headers.copy()
        response = await self.s.get(download_endpoint, headers=request_headers, params=query, timeout=10)
        if response.status_code != 200:
            return None
        return response.json().get("download_url")
    except Exception:
        logger.info("Failed to get response file url")
        return None
|
| 484 |
+
|
| 485 |
+
async def close_client(self):
    """Release the HTTP client and websocket held by this service.

    Called when the request finishes so proxied connections do not leak.
    """
    if self.s:
        await self.s.close()
    if self.ws:
        await self.ws.close()
        # Drop the reference so repeated cleanup cannot re-close it.
        del self.ws
|
chatgpt/authorization.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
|
| 3 |
+
from fastapi import HTTPException
|
| 4 |
+
|
| 5 |
+
from chatgpt.refreshToken import rt2ac
|
| 6 |
+
from utils.Logger import logger
|
| 7 |
+
from utils.config import authorization_list
|
| 8 |
+
import chatgpt.globals as globals
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def get_req_token(req_token):
    """Map an inbound API key to an account token.

    Keys present in ``authorization_list`` round-robin over the shared
    token pool (skipping tokens already marked as erroring); any other
    key is passed through unchanged.  Returns None when every pooled
    token is in the error list.
    """
    if req_token not in authorization_list:
        # Not an admin key: treat the value itself as the account token.
        return req_token
    usable = len(globals.token_list) - len(globals.error_token_list)
    if usable <= 0:
        return None
    # Advance the cursor, then keep stepping past known-bad tokens.
    globals.count = (globals.count + 1) % len(globals.token_list)
    while globals.token_list[globals.count] in globals.error_token_list:
        globals.count = (globals.count + 1) % len(globals.token_list)
    return globals.token_list[globals.count]
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
async def verify_token(req_token):
    """Turn a request token into an access token.

    Empty tokens are rejected (401) when an authorization list is
    configured, otherwise allowed as anonymous.  JWT access tokens and
    "fk-" share tokens pass through; 45-character refresh tokens are
    exchanged via ``rt2ac``; anything else is returned unchanged.
    """
    if not req_token:
        if authorization_list:
            logger.error("Unauthorized with empty token.")
            raise HTTPException(status_code=401)
        return None
    # Already-usable credentials: JWT access tokens or fk- share tokens.
    if req_token.startswith(("eyJhbGciOi", "fk-")):
        return req_token
    # Refresh tokens are exactly 45 characters long.
    if len(req_token) == 45:
        try:
            return await rt2ac(req_token, force_refresh=False)
        except HTTPException as e:
            raise HTTPException(status_code=e.status_code, detail=e.detail)
    return req_token
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
async def refresh_all_tokens(force_refresh=False):
    """Refresh every refresh token (length 45) in the shared pool.

    Failures are ignored so one bad token cannot stop the sweep; a
    2-second pause between tokens throttles the upstream refresh API.
    """
    for candidate in globals.token_list:
        if len(candidate) != 45:
            continue
        try:
            await asyncio.sleep(2)
            await rt2ac(candidate, force_refresh=force_refresh)
        except HTTPException:
            pass
    logger.info("All tokens refreshed.")
|
chatgpt/chatFormat.py
ADDED
|
@@ -0,0 +1,436 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import json
|
| 3 |
+
import random
|
| 4 |
+
import re
|
| 5 |
+
import string
|
| 6 |
+
import time
|
| 7 |
+
import uuid
|
| 8 |
+
|
| 9 |
+
import pybase64
|
| 10 |
+
import websockets
|
| 11 |
+
from fastapi import HTTPException
|
| 12 |
+
|
| 13 |
+
from api.files import get_file_content
|
| 14 |
+
from api.models import model_system_fingerprint
|
| 15 |
+
from api.tokens import split_tokens_from_content, calculate_image_tokens, num_tokens_from_messages
|
| 16 |
+
from utils.Logger import logger
|
| 17 |
+
|
| 18 |
+
moderation_message = "I'm sorry, I cannot provide or engage in any content related to pornography, violence, or any unethical material. If you have any other questions or need assistance, please feel free to let me know. I'll do my best to provide support and assistance."
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
async def format_not_stream_response(response, prompt_tokens, max_tokens, model):
    """Drain a streamed chunk generator into one chat.completion payload.

    ``response`` yields OpenAI-style SSE lines ("data: {...}"); their
    delta contents are concatenated, truncated to ``max_tokens``, and
    wrapped in a non-streaming response dict.  Raises HTTPException(403)
    when no content was produced.
    """
    chat_id = f"chatcmpl-{''.join(random.choice(string.ascii_letters + string.digits) for _ in range(29))}"
    # Pick a plausible fingerprint for this model, when one is configured.
    system_fingerprint_list = model_system_fingerprint.get(model, None)
    system_fingerprint = random.choice(system_fingerprint_list) if system_fingerprint_list else None
    created_time = int(time.time())
    all_text = ""
    async for chunk in response:
        try:
            if chunk.startswith("data: [DONE]"):
                break
            elif not chunk.startswith("data: "):
                # Not an SSE data line (keep-alives etc.) — skip it.
                continue
            else:
                chunk = json.loads(chunk[6:])
                if not chunk["choices"][0].get("delta"):
                    continue
                all_text += chunk["choices"][0]["delta"]["content"]
        except Exception as e:
            # A malformed chunk is logged and skipped, not fatal.
            logger.error(f"Error: {chunk}, error: {str(e)}")
            continue
    # Enforce max_tokens on the accumulated text and derive the finish reason.
    content, completion_tokens, finish_reason = await split_tokens_from_content(all_text, max_tokens, model)
    message = {
        "role": "assistant",
        "content": content,
    }
    usage = {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens
    }
    if not message.get("content"):
        raise HTTPException(status_code=403, detail="No content in the message.")

    data = {
        "id": chat_id,
        "object": "chat.completion",
        "created": created_time,
        "model": model,
        "choices": [
            {
                "index": 0,
                "message": message,
                "logprobs": None,
                "finish_reason": finish_reason
            }
        ],
        "usage": usage
    }
    if system_fingerprint:
        data["system_fingerprint"] = system_fingerprint
    return data
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
async def wss_stream_response(websocket, conversation_id):
    """Yield decoded SSE bytes for one conversation from a shared websocket.

    Messages for other conversations and messages without a sequenceId
    are skipped.  Every 80th sequenceId is acknowledged back to the
    server.  The loop ends on a receive timeout or when the socket
    closes (a normal close code 1000 yields a final "data: [DONE]").
    """
    while not websocket.closed:
        try:
            message = await asyncio.wait_for(websocket.recv(), timeout=10)
            if message:
                resultObj = json.loads(message)
                sequenceId = resultObj.get("sequenceId", None)
                if not sequenceId:
                    continue
                data = resultObj.get("data", {})
                # The socket is shared; drop frames for other conversations.
                if conversation_id != data.get("conversation_id", ""):
                    continue
                sequenceId = resultObj.get('sequenceId')
                # Periodic ack so the server keeps streaming.
                if sequenceId and sequenceId % 80 == 0:
                    await websocket.send(
                        json.dumps(
                            {"type": "sequenceAck", "sequenceId": sequenceId}
                        )
                    )
                # The SSE payload arrives base64-encoded in "body".
                decoded_bytes = pybase64.b64decode(data.get("body", None))
                yield decoded_bytes
            else:
                print("No message received within the specified time.")
        except asyncio.TimeoutError:
            logger.error("Timeout! No message received within the specified time.")
            break
        except websockets.ConnectionClosed as e:
            # No explicit break: the while condition sees the closed socket.
            if e.code == 1000:
                logger.error("WebSocket closed normally with code 1000 (OK)")
                yield b"data: [DONE]\n\n"
            else:
                logger.error(f"WebSocket closed with error code {e.code}")
        except Exception as e:
            logger.error(f"Error: {str(e)}")
            continue
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
async def head_process_response(response):
    """Peek at the first meaningful SSE event of a conversation stream.

    Returns ``(response, True)`` once a non-user, non-system message
    reports status "in_progress"; returns ``(response, False)`` when the
    stream carries an error event or ends without starting.
    """
    async for raw_chunk in response:
        line = raw_chunk.decode("utf-8")
        if not line.startswith("data: {"):
            continue
        event = json.loads(line[6:])
        message = event.get("message", {})
        if not message and "error" in event:
            return response, False
        author_role = message.get('author', {}).get('role')
        # Echoed user/system messages precede the assistant's reply.
        if author_role in ('user', 'system'):
            continue
        if message.get("status") == "in_progress":
            return response, True
    return response, False
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
async def stream_response(service, response, model, max_tokens):
    """Re-emit a ChatGPT backend SSE stream as OpenAI-style chat.completion.chunk SSE.

    Args:
        service: chat service instance; provides get_download_url(),
            get_response_file_url() and the history_disabled flag.
        response: async iterable yielding raw SSE byte chunks from upstream.
        model: model name echoed into every emitted chunk.
        max_tokens: once this many deltas have been emitted, the stream ends
            with finish_reason "length".

    Yields:
        Strings of the form "data: <json>" followed by a blank line,
        terminated by a final "data: [DONE]" event.

    Uses module-level `model_system_fingerprint`, `moderation_message` and
    `logger` (defined elsewhere in this file).
    """
    chat_id = f"chatcmpl-{''.join(random.choice(string.ascii_letters + string.digits) for _ in range(29))}"
    system_fingerprint_list = model_system_fingerprint.get(model, None)
    system_fingerprint = random.choice(system_fingerprint_list) if system_fingerprint_list else None
    created_time = int(time.time())
    completion_tokens = 0
    # Upstream resends the full accumulated text each event; these offsets
    # track how much has already been forwarded so only the suffix is emitted.
    len_last_content = 0
    len_last_citation = 0
    last_message_id = None
    last_role = None
    last_content_type = None
    model_slug = None
    end = False

    # Template chunk; the delta/finish_reason fields are rewritten per event.
    chunk_new_data = {
        "id": chat_id,
        "object": "chat.completion.chunk",
        "created": created_time,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"role": "assistant", "content": ""},
                "logprobs": None,
                "finish_reason": None
            }
        ]
    }
    if system_fingerprint:
        chunk_new_data["system_fingerprint"] = system_fingerprint
    yield f"data: {json.dumps(chunk_new_data)}\n\n"

    async for chunk in response:
        chunk = chunk.decode("utf-8")
        if end:
            # A terminal chunk was emitted on the previous iteration.
            logger.info(f"Response Model: {model_slug}")
            yield "data: [DONE]\n\n"
            break
        try:
            if chunk.startswith("data: {"):
                chunk_old_data = json.loads(chunk[6:])
                finish_reason = None
                message = chunk_old_data.get("message", {})
                conversation_id = chunk_old_data.get("conversation_id")
                role = message.get('author', {}).get('role')
                if role == 'user' or role == 'system':
                    continue

                status = message.get("status")
                message_id = message.get("id")
                content = message.get("content", {})
                recipient = message.get("recipient", "")
                meta_data = message.get("metadata", {})
                initial_text = meta_data.get("initial_text", "")
                model_slug = meta_data.get("model_slug", model_slug)

                if not message and chunk_old_data.get("type") == "moderation":
                    # Moderation block: replace output with the canned message and stop.
                    delta = {"role": "assistant", "content": moderation_message}
                    finish_reason = "stop"
                    end = True
                elif status == "in_progress":
                    outer_content_type = content.get("content_type")
                    if outer_content_type == "text":
                        part = content.get("parts", [])[0]
                        if not part:
                            # Empty part: only emit a separator on role transitions.
                            if role == 'assistant' and last_role != 'assistant':
                                if last_role == None:
                                    new_text = ""
                                else:
                                    new_text = f"\n"
                            elif role == 'tool' and last_role != 'tool':
                                new_text = f">{initial_text}\n"
                            else:
                                new_text = ""
                        else:
                            # Ignore stray updates for a message other than the current one.
                            if last_message_id and last_message_id != message_id:
                                continue
                            citation = message.get("metadata", {}).get("citations", [])
                            if len(citation) > len_last_citation:
                                # A new citation arrived; render it as a markdown link.
                                inside_metadata = citation[-1].get("metadata", {})
                                citation_title = inside_metadata.get("title", "")
                                citation_url = inside_metadata.get("url", "")
                                new_text = f' **[[""]]({citation_url} "{citation_title}")** '
                                len_last_citation = len(citation)
                            else:
                                if role == 'assistant' and last_role != 'assistant':
                                    if recipient == 'dalle.text2im':
                                        # Wrap the DALL·E prompt in a fenced block.
                                        new_text = f"\n```{recipient}\n{part[len_last_content:]}"
                                    elif last_role == None:
                                        new_text = part[len_last_content:]
                                    else:
                                        new_text = f"\n\n{part[len_last_content:]}"
                                elif role == 'tool' and last_role != 'tool':
                                    new_text = f">{initial_text}\n{part[len_last_content:]}"
                                elif role == 'tool':
                                    new_text = part[len_last_content:].replace("\n\n", "\n")
                                else:
                                    new_text = part[len_last_content:]
                                len_last_content = len(part)
                    else:
                        # Non-text content (code / execution output): fence on transitions.
                        text = content.get("text", "")
                        if outer_content_type == "code" and last_content_type != "code":
                            language = content.get("language", "")
                            if not language or language == "unknown":
                                language = recipient
                            new_text = "\n```" + language + "\n" + text[len_last_content:]
                        elif outer_content_type == "execution_output" and last_content_type != "execution_output":
                            new_text = "\n```" + "Output" + "\n" + text[len_last_content:]
                        else:
                            new_text = text[len_last_content:]
                        len_last_content = len(text)
                        # Close the previous fence when leaving a fenced content type.
                        if last_content_type == "code" and outer_content_type != "code":
                            new_text = "\n```\n" + new_text
                        elif last_content_type == "execution_output" and outer_content_type != "execution_output":
                            new_text = "\n```\n" + new_text

                    delta = {"content": new_text}
                    last_content_type = outer_content_type
                    if completion_tokens >= max_tokens:
                        delta = {}
                        finish_reason = "length"
                        end = True
                elif status == "finished_successfully":
                    if content.get("content_type") == "multimodal_text":
                        parts = content.get("parts", [])
                        delta = {}
                        for part in parts:
                            if isinstance(part, str):
                                continue
                            inner_content_type = part.get('content_type')
                            if inner_content_type == "image_asset_pointer":
                                last_content_type = "image_asset_pointer"
                                file_id = part.get('asset_pointer').replace('file-service://', '')
                                logger.debug(f"file_id: {file_id}")
                                image_download_url = await service.get_download_url(file_id)
                                logger.debug(f"image_download_url: {image_download_url}")
                                if image_download_url:
                                    # NOTE(review): this literal appears to have lost its
                                    # image-link payload (e.g. a markdown image using
                                    # image_download_url) in rendering — confirm upstream.
                                    delta = {"content": f"\n```\n\n"}
                                else:
                                    delta = {"content": f"\n```\nFailed to load the image.\n"}
                    elif message.get("end_turn"):
                        part = content.get("parts", [])[0]
                        new_text = part[len_last_content:]
                        if not new_text:
                            # Nothing new to stream: resolve any sandbox file links instead.
                            matches = re.findall(r'\(sandbox:(.*?)\)', part)
                            if matches:
                                file_url_content = ""
                                for i, sandbox_path in enumerate(matches):
                                    file_download_url = await service.get_response_file_url(conversation_id, message_id, sandbox_path)
                                    if file_download_url:
                                        # NOTE(review): this literal also looks like it lost a
                                        # markdown link built from file_download_url — confirm.
                                        file_url_content += f"\n```\n\n\n"
                                delta = {"content": file_url_content}
                            else:
                                delta = {}
                        else:
                            delta = {"content": new_text}
                        finish_reason = "stop"
                        end = True
                    else:
                        # A sub-message (e.g. tool step) finished; reset the offset
                        # for the next message and optionally emit its finished_text.
                        len_last_content = 0
                        if meta_data.get("finished_text"):
                            delta = {"content": f"\n{meta_data.get('finished_text')}\n"}
                        else:
                            continue
                else:
                    continue
                last_message_id = message_id
                last_role = role
                if not end and not delta.get("content"):
                    delta = {"role": "assistant", "content": ""}
                chunk_new_data["choices"][0]["delta"] = delta
                chunk_new_data["choices"][0]["finish_reason"] = finish_reason
                if not service.history_disabled:
                    # Expose upstream IDs so the caller can continue the conversation.
                    chunk_new_data.update({
                        "message_id": message_id,
                        "conversation_id": conversation_id,
                    })
                completion_tokens += 1
                yield f"data: {json.dumps(chunk_new_data)}\n\n"
            elif chunk.startswith("data: [DONE]"):
                logger.info(f"Response Model: {model_slug}")
                yield "data: [DONE]\n\n"
            else:
                continue
        except Exception as e:
            # Best effort: surface explicit upstream errors, otherwise log and keep going.
            if chunk.startswith("data: "):
                chunk_data = json.loads(chunk[6:])
                if chunk_data.get("error"):
                    logger.error(f"Error: {chunk_data.get('error')}")
                    yield "data: [DONE]\n\n"
                    break
            logger.error(f"Error: {chunk}, details: {str(e)}")
            continue
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def get_url_from_content(content):
    """Split a leading URL off *content*.

    When *content* is a string starting with "http" and its first
    whitespace-separated token parses as a URL, returns ``(url, remainder)``
    with the URL removed from the text; otherwise returns ``(None, content)``.
    """
    if not (isinstance(content, str) and content.startswith('http')):
        return None, content
    try:
        first_token = content.split(' ')[0]
        url = re.match(
            r'(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
            first_token)[0]
    except Exception:
        # re.match returned None (no URL) — subscripting it raised.
        return None, content
    return url, content.replace(url, '').strip()
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def format_messages_with_url(content):
    """Extract every leading URL from *content* and rebuild it as a
    multi-part message.

    Returns *content* unchanged when no URL is found; otherwise a list with
    one "text" part (the leftover text) followed by one "image_url" part per
    extracted URL.
    """
    found_urls = []
    url, content = get_url_from_content(content)
    while url:
        found_urls.append(url)
        logger.info(f"Found a file_url from messages: {url}")
        url, content = get_url_from_content(content)
    if not found_urls:
        return content
    rebuilt = [
        {
            "type": "text",
            "text": content
        }
    ]
    rebuilt.extend(
        {
            "type": "image_url",
            "image_url": {
                "url": item
            }
        }
        for item in found_urls
    )
    return rebuilt
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
async def api_messages_to_chat(service, api_messages, upload_by_url=False):
    """Convert OpenAI-style API messages into ChatGPT backend chat messages.

    Args:
        service: chat service instance; provides upload_file(), check_upload()
            and the resp_model attribute used for token counting.
        api_messages: list of {"role": ..., "content": ...} dicts; content may
            be a string or a list of typed parts (text / image_url).
        upload_by_url: when True, bare URLs at the start of string content are
            extracted and treated as image_url parts (via format_messages_with_url).

    Returns:
        (chat_messages, prompt_tokens) — backend-format messages plus an
        estimated prompt token count (text tokens + file/image tokens).
    """
    file_tokens = 0
    chat_messages = []
    for api_message in api_messages:
        role = api_message.get('role')
        content = api_message.get('content')
        if upload_by_url:
            if isinstance(content, str):
                content = format_messages_with_url(content)
        if isinstance(content, list):
            # Multi-part content: collect text parts and upload any referenced files.
            parts = []
            attachments = []
            content_type = "multimodal_text"
            for i in content:
                if i.get("type") == "text":
                    parts.append(i.get("text"))
                elif i.get("type") == "image_url":
                    image_url = i.get("image_url")
                    url = image_url.get("url")
                    detail = image_url.get("detail", "auto")
                    file_content, mime_type = await get_file_content(url)
                    file_meta = await service.upload_file(file_content, mime_type)
                    if file_meta:
                        file_id = file_meta["file_id"]
                        file_size = file_meta["size_bytes"]
                        file_name = file_meta["file_name"]
                        mime_type = file_meta["mime_type"]
                        use_case = file_meta["use_case"]
                        if mime_type.startswith("image/"):
                            # Images become asset pointers and count image tokens.
                            width, height = file_meta["width"], file_meta["height"]
                            file_tokens += await calculate_image_tokens(width, height, detail)
                            parts.append({
                                "content_type": "image_asset_pointer",
                                "asset_pointer": f"file-service://{file_id}",
                                "size_bytes": file_size,
                                "width": width,
                                "height": height
                            })
                            attachments.append({
                                "id": file_id,
                                "size": file_size,
                                "name": file_name,
                                "mime_type": mime_type,
                                "width": width,
                                "height": height
                            })
                        else:
                            # Non-image files: confirm the upload finished unless the
                            # backend already marked it "ace_upload", then charge a
                            # rough 1-token-per-KB estimate.
                            if not use_case == "ace_upload":
                                await service.check_upload(file_id)
                            file_tokens += file_size // 1000
                            attachments.append({
                                "id": file_id,
                                "size": file_size,
                                "name": file_name,
                                "mime_type": mime_type,
                            })
            metadata = {
                "attachments": attachments
            }
        else:
            # Plain string content maps straight to a single text part.
            content_type = "text"
            parts = [content]
            metadata = {}
        chat_message = {
            "id": f"{uuid.uuid4()}",
            "author": {"role": role},
            "content": {"content_type": content_type, "parts": parts},
            "metadata": metadata
        }
        chat_messages.append(chat_message)
    text_tokens = await num_tokens_from_messages(api_messages, service.resp_model)
    prompt_tokens = text_tokens + file_tokens
    return chat_messages, prompt_tokens
|
chatgpt/chatLimit.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import threading
|
| 2 |
+
import time
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
|
| 5 |
+
from utils.Logger import logger
|
| 6 |
+
|
| 7 |
+
limit_details = {}
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def check_is_limit(detail, token, model):
    """Record a per-token/model rate-limit expiry when *detail* reports one.

    *detail* is the upstream rate-limit payload; a truthy ``clears_in``
    (seconds from now) stores the computed expiry in the module-level
    ``limit_details`` map and logs it. Otherwise this is a no-op.
    """
    if not token or not isinstance(detail, dict):
        return
    seconds_left = detail.get('clears_in')
    if not seconds_left:
        return
    clear_time = int(time.time()) + seconds_left
    limit_details.setdefault(token, {})[model] = clear_time
    logger.info(f"{token[:40]}: Reached {model} limit, will be cleared at {datetime.fromtimestamp(clear_time).replace(microsecond=0)}")
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
async def handle_request_limit(token, model):
    """Return a refusal message when *token* is still rate-limited for *model*.

    Looks up the recorded expiry in the module-level ``limit_details`` map.
    While the limit is active, logs and returns a human-readable message;
    once it has lapsed, drops the stale entry and returns None. Lookup
    failures are logged and treated as "not limited".
    """
    try:
        token_limits = limit_details.get(token)
        if not token_limits or model not in token_limits:
            return None
        limit_time = token_limits[model]
        if limit_time <= int(time.time()):
            # The recorded limit has already expired; forget it.
            del limit_details[token][model]
            return None
        clear_date = datetime.fromtimestamp(limit_time).replace(microsecond=0)
        result = f"Request limit exceeded. You can continue with the default model now, or try again after {clear_date}"
        logger.info(result)
        return result
    except KeyError as e:
        logger.error(f"Key error: {e}")
        return None
    except Exception as e:
        logger.error(f"An unexpected error occurred: {e}")
        return None
|
chatgpt/globals.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
from utils.Logger import logger
|
| 5 |
+
|
| 6 |
+
# Persistent state files live under a local "data" directory.
DATA_FOLDER = "data"
TOKENS_FILE = os.path.join(DATA_FOLDER, "token.txt")
REFRESH_MAP_FILE = os.path.join(DATA_FOLDER, "refresh_map.json")
ERROR_TOKENS_FILE = os.path.join(DATA_FOLDER, "error_token.txt")
WSS_MAP_FILE = os.path.join(DATA_FOLDER, "wss_map.json")

# Shared mutable module state (imported and mutated elsewhere in the project).
count = 0
token_list = []
error_token_list = []
refresh_map = {}
wss_map = {}


def _load_json_map(path):
    """Return the JSON object stored at *path*, or {} when the file is absent."""
    if os.path.exists(path):
        with open(path, "r") as file:
            return json.load(file)
    return {}


def _load_token_lines(path, target):
    """Append non-empty, non-comment lines of *path* to *target*.

    Creates an empty file at *path* when it does not exist, matching the
    original behavior of initializing the token files on first run.
    """
    if os.path.exists(path):
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip() and not line.startswith("#"):
                    target.append(line.strip())
    else:
        with open(path, "w", encoding="utf-8"):
            pass


if not os.path.exists(DATA_FOLDER):
    os.makedirs(DATA_FOLDER)

refresh_map = _load_json_map(REFRESH_MAP_FILE)
wss_map = _load_json_map(WSS_MAP_FILE)

_load_token_lines(TOKENS_FILE, token_list)
_load_token_lines(ERROR_TOKENS_FILE, error_token_list)

if token_list:
    logger.info(f"Token list count: {len(token_list)}, Error token list count: {len(error_token_list)}")
|
chatgpt/proofofWork.py
ADDED
|
@@ -0,0 +1,512 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import hashlib
|
| 2 |
+
import json
|
| 3 |
+
import random
|
| 4 |
+
import re
|
| 5 |
+
import time
|
| 6 |
+
import uuid
|
| 7 |
+
from datetime import datetime, timedelta, timezone
|
| 8 |
+
from html.parser import HTMLParser
|
| 9 |
+
|
| 10 |
+
import pybase64
|
| 11 |
+
|
| 12 |
+
from utils.Logger import logger
|
| 13 |
+
from utils.config import conversation_only
|
| 14 |
+
|
| 15 |
+
# Candidate hardware figures randomly sampled when forging proof-of-work answers.
cores = [16, 24, 32]          # plausible CPU core counts
screens = [3000, 4000, 6000]  # plausible screen size figures
# strftime layout matching a JavaScript Date.toString()-style timestamp — presumably
# what the sentinel challenge expects; confirm against the PoW payload builder.
timeLayout = "%a %b %d %Y %H:%M:%S"

# Cache of data scraped from the ChatGPT homepage (filled/refreshed by get_dpl below).
cached_scripts = []        # <script src=...> URLs found in the page
cached_dpl = ""            # build identifier ("dpl") parsed from script URLs or data-build
cached_time = 0            # epoch seconds when the cache was last (re)filled
cached_require_proof = ""  # not used in this chunk; presumably set by the PoW flow elsewhere
|
| 23 |
+
|
| 24 |
+
# Browser-fingerprint key pools used when fabricating a proof-of-work answer.
# NOTE(review): entries join name and value with U+2212 (minus sign, not a hyphen)
# and intentionally contain duplicates — presumably mirroring real browser
# property-enumeration output; do not de-duplicate or normalize the separator
# without confirming against the sentinel protocol.
navigator_key = [
    "registerProtocolHandler−function registerProtocolHandler() { [native code] }",
    "storage−[object StorageManager]",
    "locks−[object LockManager]",
    "appCodeName−Mozilla",
    "permissions−[object Permissions]",
    "appVersion−5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
    "share−function share() { [native code] }",
    "webdriver−false",
    "managed−[object NavigatorManagedData]",
    "canShare−function canShare() { [native code] }",
    "vendor−Google Inc.",
    "vendor−Google Inc.",
    "mediaDevices−[object MediaDevices]",
    "vibrate−function vibrate() { [native code] }",
    "storageBuckets−[object StorageBucketManager]",
    "mediaCapabilities−[object MediaCapabilities]",
    "getGamepads−function getGamepads() { [native code] }",
    "bluetooth−[object Bluetooth]",
    "share−function share() { [native code] }",
    "cookieEnabled−true",
    "virtualKeyboard−[object VirtualKeyboard]",
    "product−Gecko",
    "mediaDevices−[object MediaDevices]",
    "canShare−function canShare() { [native code] }",
    "getGamepads−function getGamepads() { [native code] }",
    "product−Gecko",
    "xr−[object XRSystem]",
    "clipboard−[object Clipboard]",
    "storageBuckets−[object StorageBucketManager]",
    "unregisterProtocolHandler−function unregisterProtocolHandler() { [native code] }",
    "productSub−20030107",
    "login−[object NavigatorLogin]",
    "vendorSub−",
    "login−[object NavigatorLogin]",
    "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
    "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
    "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
    "mediaDevices−[object MediaDevices]",
    "locks−[object LockManager]",
    "webkitGetUserMedia−function webkitGetUserMedia() { [native code] }",
    "vendor−Google Inc.",
    "xr−[object XRSystem]",
    "mediaDevices−[object MediaDevices]",
    "virtualKeyboard−[object VirtualKeyboard]",
    "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
    "virtualKeyboard−[object VirtualKeyboard]",
    "appName−Netscape",
    "storageBuckets−[object StorageBucketManager]",
    "presentation−[object Presentation]",
    "onLine−true",
    "mimeTypes−[object MimeTypeArray]",
    "credentials−[object CredentialsContainer]",
    "presentation−[object Presentation]",
    "getGamepads−function getGamepads() { [native code] }",
    "vendorSub−",
    "virtualKeyboard−[object VirtualKeyboard]",
    "serviceWorker−[object ServiceWorkerContainer]",
    "xr−[object XRSystem]",
    "product−Gecko",
    "keyboard−[object Keyboard]",
    "gpu−[object GPU]",
    "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
    "webkitPersistentStorage−[object DeprecatedStorageQuota]",
    "doNotTrack",
    "clearAppBadge−function clearAppBadge() { [native code] }",
    "presentation−[object Presentation]",
    "serial−[object Serial]",
    "locks−[object LockManager]",
    "requestMIDIAccess−function requestMIDIAccess() { [native code] }",
    "locks−[object LockManager]",
    "requestMediaKeySystemAccess−function requestMediaKeySystemAccess() { [native code] }",
    "vendor−Google Inc.",
    "pdfViewerEnabled−true",
    "language−zh-CN",
    "setAppBadge−function setAppBadge() { [native code] }",
    "geolocation−[object Geolocation]",
    "userAgentData−[object NavigatorUAData]",
    "mediaCapabilities−[object MediaCapabilities]",
    "requestMIDIAccess−function requestMIDIAccess() { [native code] }",
    "getUserMedia−function getUserMedia() { [native code] }",
    "mediaDevices−[object MediaDevices]",
    "webkitPersistentStorage−[object DeprecatedStorageQuota]",
    "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
    "sendBeacon−function sendBeacon() { [native code] }",
    "hardwareConcurrency−32",
    "appVersion−5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
    "credentials−[object CredentialsContainer]",
    "storage−[object StorageManager]",
    "cookieEnabled−true",
    "pdfViewerEnabled−true",
    "windowControlsOverlay−[object WindowControlsOverlay]",
    "scheduling−[object Scheduling]",
    "pdfViewerEnabled−true",
    "hardwareConcurrency−32",
    "xr−[object XRSystem]",
    "userAgent−Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
    "webdriver−false",
    "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
    "getInstalledRelatedApps−function getInstalledRelatedApps() { [native code] }",
    "bluetooth−[object Bluetooth]"
]
# Keys sampled from `document` — the react listener name looks instance-specific
# (presumably captured from a live page); confirm before reuse.
document_key = ['_reactListeningo743lnnpvdg', 'location']
# Property and event-handler names enumerated from a Chrome `window` object.
window_key = [
    "0",
    "window",
    "self",
    "document",
    "name",
    "location",
    "customElements",
    "history",
    "navigation",
    "locationbar",
    "menubar",
    "personalbar",
    "scrollbars",
    "statusbar",
    "toolbar",
    "status",
    "closed",
    "frames",
    "length",
    "top",
    "opener",
    "parent",
    "frameElement",
    "navigator",
    "origin",
    "external",
    "screen",
    "innerWidth",
    "innerHeight",
    "scrollX",
    "pageXOffset",
    "scrollY",
    "pageYOffset",
    "visualViewport",
    "screenX",
    "screenY",
    "outerWidth",
    "outerHeight",
    "devicePixelRatio",
    "clientInformation",
    "screenLeft",
    "screenTop",
    "styleMedia",
    "onsearch",
    "isSecureContext",
    "trustedTypes",
    "performance",
    "onappinstalled",
    "onbeforeinstallprompt",
    "crypto",
    "indexedDB",
    "sessionStorage",
    "localStorage",
    "onbeforexrselect",
    "onabort",
    "onbeforeinput",
    "onbeforematch",
    "onbeforetoggle",
    "onblur",
    "oncancel",
    "oncanplay",
    "oncanplaythrough",
    "onchange",
    "onclick",
    "onclose",
    "oncontentvisibilityautostatechange",
    "oncontextlost",
    "oncontextmenu",
    "oncontextrestored",
    "oncuechange",
    "ondblclick",
    "ondrag",
    "ondragend",
    "ondragenter",
    "ondragleave",
    "ondragover",
    "ondragstart",
    "ondrop",
    "ondurationchange",
    "onemptied",
    "onended",
    "onerror",
    "onfocus",
    "onformdata",
    "oninput",
    "oninvalid",
    "onkeydown",
    "onkeypress",
    "onkeyup",
    "onload",
    "onloadeddata",
    "onloadedmetadata",
    "onloadstart",
    "onmousedown",
    "onmouseenter",
    "onmouseleave",
    "onmousemove",
    "onmouseout",
    "onmouseover",
    "onmouseup",
    "onmousewheel",
    "onpause",
    "onplay",
    "onplaying",
    "onprogress",
    "onratechange",
    "onreset",
    "onresize",
    "onscroll",
    "onsecuritypolicyviolation",
    "onseeked",
    "onseeking",
    "onselect",
    "onslotchange",
    "onstalled",
    "onsubmit",
    "onsuspend",
    "ontimeupdate",
    "ontoggle",
    "onvolumechange",
    "onwaiting",
    "onwebkitanimationend",
    "onwebkitanimationiteration",
    "onwebkitanimationstart",
    "onwebkittransitionend",
    "onwheel",
    "onauxclick",
    "ongotpointercapture",
    "onlostpointercapture",
    "onpointerdown",
    "onpointermove",
    "onpointerrawupdate",
    "onpointerup",
    "onpointercancel",
    "onpointerover",
    "onpointerout",
    "onpointerenter",
    "onpointerleave",
    "onselectstart",
    "onselectionchange",
    "onanimationend",
    "onanimationiteration",
    "onanimationstart",
    "ontransitionrun",
    "ontransitionstart",
    "ontransitionend",
    "ontransitioncancel",
    "onafterprint",
    "onbeforeprint",
    "onbeforeunload",
    "onhashchange",
    "onlanguagechange",
    "onmessage",
    "onmessageerror",
    "onoffline",
    "ononline",
    "onpagehide",
    "onpageshow",
    "onpopstate",
    "onrejectionhandled",
    "onstorage",
    "onunhandledrejection",
    "onunload",
    "crossOriginIsolated",
    "scheduler",
    "alert",
    "atob",
    "blur",
    "btoa",
    "cancelAnimationFrame",
    "cancelIdleCallback",
    "captureEvents",
    "clearInterval",
    "clearTimeout",
    "close",
    "confirm",
    "createImageBitmap",
    "fetch",
    "find",
    "focus",
    "getComputedStyle",
    "getSelection",
    "matchMedia",
    "moveBy",
    "moveTo",
    "open",
    "postMessage",
    "print",
    "prompt",
    "queueMicrotask",
    "releaseEvents",
    "reportError",
    "requestAnimationFrame",
    "requestIdleCallback",
    "resizeBy",
    "resizeTo",
    "scroll",
    "scrollBy",
    "scrollTo",
    "setInterval",
    "setTimeout",
    "stop",
    "structuredClone",
    "webkitCancelAnimationFrame",
    "webkitRequestAnimationFrame",
    "chrome",
    "caches",
    "cookieStore",
    "ondevicemotion",
    "ondeviceorientation",
    "ondeviceorientationabsolute",
    "launchQueue",
    "documentPictureInPicture",
    "getScreenDetails",
    "queryLocalFonts",
    "showDirectoryPicker",
    "showOpenFilePicker",
    "showSaveFilePicker",
    "originAgentCluster",
    "onpageswap",
    "onpagereveal",
    "credentialless",
    "speechSynthesis",
    "onscrollend",
    "webkitRequestFileSystem",
    "webkitResolveLocalFileSystemURL",
    "sendMsgToSolverCS",
    "webpackChunk_N_E",
    "__next_set_public_path__",
    "next",
    "__NEXT_DATA__",
    "__SSG_MANIFEST_CB",
    "__NEXT_P",
    "_N_E",
    "regeneratorRuntime",
    "__REACT_INTL_CONTEXT__",
    "DD_RUM",
    "_",
    "filterCSS",
    "filterXSS",
    "__SEGMENT_INSPECTOR__",
    "__NEXT_PRELOADREADY",
    "Intercom",
    "__MIDDLEWARE_MATCHERS",
    "__STATSIG_SDK__",
    "__STATSIG_JS_SDK__",
    "__STATSIG_RERENDER_OVERRIDE__",
    "_oaiHandleSessionExpired",
    "__BUILD_MANIFEST",
    "__SSG_MANIFEST",
    "__intercomAssignLocation",
    "__intercomReloadLocation"
]
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
class ScriptSrcParser(HTMLParser):
    """HTML parser that collects ``<script src>`` URLs from a ChatGPT page
    and extracts the "dpl" build identifier needed for proof-of-work configs.

    Results are published through the module-level caches
    (``cached_scripts``, ``cached_dpl``, ``cached_time``) shared with
    get_data_build_from_html() and get_dpl().
    """

    def handle_starttag(self, tag, attrs):
        global cached_scripts, cached_dpl, cached_time
        if tag == "script":
            attrs_dict = dict(attrs)
            if "src" in attrs_dict:
                src = attrs_dict["src"]
                cached_scripts.append(src)
                # The dpl shows up inside script URLs as a path segment of
                # the form "c/<version>/_".
                match = re.search(r"c/[^/]*/_", src)
                if match:
                    cached_dpl = match.group(0)
                    cached_time = int(time.time())
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
def get_data_build_from_html(html_content):
    """Populate the module caches (scripts, dpl, timestamp) from a ChatGPT HTML page.

    The page is first fed through ScriptSrcParser to collect script URLs
    (which may also yield the dpl). If no dpl was found that way, fall back
    to the ``data-build`` attribute on the ``<html>`` tag.
    """
    global cached_scripts, cached_dpl, cached_time
    parser = ScriptSrcParser()
    parser.feed(html_content)
    if not cached_scripts:
        # Guarantee get_config() always has at least one script candidate.
        cached_scripts.append("https://chatgpt.com/backend-api/sentinel/sdk.js")
    if not cached_dpl:
        match = re.search(r'<html[^>]*data-build="([^"]*)"', html_content)
        if match:
            data_build = match.group(1)
            cached_dpl = data_build
            cached_time = int(time.time())
    logger.info(f"Found dpl: {cached_dpl}")
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
async def get_dpl(service):
    """Ensure the cached dpl/build info is fresh (younger than 15 minutes).

    Refetches the ChatGPT front page via *service* when stale. Returns True
    when a usable dpl is cached (or in conversation-only mode, where the
    dpl is not needed), False when the fetch failed.
    """
    global cached_scripts, cached_dpl, cached_time
    if int(time.time()) - cached_time < 15 * 60:
        # Cache still fresh; nothing to do.
        return True
    headers = service.base_headers.copy()
    cached_scripts = []
    cached_dpl = ""
    try:
        if conversation_only:
            return True
        r = await service.s.get(f"{service.host_url}/", headers=headers, timeout=5)
        r.raise_for_status()
        get_data_build_from_html(r.text)
        if not cached_dpl:
            raise Exception("No Cached DPL")
        else:
            return True
    except Exception as e:
        logger.info(f"Failed to get dpl: {e}")
        # Record the failed attempt: None marks "fetch tried and failed",
        # and the timestamp prevents hammering the endpoint on every call.
        cached_dpl = None
        cached_time = int(time.time())
        return False
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
def get_parse_time():
    """Return the current time in UTC-5, formatted like a JavaScript
    ``Date`` string, for use in the proof-of-work config."""
    eastern = timezone(timedelta(hours=-5))
    stamp = datetime.now(eastern).strftime(timeLayout)
    return f"{stamp} GMT-0500 (Eastern Standard Time)"
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def get_config(user_agent):
    """Build the browser-fingerprint config array used by the PoW answer.

    Field order is load-bearing: generate_answer() splices its per-iteration
    counters into fixed positions (indices 3 and 9) when serialising this
    list, so entries must not be reordered.
    """
    core = random.choice(cores)
    screen = random.choice(screens)
    config = [
        core + screen,
        get_parse_time(),
        4294705152,  # presumably the browser's jsHeapSizeLimit — TODO confirm
        0,           # placeholder; replaced by counter i in generate_answer()
        user_agent,
        random.choice(cached_scripts) if cached_scripts else None,
        cached_dpl,
        "en-US",
        "en-US,es-US,en,es",
        0,           # placeholder; replaced by counter i >> 1 in generate_answer()
        random.choice(navigator_key),
        random.choice(document_key),
        random.choice(window_key),
        time.perf_counter(),
        str(uuid.uuid4()),
    ]
    return config
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
def get_answer_token(seed, diff, config):
    """Solve the proof-of-work for (seed, diff) and return the prefixed token.

    Returns ``(token, solved)`` where token is "gAAAAAB" + the base64
    payload, and solved is False when the brute-force gave up and the
    fallback payload was used.
    """
    start = time.time()
    answer, solved = generate_answer(seed, diff, config)
    end = time.time()
    logger.info(f'diff: {diff}, time: {int((end - start) * 1e6) / 1e3}ms, solved: {solved}')
    return "gAAAAAB" + answer, solved
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
def generate_answer(seed, diff, config):
    """Brute-force the sentinel proof-of-work.

    Tries up to 500000 counter values, splicing each into the JSON-encoded
    config (list positions 3 and 9), base64-encoding the result and hashing
    sha3_512(seed + payload) until the digest prefix is lexicographically
    <= the decoded difficulty target. Returns ``(base64_payload, True)`` on
    success, or a static fallback payload with solved=False.
    """
    diff_len = len(diff)
    seed_encoded = seed.encode()
    # Pre-serialise the static parts of the config once; only the two
    # counter fields (indices 3 and 9) change per iteration.
    static_config_part1 = (json.dumps(config[:3], separators=(',', ':'), ensure_ascii=False)[:-1] + ',').encode()
    static_config_part2 = (',' + json.dumps(config[4:9], separators=(',', ':'), ensure_ascii=False)[1:-1] + ',').encode()
    static_config_part3 = (',' + json.dumps(config[10:], separators=(',', ':'), ensure_ascii=False)[1:]).encode()

    target_diff = bytes.fromhex(diff)

    for i in range(500000):
        dynamic_json_i = str(i).encode()
        dynamic_json_j = str(i >> 1).encode()
        final_json_bytes = static_config_part1 + dynamic_json_i + static_config_part2 + dynamic_json_j + static_config_part3
        base_encode = pybase64.b64encode(final_json_bytes)
        hash_value = hashlib.sha3_512(seed_encoded + base_encode).digest()
        # NOTE: diff_len counts hex *characters*, so this slice is twice as
        # long as target_diff; bytes comparison is lexicographic.
        if hash_value[:diff_len] <= target_diff:
            return base_encode.decode(), True

    return "wQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + pybase64.b64encode(f'"{seed}"'.encode()).decode(), False
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
def get_requirements_token(config):
    """Produce a "gAAAAAC"-prefixed requirements token with an easy
    difficulty, mirroring the browser's initial sentinel handshake."""
    seed = format(random.random())
    token_body, _solved = generate_answer(seed, "0fffff", config)
    return 'gAAAAAC' + token_body
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
if __name__ == "__main__":
    # Ad-hoc smoke test: seed the module caches with a known script URL and
    # dpl, then generate a requirements token once.
    # cached_scripts.append(
    #     "https://cdn.oaistatic.com/_next/static/cXh69klOLzS0Gy2joLDRS/_ssgManifest.js?dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3")
    # cached_dpl = "453ebaec0d44c2decab71692e1bfe39be35a24b3"
    # cached_time = int(time.time())
    # for i in range(10):
    #     seed = format(random.random())
    #     diff = "000032"
    #     config = get_config("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome")
    #     answer = get_answer_token(seed, diff, config)
    cached_scripts.append(
        "https://cdn.oaistatic.com/_next/static/cXh69klOLzS0Gy2joLDRS/_ssgManifest.js?dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3")
    cached_dpl = "dpl=453ebaec0d44c2decab71692e1bfe39be35a24b3"
    config = get_config("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36")
    get_requirements_token(config)
|
chatgpt/refreshToken.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import random
|
| 3 |
+
import time
|
| 4 |
+
|
| 5 |
+
from fastapi import HTTPException
|
| 6 |
+
|
| 7 |
+
from utils.Client import Client
|
| 8 |
+
from utils.Logger import logger
|
| 9 |
+
from utils.config import proxy_url_list
|
| 10 |
+
import chatgpt.globals as globals
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def save_refresh_map(refresh_map):
    """Persist the refresh-token -> access-token cache to disk as JSON."""
    with open(globals.REFRESH_MAP_FILE, "w") as fp:
        json.dump(refresh_map, fp)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
async def rt2ac(refresh_token, force_refresh=False):
    """Exchange a refresh token for an access token, using a 5-day cache.

    When a cached entry exists, is fresh, and *force_refresh* is False, the
    cached access token is returned. Otherwise a new token is fetched from
    OpenAI, stored in the cache, and persisted to disk.
    """
    cached = globals.refresh_map.get(refresh_token)
    cache_usable = (
        not force_refresh
        and refresh_token in globals.refresh_map
        and int(time.time()) - globals.refresh_map.get(refresh_token, {}).get("timestamp", 0) < 5 * 24 * 60 * 60
    )
    if cache_usable:
        logger.info(f"refresh_token -> access_token from cache")
        return cached["token"]
    try:
        access_token = await chat_refresh(refresh_token)
        globals.refresh_map[refresh_token] = {"token": access_token, "timestamp": int(time.time())}
        save_refresh_map(globals.refresh_map)
        logger.info(f"refresh_token -> access_token with openai: {access_token}")
        return access_token
    except HTTPException as e:
        raise HTTPException(status_code=e.status_code, detail=e.detail)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
async def chat_refresh(refresh_token):
    """Call the OpenAI OAuth token endpoint to turn a refresh token into an
    access token.

    On a non-200 response the refresh token is recorded in the error-token
    file and list before failing. Any failure (network error, bad status)
    is logged and re-raised as a 500 HTTPException. The HTTP client is
    always closed.
    """
    data = {
        "client_id": "pdlLIX2Y72MIl2rhLhTE9VV9bN905kBh",
        "grant_type": "refresh_token",
        "redirect_uri": "com.openai.chat://auth0.openai.com/ios/com.openai.chat/callback",
        "refresh_token": refresh_token
    }
    client = Client(proxy=random.choice(proxy_url_list) if proxy_url_list else None)
    try:
        r = await client.post("https://auth0.openai.com/oauth/token", json=data, timeout=5)
        if r.status_code == 200:
            access_token = r.json()['access_token']
            return access_token
        else:
            # NOTE(review): the token is appended to the file on every
            # failure, even when already present; only the in-memory list
            # is deduplicated.
            with open(globals.ERROR_TOKENS_FILE, "a", encoding="utf-8") as f:
                f.write(refresh_token + "\n")
            if refresh_token not in globals.error_token_list:
                globals.error_token_list.append(refresh_token)
            # Raised here so the except-branch below turns it into a 500.
            raise Exception(r.text[:100])
    except Exception as e:
        logger.error(f"Failed to refresh access_token `{refresh_token}`: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to refresh access_token.")
    finally:
        await client.close()
        del client
|
chatgpt/reverseProxy.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
|
| 3 |
+
from fastapi import Request, HTTPException
|
| 4 |
+
from fastapi.responses import StreamingResponse, Response
|
| 5 |
+
from starlette.background import BackgroundTask
|
| 6 |
+
|
| 7 |
+
from utils.Client import Client
|
| 8 |
+
from utils.config import chatgpt_base_url_list, proxy_url_list, enable_gateway
|
| 9 |
+
|
| 10 |
+
# Hop-by-hop / infrastructure headers that must not be forwarded upstream.
# chatgpt_reverse_proxy checks membership with key.lower(), so every entry
# is lowercase. The original list contained many duplicates
# (x-real-ip, x-forwarded-*, cf-*, x-middleware-prefetch); deduplicated here
# with first-occurrence order preserved — membership is unchanged.
headers_reject_list = [
    "x-real-ip",
    "x-forwarded-for",
    "x-forwarded-proto",
    "x-forwarded-port",
    "x-forwarded-host",
    "x-forwarded-server",
    "cf-warp-tag-id",
    "cf-visitor",
    "cf-ray",
    "cf-connecting-ip",
    "cf-ipcountry",
    "cdn-loop",
    "remote-host",
    "x-frame-options",
    "x-xss-protection",
    "x-content-type-options",
    "content-security-policy",
    "host",
    "cookie",
    "connection",
    "content-length",
    "content-encoding",
    "x-middleware-prefetch",
    "x-nextjs-data",
    "purpose",
    "x-forwarded-uri",
    "x-forwarded-path",
    "x-forwarded-method",
    "x-forwarded-protocol",
    "x-forwarded-scheme",
    "cf-request-id",
    "cf-worker",
    "cf-access-client-id",
    "cf-access-client-device-type",
    "cf-access-client-device-model",
    "cf-access-client-device-name",
    "cf-access-client-device-brand",
]
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
async def chatgpt_reverse_proxy(request: Request, path: str):
    """Forward an incoming request to the appropriate ChatGPT upstream and
    rewrite the response so links/cookies point back at this gateway.

    Upstream selection: paths under "v1/" go to ab.chatgpt.com, asset paths
    to cdn.oaistatic.com, everything else to a random configured base URL.
    Hop-by-hop headers are stripped, browser-like headers are injected, and
    redirect/HTML responses have upstream hostnames replaced with this
    gateway's host. Raises 404 when the gateway is disabled, 500 on errors.
    """
    if not enable_gateway:
        raise HTTPException(status_code=404, detail="Gateway is disabled")
    try:
        origin_host = request.url.netloc
        # Heuristic: an explicit port implies a local/plain-HTTP deployment.
        if ":" in origin_host:
            petrol = "http"
        else:
            petrol = "https"
        if path.startswith("v1/"):
            base_url = "https://ab.chatgpt.com"
        else:
            base_url = random.choice(chatgpt_base_url_list) if chatgpt_base_url_list else "https://chatgpt.com"
        if "/assets" in path:
            base_url = "https://cdn.oaistatic.com"

        params = dict(request.query_params)
        # Drop identity-revealing and hop-by-hop headers before forwarding.
        headers = {
            key: value for key, value in request.headers.items()
            if (key.lower() not in ["host", "origin", "referer", "user-agent",
                                    "authorization"] and key.lower() not in headers_reject_list)
        }
        request_cookies = dict(request.cookies)

        # Impersonate a desktop Edge browser talking to the upstream origin.
        headers.update({
            "accept-Language": "en-US,en;q=0.9",
            "host": base_url.replace("https://", "").replace("http://", ""),
            "origin": base_url,
            "referer": f"{base_url}/",
            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0"
        })

        # Authorization is forwarded verbatim when the client supplied one.
        if request.headers.get('Authorization'):
            headers['Authorization'] = request.headers['Authorization']

        if headers.get("Content-Type") == "application/json":
            data = await request.json()
        else:
            data = await request.body()

        client = Client(proxy=random.choice(proxy_url_list) if proxy_url_list else None)
        try:
            # Closing the client is deferred until the response is consumed.
            background = BackgroundTask(client.close)
            r = await client.request(request.method, f"{base_url}/{path}", params=params, headers=headers,
                                     cookies=request_cookies, data=data, stream=True, allow_redirects=False)
            if r.status_code == 304:
                return Response(status_code=304, background=background)
            elif r.status_code == 307:
                # NOTE(review): r.headers.get("Location") may be None here,
                # which would raise TypeError — confirm upstream always
                # sends Location with a 307.
                if "oai-dm=1" not in r.headers.get("Location"):
                    return Response(status_code=307, headers={
                        "Location": r.headers.get("Location").replace("chat.openai.com", origin_host)
                        .replace("chatgpt.com", origin_host)
                        .replace("https", petrol)}, background=background)
                else:
                    return Response(status_code=307, headers={"Location": r.headers.get("Location")},
                                    background=background)
            elif r.status_code == 302:
                return Response(status_code=302,
                                headers={"Location": r.headers.get("Location").replace("chatgpt.com", origin_host)
                                .replace("chat.openai.com", origin_host)
                                .replace("ab.chatgpt.com", origin_host)
                                .replace("cdn.oaistatic.com", origin_host)
                                .replace("https", petrol)}, background=background)
            elif 'stream' in r.headers.get("content-type", ""):
                # SSE / chunked responses are piped through unmodified.
                return StreamingResponse(r.aiter_content(), media_type=r.headers.get("content-type", ""),
                                         background=background)
            else:
                if "/conversation" in path or "/register-websocket" in path:
                    # Conversation payloads must pass through untouched.
                    response = Response(content=(await r.atext()), media_type=r.headers.get("content-type"),
                                        status_code=r.status_code, background=background)
                else:
                    # Rewrite upstream hostnames/scheme in the body so the
                    # browser keeps talking to this gateway.
                    content = ((await r.atext()).replace("chatgpt.com", origin_host)
                               .replace("chat.openai.com", origin_host)
                               .replace("ab.chatgpt.com", origin_host)
                               .replace("cdn.oaistatic.com", origin_host)
                               .replace("https", petrol))
                    rheaders = dict(r.headers)
                    cache_control = rheaders.get("cache-control", "")
                    content_type = rheaders.get("content-type", "")
                    # Only these two headers are propagated downstream.
                    rheaders = {
                        "cache-control": cache_control,
                        "content-type": content_type
                    }
                    response = Response(content=content, headers=rheaders,
                                        status_code=r.status_code, background=background)
                # Re-issue upstream cookies against this gateway's domain,
                # skipping ones the client already sent.
                for cookie_name in r.cookies:
                    if cookie_name in request_cookies:
                        continue
                    for cookie_domain in [".chatgpt.com"]:
                        cookie_value = r.cookies.get(name=cookie_name, domain=cookie_domain)
                        if cookie_name.startswith("__"):
                            # "__"-prefixed cookies are security-sensitive;
                            # mark them Secure + HttpOnly.
                            response.set_cookie(key=cookie_name, value=cookie_value, secure=True, httponly=True)
                        else:
                            response.set_cookie(key=cookie_name, value=cookie_value)
                return response
        except Exception:
            # NOTE(review): this swallows the error and implicitly returns
            # None (the outer handler never sees it) — confirm intentional.
            await client.close()

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
chatgpt/turnstile.py
ADDED
|
@@ -0,0 +1,268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pybase64
|
| 2 |
+
import json
|
| 3 |
+
import random
|
| 4 |
+
import time
|
| 5 |
+
from typing import Any, Callable, Dict, List, Union
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class OrderedMap:
    """Insertion-ordered string-keyed map mimicking a JavaScript object.

    Re-adding an existing key updates its value but keeps the key's
    original insertion position.
    """

    def __init__(self):
        self.keys = []
        self.values = {}

    def add(self, key: str, value: Any):
        """Insert or update *key*; first insertion fixes its position."""
        is_new_key = key not in self.values
        if is_new_key:
            self.keys.append(key)
        self.values[key] = value

    def to_json(self):
        """Serialise to JSON with keys in insertion order."""
        ordered = {key: self.values[key] for key in self.keys}
        return json.dumps(ordered)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
TurnTokenList = List[List[Any]]
|
| 23 |
+
FloatMap = Dict[float, Any]
|
| 24 |
+
StringMap = Dict[str, Any]
|
| 25 |
+
FuncType = Callable[..., Any]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def get_turnstile_token(dx: str, p: str) -> Union[str, None]:
    """Base64-decode the dx payload and XOR-decrypt it with key p.

    Returns the decrypted instruction string, or None if decoding fails.
    """
    try:
        raw = pybase64.b64decode(dx).decode()
        return process_turnstile_token(raw, p)
    except Exception as e:
        print(f"Error in get_turnstile_token: {e}")
        return None
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def process_turnstile_token(dx: str, p: str) -> str:
    """XOR each character of *dx* with the repeating key *p*.

    An empty key returns *dx* unchanged. The transform is its own inverse.
    """
    if not p:
        return dx
    key_len = len(p)
    return ''.join(chr(ord(ch) ^ ord(p[i % key_len])) for i, ch in enumerate(dx))
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def is_slice(input_val: Any) -> bool:
    """Return True for list/tuple values (the VM's JS-array analogue)."""
    return isinstance(input_val, (list, tuple))
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def is_float(input_val: Any) -> bool:
    """Return True for float values (the VM's JS-number analogue)."""
    return isinstance(input_val, float)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def is_string(input_val: Any) -> bool:
    """Return True for str values."""
    return isinstance(input_val, str)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def to_str(input_val: Any) -> str:
    """Stringify a value the way the turnstile VM expects JavaScript would.

    None maps to "undefined", a handful of well-known ``window.*`` paths
    map to their JS toString() forms, and a list of strings is
    comma-joined. Everything else falls back to str().
    """
    if input_val is None:
        return "undefined"
    if isinstance(input_val, float):
        return str(input_val)
    if isinstance(input_val, str):
        js_native_strings = {
            "window.Math": "[object Math]",
            "window.Reflect": "[object Reflect]",
            "window.performance": "[object Performance]",
            "window.localStorage": "[object Storage]",
            "window.Object": "function Object() { [native code] }",
            "window.Reflect.set": "function set() { [native code] }",
            "window.performance.now": "function () { [native code] }",
            "window.Object.create": "function create() { [native code] }",
            "window.Object.keys": "function keys() { [native code] }",
            "window.Math.random": "function random() { [native code] }",
        }
        return js_native_strings.get(input_val, input_val)
    if isinstance(input_val, list) and all(isinstance(item, str) for item in input_val):
        return ','.join(input_val)
    return str(input_val)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def get_func_map() -> FloatMap:
    """Build the opcode -> handler table for the turnstile mini-VM.

    The returned ``process_map`` doubles as the VM's register file: numeric
    keys written by the handlers hold intermediate values, while a few
    fixed entries (e.g. 10 -> "window") are pre-seeded constants. Handlers
    close over the map and mutate it in place.
    """
    process_map: FloatMap = {}

    def func_1(e: float, t: float):
        # XOR-combine two registers (same transform as the token decrypt)
        # and store the result back into register e.
        e_str = to_str(process_map[e])
        t_str = to_str(process_map[t])
        res = process_turnstile_token(e_str, t_str)
        process_map[e] = res

    def func_2(e: float, t: Any):
        # Load an immediate value into register e.
        process_map[e] = t

    def func_5(e: float, t: float):
        # JS "+" semantics: array push, string concat, or numeric add;
        # anything else yields "NaN".
        n = process_map[e]
        tres = process_map[t]
        if is_slice(n):
            nt = n + [tres]
            process_map[e] = nt
        else:
            if is_string(n) or is_string(tres):
                res = to_str(n) + to_str(tres)
            elif is_float(n) and is_float(tres):
                res = n + tres
            else:
                res = "NaN"
            process_map[e] = res

    def func_6(e: float, t: float, n: float):
        # Property access "t.n"; window.document.location is spoofed to
        # the ChatGPT origin.
        tv = process_map[t]
        nv = process_map[n]
        if is_string(tv) and is_string(nv):
            res = f"{tv}.{nv}"
            if res == "window.document.location":
                process_map[e] = "https://chatgpt.com/"
            else:
                process_map[e] = res
        else:
            print("func type 6 error")

    def func_24(e: float, t: float, n: float):
        # Property access like func_6 but without the location spoof.
        tv = process_map[t]
        nv = process_map[n]
        if is_string(tv) and is_string(nv):
            process_map[e] = f"{tv}.{nv}"
        else:
            print("func type 24 error")

    def func_7(e: float, *args):
        # Call register e with register-resolved arguments; the special
        # string "window.Reflect.set" performs OrderedMap assignment.
        n = [process_map[arg] for arg in args]
        ev = process_map[e]
        if isinstance(ev, str):
            if ev == "window.Reflect.set":
                obj = n[0]
                key_str = str(n[1])
                val = n[2]
                obj.add(key_str, val)
        elif callable(ev):
            ev(*n)

    def func_17(e: float, t: float, *args):
        # Call register t and store the result in register e; several
        # browser APIs are emulated with plausible values.
        i = [process_map[arg] for arg in args]
        tv = process_map[t]
        res = None
        if isinstance(tv, str):
            if tv == "window.performance.now":
                # Milliseconds elapsed since process_turnstile() started,
                # jittered so repeated calls differ.
                current_time = time.time_ns()
                elapsed_ns = current_time - int(start_time * 1e9)
                res = (elapsed_ns + random.random()) / 1e6
            elif tv == "window.Object.create":
                res = OrderedMap()
            elif tv == "window.Object.keys":
                if isinstance(i[0], str) and i[0] == "window.localStorage":
                    # Canned localStorage key list matching a real session.
                    res = ["STATSIG_LOCAL_STORAGE_INTERNAL_STORE_V4", "STATSIG_LOCAL_STORAGE_STABLE_ID",
                           "client-correlated-secret", "oai/apps/capExpiresAt", "oai-did",
                           "STATSIG_LOCAL_STORAGE_LOGGING_REQUEST", "UiState.isNavigationCollapsed.1"]
            elif tv == "window.Math.random":
                res = random.random()
        elif callable(tv):
            res = tv(*i)
        process_map[e] = res

    def func_8(e: float, t: float):
        # Register-to-register copy.
        process_map[e] = process_map[t]

    def func_14(e: float, t: float):
        # JSON.parse of a string register.
        tv = process_map[t]
        if is_string(tv):
            token_list = json.loads(tv)
            process_map[e] = token_list
        else:
            print("func type 14 error")

    def func_15(e: float, t: float):
        # JSON.stringify of a register.
        tv = process_map[t]
        process_map[e] = json.dumps(tv)

    def func_18(e: float):
        # In-place base64 decode of register e.
        ev = process_map[e]
        e_str = to_str(ev)
        decoded = pybase64.b64decode(e_str).decode()
        process_map[e] = decoded

    def func_19(e: float):
        # In-place base64 encode of register e.
        ev = process_map[e]
        e_str = to_str(ev)
        encoded = pybase64.b64encode(e_str.encode()).decode()
        process_map[e] = encoded

    def func_20(e: float, t: float, n: float, *args):
        # Conditional call: invoke register n with args only when
        # registers e and t compare equal.
        o = [process_map[arg] for arg in args]
        ev = process_map[e]
        tv = process_map[t]
        if ev == tv:
            nv = process_map[n]
            if callable(nv):
                nv(*o)
        else:
            print("func type 20 error")

    def func_21(*args):
        # No-op opcode.
        pass

    def func_23(e: float, t: float, *args):
        # Call register t with *raw* (unresolved) arguments when
        # register e is set.
        i = list(args)
        ev = process_map[e]
        tv = process_map[t]
        if ev is not None:
            if callable(tv):
                tv(*i)

    # Seed the dispatch table; key 10 is the pre-defined "window" constant.
    process_map.update({
        1: func_1, 2: func_2, 5: func_5, 6: func_6, 24: func_24, 7: func_7,
        17: func_17, 8: func_8, 10: "window", 14: func_14, 15: func_15,
        18: func_18, 19: func_19, 20: func_20, 21: func_21, 23: func_23
    })

    return process_map
|
| 222 |
+
|
| 223 |
+
start_time = 0
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def process_turnstile(dx: str, p: str) -> str:
    """Execute the turnstile challenge program and return the result token.

    *dx* is decrypted (via key *p*) into a JSON list of instructions; each
    instruction's first element selects a handler from the opcode map and
    the remaining elements are its arguments. Opcode 3 captures the final
    base64 result. Returns "" when the payload cannot be decoded.
    """
    global start_time
    # Baseline for the emulated window.performance.now (see func_17).
    start_time = time.time()
    tokens = get_turnstile_token(dx, p)
    if tokens is None:
        return ""

    token_list = json.loads(tokens)
    # print(token_list)
    res = ""
    process_map = get_func_map()

    def func_3(e: str):
        # Opcode 3: base64-encode the argument and capture it as the result.
        nonlocal res
        res = pybase64.b64encode(e.encode()).decode()

    # Per-run registers: 3 = result sink, 9 = the program itself, 16 = key.
    process_map[3] = func_3
    process_map[9] = token_list
    process_map[16] = p

    for token in token_list:
        try:
            e = token[0]
            t = token[1:]
            f = process_map.get(e)
            if callable(f):
                f(*t)
            else:
                pass
                # print(f"Warning: No function found for key {e}")
        except Exception as exc:
            # Individual instruction failures are ignored, mirroring the
            # browser VM's tolerant behaviour.
            pass
            # print(f"Error processing token {token}: {exc}")

    return res
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
if __name__ == "__main__":
|
| 264 |
+
result = process_turnstile(
|
| 265 |
+
"PBp5bWF1cHlLe1ttQhRfaTdmXEpidGdEYU5JdGJpR3xfHFVuGHVEY0tZVG18Vh54RWJ5CXpxKXl3SUZ7b2FZAWJaTBl6RGQZURh8BndUcRlQVgoYalAca2QUX24ffQZgdVVbbmBrAH9FV08Rb2oVVgBeQVRrWFp5VGZMYWNyMnoSN0FpaQgFT1l1f3h7c1RtcQUqY1kZbFJ5BQRiZEJXS3RvHGtieh9PaBlHaXhVWnVLRUlKdwsdbUtbKGFaAlN4a0V/emUJe2J2dl9BZkAxZWU/WGocRUBnc3VyT3F4WkJmYSthdBIGf0RwQ2FjAUBnd3ZEelgbVUEIDAJjS1VZbU9sSWFjfk55J2lZFV0HWX1cbVV5dWdAfkFIAVQVbloUXQtYaAR+VXhUF1BZdG4CBHRyK21AG1JaHhBFaBwCWUlocyQGVT4NBzNON2ASFVtXeQRET1kARndjUEBDT2RKeQN7RmJjeVtvZGpDeWJ1EHxafVd+Wk1AbzdLVTpafkd9dWZKeARecGJrS0xcenZIEEJQOmcFa01menFOeVRiSGFZC1JnWUA0SU08QGgeDFFgY34YWXAdZHYaHRhANFRMOV0CZmBfVExTWh9lZlVpSnx6eQURb2poa2RkQVJ0cmF0bwJbQgB6RlRbQHRQaQFKBHtENwVDSWpgHAlbTU1hXEpwdBh2eBlNY3l2UEhnblx7AmpaQ08JDDAzJUVAbn5IA2d8XX5ZFVlrYWhSXWlYQlEdZlQ/QUwuYwJgTG5GZghSRHdCYk1CWWBjclp0aWo3TWMSQmFaaAdge05FbmFhH3hxCFZuIX1BY01WVW5ABx5jfG1ZbjcZEiwwPFYQVm0sdHV8Xnl7alRuemgKZUwICklweW1heHR5Q3UqYVoSR3BCaldIc3Z8SmJOS212CAY5AmMkYmMaRn5UXEthZFsHYFx7ZHRnYV5tcFBZeHocQxUXXU0bYk0VFUZ0ZgFrSWcMRksCAwdJEBBncF12fGUVdnFNQnl4ZQB9WUclYGMRe04TQUZMf0FEbEthW357HEN2aVhAdHAMH0NPdWFicm1YbzNRBSkWMDUAOVdXbBlfRz51ah54YG5iVX9sR2t6RF1pR1RGU20MABBWQy55T3dQfmlUfmFrA35gY2AdDiBWMWVlP1hqHEVAZ3NzfE9/c1pCZWErYXQSB2BKcENjew1baXB9Rm1aG1VBCAkJY01aWW1NbklgZH5Oek1rTX9FFEB7RHNGEG9pKH1eRgFSZGJJdkcMQHUSY0IRQRkzUmFgBG90cklvVwNZThIHQXYABjFJaApCWh1qUEhnWVpiBHxDRDlAHg8kFVcCY1dCUk8VRm9obEN9e21EdnluWxN7eWt8RnFOekRTRXZKXkNPWH40YGMRXHwfRHZ7Z1JKS2R9XG1XR09qCGlaZmZ/QXwnfloWTQxIflxbSVNdSUZgHBRLKCwpQwwmXzB2NFRMOVxUTFNfH3BoRVhfWkcBYghVaSh0ZWMFeG9qBWp5eENNeGNldncHR0wBezVPTjdlSGcOTndjVkAUVl99YQFkRUE2YlNKe3ppeml2V2lvYkhGHjtbNHIALywsMScPEjEFO3Q1MQ0UGDYvK148ETYxIzEcD0gzchNcLSs+LAJxJiEQKBd5MCsXCRclFA0gBRg3axk1HTkBGyoUPRhwCwI2OAIRB2gUBRcjATt6ORQ9JDANOHFlEQITIC8VOS4GAC49GDscBBQMNQ4hDQtQZHYMHmk3BRFHeHZvcXNvd01+WXxPFF9pN2ZaSmR3Z0RkQkl7YmlHbzMsSS8HEy4PPggxGAAYBBcuJREBEQA7LAMANgEiNiZgFR5Mchs0eH83ERFsGCceZTESe2MeEgQSGwgXIgIbb38FFBAWEC1GFC42OQ0CCwcudSIpOwY6MRw7IjwYAgAYD3UbOA8AaHoHPiUkBgQmTA4FUxgAOCoJKxNmVSoANDIzAjdlDxA6ISIOKhQDEhwLPS82IT4CUFIsOyIwLD4
+BBsDAww1AnMqHAIlMiMTGT0oAQlUE3QDQhIUACMxDwhGLxEXHQsSIV0FLgMaAgJ2LgsEHyEPLBcKOBtfUhg9MiAXPT5fHhA1Wg8+BxoPLgYcGS0WRSsELjIZKg8EJw4lFQAoUCcTcxASLS9BOTsZD3ERGRUhOD1YUjJxWBEBdnc9PwkQNytyED0zAQtaG3Y2ACsWXSsoPV4+DBQ2DyQ+bg0MHxVHKhAqNh8QPVkNET5fAis5Jh0uGxACKA8kOyo6IBkHIgkKdx0sAgA8SAQVHCkCLwcoBnQHGRAeAxAXOQAdKxhrNxMLJQYrKwAxHnFcOA4HIlEEAVkVDigqAwMoORQQKFkaOy0pISMoRmYDPyFLCRIqVhwCImITET04Gx8QPTMWWRQDcgstAioLGSkBTjw7ECYLeSgraxFoazw2CQcrJgU1cQ0fAB4YEykpIQMEPgJ0NUY0Lhc8IBEEWQtyNSkeECEmHitRFhsULgUrASkfO3E6XDsqLTAVcg8pFCwUaT8rPiMALzskFQQNJBkfKgUxBwscAj4YWhYHDxoXEBRwHgUUMx4gCxsCGBRJAz5yABsCAxIPFSo2AQILLSs7NS4EAGEnFBANJBgTOV0FLWJSKAUQeRkDKyAjCjYqIwEUBwAUPT5iBgohDzYmBAEBJS4pCSspGgUQBDsuD3wvKFd7HwE/EQ8ZFQgRICYEAgUuRhovHFYdM15eNwIgZBgmBVIoJGBnACRXChIKQR8lDVh2CicfKTIBcxwzNionIg4PEVI0FyMQOTkaABI3JSoAByVTKAItJn1ULjcEOG4gBjoqDnAQDjsGHzA2cF92CTIlAhMdchoJABA6KQEyajcgBAM+IhwyE292OTQ0IzUsAVY8EBcxMRxoKgEhBRQSGTMLfQsgFDp1PDQsCgEFKAkIASA8EhF4IgpjIzMJJC4WcyYcEQkPPSMBHlUSfFkuPCQnKiMaAGYWEC80EQIeex9wJjszCSQMFg4iDDcvVxMEBR17Knw0OnMVRyc4fj9ROQpiABoWFxAscR0Na3gBHWdyPjcOBCMleBQgKR4rLQViBhcLGnEgDDZ4ACoPJhQQIH4nHBoDNhkWCyUWDRgVFx4YAwAzFjAELCUPNScjDQ4hDB54Gwg4K2g3BmMBKjkwGggiFAo0Iwp6BBQeDxYwBz4VKCIzeDQmJjYeXTUmHCZpcygrAQt3NAFrBjsmGhtWJz8uUiR3CjorPy4NJXUuOjYIBDoMDGM4MwxxNiMNGg4SES01GHA1O3EIOSo7LQUXHnEeOgIjPXENLjQSfn4OVSkSAgcFBQIxDQUuajUPOj0MFwwcZhMnVzQOCQMDAWBWZBUPPx4oBAA5YA5qBwcrEwQ+IjppEz47Ji4CE2YNKTEzAUcjBgAoFFwyKHwbCz8pARUrDgIIMgg1H2MXGTUBFx0XAgMdEj0HOQ4MIionOyE2cUcxHAA7Iw0sNTkBDUU9GRsbPgkzOBwNKD9hHBdVJipxVTYRAgMmGAIVKxc2JREoNxgtMysDHggNExYWBh8FHwUfBQ8/KQYONiUrLjkfIwpxHDgYCTw1MDEMMBU2JRErK2crDzZdCy94UjAOC00MMgFCKTJxZw8mdgoSCzQMcAtzDC8hMBw7CHJ/GjQ+Cw4aDAVyMTMwEi8gHhUfNB8sDi4hWTQ0GDdJdSEVNggXAhY7Knd3MQ4KGhoZDm11DysqLxI8NXYZCXMDMngaMQg5PSsYKjYxJRJzdx8jOzQlIwklEwgtDhEMdwskLAs3Izg7LQscJi4IeyE3GiAbDAYrHzEzEjcxKicAdSteCTMqJHsUMSEXMT0kJD4Ga3V2Kk4rMSUZHS8qMAsqHTsEPR8RXzArXzc2OgYQOy4oPXc1AQM+DhpuMDFRFTMrBn8pCQkCdCE/MDILKG8uGllRNRlGRy0NGjsyFGoTKSUsOiwkAi8sNRJUNgQ0czEuFgUNMShjBAsBDDErbywzKBoKKzkeOncPDR42HCskNGg7BjE
MVgAvOyApLQ5WPgAVHiM+Jz8eOA8BOSI7Xwo4JGIJNjYdCz0MFmAuPhEbLzc3VjUQAGwoHjATcSAGdwUVCjIqMDA1OyQNUB5gGRw6UwpkNS0eECoqbCt2KzQEdD1jBzEZOxQdIjBoMxVqCyoEBToSDB5xPz44LA9MCDAKMAZhLgZZACwMKAYDPWgHODIGHiwMIDUpZ2YEMA04By8INQl3ClQLLC8wCDIIXG8/PSARMDYQLxQyeh8qFTg7MhhUDzkLKwNzDT8RPQ84JC0dDTAqGDA7KxkoKDAcPzh1KQo9LzkeN3YMIxc4HzsBNxorAj0jQX90CCMlPQ4FMTYPfDgwDA0sMyoJHyw6EigMCwULUBsDcnsAdQUAKRAMFBIqLQwCGCkLLmoOJQIEOSU/JQ0JFQgmDx02LwgrIjMLHQQ9DCw+cgoRJREWZAQkCyoyNgskJip0JDg5cy1BXXIzJAl3GCQCdggwZXEbBmcPNAwwCAV9fAkGDDUUBhBmKTgyKAo0KRklcRc/IxY5KQ8SACIKEgg4FVUuDx0FUVoiK3IuEiQEGQkkYToJDhcPJhVTfA8zMiMhFgxnAystCycgLTweB1A0GAMuACIBVEUKHSYiCR0UJA0ENQsRBwUPCgEpMCcvGyUKdxcvH3U5OAwRegMnCiE1IxYiOgsGEGoOAhg/DxJ9IggHCzESCgMsJgJ9awodFDksDRAyCyA1NwodDCwJOFcWCw0yNwokfTUKLwt3IwolIwwocTcbRRAeCwoMHiUZOWkeCRclHihWMyVVcTcfVQEkJjAyMyReOT0jEFwMC1UPPyMwATQnO1oxHz8DNSIoAScYMBMtDi8iFgwgHwwKMAxnDjsXDQooCx4YHSY4JQYYPgQ0Cz0PVkQEEQYqKCIWPTELLBsxElgUMBcENhMKPQQRbyQVRhJdREdUW0tUYB4MX2BjeAU8bxEfZUVYW1VHTF5OSQV/f1xBMU5Jamd7QX9fbWd4H3p1ZhNuYmRFVHRyZHRnBltCCnxGV1YxeEQcDUp3ZlJAFFhafWEKFUlQQ25cOW9iHm90Yk5teXpaSGdhXHsBYStPTR1fdG5wHUIAZ0ZuZWVTeFQVWWliaFxSGFRQOARhQlRVQFVpBmBObEZmAUlKdU9gW0VFbHJkXW0Ffko6cmVTfEx3CXdvV1x+eWMDE2h1IXlJZ0J1VkNKe1cGBnZkcE1gdFJbbXdsWntMECo=",
|
| 266 |
+
"gAAAAACWzMwMzIsIlRodSBKdWwgMTEgMjAyNCAwMzoxMDo0NiBHTVQrMDgwMCAo5Lit5Zu95qCH5YeG5pe26Ze0KSIsNDI5NDcwNTE1MiwxLCJNb3ppbGxhLzUuMCAoV2luZG93cyBOVCAxMC4wOyBXaW42NDsgeDY0KSBBcHBsZVdlYktpdC81MzcuMzYgKEtIVE1MLCBsaWtlIEdlY2tvKSBDaHJvbWUvMTI2LjAuMC4wIFNhZmFyaS81MzcuMzYgRWRnLzEyNi4wLjAuMCIsImh0dHBzOi8vY2RuLm9haXN0YXRpYy5jb20vX25leHQvc3RhdGljL2NodW5rcy9wYWdlcy9fYXBwLWMwOWZmNWY0MjQwMjcwZjguanMiLCJjL1pGWGkxeTNpMnpaS0EzSVQwNzRzMy9fIiwiemgtQ04iLCJ6aC1DTixlbixlbi1HQixlbi1VUyIsMTM1LCJ3ZWJraXRUZW1wb3JhcnlTdG9yYWdl4oiSW29iamVjdCBEZXByZWNhdGVkU3RvcmFnZVF1b3RhXSIsIl9yZWFjdExpc3RlbmluZ3NxZjF0ejFzNmsiLCJmZXRjaCIsMzY1NCwiNWU1NDUzNzItMzcyNy00ZDAyLTkwMDYtMzMwMDRjMWJmYTQ2Il0="
|
| 267 |
+
)
|
| 268 |
+
print(result)
|
chatgpt/wssClient.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import time
|
| 3 |
+
|
| 4 |
+
from utils.Logger import logger
|
| 5 |
+
import chatgpt.globals as globals
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def save_wss_map(wss_map):
    """Persist the token -> wss info mapping to its JSON file on disk."""
    serialized = json.dumps(wss_map)
    with open(globals.WSS_MAP_FILE, "w") as fp:
        fp.write(serialized)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
async def token2wss(token):
    """Resolve a token to its cached websocket (wss) info.

    Returns a ``(wss_mode, wss_url)`` tuple:
      - ``(False, None)`` when the token is falsy, unknown, or its cached
        entry is not in wss mode;
      - ``(wss_mode, wss_url)`` when the cached entry is fresh (< 1 hour);
      - ``(wss_mode, None)`` when the cached entry has expired, signalling
        the caller to fetch a new wss URL.
    """
    if not token:
        return False, None
    entry = globals.wss_map.get(token)
    if not entry or not entry["wss_mode"]:
        return False, None
    wss_mode = entry["wss_mode"]
    # Cached wss URLs are considered valid for one hour.
    if int(time.time()) - entry.get("timestamp", 0) < 60 * 60:
        logger.info("token -> wss_url from cache")
        return wss_mode, entry["wss_url"]
    logger.info("token -> wss_url expired")
    return wss_mode, None
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
async def set_wss(token, wss_mode, wss_url=None):
    """Record (or refresh) the wss info for a token and persist the map.

    A falsy token is a no-op. Always returns True.
    """
    if not token:
        return True
    entry = {
        "timestamp": int(time.time()),
        "wss_url": wss_url,
        "wss_mode": wss_mode,
    }
    globals.wss_map[token] = entry
    save_wss_map(globals.wss_map)
    return True
|
docs/capsolver.png
ADDED
|
docs/tokens.png
ADDED
|
requirements.txt
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
python-multipart
|
| 3 |
+
curl_cffi==0.7.3
|
| 4 |
+
uvicorn
|
| 5 |
+
tiktoken
|
| 6 |
+
python-dotenv
|
| 7 |
+
websockets
|
| 8 |
+
pillow
|
| 9 |
+
pybase64
|
| 10 |
+
jinja2
|
| 11 |
+
APScheduler
|
templates/tokens.html
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
<!-- Admin page for uploading, inspecting and clearing ChatGPT tokens.
     Rendered server-side with `api_prefix` and `tokens_count`. -->
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta content="width=device-width, initial-scale=1.0" name="viewport">
    <title>Tokens 管理</title>
    <script src="https://cdn.tailwindcss.com"></script>
    <script>
        document.addEventListener('DOMContentLoaded', () => {
            // "{{ api_prefix }}" is substituted by the template engine; the
            // literal string "None" means no API prefix is configured.
            const apiPrefix = "{{ api_prefix }}";
            const uploadForm = document.getElementById('uploadForm');
            const clearForm = document.getElementById('clearForm');
            const errorButton = document.getElementById('errorButton');

            // Point the two forms and the error-viewer button at the
            // (optionally prefixed) token endpoints.
            if (apiPrefix === "None") {
                uploadForm.action = "/tokens/upload";
                clearForm.action = "/tokens/clear";
                errorButton.dataset.api = "/tokens/error";
            } else {
                uploadForm.action = `/${apiPrefix}/tokens/upload`;
                clearForm.action = `/${apiPrefix}/tokens/clear`;
                errorButton.dataset.api = `/${apiPrefix}/tokens/error`;
            }

            // Fetch the list of failed tokens and show them in the modal.
            errorButton.addEventListener('click', async () => {
                const response = await fetch(errorButton.dataset.api, {
                    method: 'POST',
                });
                const result = await response.json();
                const errorTokens = result.error_tokens;

                const errorModal = document.getElementById('errorModal');
                const errorModalContent = document.getElementById('errorModalContent');

                errorModalContent.innerHTML = errorTokens.map(token => `<p>${token}</p>`).join('');
                errorModal.classList.remove('hidden');
            });

            document.getElementById('errorModalClose').addEventListener('click', () => {
                document.getElementById('errorModal').classList.add('hidden');
            });

            // Copy the listed tokens (one per line) to the clipboard.
            document.getElementById('errorModalCopy').addEventListener('click', () => {
                const errorModalContent = document.getElementById('errorModalContent');
                const textToCopy = errorModalContent.innerText.replace(/\n\n/g, '\n');
                navigator.clipboard.writeText(textToCopy).then(() => {
                    alert('错误 Tokens 已复制到剪贴板');
                }).catch(err => {
                    alert('复制失败,请手动复制');
                });
            });
        });
    </script>
</head>
<body class="bg-gradient-to-r from-blue-200 via-purple-200 to-pink-200 flex justify-center items-center min-h-screen">
    <div class="bg-white p-10 rounded-lg shadow-2xl w-128 text-center">
        <h1 class="text-4xl font-extrabold text-gray-900 mb-6">Tokens 管理</h1>
        <p class="text-gray-600 mb-4">当前可用 Tokens 数量:<span class="text-blue-600">{{ tokens_count }}</span></p>
        <!-- Upload form: one token per line, AccessToken or RefreshToken. -->
        <form class="mb-2" id="uploadForm" method="post">
            <textarea class="w-full p-4 mb-4 border border-gray-300 rounded-md focus:outline-none focus:ring-2 focus:ring-blue-400 resize-none" name="text" placeholder="一行一个Token,可以是 AccessToken 或 RefreshToken" rows="10"></textarea>
            <p class="text-gray-600 mb-2">注:使用docker时如果挂载了data文件夹则重启后不需要再次上传</p>
            <button class="w-full bg-blue-600 text-white py-3 rounded-md hover:bg-blue-700 transition duration-300 mb-2" type="submit">上传</button>
        </form>
        <button id="errorButton" class="w-full bg-yellow-600 text-white py-3 rounded-md hover:bg-yellow-700 transition duration-200 mt-2">查看错误Tokens</button>
        <p class="text-gray-600 mt-2">点击清空,将会清空上传和错误的 Tokens</p>
        <form id="clearForm" method="post">
            <button class="w-full bg-red-600 text-white py-3 rounded-md hover:bg-red-700 transition duration-300" type="submit">清空Tokens</button>
        </form>
    </div>

    <!-- Hidden modal listing failed tokens, with copy/close controls. -->
    <div id="errorModal" class="fixed inset-0 bg-gray-800 bg-opacity-75 flex justify-center items-center hidden">
        <div class="bg-white p-6 rounded-lg shadow-lg w-150">
            <h2 class="text-2xl font-bold mb-4">错误 Tokens</h2>
            <div id="errorModalContent" class="list-disc list-inside text-left mb-4"></div>
            <div class="flex justify-end space-x-4">
                <button id="errorModalCopy" class="bg-green-600 text-white py-2 px-4 rounded-md hover:bg-green-700 transition duration-300">复制</button>
                <button id="errorModalClose" class="bg-red-600 text-white py-2 px-4 rounded-md hover:bg-red-700 transition duration-300">关闭</button>
            </div>
        </div>
    </div>
</body>
</html>
|
utils/Client.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
|
| 3 |
+
from curl_cffi.requests import AsyncSession
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class Client:
    """Async HTTP helper built on curl_cffi with browser impersonation.

    Two sessions are kept: ``session`` for ordinary requests and
    ``session2`` for streaming POSTs, which by default reuses the first
    session's headers and cookies.
    """

    def __init__(self, proxy=None, timeout=15, verify=True):
        self.proxies = {"http": proxy, "https": proxy}
        self.timeout = timeout
        self.verify = verify

        # Browser fingerprint applied to every outgoing request.
        self.impersonate = random.choice(['safari15_3'])

        session_kwargs = dict(
            proxies=self.proxies,
            timeout=self.timeout,
            verify=self.verify,
        )
        self.session = AsyncSession(**session_kwargs)
        self.session2 = AsyncSession(**session_kwargs)

    async def post(self, *args, **kwargs):
        return await self.session.post(*args, impersonate=self.impersonate, **kwargs)

    async def post_stream(self, *args, headers=None, cookies=None, **kwargs):
        # Streaming goes through the secondary session, inheriting the
        # primary session's headers/cookies unless overridden.
        if self.session:
            headers = headers or self.session.headers
            cookies = cookies or self.session.cookies
        return await self.session2.post(
            *args, headers=headers, cookies=cookies, impersonate=self.impersonate, **kwargs
        )

    async def get(self, *args, **kwargs):
        return await self.session.get(*args, impersonate=self.impersonate, **kwargs)

    async def request(self, *args, **kwargs):
        return await self.session.request(*args, impersonate=self.impersonate, **kwargs)

    async def put(self, *args, **kwargs):
        return await self.session.put(*args, impersonate=self.impersonate, **kwargs)

    async def close(self):
        """Close and drop both sessions, swallowing any shutdown errors."""
        for attr in ("session", "session2"):
            sess = getattr(self, attr, None)
            if sess:
                try:
                    await sess.close()
                    delattr(self, attr)
                except Exception:
                    pass
|
utils/Logger.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
logging.basicConfig(level=logging.INFO, format='%(asctime)s | %(levelname)s | %(message)s')
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class Logger:
    """Static facade over the stdlib ``logging`` module.

    warning/error/debug output is wrapped in ANSI color escapes so the
    severities are easy to tell apart on a terminal.
    """

    @staticmethod
    def info(message):
        logging.info(str(message))

    @staticmethod
    def warning(message):
        # Yellow.
        logging.warning(f"\033[0;33m{str(message)}\033[0m")

    @staticmethod
    def error(message):
        # Red, framed by dashed rules for visibility.
        logging.error(f"\033[0;31m{'-' * 50}\n| {str(message)}\033[0m\n" + "└" + "-" * 80)

    @staticmethod
    def debug(message):
        # Light gray.
        logging.debug(f"\033[0;37m{str(message)}\033[0m")


# Shared module-level instance used across the project.
logger = Logger()
|
utils/config.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ast
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
from dotenv import load_dotenv
|
| 5 |
+
|
| 6 |
+
from utils.Logger import logger
|
| 7 |
+
|
| 8 |
+
load_dotenv(encoding="ascii")
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def is_true(x):
    """Interpret *x* as a boolean flag.

    Accepts real booleans, truthy strings ('true', '1', 't', 'y', 'yes',
    case-insensitive), and the integer 1. Everything else is False.
    """
    if isinstance(x, bool):
        return x
    if isinstance(x, str):
        return x.lower() in {'true', '1', 't', 'y', 'yes'}
    if isinstance(x, int):
        return x == 1
    return False
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# All service configuration is read from environment variables (loaded from
# .env above). URL-like values have spaces stripped; comma-separated values
# are split into lists further down.

# Optional URL prefix protecting the API routes (None -> no prefix).
api_prefix = os.getenv('API_PREFIX', None)
# Comma-separated keys accepted in the Authorization header.
authorization = os.getenv('AUTHORIZATION', '').replace(' ', '')
# Upstream ChatGPT gateway(s); may be a comma-separated list.
chatgpt_base_url = os.getenv('CHATGPT_BASE_URL', 'https://chatgpt.com').replace(' ', '')
# Optional shared key required by privileged endpoints.
auth_key = os.getenv('AUTH_KEY', None)
# Python-literal list of user-agent strings, e.g. '["ua1", "ua2"]'.
user_agents = os.getenv('USER_AGENTS', '[]')

# Service issuing ark0se tokens. The name is looked up first with a real
# letter 'O' ('ARK' + 'OSE_TOKEN_URL'), then falls back to the zero-spelled
# 'ARK0SE_TOKEN_URL' used in .env.example.
ark0se_token_url = os.getenv('ARK' + 'OSE_TOKEN_URL', '').replace(' ', '')
if not ark0se_token_url:
    ark0se_token_url = os.getenv('ARK0SE_TOKEN_URL', None)
# Proxy pool for upstream requests (comma-separated) and a single export proxy.
proxy_url = os.getenv('PROXY_URL', '').replace(' ', '')
export_proxy_url = os.getenv('EXPORT_PROXY_URL', None)

# Optional helper services for Cloudflare challenge files / Turnstile solving.
cf_file_url = os.getenv('CF_FILE_URL', None)
turnstile_solver_url = os.getenv('TURNSTILE_SOLVER_URL', None)

# Feature flags and tunables (parsed through is_true above).
history_disabled = is_true(os.getenv('HISTORY_DISABLED', True))
pow_difficulty = os.getenv('POW_DIFFICULTY', '000032')
retry_times = int(os.getenv('RETRY_TIMES', 3))
enable_gateway = is_true(os.getenv('ENABLE_GATEWAY', True))
conversation_only = is_true(os.getenv('CONVERSATION_ONLY', False))
enable_limit = is_true(os.getenv('ENABLE_LIMIT', True))
upload_by_url = is_true(os.getenv('UPLOAD_BY_URL', False))
check_model = is_true(os.getenv('CHECK_MODEL', False))
scheduled_refresh = is_true(os.getenv('SCHEDULED_REFRESH', False))

# Split comma-separated settings into lists for per-request selection.
authorization_list = authorization.split(',') if authorization else []
chatgpt_base_url_list = chatgpt_base_url.split(',') if chatgpt_base_url else []
ark0se_token_url_list = ark0se_token_url.split(',') if ark0se_token_url else []
proxy_url_list = proxy_url.split(',') if proxy_url else []
# NOTE(review): ast.literal_eval raises on malformed USER_AGENTS — confirm
# whether a friendlier error is wanted at startup.
user_agents_list = ast.literal_eval(user_agents)

# Echo the effective configuration at startup.
logger.info("-" * 60)
logger.info("Chat2Api 1.4.16 | https://github.com/lanqian528/chat2api")
logger.info("-" * 60)
logger.info("Environment variables:")
logger.info("API_PREFIX: " + str(api_prefix))
logger.info("AUTHORIZATION: " + str(authorization_list))
logger.info("CHATGPT_BASE_URL: " + str(chatgpt_base_url_list))
logger.info("AUTH_KEY: " + str(auth_key))
logger.info("ARK0SE_TOKEN_URL: " + str(ark0se_token_url_list))
logger.info("PROXY_URL: " + str(proxy_url_list))
logger.info("EXPORT_PROXY_URL: " + str(export_proxy_url))
logger.info("HISTORY_DISABLED: " + str(history_disabled))
logger.info("POW_DIFFICULTY: " + str(pow_difficulty))
logger.info("RETRY_TIMES: " + str(retry_times))
logger.info("ENABLE_GATEWAY: " + str(enable_gateway))
logger.info("CONVERSATION_ONLY: " + str(conversation_only))
logger.info("ENABLE_LIMIT: " + str(enable_limit))
logger.info("UPLOAD_BY_URL: " + str(upload_by_url))
logger.info("CHECK_MODEL: " + str(check_model))
logger.info("SCHEDULED_REFRESH: " + str(scheduled_refresh))
logger.info("USER_AGENTS: " + str(user_agents_list))
logger.info("-" * 60)
|
utils/retry.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import HTTPException
|
| 2 |
+
|
| 3 |
+
from utils.Logger import logger
|
| 4 |
+
from utils.config import retry_times
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
async def async_retry(func, *args, max_retries=retry_times, **kwargs):
    """Await ``func(*args, **kwargs)``, retrying on HTTPException.

    Makes at most ``max_retries + 1`` attempts. When attempts are exhausted
    the last HTTPException is re-raised (masked as a generic 500
    "Server error" for server-side failures). Any non-HTTPException
    propagates immediately.
    """
    for attempt in range(max_retries + 1):
        try:
            return await func(*args, **kwargs)
        except HTTPException as e:
            if attempt == max_retries:
                logger.error(f"Throw an exception {e.status_code}, {e.detail}")
                if e.status_code == 500:
                    # Hide internal detail from the client on server errors.
                    raise HTTPException(status_code=500, detail="Server error") from e
                raise HTTPException(status_code=e.status_code, detail=e.detail) from e
            logger.info(f"Retry {attempt + 1} status code {e.status_code}, {e.detail}. Retrying...")
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def retry(func, *args, max_retries=retry_times, **kwargs):
    """Call ``func(*args, **kwargs)``, retrying on HTTPException.

    Synchronous twin of ``async_retry``: at most ``max_retries + 1``
    attempts; on exhaustion the last HTTPException is re-raised (masked as
    a generic 500 "Server error" for server-side failures). Any
    non-HTTPException propagates immediately.
    """
    for attempt in range(max_retries + 1):
        try:
            return func(*args, **kwargs)
        except HTTPException as e:
            if attempt == max_retries:
                logger.error(f"Throw an exception {e.status_code}, {e.detail}")
                if e.status_code == 500:
                    # Hide internal detail from the client on server errors.
                    raise HTTPException(status_code=500, detail="Server error") from e
                raise HTTPException(status_code=e.status_code, detail=e.detail) from e
            # Log retries at info level, matching async_retry (was error).
            logger.info(f"Retry {attempt + 1} status code {e.status_code}, {e.detail}. Retrying...")
|