Upload 6 files
Browse files- .gitattributes +2 -35
- Dockerfile +6 -0
- LICENSE +21 -0
- README.md +20 -11
- main.py +154 -0
- requirements.txt +4 -0
.gitattributes
CHANGED
|
@@ -1,35 +1,2 @@
|
|
| 1 |
-
|
| 2 |
-
*
|
| 3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 1 |
+
# Auto detect text files and perform LF normalization
|
| 2 |
+
* text=auto
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Dockerfile
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Minimal runtime image for the Flask/gevent proxy in main.py.
# NOTE(review): python:alpine is an unpinned tag — consider pinning a version.
FROM python:alpine
WORKDIR /home
# Copy the whole project (main.py, requirements.txt, ...) into the image.
COPY . .
RUN pip install -r requirements.txt
# The gevent WSGI server in main.py listens on port 5000.
EXPOSE 5000
CMD ["python", "main.py"]
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2024 SSJACK8582
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
CHANGED
|
@@ -1,11 +1,20 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Free ChatGPT
|
| 2 |
+
Expose the login-free ChatGPT web backend as an unlimited, free, OpenAI-compatible GPT-3.5-Turbo API.
|
| 3 |
+
# Deploy
|
| 4 |
+
```bash
|
| 5 |
+
pip install -r requirements.txt
|
| 6 |
+
python main.py
|
| 7 |
+
```
|
| 8 |
+
# Example
|
| 9 |
+
```
|
| 10 |
+
curl --location 'http://127.0.0.1:5000/v1/chat/completions' \
|
| 11 |
+
--header 'Content-Type: application/json' \
|
| 12 |
+
--data '{
|
| 13 |
+
"messages": [
|
| 14 |
+
{
|
| 15 |
+
"role": "user",
|
| 16 |
+
"content": "hello"
|
| 17 |
+
}
|
| 18 |
+
]
|
| 19 |
+
}'
|
| 20 |
+
```
|
main.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import time
|
| 4 |
+
import uuid
|
| 5 |
+
import random
|
| 6 |
+
import requests
|
| 7 |
+
import threading
|
| 8 |
+
from flask import Flask, Response, request
|
| 9 |
+
from flask_cors import CORS
|
| 10 |
+
from gevent import pywsgi
|
| 11 |
+
|
| 12 |
+
app = Flask(__name__)
CORS(app)
# Baseline request headers for the anonymous chat.openai.com backend.
# NOTE: set_token() (running in a background thread) mutates this dict in
# place, adding 'oai-device-id' and 'openai-sentinel-chat-requirements-token',
# which get_message() then sends with every conversation request.
headers = {
    'origin': 'https://chat.openai.com',
    'referer': 'https://chat.openai.com',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36'
}
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def set_token():
    """Background worker: refresh the anonymous sentinel chat token every 60s.

    Mutates the module-level ``headers`` dict in place so subsequent
    conversation requests carry a fresh device id and sentinel token.
    Runs forever; errors are printed and retried on the next cycle.
    """
    url = 'https://chat.openai.com/backend-anon/sentinel/chat-requirements'
    while True:
        device_id = str(uuid.uuid4())
        # Request a new token with a blank token header and a fresh device id.
        head = headers.copy()
        head['oai-device-id'] = device_id
        head['openai-sentinel-chat-requirements-token'] = ''
        try:
            # timeout prevents a hung request from stalling the refresh loop forever
            resp = requests.post(url=url, headers=head, json={}, timeout=30)
            resp_json = resp.json()
            print(resp_json['token'])
            # Only publish into the shared headers after a successful fetch.
            headers['oai-device-id'] = device_id
            headers['openai-sentinel-chat-requirements-token'] = resp_json['token']
        except Exception as e:
            print(e)
        time.sleep(60)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def get_message(messages):
    """Stream assistant reply text from the anonymous ChatGPT backend.

    Yields the cumulative assistant text (``parts[0]``) for every
    'in_progress' assistant event in the SSE stream. On any failure a single
    string describing the error is yielded instead of raising.
    """
    url = 'https://chat.openai.com/backend-anon/conversation'
    payload = {
        'action': 'next',
        'messages': messages,
        'parent_message_id': str(uuid.uuid4()),
        'model': 'text-davinci-002-render-sha',
        'timezone_offset_min': -480,
        'suggestions': [],
        'history_and_training_disabled': False,
        'conversation_mode': {
            'kind': 'primary_assistant'
        },
        'force_paragen': False,
        'force_paragen_model_slug': '',
        'force_nulligen': False,
        'force_rate_limit': False,
        'websocket_request_id': str(uuid.uuid4())
    }
    try:
        # timeout guards against a stalled upstream connection
        with requests.post(url=url, headers=headers, json=payload, stream=True, timeout=60) as resp:
            for line in resp.iter_lines():
                if not line:
                    continue
                string = line.decode()
                # SSE frames look like "data: {...}"; skip frames with no JSON
                # body (e.g. the terminal "data: [DONE]").
                if '{' not in string:
                    continue
                data = json.loads(string[6:])
                # 'message' may be present but null; `or {}` keeps the stream
                # alive instead of raising AttributeError on None.
                message = data.get('message') or {}
                if message.get('author', {}).get('role') == 'assistant':
                    if message.get('status') == 'in_progress':
                        parts = message.get('content', {}).get('parts', [''])
                        yield parts[0]
    except Exception as e:
        yield str(e)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def get_completion_id():
    """Return a pseudo-random OpenAI-style id: 'chatcmpl-' + 29 alphanumerics."""
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    suffix = ''.join(random.choice(alphabet) for _ in range(29))
    return 'chatcmpl-' + suffix
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def get_result(messages):
    """Adapt the ChatGPT stream into OpenAI 'chat.completion.chunk' SSE events.

    Converts the cumulative text yielded by get_message() into incremental
    deltas, emits one chunk per delta, then a final 'stop' chunk and the
    '[DONE]' sentinel that OpenAI streaming clients expect.
    """
    completion_id = get_completion_id()
    created = int(time.time())

    def _chunk(delta, finish_reason):
        # Build one OpenAI-compatible SSE frame (shared by content and stop chunks).
        return 'data: {}\n\n'.format(json.dumps({
            'id': completion_id,
            'object': 'chat.completion.chunk',
            'created': created,
            'model': 'gpt-3.5-turbo',
            'choices': [
                {
                    'index': 0,
                    'delta': delta,
                    'logprobs': None,
                    'finish_reason': finish_reason,
                },
            ],
        }))

    string = ''
    for message in get_message(messages):
        # get_message yields cumulative text; slice off what was already sent.
        data = message[len(string):]
        string = message
        yield _chunk({'content': data}, None)
    print(string)
    yield _chunk({}, 'stop')
    # OpenAI streaming responses terminate with a literal [DONE] sentinel.
    yield 'data: [DONE]\n\n'
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
@app.route('/v1/chat/completions', methods=['POST'])
def completions():
    """OpenAI-compatible chat completions endpoint (always streams SSE).

    Translates the OpenAI-style request body into the ChatGPT web backend's
    message format and streams the converted reply back as an event stream.
    """
    try:
        data_json = json.loads(request.get_data())
        print(data_json)
        # Re-shape each {'role', 'content'} message into the backend's schema.
        messages = [
            {
                'id': str(uuid.uuid4()),
                'author': {
                    'role': message.get('role')
                },
                'content': {
                    'content_type': 'text',
                    'parts': [
                        message.get('content')
                    ]
                },
                'metadata': {}
            }
            for message in data_json.get('messages')
        ]
        return Response(get_result(messages), content_type='text/event-stream')
    except Exception as e:
        # str(e): a raw Exception object is not JSON-serializable by Flask,
        # which would turn every handled error into an opaque 500.
        return {'message': str(e)}
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
@app.route('/', methods=['GET'])
def index():
    """Landing route: points callers at the actual completions endpoint."""
    return {'message': '/v1/chat/completions'}
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
if __name__ == '__main__':
    # Optional local proxy, e.g. for debugging:
    # os.environ.update(HTTP_PROXY='127.0.0.1:7890', HTTPS_PROXY='127.0.0.1:7890')
    # daemon=True: the token-refresh loop never exits on its own, so it must
    # not keep the process alive once the WSGI server stops.
    threading.Thread(target=set_token, daemon=True).start()
    server = pywsgi.WSGIServer(('0.0.0.0', 5000), app)
    server.serve_forever()
|
requirements.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
flask
|
| 2 |
+
gevent
|
| 3 |
+
requests
|
| 4 |
+
flask-cors
|