Hiren122 commited on
Commit
b33bd58
·
verified ·
1 Parent(s): bee40fb

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +203 -0
app.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, request, Response, jsonify
2
+ import requests
3
+ import json
4
+ import uuid
5
+ import time
6
+ import os
7
+ import re
8
+ import base64
9
+ import mimetypes
10
+ import random
11
+ from datetime import datetime
12
+
13
app = Flask(__name__)

# ================= CONFIGURATION =================
# Upstream service this proxy forwards chat traffic to.
COGNIX_BASE_URL = "https://www.cognixai.co"
# Chat session id embedded in the upstream referer header (see get_headers).
DEFAULT_SESSION_ID = "f351d7e7-a0ba-4888-86a4-76aab9a7a661"
# JSON file persisting issued API keys; resolved against the process CWD,
# so the store location depends on where the server is launched from.
KEYS_FILE = os.path.join(os.getcwd(), "keys.json")
# NOTE(review): admin secret hard-coded in source — move to an env var and rotate.
MASTER_KEY = "sk-master-onyx-2026" # The Master Key to create other keys

# Tier Limits
RPM_LIMIT = 10  # requests per rolling 60s window, per API key
# Models NOT supported in this tier
# (matched as lower-case substrings of the requested model name)
RESTRICTED_MODELS = ["claude opus-4.6", "3-pro", "gpt-5.3 codex"]
25
+
26
# Bootstrap the key store on first run so later reads never hit a missing file.
if not os.path.exists(KEYS_FILE):
    with open(KEYS_FILE, 'w') as store:
        store.write(json.dumps({"api_keys": {}}))
30
+
31
def load_keys():
    """Load the API-key store from KEYS_FILE.

    Returns:
        dict: the parsed store, or an empty store ``{"api_keys": {}}`` when
        the file is missing, unreadable, or contains invalid JSON.
    """
    try:
        with open(KEYS_FILE, 'r') as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError):
        # Narrowed from a bare `except:` (which also swallowed SystemExit /
        # KeyboardInterrupt). A corrupt or missing store degrades to "no keys"
        # instead of crashing every authenticated request.
        return {"api_keys": {}}
37
+
38
def save_keys(keys_data):
    """Persist the API-key store to KEYS_FILE as pretty-printed JSON."""
    serialized = json.dumps(keys_data, indent=4)
    with open(KEYS_FILE, 'w') as store:
        store.write(serialized)
41
+
42
# Tracking Data
# In-process map: api_key -> list of request timestamps (time.time() seconds)
# inside the rolling 60s window; pruned on each request in validate_access.
# Resets on process restart.
rpm_tracking = {}

# NOTE(review): this is a live browser session (cf_clearance + better-auth
# session/state tokens) committed to source. These are credentials — rotate
# them and load from an environment variable or secret store instead.
# get_headers() picks one entry at random per upstream request.
COGNIX_COOKIES = [
    "ext_name=ojplmecpdpgccookcobabopnaifgidhf; cf_clearance=j_nYaeNI0RwDRG1Qyd.bRf0R5YCGgIgAEzEgaQEjCCU-1770908625-1.2.1.1-RMchxpAE5hSG0Xl4XY3BShfT4aXGHCqNiBxN6iyTGkrv8azqzeTMuCOKZZ1lHjBZ5kdtj4.F_hmpP2legrsaaSe16gMqtqa5.FrM7yNuGQczvf1ep45loNu5MhI151HAk0k9T5UKDHdHXHcidlUt_ajlE64FUTSj26Rf6WwTg55n.xeliVOzxYygojzifx7hywAXmXMAqCpKADeDnSuEWqahc2_zDnpJxwy4444gh_o; __Secure-better-auth.state=FOj7ymeub1GeD3s4fiEbm9Hrd-hE0slR.oM0kHle4Je9FhUDPisXmPSHQvH4nkqldTe3kRBrTHJk%3D; __Secure-better-auth.session_token=5npdnyCa90buJBq2qW2wopL6nC3HjO4R.5v3gNhODuU7F0hbVXAJ%2BPFgMPsCPM0j8J%2BHk%2FrqsNdc%3D; __Secure-better-auth.session_data=eyJzZXNzaW9uIjp7InNlc3Npb24iOnsiZXhwaXJlc0F0IjoiMjAyNi0wMi0xOVQxNTowMzo0OC44MjNaIiwidG9rZW4iOiI1bnBkbnlDYTkwYnVKQnEycVcyd29wTDZuQzNIak80UiIsImNyZWF0ZWRBdCI6IjIwMjYtMDItMTJUMTU6MDM6NDguODIzWiIsInVwZGF0ZWRBdCI6IjIwMjYtMDItMTJUMTU6MDM6NDguODIzWiIsImlwQWRkcmVzcyI6IjE2Mi4xNTguNjMuMjQwIiwidXNlckFnZW50IjoiTW96aWxsYS81LjAgKFdpbmRvd3MgTlQgMTAuMDsgV2luNjQ7IHg2NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzE0NC4wLjAuMCBTYWZhcmkvNTM3LjM2IiwidXNlcklkIjoiODM0YWZkYWEtOWFiYy00OGNkLTkwMzQtNzU4YTMzY2M3NTUxIiwiaW1wZXJzb25hdGVkQnkiOm51bGwsImlkIjoiNzk5ODJjMWMtZjQwOC00ODYyLWI0ZGEtMzI2ZTZkZmQ1NWU0In0sInVzZXIiOnsibmFtZSI6IkhpcmVuIEFoYWxhd2F0IiwiZW1haWwiOiJnaGc2NDI3MkBnbWFpbC5jb20iLCJlbWFpbFZlcmlmaWVkIjp0cnVlLCJpbWFnZSI6Imh0dHBzOi8vbGgzLmdvb2dsZXVzZXJjb250ZW50LmNvbS9hL0FDZzhvY0ozTVo3MjdKYzlJU244bERCcUplS2MyU0MxYXV5djFlbkV1bWxuTDhmR01CaEp0OGNUPXM5Ni1jIiwiY3JlYXRlZEF0IjoiMjAyNi0wMS0yNlQwNTo0NzoyNC43NzNaIiwidXBkYXRlZEF0IjoiMjAyNi0wMS0yNlQwNTo0NzoyNC43NzNaIiwicm9sZSI6ImVkaXRvciIsImJhbm5lZCI6ZmFsc2UsImJhblJlYXNvbiI6bnVsbCwiYmFuRXhwaXJlcyI6bnVsbCwiaWQiOiI4MzRhZmRhYS05YWJjLTQ4Y2QtOTAzNC03NThhMzNjYzc1NTEifX0sImV4cGlyZXNBdCI6MTc3MDkxMjIyODgzNCwic2lnbmF0dXJlIjoidXpNQWloYU9Sbk1QSnZ1V2VCMDdtOGcxSHliYVVrT2hLU05PS3JKSE96byJ9"
]
48
+
49
def get_headers():
    """Build the browser-like header set for one upstream Cognix request.

    A cookie is chosen at random from COGNIX_COOKIES on every call.
    """
    session_cookie = random.choice(COGNIX_COOKIES)
    headers = {
        "accept": "*/*",
        "accept-language": "en-IN,en;q=0.9",
        "cookie": session_cookie,
        "origin": "https://www.cognixai.co",
        "referer": f"https://www.cognixai.co/chat/{DEFAULT_SESSION_ID}",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/144.0.0.0 Safari/537.36",
    }
    return headers
58
+
59
# ================= AUTH & LIMITS =================

@app.before_request
def validate_access():
    """Gate every /v1/* request: API-key auth, RPM limiting, model tier check.

    Runs before each route handler. Returning None lets Flask continue to the
    handler; returning a response tuple short-circuits the request.
    """
    # Public endpoints: root, the model listing, and anything outside /v1/.
    if request.path == "/" or request.path == "/v1/models" or not request.path.startswith("/v1/"):
        return

    # The admin route authenticates against the master key only.
    if request.path == "/v1/admin/create-key":
        auth_header = request.headers.get("Authorization", "")
        if auth_header != f"Bearer {MASTER_KEY}":
            return jsonify({"error": "Admin Access Denied"}), 403
        return

    # Standard Key Validation
    auth_header = request.headers.get("Authorization", "")
    if not auth_header.startswith("Bearer "):
        return jsonify({"error": "Unauthorized", "message": "Missing Bearer token"}), 401

    # Strip only the leading scheme. The previous str.replace("Bearer ", "")
    # removed EVERY occurrence of the substring, corrupting any key that
    # happened to contain "Bearer " in its body.
    api_key = auth_header[len("Bearer "):].strip()
    data = load_keys()

    if api_key not in data["api_keys"]:
        return jsonify({"error": "Unauthorized", "message": "Invalid API Key"}), 401

    # RPM Enforcement: keep only timestamps from the rolling 60s window,
    # and write the pruned list back even when the limit is hit so the
    # per-key list cannot grow without bound.
    now = time.time()
    recent = [t for t in rpm_tracking.get(api_key, []) if now - t < 60]
    if len(recent) >= RPM_LIMIT:
        rpm_tracking[api_key] = recent
        return jsonify({"error": "Rate limit exceeded", "message": f"Free Tier: {RPM_LIMIT} RPM"}), 429
    recent.append(now)
    rpm_tracking[api_key] = recent

    # Model Exclusion Check: block premium models on chat completions.
    if request.method == "POST" and request.path.endswith("/chat/completions"):
        body = request.get_json(silent=True) or {}
        model = body.get("model", "").lower()
        if any(rm in model for rm in RESTRICTED_MODELS):
            return jsonify({
                "error": "Access Forbidden",
                "message": f"Model '{model}' is not supported in the Free Tier. Upgrade to Development Plan."
            }), 403
103
+
104
# ================= ADMIN ROUTES =================

@app.route('/v1/admin/create-key', methods=['POST'])
def create_key():
    """Mint a new free-tier API key and persist it to the key store.

    Master-key authentication already happened in validate_access. Expects a
    JSON body with a "name" field; returns the generated key on success.
    """
    # get_json(silent=True) tolerates a missing or non-JSON body; the old
    # `request.json` would abort with an unhandled error instead of our 400.
    d = request.get_json(silent=True) or {}
    name = d.get('name')
    if not name:
        return jsonify({"error": "Name is required"}), 400

    # Key format: sk-free-<slugged name>-<6 hex chars of randomness>.
    new_key = f"sk-free-{name.lower().replace(' ', '-')}-{uuid.uuid4().hex[:6]}"

    data = load_keys()
    data["api_keys"][new_key] = {
        "name": name,
        "created_at": datetime.now().isoformat()
    }
    save_keys(data)

    return jsonify({
        "status": "success",
        "key": new_key,
        "name": name,
        "details": "This key is limited to 10 RPM and basic models only."
    })
128
+
129
# ================= CORE PROXY LOGIC =================

def parse_cognix_stream_chunk(line):
    """Parse one line of a Cognix SSE stream into (text, part_type).

    Args:
        line: one raw line, optionally prefixed with "data: ".

    Returns:
        tuple[str | None, str]: ``(None, "stop")`` on the ``[DONE]`` sentinel;
        ``(None, "content")`` for blank lines; otherwise ``(text, "content")``
        where text is extracted from the JSON payload's ``text``/``content``/
        ``delta`` fields ("" when none carry text). Lines that are not JSON
        objects are passed through verbatim as content.
    """
    if not line.strip():
        return None, "content"
    if line.startswith("data: "):
        line = line[6:]
    if line.strip() == "[DONE]":
        return None, "stop"
    try:
        data = json.loads(line)
        content = data.get('text') or data.get('content')
        if not content:
            delta = data.get('delta')
            if isinstance(delta, str):
                content = delta
            elif isinstance(delta, dict):
                content = delta.get('text') or delta.get('content', '')
        return content or "", "content"
    except (json.JSONDecodeError, AttributeError):
        # Narrowed from a bare `except:`. JSONDecodeError: not JSON — treat the
        # raw line as text. AttributeError: valid JSON but not an object
        # (e.g. a bare number), so `.get` is unavailable — same fallback.
        return line, "content"
145
+
146
@app.route('/v1/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible chat endpoint proxied onto Cognix's /api/chat.

    Only the system prompt plus the latest non-system message are forwarded
    (earlier conversation turns are dropped). Supports both streaming (SSE)
    and blocking responses, selected by the request body's "stream" flag.
    """
    # Tolerate a missing/non-JSON body instead of raising inside the handler.
    d = request.get_json(silent=True) or {}
    model = d.get('model', 'openai/gpt-4o')
    messages = d.get('messages', [])
    system = next((m.get('content', '') for m in messages if m.get('role') == 'system'), "")
    filtered = [m for m in messages if m.get('role') != 'system']

    # "provider/model" split; bare model names default to the openai provider.
    prov, ver = model.split('/', 1) if '/' in model else ("openai", model)

    payload = {
        "id": str(uuid.uuid4()),
        "chatModel": {"provider": prov, "model": ver},
        "message": {"role": "user", "parts": [{"type": "text", "text": f"{system}\n\n{filtered[-1].get('content', '') if filtered else ''}"}], "id": str(uuid.uuid4())},
        "allowedAppDefaultToolkit": ["webSearch"],
        "attachments": []
    }

    if d.get('stream'):
        def gen():
            cid = f"chatcmpl-{uuid.uuid4().hex[:24]}"
            # Initial role chunk, mirroring OpenAI's stream framing.
            yield f"data: {json.dumps({'id': cid, 'object': 'chat.completion.chunk', 'choices': [{'delta': {'role': 'assistant'}}]})}\n\n"
            # timeout= keeps a wedged upstream from pinning this worker forever.
            with requests.post(f"{COGNIX_BASE_URL}/api/chat", json=payload, headers=get_headers(), stream=True, timeout=120) as r:
                for line in r.iter_lines(decode_unicode=True):
                    if not line:
                        continue
                    cont, pty = parse_cognix_stream_chunk(line)
                    if pty == "stop":
                        break
                    if cont:
                        yield f"data: {json.dumps({'id': cid, 'object': 'chat.completion.chunk', 'choices': [{'delta': {'content': cont}}]})}\n\n"
            yield "data: [DONE]\n\n"
        return Response(gen(), content_type='text/event-stream')

    # Blocking mode: fetch the whole upstream stream, then stitch the chunks
    # together. `with` closes the response; timeout= bounds the wait.
    with requests.post(f"{COGNIX_BASE_URL}/api/chat", json=payload, headers=get_headers(), timeout=120) as r:
        full_text = "".join(parse_cognix_stream_chunk(l)[0] or "" for l in r.text.strip().split('\n'))
    return jsonify({"id": str(uuid.uuid4()), "object": "chat.completion", "choices": [{"message": {"role": "assistant", "content": full_text}, "finish_reason": "stop"}]})
181
+
182
@app.route('/v1/models', methods=['GET'])
def list_models():
    """Return the free-tier model catalogue in OpenAI list format."""
    catalogue = ["openai/gpt-4o", "anthropic/claude-3-5-sonnet"]
    return jsonify({
        "object": "list",
        "data": [{"id": model_id, "object": "model"} for model_id in catalogue]
    })
191
+
192
@app.route('/')
def index():
    """Health/info endpoint describing this tier's limits and status."""
    tier_info = {
        "tier": "Public Free Tier",
        "rpm_limit": RPM_LIMIT,
        "restricted_models": RESTRICTED_MODELS,
        "status": "online",
    }
    return jsonify(tier_info)
200
+
201
if __name__ == '__main__':
    # Banner fixed: it previously claimed port 7866 while the server bound 7860.
    print("🚀 Public Free Tier Proxy (Port 7860) Started...")
    # debug=False: Flask's debug mode on 0.0.0.0 exposes the Werkzeug
    # interactive debugger, which allows remote code execution.
    app.run(host='0.0.0.0', port=7860, debug=False)