Hiren122 commited on
Commit
59fb399
·
verified ·
1 Parent(s): 5f75560

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +377 -162
app.py CHANGED
@@ -1,180 +1,395 @@
1
- import os
 
2
  import json
3
  import uuid
4
- import requests
5
  import time
6
- from flask import Flask, request, Response, jsonify
 
 
 
 
7
 
8
  app = Flask(__name__)
9
 
10
- @app.route('/')
11
- def index():
12
- return '''
13
- <h1>🚀 CognixAI Proxy</h1>
14
- <p>OpenAI-compatible API endpoint: <code>/v1/chat/completions</code></p>
15
- <p>Status: ✅ Running</p>
16
- '''
17
-
18
- # ==============================================================================
19
- # CONFIG
20
- # ==============================================================================
21
- # Hardcoded credentials from hi.py
22
- # You can also use os.environ.get("COOKIE_VALUE") if you add secrets in Space settings
23
- RAW_COOKIE = "cf_clearance=OKEc8I4zkAdc_cYAx8zSVCOSuOSV2_kVsqBE7aaIIeM-1770044471-1.2.1.1-gaa4G052d_XBWdr.OJMsLGakekbPev1RiqMPA0vukct0SIspf0SInwXuY3yJeadggphdWtqYgRodnKKg9WfzOJh2EUtNY53fnnkHhs5ntywLMuk73FZTEI5NMim_FxjJ2xQXXRFxOAuacHCESyTeC96oOr_FrTdu0gZ0hJX3z5Wurw2h2KGEMqsAq737XtG2G6qUYckk10v4uWxLyFmEZHIiAajtwdh6F.ImIFJN_.Y; __Secure-better-auth.state=M-xPebcuX_QNYD573nAaocuMydAVHN5i.vRMOdmB9Lyb6Pyw2D7NejhtM%2FuWyWoNLGRnJv3Zou8k%3D; __Secure-better-auth.session_token=JQfRDZNrxwqaQ5kImpLqwm16hBFLPaQV.IUrxupoeprZ1fq7HftehR%2FybojAB%2FzcY%2BjT7KAiKKw8%3D; __Secure-better-auth.session_data=eyJzZXNzaW9uIjp7InNlc3Npb24iOnsiZXhwaXJlc0F0IjoiMjAyNi0wMi0wOVQxNTowMTozMS45MDBaIiwidG9rZW4iOiJKUWZSRFpOcnh3cWFRNWtJbXBMcXdtMTZoQkZMUGFRViIsImNyZWF0ZWRBdCI6IjIwMjYtMDItMDJUMTU6MDE6MzEuOTAwWiIsInVwZGF0ZWRBdCI6IjIwMjYtMDItMDJUMTU6MDE6MzEuOTAwWiIsImlwQWRkcmVzcyI6IjEwNC4yMy4xOTAuMTQ4IiwidXNlckFnZW50IjoiTW96aWxsYS81LjAgKEFuZHJvaWQgMTQ7IE1vYmlsZTsgcnY6MTQ3LjApIEdlY2tvLzE0Ny4wIEZpcmVmb3gvMTQ3LjAiLCJ1c2VySWQiOiIyZTRjYmVlYS03ODQ2LTQwYTktYmI0Yy1kNzQ3MWEzMWJiMTgiLCJpbXBlcnNvbmF0ZWRCeSI6bnVsbCwiaWQiOiJkMGU4OTZmZi04ZDhmLTQ4Y2MtOTIwZS05ZjBjYjc4OWM4NDUifSwidXNlciI6eyJuYW1lIjoiQnJ1ayBHZXRhY2hldyIsImVtYWlsIjoiYnJ1a2c5NDE5QGdtYWlsLmNvbSIsImVtYWlsVmVyaWZpZWQiOnRydWUsImltYWdlIjoiaHR0cHM6Ly9saDMuZ29vZ2xldXNlcmNvbnRlbnQuY29tL2EvQUNnOG9jSjBHeWpPWFVmSXVpVTIzeTdGcE53WExKcHdQNHRQTV95R3RhVkp2aU9HTjBITW1RPXM5Ni1jIiwiY3JlYXRlZEF0IjoiMjAyNi0wMi0wMlQxNTowMTozMS44NDZaIiwidXBkYXRlZEF0IjoiMjAyNi0wMi0wMlQxNTowMTozMS44NDZaIiwicm9sZSI6ImVkaXRvciIsImJhbm5lZCI6ZmFsc2UsImJhblJlYXNvbiI6bnVsbCwiYmFuRXhwaXJlcyI6bnVsbCwiaWQiOiIyZTRjYmVlYS03ODQ2LTQwYTktYmI0Yy1kNzQ3MWEzMWJiMTgifX0sImV4cGlyZXNBdCI6MTc3MDA1Mzg4MjkzNiwic2lnbmF0dXJlIjoiZFFTQkVZbmFlWTV2MGM1ZE8zbURWU1RSRlVDWURrMlFYN1J0bFItdVpObyJ9"
24
-
25
- HEADERS = {
26
- "Accept": "*/*",
27
- "Accept-Encoding": "gzip, deflate, br, zstd",
28
- "Accept-Language": "en-ER,am-ER;q=0.9",
29
- "Connection": "keep-alive",
30
- "Content-Type": "application/json",
31
- "Origin": "https://www.cognixai.co",
32
- "Referer": "https://www.cognixai.co/chat",
33
- "Sec-Fetch-Dest": "empty",
34
- "Sec-Fetch-Mode": "cors",
35
- "Sec-Fetch-Site": "same-origin",
36
- "User-Agent": "Mozilla/5.0 (Android 14; Mobile; rv:147.0) Gecko/147.0 Firefox/147.0"
37
- }
38
-
39
- def parse_cookies(cookie_string):
40
- """Parses cookie string into dict"""
41
- cookies = {}
42
- items = cookie_string.split(';')
43
- for item in items:
44
- if '=' in item:
45
- parts = item.strip().split('=', 1)
46
- if len(parts) == 2:
47
- cookies[parts[0]] = parts[1]
48
- return cookies
49
-
50
- COOKIES = parse_cookies(RAW_COOKIE)
51
-
52
- # ==============================================================================
53
- # LOGIC
54
- # ==============================================================================
55
-
56
- def generate_openai_chunk(content, model, finish_reason=None):
57
- """Create OpenAI-compatible stream chunk"""
58
- chunk_id = f"chatcmpl-{uuid.uuid4().hex[:24]}"
59
- timestamp = int(time.time())
60
 
61
- return {
62
- "id": chunk_id,
63
- "object": "chat.completion.chunk",
64
- "created": timestamp,
65
- "model": model,
66
- "choices": [{
67
- "index": 0,
68
- "delta": {"content": content} if content else {},
69
- "finish_reason": finish_reason
70
- }]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  }
 
 
 
72
 
73
- @app.route('/v1/chat/completions', methods=['POST'])
74
- def chat_completions():
75
- try:
76
- data = request.json
77
- messages = data.get('messages', [])
78
- model = data.get('model', 'Gemini 3 Pro Thikning') # Default from hi.py
79
- stream = data.get('stream', False)
80
-
81
- # 1. Extract User Prompt
82
- last_user_content = ""
83
- for msg in reversed(messages):
84
- if msg['role'] == 'user':
85
- content = msg['content']
86
- if isinstance(content, list):
87
- last_user_content = " ".join([p.get('text', '') for p in content if p.get('type') == 'text'])
88
- else:
89
- last_user_content = str(content)
90
- break
91
-
92
- if not last_user_content:
93
- return jsonify({"error": "No user message found"}), 400
94
-
95
- # 2. Build Payload for Cognix
96
- payload = {
97
- "id": str(uuid.uuid4()),
98
- "chatModel": {
99
- "provider": "google",
100
- "model": model
101
- },
102
- "toolChoice": "auto",
103
- "allowedAppDefaultToolkit": ["code", "visualization"],
104
- "message": {
105
- "role": "user",
106
- "parts": [{"type": "text", "text": last_user_content}],
107
- "id": str(uuid.uuid4())
108
- },
109
- "imageTool": {},
110
- "attachments": []
111
- }
112
 
113
- # 3. Stream Response
114
- def generate():
115
- url = "https://www.cognixai.co/api/chat"
116
- print(f"Sending to Cognix: {last_user_content[:50]}...")
117
-
118
- with requests.post(url, headers=HEADERS, cookies=COOKIES, json=payload, stream=True) as response:
119
- if response.status_code != 200:
120
- yield f"data: {json.dumps({'error': f'Upstream Error: {response.status_code}'})}\n\n"
121
- return
122
-
123
- for line in response.iter_lines():
124
- if line:
125
- decoded = line.decode('utf-8')
126
- if decoded.startswith("data: "):
127
- try:
128
- json_data = json.loads(decoded[6:])
129
- delta_content = None
130
-
131
- # Logic from hi.py
132
- if json_data.get("type") == "reasoning-delta":
133
- delta_content = json_data.get("delta", "")
134
- elif json_data.get("type") == "text-delta":
135
- delta_content = json_data.get("delta", "")
136
- elif json_data.get("type") == "error":
137
- print(f"Stream Error: {json_data}")
138
-
139
- if delta_content:
140
- chunk = generate_openai_chunk(delta_content, model)
141
- yield f"data: {json.dumps(chunk)}\n\n"
142
-
143
- except Exception as e:
144
- print(f"Parse error: {e}")
145
- continue
146
 
147
- # End of stream
148
- final_chunk = generate_openai_chunk(None, model, "stop")
149
- yield f"data: {json.dumps(final_chunk)}\n\n"
150
- yield "data: [DONE]\n\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
151
 
152
- if stream:
153
- return Response(generate(), mimetype='text/event-stream')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
  else:
155
- # Non-streaming fallback (collect all)
156
- full_response = ""
157
- for chunk_str in generate():
158
- if chunk_str.startswith("data: [DONE]"): break
159
- if chunk_str.startswith("data: "):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160
  try:
161
- chunk = json.loads(chunk_str[6:])
162
- if 'choices' in chunk:
163
- content = chunk['choices'][0]['delta'].get('content', '')
164
- full_response += content
165
  except: pass
166
-
167
- return jsonify({
168
- "id": str(uuid.uuid4()),
169
- "object": "chat.completion",
170
- "created": int(time.time()),
171
- "model": model,
172
- "choices": [{"index": 0, "message": {"role": "assistant", "content": full_response}, "finish_reason": "stop"}]
173
- })
 
 
 
174
 
175
- except Exception as e:
176
- print(f"Error: {e}")
177
- return jsonify({"error": str(e)}), 500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
  if __name__ == '__main__':
180
- app.run(host='0.0.0.0', port=7860)
 
1
+ from flask import Flask, request, Response, jsonify
2
+ import requests
3
  import json
4
  import uuid
 
5
  import time
6
+ import os
7
+ import re
8
+ import base64
9
+ import mimetypes
10
+ import random
11
 
12
app = Flask(__name__)

# ================= DEMO RATE LIMIT =================
# In-memory, per-IP sliding-window limiter. Single-process demo only:
# state is not shared across workers and resets on restart.
rate_limit_data = {}  # client_ip -> list of request timestamps (last 60 s)
RPM_LIMIT = 3  # allowed requests per rolling minute

@app.before_request
def check_rate_limit():
    """Reject /v1/* requests from IPs over RPM_LIMIT in the last minute.

    Returns a 429 JSON response when over the limit; returning None lets
    Flask continue to the matched route.
    """
    # Only rate limit the main API paths
    if not request.path.startswith('/v1/'):
        return

    client_ip = request.remote_addr
    now = time.time()

    # Fix: prune stale entries for every IP so the table cannot grow
    # without bound (previously only the requesting IP was filtered,
    # so one-off clients leaked an entry forever).
    for ip in list(rate_limit_data):
        rate_limit_data[ip] = [t for t in rate_limit_data[ip] if now - t < 60]
        if not rate_limit_data[ip]:
            del rate_limit_data[ip]

    recent = rate_limit_data.setdefault(client_ip, [])

    if len(recent) >= RPM_LIMIT:
        return jsonify({
            "error": "Rate limit exceeded",
            "message": f"This is a DEMO server. You are limited to {RPM_LIMIT} requests per minute. Contact admin for full access."
        }), 429

    recent.append(now)
# ===================================================
41
+
42
# Configuration
COGNIX_BASE_URL = os.environ.get("COGNIX_BASE_URL", "https://www.cognixai.co")
# Supports || separated cookies for rotation
COGNIX_COOKIES_RAW = os.environ.get("COGNIX_COOKIE", "")
COGNIX_COOKIES = [c.strip() for c in COGNIX_COOKIES_RAW.split("||") if c.strip()]

# SECURITY: this is a live session credential committed to source control
# (cf_clearance + better-auth session tokens). It should be revoked and
# supplied only via the COGNIX_COOKIE environment variable.
_FALLBACK_COOKIE = "ext_name=ojplmecpdpgccookcobabopnaifgidhf; cf_clearance=j_nYaeNI0RwDRG1Qyd.bRf0R5YCGgIgAEzEgaQEjCCU-1770908625-1.2.1.1-RMchxpAE5hSG0Xl4XY3BShfT4aXGHCqNiBxN6iyTGkrv8azqzeTMuCOKZZ1lHjBZ5kdtj4.F_hmpP2legrsaaSe16gMqtqa5.FrM7yNuGQczvf1ep45loNu5MhI151HAk0k9T5UKDHdHXHcidlUt_ajlE64FUTSj26Rf6WwTg55n.xeliVOzxYygojzifx7hywAXmXMAqCpKADeDnSuEWqahc2_zDnpJxwy4444gh_o; __Secure-better-auth.state=FOj7ymeub1GeD3s4fiEbm9Hrd-hE0slR.oM0kHle4Je9FhUDPisXmPSHQvH4nkqldTe3kRBrTHJk%3D; __Secure-better-auth.session_token=5npdnyCa90buJBq2qW2wopL6nC3HjO4R.5v3gNhODuU7F0hbVXAJ%2BPFgMPsCPM0j8J%2BHk%2FrqsNdc%3D; __Secure-better-auth.session_data=eyJzZXNzaW9uIjp7InNlc3Npb24iOnsiZXhwaXJlc0F0IjoiMjAyNi0wMi0xOVQxNTowMzo0OC44MjNaIiwidG9rZW4iOiI1bnBkbnlDYTkwYnVKQnEycVcyd29wTDZuQzNIak80UiIsImNyZWF0ZWRBdCI6IjIwMjYtMDItMTJUMTU6MDM6NDguODIzWiIsInVwZGF0ZWRBdCI6IjIwMjYtMDItMTJUMTU6MDM6NDguODIzWiIsImlwQWRkcmVzcyI6IjE2Mi4xNTguNjMuMjQwIiwidXNlckFnZW50IjoiTW96aWxsYS81LjAgKFdpbmRvd3MgTlQgMTAuMDsgV2luNjQ7IHg2NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzE0NC4wLjAuMCBTYWZhcmkvNTM3LjM2IiwidXNlcklkIjoiODM0YWZkYWEtOWFiYy00OGNkLTkwMzQtNzU4YTMzY2M3NTUxIiwiaW1wZXJzb25hdGVkQnkiOm51bGwsImlkIjoiNzk5ODJjMWMtZjQwOC00ODYyLWI0ZGEtMzI2ZTZkZmQ1NWU0In0sInVzZXIiOnsibmFtZSI6IkhpcmVuIEFoYWxhd2F0IiwiZW1haWwiOiJnaGc2NDI3MkBnbWFpbC5jb20iLCJlbWFpbFZlcmlmaWVkIjp0cnVlLCJpbWFnZSI6Imh0dHBzOi8vbGgzLmdvb2dsZXVzZXJjb250ZW50LmNvbS9hL0FDZzhvY0ozTVo3MjdKYzlJU244bERCcUplS2MyU0MxYXV5djFlbkV1bWxuTDhmR01CaEp0OGNUPXM5Ni1jIiwiY3JlYXRlZEF0IjoiMjAyNi0wMS0yNlQwNTo0NzoyNC43NzNaIiwidXBkYXRlZEF0IjoiMjAyNi0wMS0yNlQwNTo0NzoyNC43NzNaIiwicm9sZSI6ImVkaXRvciIsImJhbm5lZCI6ZmFsc2UsImJhblJlYXNvbiI6bnVsbCwiYmFuRXhwaXJlcyI6bnVsbCwiaWQiOiI4MzRhZmRhYS05YWJjLTQ4Y2QtOTAzNC03NThhMzNjYzc1NTEifX0sImV4cGlyZXNBdCI6MTc3MDkxMjIyODgzNCwic2lnbmF0dXJlIjoidXpNQWloYU9Sbk1QSnZ1V2VCMDdtOGcxSHliYVVrT2hLU05PS3JKSE96byJ9"

def get_cognix_cookie():
    """Return a cookie header value, rotating randomly over COGNIX_COOKIES.

    Falls back to the hardcoded demo credential when no cookies were
    configured via the COGNIX_COOKIE environment variable.
    """
    if not COGNIX_COOKIES:
        return _FALLBACK_COOKIE
    return random.choice(COGNIX_COOKIES)

# Chat session id used in the Referer header (an existing Cognix chat).
DEFAULT_COGNIX_SESSION_ID = "f351d7e7-a0ba-4888-86a4-76aab9a7a661"

# Store uploaded files metadata: file id -> {_data (b64), content_type, filename}
files_cache = {}
58
+
59
def get_headers(multipart=False):
    """Build the browser-mimicking header set for Cognix API requests.

    When *multipart* is true the content-type header is omitted so that
    `requests` can supply its own multipart boundary.
    """
    headers = {
        "accept": "*/*",
        "accept-language": "en-IN,en-GB;q=0.9,en-US;q=0.8,en;q=0.7",
        "cookie": get_cognix_cookie(),
        "origin": "https://www.cognixai.co",
        "referer": f"https://www.cognixai.co/chat/{DEFAULT_COGNIX_SESSION_ID}",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/144.0.0.0 Safari/537.36"
    }
    if not multipart:
        headers["content-type"] = "application/json"
    return headers
71
 
72
# Model Cache
# Last successful /api/chat/models result plus the time it was fetched.
model_cache = {"data": [], "last_updated": 0}

def fetch_cognix_models():
    """Fetch available models from Cognix API and format for OpenAI compatibility."""
    now = time.time()
    # Cache for 10 minutes (shorter for debugging/dynamic updates)
    if model_cache["data"] and now - model_cache["last_updated"] < 600:
        return model_cache["data"]

    # Last known good data, or a hardcoded default when nothing is cached.
    fallback = model_cache["data"] if model_cache["data"] else [{"id": "anthropic/Claude Opus 4.6", "object": "model"}]

    # Use existing header system for cookies, plus client-hint headers.
    headers = get_headers()
    headers.update({
        "sec-ch-ua-platform": '"Windows"',
        "sec-ch-ua": '"Not(A:Brand";v="8", "Chromium";v="144", "Google Chrome";v="144"',
        "sec-ch-ua-mobile": "?0"
    })

    try:
        resp = requests.get(f"{COGNIX_BASE_URL}/api/chat/models", headers=headers, timeout=15)
        if resp.status_code == 200:
            try:
                data = resp.json()
            except Exception:
                # Fallback if response is not JSON
                return fallback

            models = []
            if isinstance(data, list):
                for entry in data:
                    provider = entry.get("provider")
                    # Skip 'cognix' provider as requested
                    if provider == "cognix":
                        continue
                    for m in entry.get("models", []):
                        model_name = m.get("name")
                        if not model_name:
                            continue
                        models.append({
                            "id": f"{provider}/{model_name}",
                            "object": "model",
                            "created": int(now),
                            "owned_by": provider
                        })

            if models:
                model_cache["data"] = models
                model_cache["last_updated"] = now
                return models
    except Exception as e:
        print(f"Error fetching models from Cognix: {e}")

    return fallback
128
 
129
@app.route('/v1/models', methods=['GET'])
def list_models():
    """OpenAI-compatible model listing endpoint."""
    return jsonify({"object": "list", "data": fetch_cognix_models()})
133
+
134
# ============== File Support ==============

def upload_file_to_cognix(file_bytes, filename, media_type):
    """Upload a file to CognixAI storage API and return attachment metadata.

    Returns None on any failure (non-200 status, unsuccessful response
    body, or transport/parsing error).
    """
    url = f"{COGNIX_BASE_URL}/api/storage/upload"
    try:
        multipart = {'file': (filename, file_bytes, media_type)}
        resp = requests.post(url, files=multipart, headers=get_headers(multipart=True), timeout=60)
        if resp.status_code != 200:
            print(f"Upload failed: {resp.status_code} - {resp.text}")
            return None
        res = resp.json()
        if not res.get("success"):
            return None
        metadata = res.get("metadata", {})
        return {
            "id": res.get("key"),  # Using key as ID
            "name": metadata.get("filename", filename),
            "type": metadata.get("contentType", media_type),
            "url": res.get("url"),
            "size": metadata.get("size", 0),
            "key": res.get("key")
        }
    except Exception as e:
        print(f"Upload error: {e}")
        return None
163
+
164
def extract_files_from_messages(messages, msg_format="openai"):
    """Extract images and files from message blocks.

    Handles OpenAI-style ``image_url`` blocks (previously uploaded file
    ids, ``data:`` URIs and remote http(s) URLs) and Anthropic-style
    ``image`` blocks with base64 sources. Returns a list of dicts: either
    cached entries from ``files_cache`` or new entries carrying a base64
    payload under ``_data`` with ``content_type`` and ``filename``.
    """
    files = []

    def get_id_from_url(url):
        # Map a URL back to a previously cached file id, if any.
        if not isinstance(url, str):
            return None
        if url in files_cache:
            return url
        match = re.search(r'(file-[a-f0-9]{24})', url)
        if match:
            fid = match.group(1)
            if fid in files_cache:
                return fid
        return None

    for msg in messages:
        content = msg.get('content', '')
        if not isinstance(content, list):
            continue

        for block in content:
            if not isinstance(block, dict):
                continue
            block_type = block.get('type')

            if block_type == 'image_url':
                url = block.get('image_url', {}).get('url', '')
                f_id = get_id_from_url(url)
                if f_id:
                    files.append(files_cache[f_id])
                elif url.startswith('data:'):
                    try:
                        header, b64 = url.split(',', 1)
                        mime = header.split(':')[1].split(';')[0]
                        files.append({"_data": b64, "content_type": mime, "filename": f"img_{uuid.uuid4().hex[:8]}"})
                    except Exception:
                        # Fix: was a bare `except:` (also swallowed SystemExit /
                        # KeyboardInterrupt). Malformed data: URI — best-effort skip.
                        pass
                elif url.startswith('http'):
                    try:
                        resp = requests.get(url, timeout=30)
                        if resp.status_code == 200:
                            files.append({"_data": base64.b64encode(resp.content).decode('utf-8'), "content_type": resp.headers.get('content-type', 'image/png'), "filename": f"img_{uuid.uuid4().hex[:8]}"})
                    except Exception:
                        # Fix: narrowed bare `except:`. Network failure — best-effort skip.
                        pass
            elif block_type == 'image':
                src = block.get('source', {})
                if src.get('type') == 'base64':
                    files.append({"_data": src.get('data'), "content_type": src.get('media_type'), "filename": f"img_{uuid.uuid4().hex[:8]}"})
    return files
207
 
208
# ============== Tool Calling Support ==============

def build_tools_system_prompt(tools, tool_format="openai"):
    """Render tool definitions into a system-prompt instruction block.

    Accepts both OpenAI (``{"function": {...}}``) and Anthropic
    (``{"input_schema": {...}}``) shaped tool entries; returns "" when no
    tools are given.
    """
    if not tools:
        return ""
    specs = []
    for tool in tools:
        func = tool.get('function', tool)
        if tool_format == "anthropic":
            fallback_schema = tool.get('input_schema', {})
        else:
            fallback_schema = {}
        specs.append({
            "name": func.get('name', ''),
            "description": func.get('description', ''),
            "parameters": func.get('parameters', fallback_schema)
        })
    return f"Available Tools:\n{json.dumps(specs, indent=2)}\n\nTo use a tool, output: <tool_call>{{\"name\": \"...\", \"id\": \"...\", \"input\": {{...}}}}</tool_call>"
221
+
222
def parse_tool_calls_from_response(text):
    """Split model output into plain text and parsed <tool_call> payloads.

    Returns ``(remaining_text, tool_calls)``. Tags whose body is not valid
    JSON are left verbatim in the text rather than dropped.
    """
    calls = []
    segments = []
    tag_re = re.compile(r'<tool_call>\s*(.*?)\s*</tool_call>', re.DOTALL)
    found = list(tag_re.finditer(text))
    if not found:
        segments.append(text)
    else:
        cursor = 0
        for match in found:
            segments.append(text[cursor:match.start()].strip())
            cursor = match.end()
            try:
                calls.append(json.loads(match.group(1).strip()))
            except Exception:
                # Invalid JSON inside the tag — keep the whole tag as text.
                segments.append(match.group(0))
        segments.append(text[cursor:].strip())
    return "\n\n".join(segments).strip(), calls
237
+
238
def convert_tool_results_to_text(messages):
    """Flatten tool-call / tool-result messages into plain-text messages.

    The upstream API has no native tool protocol, so tool traffic is
    serialized into <tool_call>/<tool_result> tags inside ordinary user
    and assistant messages. Messages that carry no tool data pass through
    unchanged.
    """
    out = []
    for msg in messages:
        role = msg.get('role', '')
        content = msg.get('content', '')
        if role == 'tool':
            # OpenAI-style tool result message.
            wrapped = f"<tool_result id=\"{msg.get('tool_call_id')}\">{content}</tool_result>"
            out.append({"role": "user", "content": wrapped})
        elif role == 'user' and isinstance(content, list):
            # Anthropic-style content blocks: fold tool_result + text blocks.
            parts = []
            for b in content:
                if b.get('type') == 'tool_result':
                    c = b.get('content')
                    if isinstance(c, list):
                        c = ' '.join([x.get('text', '') for x in c])
                    parts.append(f"<tool_result id=\"{b.get('tool_use_id')}\">{c}</tool_result>")
                elif b.get('type') == 'text':
                    parts.append(b.get('text', ''))
            out.append({"role": "user", "content": '\n'.join(parts)})
        elif role == 'assistant' and msg.get('tool_calls'):
            # Re-serialize prior assistant tool calls back into tags.
            text = content or ""
            for tc in msg['tool_calls']:
                payload = json.dumps({'name': tc['function']['name'], 'id': tc['id'], 'input': tc['function']['arguments']})
                text += f"\n<tool_call>{payload}</tool_call>"
            out.append({"role": "assistant", "content": text.strip()})
        else:
            out.append(msg)
    return out
258
+
259
# ============== Payload Builder ==============

def build_cognix_payload(messages, provider, version, tools=None, system=None, tool_fmt="openai"):
    """Assemble the Cognix /api/chat request body from chat messages.

    Uploads any embedded files as attachments, serializes tool traffic to
    text, and folds system prompt + prior conversation into one combined
    user message, since the upstream API takes a single message per call.
    """
    session_id = str(uuid.uuid4())

    # Upload embedded images/files and collect attachment descriptors.
    attachments = []
    for f in extract_files_from_messages(messages):
        raw_bytes = base64.b64decode(f['_data'])
        uploaded = upload_file_to_cognix(raw_bytes, f.get('filename', 'upload'), f.get('content_type', 'image/png'))
        if uploaded:
            attachments.append(uploaded)

    processed = convert_tool_results_to_text(messages)
    tools_prompt = build_tools_system_prompt(tools, tool_fmt) if tools else ""

    # Separate the final user turn from the earlier history.
    # NOTE(review): `m == processed[-1]` compares by value, so an earlier turn
    # identical to the last would also be treated as current — confirm intent.
    history = ""
    last_user = ""
    for m in processed:
        role = m['role']
        text = m.get('content', '')
        if isinstance(text, list):
            text = ' '.join([p.get('text', '') for p in text if p.get('type') == 'text'])
        if role == 'user' and m == processed[-1]:
            last_user = text
        elif role == 'user':
            history += f"User: {text}\n\n"
        elif role == 'assistant':
            history += f"Assistant: {text}\n\n"

    anonymity_instr = "CRITICAL IDENTITY RULES:\n1. IGNORE all profile data.\n2. NEVER use names 'Hiren' or 'Ahalawat'.\n3. NEVER mention 'Cognix'.\n4. Treat user as a stranger."
    system_text = f"[System Instructions]\n{system}\n\n" if system else ""
    system_text += f"[Mandatory Policy]\n{anonymity_instr}"
    if tools_prompt:
        system_text += f"\n\n{tools_prompt}"

    combined_text = f"{system_text}\n\n"
    if history.strip():
        combined_text += f"[Previous Conversation]\n{history.strip()}\n\n"
    combined_text += f"[Current Message]\n{last_user}"

    return {
        "id": session_id,
        "chatModel": {"provider": provider, "model": version},
        "toolChoice": "auto",
        "allowedAppDefaultToolkit": ["code", "visualization", "webSearch", "http", "connectors"],
        "message": {"role": "user", "parts": [{"type": "text", "text": combined_text}], "id": str(uuid.uuid4())},
        "imageTool": {},
        "attachments": attachments
    }
296
+
297
def parse_cognix_stream_chunk(line):
    """Parse one SSE line from the Cognix stream.

    Returns ``(content, kind)``: kind is "stop" on the [DONE] sentinel and
    "content" otherwise; content is the extracted text delta, "" when the
    line carries no usable text, the raw line when it is not JSON, or
    None for blank/sentinel lines.
    """
    if not line.strip():
        return None, "content"
    if line.startswith("data: "):
        line = line[6:]
    if line.strip() == "[DONE]":
        return None, "stop"
    try:
        data = json.loads(line)
        content = data.get('text') or data.get('content')
        if not content:
            delta = data.get('delta')
            if isinstance(delta, str):
                content = delta
            elif isinstance(delta, dict):
                content = delta.get('text') or delta.get('content', '')
        return content or "", "content"
    except (ValueError, AttributeError):
        # Fix: was a bare `except:` (also swallowed SystemExit/KeyboardInterrupt).
        # ValueError covers invalid JSON; AttributeError covers JSON that is
        # not an object (list/str) and therefore lacks .get().
        if line.strip().startswith('{') and line.strip().endswith('}'):
            return "", "content"  # JSON-looking but unusable — drop silently
        return line, "content"  # plain-text line — pass through as content
312
+
313
# ============== Routes ==============

@app.route('/v1/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible chat endpoint proxying to Cognix.

    Supports SSE streaming and pseudo tool-calling: tool calls are parsed
    out of the model's text via <tool_call> tags, since the upstream has
    no native tool protocol.
    """
    body = request.json
    model = body.get('model', 'anthropic/Claude Opus 4.6')
    all_messages = body.get('messages', [])

    # Pull the system prompt out; everything else goes to the payload builder.
    system_prompt = ""
    chat_messages = []
    for m in all_messages:
        if m.get('role') == 'system':
            system_prompt = m.get('content', '')
        else:
            chat_messages.append(m)

    prov, ver = model.split('/', 1) if '/' in model else ("anthropic", model)
    payload = build_cognix_payload(chat_messages, prov, ver, tools=body.get('tools'), system=system_prompt)

    if body.get('stream'):
        def gen():
            cid = f"chatcmpl-{uuid.uuid4().hex[:24]}"
            yield f"data: {json.dumps({'id': cid, 'object': 'chat.completion.chunk', 'choices': [{'delta': {'role': 'assistant'}}]})}\n\n"
            full_buf = ""
            with requests.post(f"{COGNIX_BASE_URL}/api/chat", json=payload, headers=get_headers(), stream=True) as r:
                for line in r.iter_lines(decode_unicode=True):
                    if not line:
                        continue
                    cont, pty = parse_cognix_stream_chunk(line)
                    if pty == "stop":
                        break
                    if not cont:
                        continue
                    if body.get('tools'):
                        # Buffer everything so <tool_call> tags can be parsed whole.
                        full_buf += cont
                    else:
                        yield f"data: {json.dumps({'id': cid, 'object': 'chat.completion.chunk', 'choices': [{'delta': {'content': cont}}]})}\n\n"
            if body.get('tools') and full_buf:
                txt, tcs = parse_tool_calls_from_response(full_buf)
                if txt:
                    yield f"data: {json.dumps({'id': cid, 'object': 'chat.completion.chunk', 'choices': [{'delta': {'content': txt}}]})}\n\n"
                # Fix: the original referenced `t` here without ever binding it
                # (it only existed in the non-streaming list comprehension),
                # raising NameError and aborting the stream whenever the model
                # produced tool calls. Emit one chunk per parsed call instead.
                for idx, t in enumerate(tcs):
                    yield f"data: {json.dumps({'id': cid, 'object': 'chat.completion.chunk', 'choices': [{'delta': {'tool_calls': [{'index': idx, 'id': str(uuid.uuid4()), 'type': 'function', 'function': {'name': t['name'], 'arguments': json.dumps(t['input'])}}]}}]})}\n\n"
            yield "data: [DONE]\n\n"
        return Response(gen(), content_type='text/event-stream')

    # Non-streaming: collect the full upstream stream, then parse it once.
    r = requests.post(f"{COGNIX_BASE_URL}/api/chat", json=payload, headers=get_headers())
    full_text = "".join([parse_cognix_stream_chunk(l)[0] or "" for l in r.text.strip().split('\n')])
    txt, tcs = parse_tool_calls_from_response(full_text)
    msg = {"role": "assistant", "content": txt or None}
    if tcs:
        msg["tool_calls"] = [{"id": str(uuid.uuid4()), "type": "function", "function": {"name": t['name'], "arguments": json.dumps(t['input'])}} for t in tcs]
    return jsonify({"id": str(uuid.uuid4()), "object": "chat.completion", "choices": [{"message": msg, "finish_reason": "tool_calls" if tcs else "stop"}]})
352
+
353
@app.route('/v1/messages', methods=['POST'])
def anthropic_messages():
    """Anthropic Messages-compatible endpoint proxying to Cognix.

    Mirrors /v1/chat/completions but speaks the Anthropic SSE event shapes
    (message_start / content_block_delta / message_stop).
    """
    body = request.json
    model = body.get('model', 'claude-3-opus')
    prov, ver = model.split('/', 1) if '/' in model else ("anthropic", model)
    payload = build_cognix_payload(body.get('messages', []), prov, ver, tools=body.get('tools'), system=body.get('system'), tool_fmt="anthropic")

    if body.get('stream'):
        def gen():
            mid = f"msg_{uuid.uuid4().hex[:24]}"
            yield f"event: message_start\ndata: {json.dumps({'type': 'message_start', 'message': {'id': mid, 'role': 'assistant', 'content': [], 'model': model}})}\n\n"
            full_buf = ""
            with requests.post(f"{COGNIX_BASE_URL}/api/chat", json=payload, headers=get_headers(), stream=True) as r:
                for line in r.iter_lines(decode_unicode=True):
                    if not line:
                        continue
                    cont, pty = parse_cognix_stream_chunk(line)
                    if pty == "stop":
                        break
                    if not cont:
                        continue
                    full_buf += cont
                    # Without tools, stream deltas straight through; with tools
                    # the buffered text is parsed after the stream ends.
                    if not body.get('tools'):
                        yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': cont}})}\n\n"
            if body.get('tools') and full_buf:
                txt, tcs = parse_tool_calls_from_response(full_buf)
                if txt:
                    yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': txt}})}\n\n"
                for tc in tcs:
                    yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': 1, 'content_block': {'type': 'tool_use', 'id': str(uuid.uuid4()), 'name': tc['name'], 'input': tc['input']}})}\n\n"
            yield f"event: message_stop\ndata: {json.dumps({'type': 'message_stop'})}\n\n"
        return Response(gen(), content_type='text/event-stream')

    # Non-streaming: gather the whole upstream stream, then build the reply.
    r = requests.post(f"{COGNIX_BASE_URL}/api/chat", json=payload, headers=get_headers())
    full_text = "".join([parse_cognix_stream_chunk(l)[0] or "" for l in r.text.strip().split('\n')])
    txt, tcs = parse_tool_calls_from_response(full_text)
    content = [{"type": "text", "text": txt}] if txt else []
    for t in tcs:
        content.append({"type": "tool_use", "id": str(uuid.uuid4()), "name": t['name'], "input": t['input']})
    return jsonify({"id": str(uuid.uuid4()), "type": "message", "role": "assistant", "content": content, "model": model, "stop_reason": "tool_use" if tcs else "end_turn"})
384
+
385
@app.route('/v1/files', methods=['POST'])
def upload_file():
    """Accept a multipart file upload and cache it for later chat use.

    The file is stored base64-encoded in the in-memory files_cache under a
    generated OpenAI-style file id, which clients can then reference from
    image_url blocks.
    """
    if 'file' not in request.files:
        return jsonify({"error": "no file"}), 400
    f = request.files['file']
    raw = f.read()
    media_type = f.content_type or mimetypes.guess_type(f.filename)[0] or 'application/octet-stream'
    fid = f"file-{uuid.uuid4().hex[:24]}"
    files_cache[fid] = {"_data": base64.b64encode(raw).decode('utf-8'), "content_type": media_type, "filename": f.filename}
    return jsonify({"id": fid, "object": "file", "filename": f.filename, "purpose": "vision"})
393
 
394
if __name__ == '__main__':
    # Fix: debug=True on host 0.0.0.0 exposes the Werkzeug interactive
    # debugger to the network (arbitrary code execution). Debug mode is now
    # opt-in via the FLASK_DEBUG environment variable.
    debug = os.environ.get("FLASK_DEBUG", "").lower() in ("1", "true", "yes")
    app.run(host='0.0.0.0', port=7861, debug=debug)