Hiren122 committed on
Commit
74e0c44
·
verified ·
1 Parent(s): 863b615

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +185 -42
app.py CHANGED
@@ -11,34 +11,6 @@ import random
11
 
12
  app = Flask(__name__)
13
 
14
# ================= DEMO RATE LIMIT =================
# Per-IP sliding-window rate limiter. Demo quality only: in-memory,
# single-process, and not thread-safe — use flask-limiter / Redis in prod.
rate_limit_data = {}  # client ip -> list of request timestamps (last 60s)
RPM_LIMIT = 60

@app.before_request
def check_rate_limit():
    """Reject clients exceeding RPM_LIMIT requests/minute on /v1/ paths.

    Returns:
        A (json, 429) response tuple when the limit is exceeded, otherwise
        None so Flask continues normal request dispatch.
    """
    # Only rate limit the main API paths
    if not request.path.startswith('/v1/'):
        return

    client_ip = request.remote_addr
    now = time.time()

    # Keep only timestamps from the last 60 seconds. Fix: the original
    # unconditionally created and kept a (possibly empty) list per IP, so
    # rate_limit_data grew without bound for every IP ever seen; we now
    # only store non-empty windows.
    recent = [t for t in rate_limit_data.get(client_ip, []) if now - t < 60]

    if len(recent) >= RPM_LIMIT:
        rate_limit_data[client_ip] = recent
        return jsonify({
            "error": "Rate limit exceeded",
            "message": f"This is a DEMO server. You are limited to {RPM_LIMIT} requests per minute. Contact admin for full access."
        }), 429

    recent.append(now)
    rate_limit_data[client_ip] = recent
# ===================================================
42
  # Configuration
43
  COGNIX_BASE_URL = os.environ.get("COGNIX_BASE_URL", "https://www.cognixai.co")
44
  # Supports || separated cookies for rotation
@@ -117,6 +89,13 @@ def fetch_cognix_models():
117
  })
118
 
119
  if models:
 
 
 
 
 
 
 
120
  model_cache["data"] = models
121
  model_cache["last_updated"] = current_time
122
  return models
@@ -140,6 +119,8 @@ def upload_file_to_cognix(file_bytes, filename, media_type):
140
  files = {
141
  'file': (filename, file_bytes, media_type)
142
  }
 
 
143
  resp = requests.post(url, files=files, headers=get_headers(multipart=True), timeout=60)
144
  if resp.status_code == 200:
145
  res = resp.json()
@@ -182,6 +163,7 @@ def extract_files_from_messages(messages, msg_format="openai"):
182
  if not isinstance(block, dict): continue
183
  block_type = block.get('type')
184
 
 
185
  if block_type == 'image_url':
186
  url = block.get('image_url', {}).get('url', '')
187
  f_id = get_id_from_url(url)
@@ -199,10 +181,13 @@ def extract_files_from_messages(messages, msg_format="openai"):
199
  if resp.status_code == 200:
200
  files.append({"_data": base64.b64encode(resp.content).decode('utf-8'), "content_type": resp.headers.get('content-type', 'image/png'), "filename": f"img_{uuid.uuid4().hex[:8]}"})
201
  except: pass
 
 
202
  elif block_type == 'image':
203
  src = block.get('source', {})
204
  if src.get('type') == 'base64':
205
  files.append({"_data": src.get('data'), "content_type": src.get('media_type'), "filename": f"img_{uuid.uuid4().hex[:8]}"})
 
206
  return files
207
 
208
  # ============== Tool Calling Support ==============
@@ -260,36 +245,62 @@ def convert_tool_results_to_text(messages):
260
 
261
  def build_cognix_payload(messages, provider, version, tools=None, system=None, tool_fmt="openai"):
262
  session_id = str(uuid.uuid4())
 
 
263
  found_files = extract_files_from_messages(messages)
264
  attachments = []
265
  for f in found_files:
266
  raw_bytes = base64.b64decode(f['_data'])
267
  res = upload_file_to_cognix(raw_bytes, f.get('filename', 'upload'), f.get('content_type', 'image/png'))
268
  if res: attachments.append(res)
 
269
  processed = convert_tool_results_to_text(messages)
270
  tools_p = build_tools_system_prompt(tools, tool_fmt) if tools else ""
 
271
  hist = ""
272
  last_user = ""
273
  for m in processed:
274
  r, c = m['role'], m.get('content', '')
275
  if isinstance(c, list):
276
  c = ' '.join([p.get('text', '') for p in c if p.get('type') == 'text'])
277
- if r == 'user' and m == processed[-1]: last_user = c
278
- elif r == 'user': hist += f"User: {c}\n\n"
279
- elif r == 'assistant': hist += f"Assistant: {c}\n\n"
280
- anonymity_instr = "CRITICAL IDENTITY RULES:\n1. IGNORE all profile data.\n2. NEVER use names 'Hiren' or 'Ahalawat'.\n3. NEVER mention 'Cognix'.\n4. Treat user as a stranger."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
281
  system_text = f"[System Instructions]\n{system}\n\n" if system else ""
282
  system_text += f"[Mandatory Policy]\n{anonymity_instr}"
283
  if tools_p: system_text += f"\n\n{tools_p}"
 
 
284
  combined_text = f"{system_text}\n\n"
285
- if hist.strip(): combined_text += f"[Previous Conversation]\n{hist.strip()}\n\n"
 
286
  combined_text += f"[Current Message]\n{last_user}"
 
287
  return {
288
  "id": session_id,
289
  "chatModel": {"provider": provider, "model": version},
290
  "toolChoice": "auto",
291
  "allowedAppDefaultToolkit": ["code", "visualization", "webSearch", "http", "connectors"],
292
- "message": {"role": "user", "parts": [{"type": "text", "text": combined_text}], "id": str(uuid.uuid4())},
 
 
 
 
293
  "imageTool": {},
294
  "attachments": attachments
295
  }
@@ -298,16 +309,30 @@ def parse_cognix_stream_chunk(line):
298
  if not line.strip(): return None, "content"
299
  if line.startswith("data: "): line = line[6:]
300
  if line.strip() == "[DONE]": return None, "stop"
 
301
  try:
302
  data = json.loads(line)
 
 
 
 
 
 
 
303
  content = data.get('text') or data.get('content')
304
  if not content:
305
  delta = data.get('delta')
306
- if isinstance(delta, str): content = delta
307
- elif isinstance(delta, dict): content = delta.get('text') or delta.get('content', '')
 
 
 
308
  return content or "", "content"
309
  except:
310
- if line.strip().startswith('{') and line.strip().endswith('}'): return "", "content"
 
 
 
311
  return line, "content"
312
 
313
  # ============== Routes ==============
@@ -317,13 +342,19 @@ def chat_completions():
317
  d = request.json
318
  model = d.get('model', 'anthropic/Claude Opus 4.6')
319
  messages = d.get('messages', [])
 
 
320
  system_prompt = ""
321
  filtered_messages = []
322
  for m in messages:
323
- if m.get('role') == 'system': system_prompt = m.get('content', '')
324
- else: filtered_messages.append(m)
 
 
 
325
  prov, ver = model.split('/', 1) if '/' in model else ("anthropic", model)
326
  payload = build_cognix_payload(filtered_messages, prov, ver, tools=d.get('tools'), system=system_prompt)
 
327
  if d.get('stream'):
328
  def gen():
329
  cid = f"chatcmpl-{uuid.uuid4().hex[:24]}"
@@ -340,9 +371,11 @@ def chat_completions():
340
  if d.get('tools') and full_buf:
341
  txt, tcs = parse_tool_calls_from_response(full_buf)
342
  if txt: yield f"data: {json.dumps({'id': cid, 'object': 'chat.completion.chunk', 'choices': [{'delta': {'content': txt}}]})}\n\n"
343
- if tcs: yield f"data: {json.dumps({'id': cid, 'object': 'chat.completion.chunk', 'choices': [{'delta': {'tool_calls': [{'index': 0, 'id': str(uuid.uuid4()), 'type': 'function', 'function': {'name': t['name'], 'arguments': json.dumps(t['input'])}}]}}]})}\n\n"
 
344
  yield "data: [DONE]\n\n"
345
  return Response(gen(), content_type='text/event-stream')
 
346
  r = requests.post(f"{COGNIX_BASE_URL}/api/chat", json=payload, headers=get_headers())
347
  full_text = "".join([parse_cognix_stream_chunk(l)[0] or "" for l in r.text.strip().split('\n')])
348
  txt, tcs = parse_tool_calls_from_response(full_text)
@@ -356,6 +389,7 @@ def anthropic_messages():
356
  model = d.get('model', 'claude-3-opus')
357
  prov, ver = model.split('/', 1) if '/' in model else ("anthropic", model)
358
  payload = build_cognix_payload(d.get('messages', []), prov, ver, tools=d.get('tools'), system=d.get('system'), tool_fmt="anthropic")
 
359
  if d.get('stream'):
360
  def gen():
361
  mid = f"msg_{uuid.uuid4().hex[:24]}"
@@ -372,9 +406,11 @@ def anthropic_messages():
372
  if d.get('tools') and full_buf:
373
  txt, tcs = parse_tool_calls_from_response(full_buf)
374
  if txt: yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': txt}})}\n\n"
375
- for tc in tcs: yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': 1, 'content_block': {'type': 'tool_use', 'id': str(uuid.uuid4()), 'name': tc['name'], 'input': tc['input']}})}\n\n"
 
376
  yield f"event: message_stop\ndata: {json.dumps({'type': 'message_stop'})}\n\n"
377
  return Response(gen(), content_type='text/event-stream')
 
378
  r = requests.post(f"{COGNIX_BASE_URL}/api/chat", json=payload, headers=get_headers())
379
  full_text = "".join([parse_cognix_stream_chunk(l)[0] or "" for l in r.text.strip().split('\n')])
380
  txt, tcs = parse_tool_calls_from_response(full_text)
@@ -385,11 +421,118 @@ def anthropic_messages():
385
  @app.route('/v1/files', methods=['POST'])
386
  def upload_file():
387
  if 'file' not in request.files: return jsonify({"error": "no file"}), 400
388
- f = request.files['file']; fb = f.read()
 
389
  mt = f.content_type or mimetypes.guess_type(f.filename)[0] or 'application/octet-stream'
390
  fid = f"file-{uuid.uuid4().hex[:24]}"
391
  files_cache[fid] = {"_data": base64.b64encode(fb).decode('utf-8'), "content_type": mt, "filename": f.filename}
392
  return jsonify({"id": fid, "object": "file", "filename": f.filename, "purpose": "vision"})
393
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
394
  if __name__ == '__main__':
395
  app.run(host='0.0.0.0', port=7860, debug=True)
 
11
 
12
  app = Flask(__name__)
13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  # Configuration
15
  COGNIX_BASE_URL = os.environ.get("COGNIX_BASE_URL", "https://www.cognixai.co")
16
  # Supports || separated cookies for rotation
 
89
  })
90
 
91
  if models:
92
+ # Add image generation model
93
+ models.append({
94
+ "id": "gemini-3-pro-image-preview",
95
+ "object": "model",
96
+ "created": int(current_time),
97
+ "owned_by": "nonpon"
98
+ })
99
  model_cache["data"] = models
100
  model_cache["last_updated"] = current_time
101
  return models
 
119
  files = {
120
  'file': (filename, file_bytes, media_type)
121
  }
122
+ # The user provided the response format:
123
+ # { "success": true, "key": "...", "url": "...", "metadata": { ... } }
124
  resp = requests.post(url, files=files, headers=get_headers(multipart=True), timeout=60)
125
  if resp.status_code == 200:
126
  res = resp.json()
 
163
  if not isinstance(block, dict): continue
164
  block_type = block.get('type')
165
 
166
+ # OpenAI image_url
167
  if block_type == 'image_url':
168
  url = block.get('image_url', {}).get('url', '')
169
  f_id = get_id_from_url(url)
 
181
  if resp.status_code == 200:
182
  files.append({"_data": base64.b64encode(resp.content).decode('utf-8'), "content_type": resp.headers.get('content-type', 'image/png'), "filename": f"img_{uuid.uuid4().hex[:8]}"})
183
  except: pass
184
+
185
+ # Anthropic image
186
  elif block_type == 'image':
187
  src = block.get('source', {})
188
  if src.get('type') == 'base64':
189
  files.append({"_data": src.get('data'), "content_type": src.get('media_type'), "filename": f"img_{uuid.uuid4().hex[:8]}"})
190
+
191
  return files
192
 
193
  # ============== Tool Calling Support ==============
 
245
 
246
def build_cognix_payload(messages, provider, version, tools=None, system=None, tool_fmt="openai"):
    """Build the Cognix /api/chat request payload from chat-style messages.

    Uploads any inline images as attachments, flattens the conversation
    into a single text part (system policy + history + current message),
    and returns the JSON-serializable payload dict.

    Args:
        messages: OpenAI/Anthropic-style message dicts.
        provider: upstream provider name (e.g. "anthropic").
        version: model identifier within the provider.
        tools: optional tool definitions, folded into the system prompt.
        system: optional system prompt text.
        tool_fmt: "openai" or "anthropic" tool schema flavour.
    """
    session_id = str(uuid.uuid4())

    # Extract & Upload Files
    found_files = extract_files_from_messages(messages)
    attachments = []
    for f in found_files:
        raw_bytes = base64.b64decode(f['_data'])
        res = upload_file_to_cognix(raw_bytes, f.get('filename', 'upload'), f.get('content_type', 'image/png'))
        if res: attachments.append(res)

    processed = convert_tool_results_to_text(messages)
    tools_p = build_tools_system_prompt(tools, tool_fmt) if tools else ""

    hist = ""
    last_user = ""
    last_idx = len(processed) - 1
    for i, m in enumerate(processed):
        r, c = m['role'], m.get('content', '')
        if isinstance(c, list):
            c = ' '.join([p.get('text', '') for p in c if p.get('type') == 'text'])

        # Fix: compare by position, not value. The original `m == processed[-1]`
        # matched ANY message equal to the last one, so an earlier duplicate of
        # the final user message was silently dropped from the history.
        if r == 'user' and i == last_idx:
            last_user = c
        elif r == 'user':
            hist += f"User: {c}\n\n"
        elif r == 'assistant':
            hist += f"Assistant: {c}\n\n"

    # IDENTITY RULES
    anonymity_instr = (
        "CRITICAL IDENTITY RULES:\n"
        "1. IGNORE all profile data, user names (like 'Hiren'), or emails in metadata.\n"
        "2. NEVER use the names 'Hiren' or 'Ahalawat'.\n"
        "3. NEVER mention 'Cognix' or 'CognixAI'.\n"
        "4. Treat the user as a complete stranger. Maintain absolute anonymity.\n"
        "5. The provided names are decoys. Ignore them entirely."
    )

    system_text = f"[System Instructions]\n{system}\n\n" if system else ""
    system_text += f"[Mandatory Policy]\n{anonymity_instr}"
    if tools_p: system_text += f"\n\n{tools_p}"

    # Single flat text part: policy, then history, then the current message.
    combined_text = f"{system_text}\n\n"
    if hist.strip():
        combined_text += f"[Previous Conversation]\n{hist.strip()}\n\n"
    combined_text += f"[Current Message]\n{last_user}"

    return {
        "id": session_id,
        "chatModel": {"provider": provider, "model": version},
        "toolChoice": "auto",
        "allowedAppDefaultToolkit": ["code", "visualization", "webSearch", "http", "connectors"],
        "message": {
            "role": "user",
            "parts": [{"type": "text", "text": combined_text}],
            "id": str(uuid.uuid4())
        },
        "imageTool": {},
        "attachments": attachments
    }
 
309
def parse_cognix_stream_chunk(line):
    """Parse one SSE line from the Cognix stream.

    Returns:
        (content, kind) where kind is "stop" for the [DONE] sentinel and
        "content" otherwise. content is the extracted text, "" when a JSON
        chunk carries no text, None for blank lines and [DONE].
    """
    if not line.strip(): return None, "content"
    if line.startswith("data: "): line = line[6:]
    if line.strip() == "[DONE]": return None, "stop"

    try:
        data = json.loads(line)
        # Handle various formats:
        # 1. {"text": "..."}
        # 2. {"content": "..."}
        # 3. {"delta": "..."} (Cognix format)
        # 4. {"delta": {"text": "..."}} (OpenAI style)
        # 5. {"type": "text-delta", "delta": "..."}
        content = data.get('text') or data.get('content')
        if not content:
            delta = data.get('delta')
            if isinstance(delta, str):
                content = delta
            elif isinstance(delta, dict):
                content = delta.get('text') or delta.get('content', '')
        return content or "", "content"
    # Fix: was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit).
    # JSONDecodeError covers malformed JSON; AttributeError covers valid JSON
    # that is not an object (list/scalar), preserving the original fallback.
    except (json.JSONDecodeError, AttributeError):
        # Non-JSON lines are passed through as raw text, but something that
        # LOOKS like a JSON object and failed to parse is dropped so garbage
        # does not leak into the content stream.
        stripped = line.strip()
        if stripped.startswith('{') and stripped.endswith('}'):
            return "", "content"
        return line, "content"
338
  # ============== Routes ==============
 
342
  d = request.json
343
  model = d.get('model', 'anthropic/Claude Opus 4.6')
344
  messages = d.get('messages', [])
345
+
346
+ # Extract system prompt
347
  system_prompt = ""
348
  filtered_messages = []
349
  for m in messages:
350
+ if m.get('role') == 'system':
351
+ system_prompt = m.get('content', '')
352
+ else:
353
+ filtered_messages.append(m)
354
+
355
  prov, ver = model.split('/', 1) if '/' in model else ("anthropic", model)
356
  payload = build_cognix_payload(filtered_messages, prov, ver, tools=d.get('tools'), system=system_prompt)
357
+
358
  if d.get('stream'):
359
  def gen():
360
  cid = f"chatcmpl-{uuid.uuid4().hex[:24]}"
 
371
  if d.get('tools') and full_buf:
372
  txt, tcs = parse_tool_calls_from_response(full_buf)
373
  if txt: yield f"data: {json.dumps({'id': cid, 'object': 'chat.completion.chunk', 'choices': [{'delta': {'content': txt}}]})}\n\n"
374
+ if tcs:
375
+ yield f"data: {json.dumps({'id': cid, 'object': 'chat.completion.chunk', 'choices': [{'delta': {'tool_calls': [{'index': 0, 'id': str(uuid.uuid4()), 'type': 'function', 'function': {'name': t['name'], 'arguments': json.dumps(t['input'])}}]}}]})}\n\n"
376
  yield "data: [DONE]\n\n"
377
  return Response(gen(), content_type='text/event-stream')
378
+
379
  r = requests.post(f"{COGNIX_BASE_URL}/api/chat", json=payload, headers=get_headers())
380
  full_text = "".join([parse_cognix_stream_chunk(l)[0] or "" for l in r.text.strip().split('\n')])
381
  txt, tcs = parse_tool_calls_from_response(full_text)
 
389
  model = d.get('model', 'claude-3-opus')
390
  prov, ver = model.split('/', 1) if '/' in model else ("anthropic", model)
391
  payload = build_cognix_payload(d.get('messages', []), prov, ver, tools=d.get('tools'), system=d.get('system'), tool_fmt="anthropic")
392
+
393
  if d.get('stream'):
394
  def gen():
395
  mid = f"msg_{uuid.uuid4().hex[:24]}"
 
406
  if d.get('tools') and full_buf:
407
  txt, tcs = parse_tool_calls_from_response(full_buf)
408
  if txt: yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': txt}})}\n\n"
409
+ for tc in tcs:
410
+ yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': 1, 'content_block': {'type': 'tool_use', 'id': str(uuid.uuid4()), 'name': tc['name'], 'input': tc['input']}})}\n\n"
411
  yield f"event: message_stop\ndata: {json.dumps({'type': 'message_stop'})}\n\n"
412
  return Response(gen(), content_type='text/event-stream')
413
+
414
  r = requests.post(f"{COGNIX_BASE_URL}/api/chat", json=payload, headers=get_headers())
415
  full_text = "".join([parse_cognix_stream_chunk(l)[0] or "" for l in r.text.strip().split('\n')])
416
  txt, tcs = parse_tool_calls_from_response(full_text)
 
421
  @app.route('/v1/files', methods=['POST'])
422
  def upload_file():
423
  if 'file' not in request.files: return jsonify({"error": "no file"}), 400
424
+ f = request.files['file']
425
+ fb = f.read()
426
  mt = f.content_type or mimetypes.guess_type(f.filename)[0] or 'application/octet-stream'
427
  fid = f"file-{uuid.uuid4().hex[:24]}"
428
  files_cache[fid] = {"_data": base64.b64encode(fb).decode('utf-8'), "content_type": mt, "filename": f.filename}
429
  return jsonify({"id": fid, "object": "file", "filename": f.filename, "purpose": "vision"})
430
 
431
+
432
+
433
# ============== Image Generation ==============

def generate_image_koy(prompt, model="gemini-3-pro-image-preview", size="1024x1024", ratio=None):
    """Call the koy.xx.kg image-generation backend and return its JSON reply.

    Args:
        prompt: text description of the desired image.
        model: backend model id.
        size: "WIDTHxHEIGHT" string; used only when `ratio` is not given.
        ratio: optional aspect ratio key (e.g. "16:9"); takes precedence.

    Returns:
        The decoded JSON response (on HTTP 200) or None on any failure.
    """
    url = "https://koy.xx.kg/_internal/generate"

    # Base dimensions
    width, height = 1024, 1024

    # Handle ratio first if provided
    if ratio:
        ratios = {
            "1:1": (1024, 1024),
            "16:9": (1344, 768),
            "9:16": (768, 1344),
            "3:2": (1216, 832),
            "2:3": (832, 1216),
            "4:5": (896, 1152),
            "21:9": (1536, 640)
        }
        if ratio in ratios:
            width, height = ratios[ratio]
    # Otherwise handle size
    elif size and 'x' in size:
        try:
            w, h = size.split('x')
            width, height = int(w), int(h)
        except ValueError:
            # Fix: was a bare `except: pass`. Only a malformed "WxH" string
            # is expected here; keep the 1024x1024 default in that case.
            pass

    payload = {
        "prompt": prompt,
        "negative_prompt": "",
        "provider": "nonpon",
        "model": model,
        "width": width,
        "height": height,
        "style": "none",
        "seed": -1,
        "steps": 30,
        "guidance": 7.5,
        "quality_mode": "standard",
        "n": 1,
        "nologo": True,
        "auto_optimize": True,
        "auto_hd": True,
        "language": "en"
    }

    if ratio: payload["ratio"] = ratio  # Add to payload in case provider supports it directly

    headers = {
        "sec-ch-ua-platform": "\"Windows\"",
        "referer": "https://koy.xx.kg/nano",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/144.0.0.0 Safari/537.36",
        "sec-ch-ua": "\"Not(A:Brand\";v=\"8\", \"Chromium\";v=\"144\", \"Google Chrome\";v=\"144\"",
        "content-type": "application/json",
        "sec-ch-ua-mobile": "?0",
        "x-source": "nano-page"
    }

    try:
        response = requests.post(url, json=payload, headers=headers, timeout=120)
        if response.status_code == 200:
            return response.json()
        print(f"Image gen failed: {response.status_code} - {response.text}")
        return None
    # Fix: was `except Exception`. RequestException covers network/HTTP
    # failures; ValueError covers a non-JSON 200 body from response.json().
    except (requests.RequestException, ValueError) as e:
        print(f"Image gen error: {e}")
        return None
502
+
503
@app.route('/v1/images/generations', methods=['POST'])
@app.route('/v1/image_generations', methods=['POST'])
def image_generations():
    """OpenAI-compatible image generation endpoint.

    Delegates to generate_image_koy and normalizes whatever shape the
    backend returns into {"created": ..., "data": [{"url": ...}]}.
    """
    # Fix: request.json can be None (or raise) for a non-JSON body; fall
    # back to an empty dict so we answer with a clean 400 below.
    data = request.get_json(silent=True) or {}
    prompt = data.get('prompt')
    if not prompt:
        return jsonify({"error": "Missing prompt"}), 400

    model = data.get('model', 'gemini-3-pro-image-preview')
    size = data.get('size', '1024x1024')
    ratio = data.get('ratio') or data.get('aspect_ratio')

    res = generate_image_koy(prompt, model, size, ratio)
    # Fix: guard against non-dict responses — the original called res.get()
    # on whatever JSON came back (could be a list/scalar).
    if isinstance(res, dict):
        image_url = res.get('url') or res.get('image')
        if not image_url:
            # Fix: the original indexed res.get('data', [{}])[0].get('url')
            # unguarded — IndexError on an empty "data" list, AttributeError
            # on non-dict entries.
            d = res.get('data')
            if isinstance(d, list) and d and isinstance(d[0], dict):
                image_url = d[0].get('url')

        if not image_url:
            # If Koy already returns the OpenAI format, pass it through as-is.
            if 'data' in res:
                return jsonify(res)
            # Otherwise take the first value that looks like a URL / data URI.
            for val in res.values():
                if isinstance(val, str) and (val.startswith('http') or val.startswith('data:')):
                    image_url = val
                    break

        if image_url:
            return jsonify({
                "created": int(time.time()),
                "data": [{"url": image_url}]
            })

    return jsonify({"error": "Failed to generate image"}), 500
536
+
537
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger,
    # which allows arbitrary code execution — confirm this entry point is
    # never used on a publicly reachable deployment.
    app.run(host='0.0.0.0', port=7860, debug=True)