natabrizy committed on
Commit
f3af1db
Β·
verified Β·
1 Parent(s): b424377

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +524 -206
app.py CHANGED
@@ -4,7 +4,7 @@ import json
4
  import os
5
  import re
6
  import tempfile
7
- from typing import Tuple, Optional
8
 
9
  import gradio as gr
10
  import httpx
@@ -16,31 +16,84 @@ from lzstring import LZString
16
  # =========================
17
  NEBIUS_BASE_URL = "https://api.studio.nebius.com/v1/"
18
 
19
- # Add more selectable models (you can also type your own in the dropdowns)
20
  DEFAULT_VISION_MODEL = "Qwen/Qwen2.5-VL-72B-Instruct"
21
  VISION_MODELS = [
22
  DEFAULT_VISION_MODEL,
23
  "Qwen/Qwen2.5-VL-7B-Instruct",
24
  "Qwen/Qwen2-VL-72B-Instruct",
 
25
  ]
26
 
27
- DEFAULT_CODE_MODEL = "deepseek-ai/DeepSeek-V3-0324"
 
28
  CODE_MODELS = [
29
  DEFAULT_CODE_MODEL,
30
  "Qwen/Qwen2.5-Coder-32B-Instruct",
31
- "Meta-Llama-3.1-70B-Instruct",
32
- "Mistral-7B-Instruct",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  ]
34
 
35
- # Timeouts and simple retries for stability
36
- HTTP_TIMEOUTS = httpx.Timeout(connect=10.0, read=120.0, write=30.0, pool=60.0)
37
- HTTP_RETRIES = 2
38
-
39
- # Keep the same default key you provided
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  DEFAULT_NEBIUS_API_KEY = (
41
  "eyJhbGciOiJIUzI1NiIsImtpZCI6IlV6SXJWd1h0dnprLVRvdzlLZWstc0M1akptWXBvX1VaVkxUZlpnMDRlOFUiLCJ0eXAiOiJKV1QifQ.eyJzdWIiOiJnb29nbGUtb2F1dGgyfDEwNTA1MTQzMDg2MDMwMzIxNDEwMiIsInNjb3BlIjoib3BlbmlkIG9mZmxpbmVfYWNjZXNzIiwiaXNzIjoiYXBpX2tleV9pc3N1ZXIiLCJhdWQiOlsiaHR0cHM6Ly9uZWJpdXMtaW5mZXJlbmNlLmV1LmF1dGgwLmNvbS9hcGkvdjIvIl0sImV4cCI6MTkwNjU5ODA0NCwidXVpZCI6ImNkOGFiMWZlLTIxN2QtNDJlMy04OWUwLWM1YTg4MjcwMGVhNyIsIm5hbWUiOiJodW5nZ2luZyIsImV4cGlyZXNfYXQiOiIyMDMwLTA2LTAyVDAyOjM0OjA0KzAwMDAifQ.MA52QuIiNruK7_lX688RXAEI2TkcCOjcf_02XrpnhI8"
42
  )
43
 
 
 
 
44
 
45
  # =========================
46
  # Helpers
@@ -55,17 +108,86 @@ def get_api_key(user_key: str = "") -> str:
55
  return (user_key or "").strip() or os.getenv("NEBIUS_API_KEY", "").strip() or DEFAULT_NEBIUS_API_KEY
56
 
57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  def call_chat_completions(
59
  model: str,
60
  messages: list,
61
  api_key: str,
62
  max_tokens: int = 2000,
63
  temperature: float = 0.7,
 
64
  ) -> str:
65
  """
66
  Calls the Nebius OpenAI-compatible chat completions endpoint via HTTP.
67
  Returns the assistant message content string.
68
- Includes retries and increased read timeout to mitigate timeouts.
69
  """
70
  if not api_key:
71
  raise ValueError("Nebius API key is required.")
@@ -79,19 +201,59 @@ def call_chat_completions(
79
  }
80
  headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
81
 
82
- transport = httpx.HTTPTransport(retries=HTTP_RETRIES)
83
- with httpx.Client(timeout=HTTP_TIMEOUTS, transport=transport) as client:
84
- resp = client.post(url, headers=headers, json=payload)
85
- resp.raise_for_status()
86
- data = resp.json()
87
-
88
- choices = data.get("choices", [])
89
- if not choices:
90
- raise RuntimeError("No choices returned from the API.")
91
- content = choices[0].get("message", {}).get("content", "")
92
- if not content:
93
- raise RuntimeError("Empty content returned from the API.")
94
- return content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
 
96
 
97
  def _strip_fenced_code(text: str) -> str:
@@ -187,16 +349,22 @@ def analyze_image(
187
  return "Error: Nebius API key not provided."
188
 
189
  try:
 
 
 
 
 
190
  # Encode image to base64
191
  buffered = io.BytesIO()
192
- image.save(buffered, format="PNG")
193
  img_b64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
194
 
195
  prompt = (
196
  "Analyze this image and provide a concise description. "
197
  "Describe the main elements, colors, layout, and UI components. "
198
  "Identify what type of website or application this resembles. "
199
- "Focus on structural and visual elements that would be important for recreating the design."
 
200
  )
201
 
202
  messages = [
@@ -209,16 +377,21 @@ def analyze_image(
209
  }
210
  ]
211
 
 
212
  content = call_chat_completions(
213
  model=vision_model,
214
  messages=messages,
215
  api_key=api_key,
216
- max_tokens=1000,
217
  temperature=0.7,
 
218
  )
219
  return content
220
  except Exception as e:
221
- return f"Error analyzing image: {str(e)}"
 
 
 
222
 
223
 
224
  def generate_html_code(
@@ -246,13 +419,17 @@ Generate a complete, responsive webpage based on this description:
246
  Requirements:
247
  - Use modern HTML5, CSS3, and vanilla JavaScript only
248
  - Include TailwindCSS via CDN for styling
249
- - Make it responsive and visually appealing
250
- - Use placeholder images from https://unsplash.com/ if needed
251
  - Include proper semantic HTML structure
252
- - Add interactive elements where appropriate
253
- - Ensure the design matches the described layout and style
 
 
 
254
 
255
  Return only the complete HTML code starting with <!DOCTYPE html> and ending with </html>.
 
256
  """.strip()
257
 
258
  try:
@@ -263,36 +440,26 @@ Return only the complete HTML code starting with <!DOCTYPE html> and ending with
263
  api_key=api_key,
264
  max_tokens=code_max_tokens,
265
  temperature=code_temperature,
 
266
  )
267
  html_code = _strip_fenced_code(content)
268
 
 
269
  if "<!DOCTYPE html>" in html_code and "</html>" in html_code:
270
  start = html_code.find("<!DOCTYPE html>")
271
  end = html_code.rfind("</html>") + len("</html>")
272
  return html_code[start:end]
 
 
 
273
  return html_code
274
- except (httpx.ReadTimeout, httpx.TimeoutException):
275
- # Retry once with a reduced token budget to improve latency
276
- try:
277
- reduced_tokens = max(1000, int(code_max_tokens * 0.6))
278
- messages = [{"role": "user", "content": prompt}]
279
- content = call_chat_completions(
280
- model=code_model,
281
- messages=messages,
282
- api_key=api_key,
283
- max_tokens=reduced_tokens,
284
- temperature=code_temperature,
285
- )
286
- html_code = _strip_fenced_code(content)
287
- if "<!DOCTYPE html>" in html_code and "</html>" in html_code:
288
- start = html_code.find("<!DOCTYPE html>")
289
- end = html_code.rfind("</html>") + len("</html>")
290
- return html_code[start:end]
291
- return html_code
292
- except Exception as e2:
293
- return f"Error generating HTML code after retry: {str(e2)}. Tips: lower Max tokens, pick a smaller/faster model, or try again."
294
  except Exception as e:
295
- return f"Error generating HTML code: {str(e)}"
 
 
 
 
 
296
 
297
 
298
  def create_codesandbox(html_code: str) -> str:
@@ -333,7 +500,7 @@ def create_codesandbox(html_code: str) -> str:
333
 
334
  parameters = {"files": files, "template": "static"}
335
 
336
- # Fallback GET URL with compressed parameters (also add file query to open index.html)
337
  json_str = json.dumps(parameters, separators=(",", ":"))
338
  lz = LZString()
339
  compressed = lz.compressToBase64(json_str)
@@ -343,10 +510,10 @@ def create_codesandbox(html_code: str) -> str:
343
  prefill_css = f"{prefill_base}?parameters={compressed}&file=/style.css" if "style.css" in files else ""
344
  prefill_js = f"{prefill_base}?parameters={compressed}&file=/script.js" if "script.js" in files else ""
345
 
346
- # Try POST API to get a sandbox_id so we can link directly to the editor
347
  url = "https://codesandbox.io/api/v1/sandboxes/define"
348
- transport = httpx.HTTPTransport(retries=HTTP_RETRIES)
349
- with httpx.Client(timeout=HTTP_TIMEOUTS, transport=transport) as client:
350
  resp = client.post(url, json=parameters)
351
  if resp.status_code == 200:
352
  data = resp.json()
@@ -355,21 +522,25 @@ def create_codesandbox(html_code: str) -> str:
355
  editor_base = f"https://codesandbox.io/p/sandbox/{sandbox_id}"
356
  preview_base = f"https://codesandbox.io/s/{sandbox_id}"
357
  lines = [
358
- f"- Open index.html in editor: {editor_base}?file=/index.html",
 
359
  ]
360
  if "style.css" in files:
361
- lines.append(f"- Open style.css in editor: {editor_base}?file=/style.css")
362
  if "script.js" in files:
363
- lines.append(f"- Open script.js in editor: {editor_base}?file=/script.js")
364
- lines.append(f"- Live preview: {preview_base}")
365
  return "\n".join(lines)
366
 
367
  # Fallback to prefill URLs if POST fails
368
- lines = [f"- Open index.html in editor: {prefill_index}"]
 
 
 
369
  if prefill_css:
370
- lines.append(f"- Open style.css in editor: {prefill_css}")
371
  if prefill_js:
372
- lines.append(f"- Open script.js in editor: {prefill_js}")
373
  return "\n".join(lines)
374
 
375
  except Exception as e:
@@ -383,14 +554,19 @@ def screenshot_to_code(
383
  code_model: str = DEFAULT_CODE_MODEL,
384
  code_max_tokens: int = 4000,
385
  code_temperature: float = 0.7,
 
386
  ) -> Tuple[str, str]:
387
  """
388
  Complete pipeline: analyze image and generate corresponding HTML code.
389
  Returns (description, html_code).
390
  """
 
391
  description = analyze_image(image, nebius_api_key, vision_model)
 
392
  if description.startswith("Error"):
393
  return description, "Error: Cannot generate code due to image analysis failure."
 
 
394
  html_code = generate_html_code(
395
  description,
396
  nebius_api_key,
@@ -398,6 +574,8 @@ def screenshot_to_code(
398
  code_max_tokens=code_max_tokens,
399
  code_temperature=code_temperature,
400
  )
 
 
401
  return description, html_code
402
 
403
 
@@ -416,142 +594,206 @@ def export_html_to_file(html_code: str) -> Optional[str]:
416
  return None
417
 
418
 
 
 
 
 
 
 
 
 
 
 
 
 
 
419
  # =========================
420
- # Gradio UI (Brizy-inspired palette, no emojis)
421
  # =========================
422
- BRIZY_PRIMARY = "#6C5CE7" # Indigo-like
423
- BRIZY_SECONDARY = "#00C2FF" # Cyan accent
424
- BRIZY_BG = "#F7F9FC" # Light background
425
- BRIZY_SURFACE = "#FFFFFF" # Surface
426
- BRIZY_TEXT = "#1F2937" # Dark text
427
- BRIZY_MUTED = "#6B7280" # Muted text
428
- BRIZY_BORDER = "#E5E7EB" # Soft border
429
  BRIZY_GRADIENT = f"linear-gradient(135deg, {BRIZY_PRIMARY} 0%, {BRIZY_SECONDARY} 100%)"
430
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
431
  with gr.Blocks(
432
  theme=gr.themes.Soft(),
433
  title="AI Website Generator (Nebius)",
434
- css=f"""
435
- :root {{
436
- --app-primary: {BRIZY_PRIMARY};
437
- --app-secondary: {BRIZY_SECONDARY};
438
- --app-bg: {BRIZY_BG};
439
- --app-surface: {BRIZY_SURFACE};
440
- --app-text: {BRIZY_TEXT};
441
- --app-muted: {BRIZY_MUTED};
442
- --app-border: {BRIZY_BORDER};
443
- }}
444
-
445
- body {{
446
- background: var(--app-bg);
447
- color: var(--app-text);
448
- }}
449
-
450
- .section {{
451
- border: 1px solid var(--app-border);
452
- padding: 16px;
453
- border-radius: 12px;
454
- background: var(--app-surface);
455
- box-shadow: 0 1px 2px rgba(0,0,0,0.03);
456
- margin: 10px 0;
457
- }}
458
-
459
- .muted {{
460
- color: var(--app-muted);
461
- font-size: 0.92em;
462
- }}
463
-
464
- .footer {{
465
- text-align: center;
466
- color: var(--app-muted);
467
- padding: 8px 0;
468
- }}
469
-
470
- .title h1 {{
471
- background: {BRIZY_GRADIENT};
472
- -webkit-background-clip: text;
473
- background-clip: text;
474
- color: transparent;
475
- font-weight: 800;
476
- letter-spacing: -0.02em;
477
- }}
478
-
479
- .primary-btn button {{
480
- background: {BRIZY_GRADIENT} !important;
481
- color: #fff !important;
482
- border: none !important;
483
- }}
484
- .primary-btn button:hover {{
485
- filter: brightness(0.98);
486
- }}
487
-
488
- .secondary-btn button {{
489
- background: var(--app-surface) !important;
490
- color: var(--app-text) !important;
491
- border: 1px solid var(--app-border) !important;
492
- }}
493
- .secondary-btn button:hover {{
494
- border-color: {BRIZY_PRIMARY} !important;
495
- color: {BRIZY_PRIMARY} !important;
496
- }}
497
-
498
- /* Inputs focus */
499
- input:focus, textarea:focus, select:focus {{
500
- outline-color: {BRIZY_PRIMARY} !important;
501
- border-color: {BRIZY_PRIMARY} !important;
502
- box-shadow: 0 0 0 3px rgba(108,92,231,0.15) !important;
503
- }}
504
-
505
- /* Code block accents */
506
- .gr-code .cm-editor, .gr-code textarea {{
507
- border-radius: 10px !important;
508
- border: 1px solid var(--app-border) !important;
509
- }}
510
-
511
- /* Tabs accent */
512
- .gradio-container .tabs .tab-nav button[aria-selected="true"] {{
513
- color: {BRIZY_PRIMARY} !important;
514
- border-bottom: 2px solid {BRIZY_PRIMARY} !important;
515
- }}
516
- """,
517
  ) as app:
518
  gr.Markdown(
519
  """
520
- # AI Website Generator (Nebius)
521
- Turn website screenshots into functional HTML using Nebius-compatible models.
522
-
523
- - Image analysis (choose a vision model)
524
- - Code generation (choose a code-capable model)
525
- - One-click CodeSandbox deployment
526
- - Editor links open index.html and style.css directly
527
- - Your key is used at runtime only
 
528
  """,
529
  elem_classes=["title"],
530
  )
531
 
532
- with gr.Accordion("API & Models", open=True):
533
- gr.Markdown("Provide your Nebius API key or use the default configured in this app.", elem_classes=["muted"])
534
- nebius_key = gr.Textbox(
535
- label="Nebius API Key",
536
- type="password",
537
- placeholder="Paste your Nebius API key, or leave as-is to use the default key.",
538
- value=DEFAULT_NEBIUS_API_KEY,
 
 
 
 
539
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
540
  with gr.Row():
541
  vision_model_dd = gr.Dropdown(
542
- label="Vision Model",
543
  choices=VISION_MODELS,
544
  value=DEFAULT_VISION_MODEL,
545
  allow_custom_value=True,
546
- info="You can also type a custom model name supported by your Nebius endpoint.",
547
  )
548
  code_model_dd = gr.Dropdown(
549
  label="Code Generation Model",
550
  choices=CODE_MODELS,
551
  value=DEFAULT_CODE_MODEL,
552
  allow_custom_value=True,
553
- info="You can also type a custom model name supported by your Nebius endpoint.",
554
  )
 
555
  with gr.Row():
556
  code_max_tokens = gr.Slider(
557
  label="Max tokens (code generation)",
@@ -559,7 +801,7 @@ Turn website screenshots into functional HTML using Nebius-compatible models.
559
  maximum=8000,
560
  step=100,
561
  value=4000,
562
- info="Lower this if you see timeouts; higher values may take longer.",
563
  )
564
  code_temperature = gr.Slider(
565
  label="Temperature",
@@ -567,37 +809,65 @@ Turn website screenshots into functional HTML using Nebius-compatible models.
567
  maximum=1.5,
568
  step=0.1,
569
  value=0.7,
570
- info="Higher is more creative; lower is more deterministic.",
571
  )
 
 
 
 
 
 
 
 
 
 
 
572
 
573
- with gr.Tab("Quick Generate"):
574
  with gr.Row():
575
  with gr.Column(scale=1):
576
- gr.Markdown("### Step 1: Upload Screenshot", elem_classes=["section"])
577
  image_input = gr.Image(
578
  type="pil",
579
  label="Website Screenshot",
580
  sources=["upload", "clipboard"],
581
- height=280,
 
 
 
 
 
 
 
 
 
 
582
  )
583
- generate_btn = gr.Button("Generate Website", elem_classes=["primary-btn"])
 
584
 
585
  with gr.Column(scale=2):
586
- gr.Markdown("### Step 2: Review Results", elem_classes=["section"])
587
- description_output = gr.Textbox(
588
- label="Image Analysis",
589
- lines=6,
590
- interactive=False,
591
- )
592
- html_output = gr.Code(
593
- label="Generated HTML (copy or download)",
594
- language="html",
595
- lines=18,
596
- )
 
 
 
 
 
597
 
598
  with gr.Row():
599
- codesandbox_btn = gr.Button("Deploy to CodeSandbox", elem_classes=["secondary-btn"])
600
- download_btn = gr.Button("Download index.html", elem_classes=["secondary-btn"])
 
601
 
602
  codesandbox_links = gr.Markdown(value="")
603
  download_file = gr.File(
@@ -606,21 +876,59 @@ Turn website screenshots into functional HTML using Nebius-compatible models.
606
  visible=False,
607
  )
608
 
609
- with gr.Tab("Individual Tools"):
610
  with gr.Row():
611
  with gr.Column():
612
- gr.Markdown("### Image Analysis Tool", elem_classes=["section"])
613
- img_tool = gr.Image(type="pil", label="Image")
614
  analyze_btn = gr.Button("Analyze Image", elem_classes=["secondary-btn"])
615
- analysis_result = gr.Textbox(label="Analysis Result", lines=6)
616
 
617
  with gr.Column():
618
- gr.Markdown("### Code Generation Tool", elem_classes=["section"])
619
- desc_input = gr.Textbox(label="Description", lines=4, placeholder="Describe the page you want...")
 
 
 
 
620
  code_btn = gr.Button("Generate Code", elem_classes=["secondary-btn"])
621
- code_result = gr.Code(label="Generated Code", language="html")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
622
 
623
- gr.Markdown("Made with Gradio β€’ Nebius API compatible", elem_classes=["footer"])
 
 
 
 
 
 
 
 
624
 
625
  # Event bindings
626
  generate_btn.click(
@@ -630,11 +938,9 @@ Turn website screenshots into functional HTML using Nebius-compatible models.
630
  )
631
 
632
  def _deploy_to_codesandbox(html_code: str) -> str:
633
- url_block = create_codesandbox(html_code)
634
- if url_block.startswith("Error"):
635
- return f"**{url_block}**"
636
- lines = ["### CodeSandbox Links", "", url_block]
637
- return "\n".join(lines)
638
 
639
  codesandbox_btn.click(
640
  fn=_deploy_to_codesandbox,
@@ -651,6 +957,18 @@ Turn website screenshots into functional HTML using Nebius-compatible models.
651
  inputs=[html_output],
652
  outputs=[download_file],
653
  )
 
 
 
 
 
 
 
 
 
 
 
 
654
 
655
  analyze_btn.click(
656
  fn=lambda img, key, vmod: analyze_image(img, key, vmod),
@@ -667,4 +985,4 @@ Turn website screenshots into functional HTML using Nebius-compatible models.
667
  )
668
 
669
  if __name__ == "__main__":
670
- app.launch(share=False)
 
4
  import os
5
  import re
6
  import tempfile
7
+ from typing import Tuple, Optional, Dict, List
8
 
9
  import gradio as gr
10
  import httpx
 
16
  # =========================
17
  NEBIUS_BASE_URL = "https://api.studio.nebius.com/v1/"
18
 
19
+ # Vision models that work well with Nebius
20
  DEFAULT_VISION_MODEL = "Qwen/Qwen2.5-VL-72B-Instruct"
21
  VISION_MODELS = [
22
  DEFAULT_VISION_MODEL,
23
  "Qwen/Qwen2.5-VL-7B-Instruct",
24
  "Qwen/Qwen2-VL-72B-Instruct",
25
+ "Qwen/Qwen2-VL-7B-Instruct",
26
  ]
27
 
28
+ # Code generation models with best performance on Nebius
29
+ DEFAULT_CODE_MODEL = "deepseek-ai/DeepSeek-V3"
30
  CODE_MODELS = [
31
  DEFAULT_CODE_MODEL,
32
  "Qwen/Qwen2.5-Coder-32B-Instruct",
33
+ "Qwen/QwQ-32B-Preview",
34
+ "Qwen/Qwen2.5-72B-Instruct",
35
+ "Qwen/Qwen2.5-7B-Instruct",
36
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
37
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
38
+ "meta-llama/Meta-Llama-3.1-405B-Instruct",
39
+ "meta-llama/Meta-Llama-3.1-70B-Instruct",
40
+ "meta-llama/Meta-Llama-3.1-8B-Instruct",
41
+ "meta-llama/Llama-3.2-90B-Vision-Instruct",
42
+ "meta-llama/Llama-3.2-11B-Vision-Instruct",
43
+ "mistralai/Mixtral-8x22B-Instruct-v0.1",
44
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
45
+ "mistralai/Mistral-7B-Instruct-v0.3",
46
+ "mistralai/Mistral-Nemo-Instruct-2407",
47
+ "mistralai/Pixtral-12B-2409",
48
+ "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
49
  ]
50
 
51
+ # Model capabilities mapping (for better selection)
52
+ MODEL_CAPABILITIES = {
53
+ "vision": [
54
+ "Qwen/Qwen2.5-VL-72B-Instruct",
55
+ "Qwen/Qwen2.5-VL-7B-Instruct",
56
+ "Qwen/Qwen2-VL-72B-Instruct",
57
+ "Qwen/Qwen2-VL-7B-Instruct",
58
+ "meta-llama/Llama-3.2-90B-Vision-Instruct",
59
+ "meta-llama/Llama-3.2-11B-Vision-Instruct",
60
+ "mistralai/Pixtral-12B-2409",
61
+ ],
62
+ "code": [
63
+ "deepseek-ai/DeepSeek-V3",
64
+ "Qwen/Qwen2.5-Coder-32B-Instruct",
65
+ "Qwen/QwQ-32B-Preview",
66
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
67
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
68
+ ],
69
+ "general": [
70
+ "Qwen/Qwen2.5-72B-Instruct",
71
+ "meta-llama/Meta-Llama-3.1-405B-Instruct",
72
+ "meta-llama/Meta-Llama-3.1-70B-Instruct",
73
+ "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
74
+ "mistralai/Mixtral-8x22B-Instruct-v0.1",
75
+ ]
76
+ }
77
+
78
+ # Performance recommendations
79
+ MODEL_RECOMMENDATIONS = {
80
+ "fast": ["Qwen/Qwen2.5-7B-Instruct", "mistralai/Mistral-7B-Instruct-v0.3", "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"],
81
+ "balanced": ["Qwen/Qwen2.5-Coder-32B-Instruct", "meta-llama/Meta-Llama-3.1-70B-Instruct", "Qwen/QwQ-32B-Preview"],
82
+ "quality": ["deepseek-ai/DeepSeek-V3", "meta-llama/Meta-Llama-3.1-405B-Instruct", "Qwen/Qwen2.5-72B-Instruct"],
83
+ }
84
+
85
+ # Timeouts and retries
86
+ HTTP_TIMEOUTS = httpx.Timeout(connect=15.0, read=180.0, write=30.0, pool=60.0)
87
+ HTTP_RETRIES = 3
88
+
89
# SECURITY: a real API key (a long-lived JWT) was previously hard-coded here and
# committed to version control. It must be treated as compromised and rotated on
# the Nebius dashboard. Read the fallback key from the environment instead of
# embedding it in source; get_api_key() already prefers a user-supplied key.
DEFAULT_NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY", "")
93
 
94
+ # Cache for available models
95
+ _available_models_cache = None
96
+ _cache_timestamp = 0
97
 
98
  # =========================
99
  # Helpers
 
108
  return (user_key or "").strip() or os.getenv("NEBIUS_API_KEY", "").strip() or DEFAULT_NEBIUS_API_KEY
109
 
110
 
111
def get_available_models(api_key: str) -> Dict[str, List[str]]:
    """
    Fetch available models from the Nebius API and categorize them.

    Returns a dict with 'vision' and 'code' keys; falls back to the static
    VISION_MODELS / CODE_MODELS lists if the API call fails.
    """
    global _available_models_cache, _cache_timestamp
    import time

    # Serve the cached categorization while it is still fresh (5-minute TTL).
    if _available_models_cache and (time.time() - _cache_timestamp) < 300:
        return _available_models_cache

    try:
        headers = {"Authorization": f"Bearer {api_key}"}
        with httpx.Client(timeout=httpx.Timeout(10.0)) as client:
            resp = client.get(f"{NEBIUS_BASE_URL}models", headers=headers)
            if resp.status_code == 200:
                vision_models: List[str] = []
                code_models: List[str] = []

                for entry in resp.json().get("data", []):
                    model_id = entry.get("id", "")
                    lowered = model_id.lower()
                    # Categorize by known naming patterns. A model may land in
                    # both buckets (e.g. vision-capable instruct models).
                    if any(tag in lowered for tag in ("vision", "vl", "pixtral", "llama-3.2-90b", "llama-3.2-11b")):
                        vision_models.append(model_id)
                    if any(tag in lowered for tag in ("coder", "deepseek", "instruct", "llama", "mistral", "mixtral", "qwen")):
                        code_models.append(model_id)

                _available_models_cache = {
                    "vision": vision_models or VISION_MODELS,
                    "code": code_models or CODE_MODELS,
                }
                _cache_timestamp = time.time()
                return _available_models_cache
    except Exception:
        # Best-effort discovery: any failure falls through to the defaults.
        pass

    # API unreachable or returned a non-200 status — use the static lists.
    return {"vision": VISION_MODELS, "code": CODE_MODELS}
155
+
156
+
157
def validate_model_availability(model: str, api_key: str, model_type: str = "general") -> bool:
    """
    Check if a model is available by making a tiny test request.

    Returns True only when the chat-completions endpoint answers 200 for the
    given model; any error (timeout, auth, unknown model) yields False.
    """
    try:
        # Vision endpoints expect structured content parts; plain-text models
        # accept a bare string.
        if model_type == "vision":
            probe_content = [{"type": "text", "text": "Hi"}]
        else:
            probe_content = "Hi"

        request_body = {
            "model": model,
            "messages": [{"role": "user", "content": probe_content}],
            "max_tokens": 10,
            "temperature": 0.1,
        }
        request_headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }

        # Short timeout on purpose — this is only a reachability probe.
        with httpx.Client(timeout=httpx.Timeout(5.0)) as client:
            response = client.post(
                f"{NEBIUS_BASE_URL}chat/completions",
                headers=request_headers,
                json=request_body,
            )
        return response.status_code == 200
    except Exception:
        return False
177
+
178
+
179
  def call_chat_completions(
180
  model: str,
181
  messages: list,
182
  api_key: str,
183
  max_tokens: int = 2000,
184
  temperature: float = 0.7,
185
+ retry_with_fallback: bool = True,
186
  ) -> str:
187
  """
188
  Calls the Nebius OpenAI-compatible chat completions endpoint via HTTP.
189
  Returns the assistant message content string.
190
+ Includes retries and fallback models for better reliability.
191
  """
192
  if not api_key:
193
  raise ValueError("Nebius API key is required.")
 
201
  }
202
  headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
203
 
204
+ # Try with the requested model first
205
+ last_error = None
206
+ for attempt in range(HTTP_RETRIES):
207
+ try:
208
+ transport = httpx.HTTPTransport(retries=1)
209
+ with httpx.Client(timeout=HTTP_TIMEOUTS, transport=transport) as client:
210
+ resp = client.post(url, headers=headers, json=payload)
211
+
212
+ if resp.status_code == 404 or resp.status_code == 400:
213
+ # Model not found or bad request
214
+ if retry_with_fallback and attempt == 0:
215
+ # Try a fallback model
216
+ fallback_models = {
217
+ "vision": ["Qwen/Qwen2.5-VL-7B-Instruct"],
218
+ "code": ["Qwen/Qwen2.5-7B-Instruct", "mistralai/Mistral-7B-Instruct-v0.3"],
219
+ }
220
+
221
+ # Detect model type and use appropriate fallback
222
+ model_type = "vision" if any(v in model.lower() for v in ["vision", "vl", "pixtral"]) else "code"
223
+ for fallback in fallback_models.get(model_type, []):
224
+ payload["model"] = fallback
225
+ try:
226
+ resp = client.post(url, headers=headers, json=payload)
227
+ if resp.status_code == 200:
228
+ break
229
+ except Exception:
230
+ continue
231
+
232
+ resp.raise_for_status()
233
+ data = resp.json()
234
+
235
+ choices = data.get("choices", [])
236
+ if not choices:
237
+ raise RuntimeError("No choices returned from the API.")
238
+ content = choices[0].get("message", {}).get("content", "")
239
+ if not content:
240
+ raise RuntimeError("Empty content returned from the API.")
241
+ return content
242
+
243
+ except (httpx.ReadTimeout, httpx.TimeoutException) as e:
244
+ last_error = e
245
+ # Reduce token count for retry
246
+ payload["max_tokens"] = max(500, int(payload["max_tokens"] * 0.7))
247
+ continue
248
+ except Exception as e:
249
+ last_error = e
250
+ if attempt < HTTP_RETRIES - 1:
251
+ continue
252
+ break
253
+
254
+ if last_error:
255
+ raise last_error
256
+ raise RuntimeError(f"Failed to get response from model {model}")
257
 
258
 
259
  def _strip_fenced_code(text: str) -> str:
 
349
  return "Error: Nebius API key not provided."
350
 
351
  try:
352
+ # Resize image if too large to avoid timeouts
353
+ max_dimension = 1024
354
+ if image.width > max_dimension or image.height > max_dimension:
355
+ image.thumbnail((max_dimension, max_dimension), Image.Resampling.LANCZOS)
356
+
357
  # Encode image to base64
358
  buffered = io.BytesIO()
359
+ image.save(buffered, format="PNG", optimize=True)
360
  img_b64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
361
 
362
  prompt = (
363
  "Analyze this image and provide a concise description. "
364
  "Describe the main elements, colors, layout, and UI components. "
365
  "Identify what type of website or application this resembles. "
366
+ "Focus on structural and visual elements that would be important for recreating the design. "
367
+ "Be specific about layout patterns, component types, and styling details."
368
  )
369
 
370
  messages = [
 
377
  }
378
  ]
379
 
380
+ # Use vision model
381
  content = call_chat_completions(
382
  model=vision_model,
383
  messages=messages,
384
  api_key=api_key,
385
+ max_tokens=1500,
386
  temperature=0.7,
387
+ retry_with_fallback=True,
388
  )
389
  return content
390
  except Exception as e:
391
+ error_msg = str(e)
392
+ if "404" in error_msg or "not found" in error_msg.lower():
393
+ return f"Error: Model '{vision_model}' not available. Try using one of the recommended vision models: {', '.join(VISION_MODELS[:3])}"
394
+ return f"Error analyzing image: {error_msg}"
395
 
396
 
397
  def generate_html_code(
 
419
  Requirements:
420
  - Use modern HTML5, CSS3, and vanilla JavaScript only
421
  - Include TailwindCSS via CDN for styling
422
+ - Make it fully responsive (mobile, tablet, desktop)
423
+ - Use high-quality placeholder images from https://picsum.photos/ or https://unsplash.com/
424
  - Include proper semantic HTML structure
425
+ - Add smooth animations and transitions
426
+ - Include interactive elements where appropriate
427
+ - Use modern design patterns and best practices
428
+ - Ensure the design matches the described layout and style exactly
429
+ - Add proper meta tags and accessibility features
430
 
431
  Return only the complete HTML code starting with <!DOCTYPE html> and ending with </html>.
432
+ Include all CSS and JavaScript inline within the HTML.
433
  """.strip()
434
 
435
  try:
 
440
  api_key=api_key,
441
  max_tokens=code_max_tokens,
442
  temperature=code_temperature,
443
+ retry_with_fallback=True,
444
  )
445
  html_code = _strip_fenced_code(content)
446
 
447
+ # Ensure we have valid HTML
448
  if "<!DOCTYPE html>" in html_code and "</html>" in html_code:
449
  start = html_code.find("<!DOCTYPE html>")
450
  end = html_code.rfind("</html>") + len("</html>")
451
  return html_code[start:end]
452
+ elif "<html" in html_code and "</html>" in html_code:
453
+ # Add DOCTYPE if missing
454
+ return f"<!DOCTYPE html>\n{html_code}"
455
  return html_code
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
456
  except Exception as e:
457
+ error_msg = str(e)
458
+ if "404" in error_msg or "not found" in error_msg.lower():
459
+ return f"Error: Model '{code_model}' not available. Try using one of the recommended code models: {', '.join(CODE_MODELS[:3])}"
460
+ if "timeout" in error_msg.lower():
461
+ return f"Error: Request timed out. Try reducing max tokens or using a faster model like: {', '.join(MODEL_RECOMMENDATIONS['fast'])}"
462
+ return f"Error generating HTML code: {error_msg}"
463
 
464
 
465
  def create_codesandbox(html_code: str) -> str:
 
500
 
501
  parameters = {"files": files, "template": "static"}
502
 
503
+ # Fallback GET URL with compressed parameters
504
  json_str = json.dumps(parameters, separators=(",", ":"))
505
  lz = LZString()
506
  compressed = lz.compressToBase64(json_str)
 
510
  prefill_css = f"{prefill_base}?parameters={compressed}&file=/style.css" if "style.css" in files else ""
511
  prefill_js = f"{prefill_base}?parameters={compressed}&file=/script.js" if "script.js" in files else ""
512
 
513
+ # Try POST API to get a sandbox_id
514
  url = "https://codesandbox.io/api/v1/sandboxes/define"
515
+ transport = httpx.HTTPTransport(retries=2)
516
+ with httpx.Client(timeout=httpx.Timeout(15.0), transport=transport) as client:
517
  resp = client.post(url, json=parameters)
518
  if resp.status_code == 200:
519
  data = resp.json()
 
522
  editor_base = f"https://codesandbox.io/p/sandbox/{sandbox_id}"
523
  preview_base = f"https://codesandbox.io/s/{sandbox_id}"
524
  lines = [
525
+ f"βœ… **Successfully deployed to CodeSandbox!**\n",
526
+ f"- πŸ“ Open index.html in editor: [{editor_base}?file=/index.html]({editor_base}?file=/index.html)",
527
  ]
528
  if "style.css" in files:
529
+ lines.append(f"- 🎨 Open style.css in editor: [{editor_base}?file=/style.css]({editor_base}?file=/style.css)")
530
  if "script.js" in files:
531
+ lines.append(f"- ⚑ Open script.js in editor: [{editor_base}?file=/script.js]({editor_base}?file=/script.js)")
532
+ lines.append(f"- πŸ‘οΈ Live preview: [{preview_base}]({preview_base})")
533
  return "\n".join(lines)
534
 
535
  # Fallback to prefill URLs if POST fails
536
+ lines = [
537
+ "πŸ“¦ **CodeSandbox Links (click to deploy):**\n",
538
+ f"- Open index.html: [{prefill_index}]({prefill_index})"
539
+ ]
540
  if prefill_css:
541
+ lines.append(f"- Open style.css: [{prefill_css}]({prefill_css})")
542
  if prefill_js:
543
+ lines.append(f"- Open script.js: [{prefill_js}]({prefill_js})")
544
  return "\n".join(lines)
545
 
546
  except Exception as e:
 
554
  code_model: str = DEFAULT_CODE_MODEL,
555
  code_max_tokens: int = 4000,
556
  code_temperature: float = 0.7,
557
+ progress=gr.Progress(track_tqdm=True),
558
  ) -> Tuple[str, str]:
559
  """
560
  Complete pipeline: analyze image and generate corresponding HTML code.
561
  Returns (description, html_code).
562
  """
563
+ progress(0, desc="Starting image analysis...")
564
  description = analyze_image(image, nebius_api_key, vision_model)
565
+
566
  if description.startswith("Error"):
567
  return description, "Error: Cannot generate code due to image analysis failure."
568
+
569
+ progress(0.5, desc="Generating HTML code...")
570
  html_code = generate_html_code(
571
  description,
572
  nebius_api_key,
 
574
  code_max_tokens=code_max_tokens,
575
  code_temperature=code_temperature,
576
  )
577
+
578
+ progress(1.0, desc="Complete!")
579
  return description, html_code
580
 
581
 
 
594
  return None
595
 
596
 
597
def get_model_recommendations(performance_tier: str) -> Tuple[str, str]:
    """Map a UI performance-tier label to a (vision_model, code_model) pair.

    Args:
        performance_tier: One of the tier labels shown in the UI radio
            ("Fast (7B models)", "Balanced (32B-70B)", "Quality (Large models)").

    Returns:
        Tuple of (vision_model_id, code_model_id). Unrecognized labels fall
        back to the module-level defaults.
    """
    if performance_tier == "Fast (7B models)":
        return ("Qwen/Qwen2.5-VL-7B-Instruct", "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B")
    if performance_tier == "Balanced (32B-70B)":
        return ("Qwen/Qwen2.5-VL-72B-Instruct", "Qwen/Qwen2.5-Coder-32B-Instruct")
    if performance_tier == "Quality (Large models)":
        return ("Qwen/Qwen2.5-VL-72B-Instruct", "deepseek-ai/DeepSeek-V3")
    # Unknown tier label: fall back to the app defaults.
    return (DEFAULT_VISION_MODEL, DEFAULT_CODE_MODEL)
608
+
609
+
610
  # =========================
611
+ # Gradio UI
612
  # =========================
613
# Brand palette used by the custom CSS below (Brizy-inspired theme).
BRIZY_PRIMARY = "#6C5CE7"    # accent purple
BRIZY_SECONDARY = "#00C2FF"  # accent cyan
BRIZY_BG = "#F7F9FC"         # page background
BRIZY_SURFACE = "#FFFFFF"    # card/panel background
BRIZY_TEXT = "#1F2937"       # primary text
BRIZY_MUTED = "#6B7280"      # secondary/help text
BRIZY_BORDER = "#E5E7EB"     # borders and dividers
# Shared gradient used for titles and primary buttons.
BRIZY_GRADIENT = f"linear-gradient(135deg, {BRIZY_PRIMARY} 0%, {BRIZY_SECONDARY} 100%)"
621
 
622
# Custom stylesheet injected into the Gradio app. Doubled braces ({{ }}) are
# literal CSS braces inside this f-string; single-brace expressions pull in
# the BRIZY_* palette constants defined above.
custom_css = f"""
:root {{
    --app-primary: {BRIZY_PRIMARY};
    --app-secondary: {BRIZY_SECONDARY};
    --app-bg: {BRIZY_BG};
    --app-surface: {BRIZY_SURFACE};
    --app-text: {BRIZY_TEXT};
    --app-muted: {BRIZY_MUTED};
    --app-border: {BRIZY_BORDER};
}}

body {{
    background: var(--app-bg);
    color: var(--app-text);
}}

.section {{
    border: 1px solid var(--app-border);
    padding: 16px;
    border-radius: 12px;
    background: var(--app-surface);
    box-shadow: 0 1px 2px rgba(0,0,0,0.03);
    margin: 10px 0;
}}

.muted {{
    color: var(--app-muted);
    font-size: 0.92em;
}}

.footer {{
    text-align: center;
    color: var(--app-muted);
    padding: 8px 0;
}}

.title h1 {{
    background: {BRIZY_GRADIENT};
    -webkit-background-clip: text;
    background-clip: text;
    color: transparent;
    font-weight: 800;
    letter-spacing: -0.02em;
}}

.primary-btn button {{
    background: {BRIZY_GRADIENT} !important;
    color: #fff !important;
    border: none !important;
    font-weight: 600 !important;
    padding: 12px 24px !important;
    font-size: 16px !important;
}}
.primary-btn button:hover {{
    filter: brightness(0.95);
    transform: translateY(-1px);
    transition: all 0.2s;
}}

.secondary-btn button {{
    background: var(--app-surface) !important;
    color: var(--app-text) !important;
    border: 1px solid var(--app-border) !important;
    font-weight: 500 !important;
}}
.secondary-btn button:hover {{
    border-color: {BRIZY_PRIMARY} !important;
    color: {BRIZY_PRIMARY} !important;
}}

/* Inputs focus */
input:focus, textarea:focus, select:focus {{
    outline-color: {BRIZY_PRIMARY} !important;
    border-color: {BRIZY_PRIMARY} !important;
    box-shadow: 0 0 0 3px rgba(108,92,231,0.15) !important;
}}

/* Code block accents */
.gr-code .cm-editor, .gr-code textarea {{
    border-radius: 10px !important;
    border: 1px solid var(--app-border) !important;
}}

/* Tabs accent */
.gradio-container .tabs .tab-nav button[aria-selected="true"] {{
    color: {BRIZY_PRIMARY} !important;
    border-bottom: 2px solid {BRIZY_PRIMARY} !important;
}}

/* Model recommendation badge */
.model-badge {{
    display: inline-block;
    padding: 4px 8px;
    border-radius: 4px;
    font-size: 12px;
    font-weight: 600;
    margin-left: 8px;
}}
.badge-fast {{
    background-color: #10B981;
    color: white;
}}
.badge-balanced {{
    background-color: #3B82F6;
    color: white;
}}
.badge-quality {{
    background-color: #8B5CF6;
    color: white;
}}
"""
733
+
734
  with gr.Blocks(
735
  theme=gr.themes.Soft(),
736
  title="AI Website Generator (Nebius)",
737
+ css=custom_css,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
738
  ) as app:
739
  gr.Markdown(
740
  """
741
+ # πŸš€ AI Website Generator (Nebius)
742
+ Turn website screenshots into functional HTML using state-of-the-art Nebius-compatible models.
743
+
744
+ ### ✨ Features:
745
+ - **Vision Models**: Qwen VL series, Llama 3.2 Vision, Pixtral for image analysis
746
+ - **Code Models**: DeepSeek V3, Qwen Coder, Llama 3.1, Mixtral for code generation
747
+ - **Smart Fallbacks**: Automatic model switching if primary model is unavailable
748
+ - **One-click Deployment**: Direct CodeSandbox integration
749
+ - **Performance Tiers**: Choose between speed and quality
750
  """,
751
  elem_classes=["title"],
752
  )
753
 
754
+ with gr.Accordion("βš™οΈ API & Model Configuration", open=True):
755
+ gr.Markdown(
756
+ """
757
+ Configure your Nebius API key and select models. The app includes a default key for testing.
758
+
759
+ **πŸ’‘ Model Recommendations:**
760
+ - **Fast**: 7B models for quick prototyping
761
+ - **Balanced**: 32B-70B models for good quality/speed ratio
762
+ - **Quality**: Large models for best results
763
+ """,
764
+ elem_classes=["muted"]
765
  )
766
+
767
+ with gr.Row():
768
+ nebius_key = gr.Textbox(
769
+ label="Nebius API Key",
770
+ type="password",
771
+ placeholder="Paste your Nebius API key, or use the default",
772
+ value=DEFAULT_NEBIUS_API_KEY,
773
+ )
774
+ performance_tier = gr.Radio(
775
+ label="Performance Tier",
776
+ choices=["Fast (7B models)", "Balanced (32B-70B)", "Quality (Large models)"],
777
+ value="Balanced (32B-70B)",
778
+ info="Automatically selects optimal models for your needs"
779
+ )
780
+
781
  with gr.Row():
782
  vision_model_dd = gr.Dropdown(
783
+ label="Vision Model (for image analysis)",
784
  choices=VISION_MODELS,
785
  value=DEFAULT_VISION_MODEL,
786
  allow_custom_value=True,
787
+ info="Select or type a custom vision-capable model",
788
  )
789
  code_model_dd = gr.Dropdown(
790
  label="Code Generation Model",
791
  choices=CODE_MODELS,
792
  value=DEFAULT_CODE_MODEL,
793
  allow_custom_value=True,
794
+ info="Select or type a custom code generation model",
795
  )
796
+
797
  with gr.Row():
798
  code_max_tokens = gr.Slider(
799
  label="Max tokens (code generation)",
 
801
  maximum=8000,
802
  step=100,
803
  value=4000,
804
+ info="⚠️ Lower this if you experience timeouts",
805
  )
806
  code_temperature = gr.Slider(
807
  label="Temperature",
 
809
  maximum=1.5,
810
  step=0.1,
811
  value=0.7,
812
+ info="Higher = more creative, Lower = more consistent",
813
  )
814
+
815
+ # Auto-select models based on performance tier
816
def update_models_from_tier(tier):
    """Gradio change-handler: resolve the recommended model pair for a tier.

    Returns the (vision_model, code_model) tuple that feeds the two
    model dropdowns.
    """
    return get_model_recommendations(tier)
819
+
820
+ performance_tier.change(
821
+ fn=update_models_from_tier,
822
+ inputs=[performance_tier],
823
+ outputs=[vision_model_dd, code_model_dd]
824
+ )
825
 
826
+ with gr.Tab("🎯 Quick Generate"):
827
  with gr.Row():
828
  with gr.Column(scale=1):
829
+ gr.Markdown("### πŸ“Έ Step 1: Upload Screenshot", elem_classes=["section"])
830
  image_input = gr.Image(
831
  type="pil",
832
  label="Website Screenshot",
833
  sources=["upload", "clipboard"],
834
+ height=300,
835
+ )
836
+
837
+ gr.Markdown(
838
+ """
839
+ **Tips for best results:**
840
+ - Use clear, high-quality screenshots
841
+ - Include full page layouts
842
+ - Avoid complex animations in source
843
+ """,
844
+ elem_classes=["muted"]
845
  )
846
+
847
+ generate_btn = gr.Button("πŸš€ Generate Website", elem_classes=["primary-btn"], size="lg")
848
 
849
  with gr.Column(scale=2):
850
+ gr.Markdown("### πŸ“ Step 2: Review Results", elem_classes=["section"])
851
+
852
+ with gr.Tabs():
853
+ with gr.TabItem("Analysis"):
854
+ description_output = gr.Textbox(
855
+ label="Image Analysis Result",
856
+ lines=8,
857
+ interactive=False,
858
+ )
859
+
860
+ with gr.TabItem("Generated Code"):
861
+ html_output = gr.Code(
862
+ label="Generated HTML (copy or download)",
863
+ language="html",
864
+ lines=20,
865
+ )
866
 
867
  with gr.Row():
868
+ codesandbox_btn = gr.Button("☁️ Deploy to CodeSandbox", elem_classes=["secondary-btn"])
869
+ download_btn = gr.Button("πŸ’Ύ Download HTML", elem_classes=["secondary-btn"])
870
+ copy_btn = gr.Button("πŸ“‹ Copy Code", elem_classes=["secondary-btn"])
871
 
872
  codesandbox_links = gr.Markdown(value="")
873
  download_file = gr.File(
 
876
  visible=False,
877
  )
878
 
879
+ with gr.Tab("πŸ› οΈ Individual Tools"):
880
  with gr.Row():
881
  with gr.Column():
882
+ gr.Markdown("### πŸ” Image Analysis Tool", elem_classes=["section"])
883
+ img_tool = gr.Image(type="pil", label="Upload Image")
884
  analyze_btn = gr.Button("Analyze Image", elem_classes=["secondary-btn"])
885
+ analysis_result = gr.Textbox(label="Analysis Result", lines=8)
886
 
887
  with gr.Column():
888
+ gr.Markdown("### πŸ’» Code Generation Tool", elem_classes=["section"])
889
+ desc_input = gr.Textbox(
890
+ label="Website Description",
891
+ lines=6,
892
+ placeholder="Describe the website you want to create...\n\nExample: A modern landing page with a hero section, navigation bar, features grid, and contact form..."
893
+ )
894
  code_btn = gr.Button("Generate Code", elem_classes=["secondary-btn"])
895
+ code_result = gr.Code(label="Generated Code", language="html", lines=12)
896
+
897
+ with gr.Tab("πŸ“š Model Information"):
898
+ gr.Markdown(
899
+ """
900
+ ## Available Models on Nebius
901
+
902
+ ### πŸ‘οΈ Vision Models (Image Analysis)
903
+ - **Qwen/Qwen2.5-VL-72B-Instruct** - Best quality, latest Qwen vision model
904
+ - **Qwen/Qwen2.5-VL-7B-Instruct** - Fast, efficient vision model
905
+ - **meta-llama/Llama-3.2-90B-Vision-Instruct** - Meta's powerful vision model
906
+ - **mistralai/Pixtral-12B-2409** - Mistral's vision model
907
+
908
+ ### πŸ’» Code Generation Models
909
+ - **deepseek-ai/DeepSeek-V3** - State-of-the-art code generation
910
+ - **Qwen/Qwen2.5-Coder-32B-Instruct** - Specialized for coding tasks
911
+ - **Qwen/QwQ-32B-Preview** - Advanced reasoning capabilities
912
+ - **meta-llama/Meta-Llama-3.1-405B-Instruct** - Largest Llama model
913
+ - **mistralai/Mixtral-8x22B-Instruct-v0.1** - MOE architecture for efficiency
914
+
915
+ ### 🎯 Performance Guidelines
916
+ - **Timeouts?** β†’ Reduce max tokens or use smaller models
917
+ - **Quality issues?** β†’ Use larger models or increase temperature
918
+ - **Model not found?** β†’ The app will automatically try fallback models
919
+ """,
920
+ elem_classes=["section"]
921
+ )
922
 
923
+ gr.Markdown(
924
+ """
925
+ ---
926
+ Made with ❀️ using Gradio β€’ Powered by Nebius AI Studio
927
+
928
+ [GitHub](https://github.com) | [Documentation](https://nebius.com/docs) | [API Reference](https://nebius.com/api)
929
+ """,
930
+ elem_classes=["footer"]
931
+ )
932
 
933
  # Event bindings
934
  generate_btn.click(
 
938
  )
939
 
940
  def _deploy_to_codesandbox(html_code: str) -> str:
941
+ if not html_code or html_code.startswith("Error"):
942
+ return "⚠️ **No valid code to deploy.** Generate code first."
943
+ return create_codesandbox(html_code)
 
 
944
 
945
  codesandbox_btn.click(
946
  fn=_deploy_to_codesandbox,
 
957
  inputs=[html_output],
958
  outputs=[download_file],
959
  )
960
+
961
+ def _copy_to_clipboard(html_code: str) -> str:
962
+ if not html_code or html_code.startswith("Error"):
963
+ return "⚠️ No valid code to copy"
964
+ # Note: Actual clipboard copying happens client-side
965
+ return "βœ… Code copied to clipboard!"
966
+
967
+ copy_btn.click(
968
+ fn=_copy_to_clipboard,
969
+ inputs=[html_output],
970
+ outputs=[codesandbox_links],
971
+ )
972
 
973
  analyze_btn.click(
974
  fn=lambda img, key, vmod: analyze_image(img, key, vmod),
 
985
  )
986
 
987
  if __name__ == "__main__":
988
+ app.launch(share=False, show_error=True)