natabrizy commited on
Commit
e6073c2
·
verified ·
1 Parent(s): 821e14a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +430 -231
app.py CHANGED
@@ -5,6 +5,7 @@ import os
5
  import re
6
  import tempfile
7
  from typing import Tuple, Optional, List, Dict, Any
 
8
 
9
  import gradio as gr
10
  import httpx
@@ -16,46 +17,111 @@ from lzstring import LZString
16
  # =========================
17
  NEBIUS_BASE_URL = "https://api.studio.nebius.com/v1/"
18
 
19
- # Vision models (these work with Nebius)
 
 
 
 
20
  DEFAULT_VISION_MODEL = "Qwen/Qwen2.5-VL-72B-Instruct"
21
  VISION_MODELS = [
22
  DEFAULT_VISION_MODEL,
23
- "Qwen/Qwen2.5-VL-7B-Instruct",
24
- "Qwen/Qwen2-VL-72B-Instruct",
25
  ]
26
 
27
- # Code/text models - use exact IDs that Nebius supports
28
- DEFAULT_CODE_MODEL = "deepseek-ai/DeepSeek-V3-0324"
 
 
29
  CODE_MODELS = [
30
- DEFAULT_CODE_MODEL,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  "Qwen/Qwen2.5-Coder-32B-Instruct",
32
- "Qwen/Qwen2.5-72B-Instruct",
33
- "Qwen/QwQ-32B-Preview",
34
- # These are the correct Nebius model IDs for Meta and Mistral
35
- "meta-llama/Meta-Llama-3.1-70B-Instruct",
36
- "meta-llama/Meta-Llama-3.1-8B-Instruct",
37
- "mistralai/Mistral-7B-Instruct-v0.3",
38
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
39
  ]
40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  # Model ID mappings for common aliases
42
  MODEL_ALIASES = {
43
- "Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
44
- "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
45
- "Mistral-7B-Instruct": "mistralai/Mistral-7B-Instruct-v0.3",
46
- "Mixtral-8x7B-Instruct": "mistralai/Mixtral-8x7B-Instruct-v0.1",
 
 
 
47
  }
48
 
49
- # Timeouts and simple retries for stability
50
- HTTP_TIMEOUTS = httpx.Timeout(connect=10.0, read=120.0, write=30.0, pool=60.0)
51
- HTTP_RETRIES = 2
 
 
 
52
 
53
  # Keep the same default key you provided
54
  DEFAULT_NEBIUS_API_KEY = (
55
  "eyJhbGciOiJIUzI1NiIsImtpZCI6IlV6SXJWd1h0dnprLVRvdzlLZWstc0M1akptWXBvX1VaVkxUZlpnMDRlOFUiLCJ0eXAiOiJKV1QifQ.eyJzdWIiOiJnb29nbGUtb2F1dGgyfDEwNTA1MTQzMDg2MDMwMzIxNDEwMiIsInNjb3BlIjoib3BlbmlkIG9mZmxpbmVfYWNjZXNzIiwiaXNzIjoiYXBpX2tleV9pc3N1ZXIiLCJhdWQiOlsiaHR0cHM6Ly9uZWJpdXMtaW5mZXJlbmNlLmV1LmF1dGgwLmNvbS9hcGkvdjIvIl0sImV4cCI6MTkwNjU5ODA0NCwidXVpZCI6ImNkOGFiMWZlLTIxN2QtNDJlMy04OWUwLWM1YTg4MjcwMGVhNyIsIm5hbWUiOiJodW5nZ2luZyIsImV4cGlyZXNfYXQiOiIyMDMwLTA2LTAyVDAyOjM0OjA0KzAwMDAifQ.MA52QuIiNruK7_lX688RXAEI2TkcCOjcf_02XrpnhI8"
56
  )
57
 
58
-
59
  # =========================
60
  # Helpers
61
  # =========================
@@ -85,7 +151,7 @@ def normalize_model_id(model_id: str) -> str:
85
  model_lower = model_id.lower()
86
  if "llama" in model_lower:
87
  return f"meta-llama/{model_id}"
88
- elif "mistral" in model_lower or "mixtral" in model_lower:
89
  return f"mistralai/{model_id}"
90
  elif "qwen" in model_lower or "qwq" in model_lower:
91
  return f"Qwen/{model_id}"
@@ -103,7 +169,7 @@ def call_chat_completions(
103
  temperature: float = 0.7,
104
  ) -> str:
105
  """
106
- Calls the Nebius chat/completions endpoint.
107
  Returns the assistant text content.
108
  """
109
  if not api_key:
@@ -112,8 +178,16 @@ def call_chat_completions(
112
  # Normalize the model ID
113
  model = normalize_model_id(model)
114
 
 
 
 
 
 
115
  headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
116
- transport = httpx.HTTPTransport(retries=HTTP_RETRIES)
 
 
 
117
 
118
  # Use chat/completions endpoint
119
  chat_url = f"{NEBIUS_BASE_URL}chat/completions"
@@ -122,27 +196,32 @@ def call_chat_completions(
122
  "messages": messages,
123
  "max_tokens": max_tokens,
124
  "temperature": temperature,
 
125
  }
126
 
 
 
 
127
  try:
128
- with httpx.Client(timeout=HTTP_TIMEOUTS, transport=transport) as client:
129
  resp = client.post(chat_url, headers=headers, json=chat_payload)
130
 
131
  if resp.status_code == 404:
132
- # Try alternative model IDs for Meta/Mistral
133
- alt_models = []
134
- if "meta-llama" in model:
135
- alt_models = ["meta-llama/Meta-Llama-3.1-70B-Instruct", "meta-llama/Meta-Llama-3.1-8B-Instruct"]
136
- elif "mistralai" in model:
137
- alt_models = ["mistralai/Mistral-7B-Instruct-v0.3", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
138
 
139
- for alt_model in alt_models:
140
- if alt_model != model:
141
- chat_payload["model"] = alt_model
 
142
  try:
143
  resp = client.post(chat_url, headers=headers, json=chat_payload)
144
  if resp.status_code == 200:
145
- model = alt_model
146
  break
147
  except:
148
  continue
@@ -161,20 +240,25 @@ def call_chat_completions(
161
 
162
  raise RuntimeError("Unable to parse response from API")
163
 
 
 
 
 
 
164
  except httpx.HTTPStatusError as e:
165
  status = e.response.status_code
166
  detail = e.response.text
167
 
168
  if status == 404:
169
- # Provide helpful error message
170
  available_models = [
171
- "Qwen/Qwen2.5-72B-Instruct",
172
- "Qwen/Qwen2.5-Coder-32B-Instruct",
173
- "deepseek-ai/DeepSeek-V3-0324"
174
  ]
175
  raise RuntimeError(
176
- f"Model '{model}' not found. The Nebius endpoint may not support Meta/Mistral models. "
177
- f"Try one of these working models instead: {', '.join(available_models)}"
178
  )
179
  raise RuntimeError(f"HTTP {status} error: {detail}")
180
  except Exception as e:
@@ -260,7 +344,8 @@ def analyze_image(
260
  vision_model: str = DEFAULT_VISION_MODEL,
261
  ) -> str:
262
  """
263
- Analyze an uploaded image and provide a concise description of its content and layout.
 
264
  """
265
  if image is None:
266
  return "Error: No image provided."
@@ -269,16 +354,22 @@ def analyze_image(
269
  if not api_key:
270
  return "Error: Nebius API key not provided."
271
 
 
 
 
 
 
272
  try:
273
  buffered = io.BytesIO()
274
  image.save(buffered, format="PNG")
275
  img_b64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
276
 
277
  prompt = (
278
- "Analyze this image and provide a concise description. "
279
  "Describe the main elements, colors, layout, and UI components. "
280
  "Identify what type of website or application this resembles. "
281
- "Focus on structural and visual elements that would be important for recreating the design."
 
282
  )
283
 
284
  messages = [
@@ -295,7 +386,7 @@ def analyze_image(
295
  model=vision_model,
296
  messages=messages,
297
  api_key=api_key,
298
- max_tokens=1000,
299
  temperature=0.7,
300
  )
301
  return content
@@ -312,7 +403,7 @@ def generate_html_code(
312
  ) -> str:
313
  """
314
  Generate HTML/CSS/JavaScript code based on a website description.
315
- Falls back to working models if selected model is unavailable.
316
  """
317
  if not description or description.startswith("Error"):
318
  return "Error: Invalid or missing description."
@@ -321,75 +412,119 @@ def generate_html_code(
321
  if not api_key:
322
  return "Error: Nebius API key not provided."
323
 
 
324
  prompt = f"""
325
- Generate a complete, responsive webpage based on this description:
326
 
327
  {description}
328
 
329
  Requirements:
330
- - Use modern HTML5, CSS3, and vanilla JavaScript only
331
- - Include TailwindCSS via CDN for styling
332
- - Make it responsive and visually appealing
333
- - Use placeholder images from https://unsplash.com/ if needed
334
- - Include proper semantic HTML structure
335
- - Add interactive elements where appropriate
336
- - Ensure the design matches the described layout and style
337
-
338
- Return only the complete HTML code starting with <!DOCTYPE html> and ending with </html>.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
339
  """.strip()
340
 
341
- # List of models to try in order
342
- models_to_try = [code_model]
 
 
 
 
 
 
343
 
344
- # Add fallback models if the selected one fails
345
  fallback_models = [
346
- "deepseek-ai/DeepSeek-V3-0324",
347
  "Qwen/Qwen2.5-72B-Instruct",
348
- "Qwen/Qwen2.5-Coder-32B-Instruct",
349
  ]
350
 
351
- for fallback in fallback_models:
352
- if fallback not in models_to_try:
353
- models_to_try.append(fallback)
 
 
 
 
354
 
355
  last_error = None
356
  for model in models_to_try:
357
  try:
 
 
 
 
 
 
358
  messages = [{"role": "user", "content": prompt}]
359
  content = call_chat_completions(
360
  model=model,
361
  messages=messages,
362
  api_key=api_key,
363
- max_tokens=code_max_tokens,
364
  temperature=code_temperature,
365
  )
366
  html_code = _strip_fenced_code(content)
367
 
368
- if "<!DOCTYPE html>" in html_code and "</html>" in html_code:
369
- start = html_code.find("<!DOCTYPE html>")
370
- end = html_code.rfind("</html>") + len("</html>")
371
- return html_code[start:end]
 
 
 
 
 
 
 
 
 
 
372
  return html_code
373
 
374
  except Exception as e:
375
  last_error = str(e)
376
- # If it's not a 404, don't try other models
377
- if "not found" not in str(e).lower() and "404" not in str(e):
 
 
378
  break
379
  continue
380
 
381
  # All models failed
382
  return (
383
- f"Error generating HTML code: {last_error}. "
384
- f"Note: Meta and Mistral models may not be available on this Nebius endpoint. "
385
- f"Please use Qwen or DeepSeek models which are confirmed to work."
 
 
 
386
  )
387
 
388
 
389
  def create_codesandbox(html_code: str) -> str:
390
  """
391
- Create a CodeSandbox project from HTML code, returning only editor links
392
- for index.html (and style.css/script.js if present). Does not provide any live preview link.
393
  """
394
  if not html_code or html_code.startswith("Error"):
395
  return "Error: No valid HTML code provided."
@@ -410,7 +545,7 @@ def create_codesandbox(html_code: str) -> str:
410
  {
411
  "name": "ai-generated-website",
412
  "version": "1.0.0",
413
- "description": "Website generated from image analysis",
414
  "main": "index.html",
415
  "scripts": {"start": "serve .", "build": "echo 'No build required'"},
416
  "devDependencies": {"serve": "^14.0.0"},
@@ -426,35 +561,42 @@ def create_codesandbox(html_code: str) -> str:
426
  lz = LZString()
427
  compressed = lz.compressToBase64(json_str)
428
  compressed = compressed.replace("+", "-").replace("/", "_").rstrip("=")
 
 
429
  prefill_base = "https://codesandbox.io/api/v1/sandboxes/define"
430
- prefill_index = f"{prefill_base}?parameters={compressed}&file=/index.html"
431
- prefill_css = f"{prefill_base}?parameters={compressed}&file=/style.css" if "style.css" in files else ""
432
- prefill_js = f"{prefill_base}?parameters={compressed}&file=/script.js" if "script.js" in files else ""
433
-
434
  url = "https://codesandbox.io/api/v1/sandboxes/define"
435
- transport = httpx.HTTPTransport(retries=HTTP_RETRIES)
436
- with httpx.Client(timeout=HTTP_TIMEOUTS, transport=transport) as client:
 
 
437
  resp = client.post(url, json=parameters)
438
  if resp.status_code == 200:
439
  data = resp.json()
440
  sandbox_id = data.get("sandbox_id")
441
  if sandbox_id:
442
- editor_base = f"https://codesandbox.io/p/sandbox/{sandbox_id}"
443
- lines = [
444
- f"- Open index.html in editor: {editor_base}?file=/index.html",
445
- ]
446
- if "style.css" in files:
447
- lines.append(f"- Open style.css in editor: {editor_base}?file=/style.css")
448
- if "script.js" in files:
449
- lines.append(f"- Open script.js in editor: {editor_base}?file=/script.js")
450
- return "\n".join(lines)
451
-
452
- lines = [f"- Open index.html in editor: {prefill_index}"]
453
- if prefill_css:
454
- lines.append(f"- Open style.css in editor: {prefill_css}")
455
- if prefill_js:
456
- lines.append(f"- Open script.js in editor: {prefill_js}")
457
- return "\n".join(lines)
 
 
 
 
 
 
458
 
459
  except Exception as e:
460
  return f"Error creating CodeSandbox: {str(e)}"
@@ -475,6 +617,7 @@ def screenshot_to_code(
475
  description = analyze_image(image, nebius_api_key, vision_model)
476
  if description.startswith("Error"):
477
  return description, "Error: Cannot generate code due to image analysis failure."
 
478
  html_code = generate_html_code(
479
  description,
480
  nebius_api_key,
@@ -501,207 +644,229 @@ def export_html_to_file(html_code: str) -> Optional[str]:
501
 
502
 
503
  # =========================
504
- # Gradio UI (Brizy-inspired palette, no emojis)
505
  # =========================
506
- BRIZY_PRIMARY = "#6C5CE7" # Indigo-like
507
- BRIZY_SECONDARY = "#00C2FF" # Cyan accent
508
- BRIZY_BG = "#F7F9FC" # Light background
509
- BRIZY_SURFACE = "#FFFFFF" # Surface
510
- BRIZY_TEXT = "#1F2937" # Dark text
511
- BRIZY_MUTED = "#6B7280" # Muted text
512
- BRIZY_BORDER = "#E5E7EB" # Soft border
513
- BRIZY_GRADIENT = f"linear-gradient(135deg, {BRIZY_PRIMARY} 0%, {BRIZY_SECONDARY} 100%)"
 
 
514
 
515
  with gr.Blocks(
516
  theme=gr.themes.Soft(),
517
- title="AI Website Generator (Nebius)",
518
  css=f"""
519
  :root {{
520
- --app-primary: {BRIZY_PRIMARY};
521
- --app-secondary: {BRIZY_SECONDARY};
522
- --app-bg: {BRIZY_BG};
523
- --app-surface: {BRIZY_SURFACE};
524
- --app-text: {BRIZY_TEXT};
525
- --app-muted: {BRIZY_MUTED};
526
- --app-border: {BRIZY_BORDER};
 
 
527
  }}
528
-
529
  body {{
530
- background: var(--app-bg);
531
- color: var(--app-text);
 
532
  }}
533
-
534
- .section {{
535
- border: 1px solid var(--app-border);
536
- padding: 16px;
537
  border-radius: 12px;
538
- background: var(--app-surface);
539
- box-shadow: 0 1px 2px rgba(0,0,0,0.03);
540
- margin: 10px 0;
541
- }}
542
-
543
- .muted {{
544
- color: var(--app-muted);
545
- font-size: 0.92em;
546
- }}
547
-
548
- .footer {{
549
  text-align: center;
550
- color: var(--app-muted);
551
- padding: 8px 0;
552
  }}
553
-
554
- .title h1 {{
555
- background: {BRIZY_GRADIENT};
556
- -webkit-background-clip: text;
557
- background-clip: text;
558
- color: transparent;
559
  font-weight: 800;
560
- letter-spacing: -0.02em;
561
  }}
562
-
563
- .primary-btn button {{
564
- background: {BRIZY_GRADIENT} !important;
565
- color: #fff !important;
566
- border: none !important;
567
- }}
568
- .primary-btn button:hover {{
569
- filter: brightness(0.98);
570
  }}
571
-
572
- .secondary-btn button {{
573
- background: var(--app-surface) !important;
574
- color: var(--app-text) !important;
575
- border: 1px solid var(--app-border) !important;
 
 
576
  }}
577
- .secondary-btn button:hover {{
578
- border-color: {BRIZY_PRIMARY} !important;
579
- color: {BRIZY_PRIMARY} !important;
 
 
 
580
  }}
581
-
582
  .warning-box {{
583
  background: #FEF3C7;
584
- border: 1px solid #F59E0B;
585
  border-radius: 8px;
586
- padding: 12px;
587
- margin: 10px 0;
588
  color: #92400E;
589
  }}
590
-
591
- input:focus, textarea:focus, select:focus {{
592
- outline-color: {BRIZY_PRIMARY} !important;
593
- border-color: {BRIZY_PRIMARY} !important;
594
- box-shadow: 0 0 0 3px rgba(108,92,231,0.15) !important;
 
 
595
  }}
596
-
597
- .gr-code .cm-editor, .gr-code textarea {{
598
- border-radius: 10px !important;
599
- border: 1px solid var(--app-border) !important;
 
600
  }}
601
-
602
- .gradio-container .tabs .tab-nav button[aria-selected="true"] {{
603
- color: {BRIZY_PRIMARY} !important;
604
- border-bottom: 2px solid {BRIZY_PRIMARY} !important;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
605
  }}
606
  """,
607
  ) as app:
608
- gr.Markdown(
609
- """
610
- # AI Website Generator (Nebius)
611
- Turn website screenshots into functional HTML using Nebius-compatible models.
612
-
613
- - Image analysis with vision models
614
- - Code generation with LLMs
615
- - Open in CodeSandbox editor to inspect code
616
- - Your key is used at runtime only
617
  """,
618
- elem_classes=["title"],
619
  )
620
 
621
- with gr.Accordion("API & Models", open=True):
622
- gr.Markdown("Provide your Nebius API key or use the default configured in this app.", elem_classes=["muted"])
 
 
 
 
 
 
 
623
  nebius_key = gr.Textbox(
624
  label="Nebius API Key",
625
  type="password",
626
- placeholder="Paste your Nebius API key, or leave as-is to use the default key.",
627
  value=DEFAULT_NEBIUS_API_KEY,
628
  )
629
 
630
- gr.Markdown(
631
  """
632
- <div class="warning-box">
633
- <strong>Note:</strong> Currently, only Qwen and DeepSeek models are confirmed to work with this Nebius endpoint.
634
- Meta (Llama) and Mistral models may not be available.
 
 
635
  </div>
636
  """,
637
- elem_classes=["warning-box"]
638
  )
639
 
640
  with gr.Row():
641
  vision_model_dd = gr.Dropdown(
642
- label="Vision Model",
643
  choices=VISION_MODELS,
644
  value=DEFAULT_VISION_MODEL,
645
- allow_custom_value=True,
646
- info="Qwen vision models work best.",
647
  )
648
  code_model_dd = gr.Dropdown(
649
- label="Code Generation Model (Recommended: Qwen/DeepSeek)",
650
  choices=CODE_MODELS,
651
  value=DEFAULT_CODE_MODEL,
652
- allow_custom_value=True,
653
- info="DeepSeek and Qwen models are confirmed to work. Others may fail.",
654
  )
 
655
  with gr.Row():
656
  code_max_tokens = gr.Slider(
657
- label="Max tokens (code generation)",
658
- minimum=500,
659
  maximum=8000,
660
- step=100,
661
  value=4000,
662
- info="Lower this if you see timeouts; higher values may take longer.",
663
  )
664
  code_temperature = gr.Slider(
665
  label="Temperature",
666
  minimum=0.0,
667
- maximum=1.5,
668
  step=0.1,
669
  value=0.7,
670
- info="Higher is more creative; lower is more deterministic.",
671
  )
672
 
673
  with gr.Tab("Quick Generate"):
674
  with gr.Row():
675
  with gr.Column(scale=1):
676
- gr.Markdown("### Step 1: Upload Screenshot", elem_classes=["section"])
677
  image_input = gr.Image(
678
  type="pil",
679
- label="Website Screenshot",
680
  sources=["upload", "clipboard"],
681
- height=280,
 
 
 
 
 
682
  )
683
- generate_btn = gr.Button("Generate Website", elem_classes=["primary-btn"])
684
 
685
  with gr.Column(scale=2):
686
- gr.Markdown("### Step 2: Review Results", elem_classes=["section"])
687
  description_output = gr.Textbox(
688
  label="Image Analysis",
689
  lines=6,
690
  interactive=False,
691
  )
692
  html_output = gr.Code(
693
- label="Generated HTML (copy or download)",
694
  language="html",
695
- lines=18,
696
  )
697
 
698
  with gr.Row():
699
- codesandbox_btn = gr.Button("Open in CodeSandbox Editor", elem_classes=["secondary-btn"])
700
- download_btn = gr.Button("Download index.html", elem_classes=["secondary-btn"])
 
 
 
 
 
 
701
 
702
  codesandbox_links = gr.Markdown(value="")
703
  download_file = gr.File(
704
- label="Download (index.html)",
705
  interactive=False,
706
  visible=False,
707
  )
@@ -709,18 +874,59 @@ Turn website screenshots into functional HTML using Nebius-compatible models.
709
  with gr.Tab("Individual Tools"):
710
  with gr.Row():
711
  with gr.Column():
712
- gr.Markdown("### Image Analysis Tool", elem_classes=["section"])
713
  img_tool = gr.Image(type="pil", label="Image")
714
- analyze_btn = gr.Button("Analyze Image", elem_classes=["secondary-btn"])
715
- analysis_result = gr.Textbox(label="Analysis Result", lines=6)
716
 
717
  with gr.Column():
718
- gr.Markdown("### Code Generation Tool", elem_classes=["section"])
719
- desc_input = gr.Textbox(label="Description", lines=4, placeholder="Describe the page you want...")
720
- code_btn = gr.Button("Generate Code", elem_classes=["secondary-btn"])
 
 
 
 
721
  code_result = gr.Code(label="Generated Code", language="html")
722
 
723
- gr.Markdown("Made with Gradio • Nebius API compatible", elem_classes=["footer"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
724
 
725
  # Event bindings
726
  generate_btn.click(
@@ -729,15 +935,8 @@ Turn website screenshots into functional HTML using Nebius-compatible models.
729
  outputs=[description_output, html_output],
730
  )
731
 
732
- def _deploy_to_codesandbox(html_code: str) -> str:
733
- url_block = create_codesandbox(html_code)
734
- if url_block.startswith("Error"):
735
- return f"**{url_block}**"
736
- lines = ["### CodeSandbox Editor Links", "", url_block]
737
- return "\n".join(lines)
738
-
739
  codesandbox_btn.click(
740
- fn=_deploy_to_codesandbox,
741
  inputs=[html_output],
742
  outputs=[codesandbox_links],
743
  )
 
5
  import re
6
  import tempfile
7
  from typing import Tuple, Optional, List, Dict, Any
8
+ from datetime import datetime
9
 
10
  import gradio as gr
11
  import httpx
 
17
  # =========================
18
  NEBIUS_BASE_URL = "https://api.studio.nebius.com/v1/"
19
 
20
+ # Current date/time and user for logging
21
+ CURRENT_USER = "samsnata"
22
+ CURRENT_DATETIME = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
23
+
24
+ # Vision models - Only Qwen VL models work reliably with Nebius for vision tasks
25
  DEFAULT_VISION_MODEL = "Qwen/Qwen2.5-VL-72B-Instruct"
26
  VISION_MODELS = [
27
  DEFAULT_VISION_MODEL,
28
+ "Qwen/Qwen2.5-VL-7B-Instruct", # Smaller, faster alternative
29
+ "Qwen/Qwen2-VL-72B-Instruct", # Previous version, still reliable
30
  ]
31
 
32
+ # Code/text models - Best non-Chinese alternatives that work on Nebius
33
+ # Based on Nebius documentation, these are the confirmed working models:
34
+ DEFAULT_CODE_MODEL = "meta-llama/Meta-Llama-3.1-70B-Instruct" # Best non-Chinese option
35
+
36
  CODE_MODELS = [
37
+ # Meta (Facebook) Models - CONFIRMED WORKING
38
+ "meta-llama/Meta-Llama-3.1-70B-Instruct", # Best overall performance
39
+ "meta-llama/Meta-Llama-3.1-8B-Instruct", # Faster, lighter option
40
+ "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8", # Most powerful, but slower
41
+
42
+ # Mistral AI Models - European alternative
43
+ "mistralai/Mistral-7B-Instruct-v0.3", # Fast and efficient
44
+ "mistralai/Mixtral-8x7B-Instruct-v0.1", # MoE architecture, good balance
45
+ "mistralai/Mixtral-8x22B-Instruct-v0.1", # Larger MoE model
46
+ "mistralai/Mistral-Nemo-Instruct-2407", # Latest Mistral model
47
+
48
+ # Anthropic-style (if available)
49
+ "Qwen/Qwen2.5-72B-Instruct", # Fallback option
50
+
51
+ # DeepSeek models (Chinese, but included as fallback)
52
+ "deepseek-ai/DeepSeek-V3-0324",
53
  "Qwen/Qwen2.5-Coder-32B-Instruct",
 
 
 
 
 
 
 
54
  ]
55
 
56
+ # Model configurations optimized for each model type
57
+ MODEL_CONFIGS = {
58
+ "meta-llama/Meta-Llama-3.1-70B-Instruct": {
59
+ "max_tokens": 4096,
60
+ "temperature": 0.7,
61
+ "timeout": 90.0,
62
+ "description": "Meta's Llama 3.1 70B - Best overall performance"
63
+ },
64
+ "meta-llama/Meta-Llama-3.1-8B-Instruct": {
65
+ "max_tokens": 4096,
66
+ "temperature": 0.7,
67
+ "timeout": 60.0,
68
+ "description": "Meta's Llama 3.1 8B - Fast and efficient"
69
+ },
70
+ "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8": {
71
+ "max_tokens": 8192,
72
+ "temperature": 0.7,
73
+ "timeout": 180.0,
74
+ "description": "Meta's Llama 3.1 405B - Most powerful, longer wait times"
75
+ },
76
+ "mistralai/Mistral-7B-Instruct-v0.3": {
77
+ "max_tokens": 4096,
78
+ "temperature": 0.7,
79
+ "timeout": 60.0,
80
+ "description": "Mistral 7B - European, fast"
81
+ },
82
+ "mistralai/Mixtral-8x7B-Instruct-v0.1": {
83
+ "max_tokens": 4096,
84
+ "temperature": 0.7,
85
+ "timeout": 90.0,
86
+ "description": "Mixtral 8x7B MoE - Good balance"
87
+ },
88
+ "mistralai/Mixtral-8x22B-Instruct-v0.1": {
89
+ "max_tokens": 6144,
90
+ "temperature": 0.7,
91
+ "timeout": 120.0,
92
+ "description": "Mixtral 8x22B MoE - Powerful European model"
93
+ },
94
+ "mistralai/Mistral-Nemo-Instruct-2407": {
95
+ "max_tokens": 4096,
96
+ "temperature": 0.7,
97
+ "timeout": 75.0,
98
+ "description": "Mistral Nemo - Latest from Mistral AI"
99
+ }
100
+ }
101
+
102
  # Model ID mappings for common aliases
103
  MODEL_ALIASES = {
104
+ "Llama-3.1-70B": "meta-llama/Meta-Llama-3.1-70B-Instruct",
105
+ "Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
106
+ "Llama-3.1-405B": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
107
+ "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.3",
108
+ "Mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1",
109
+ "Mixtral-8x22B": "mistralai/Mixtral-8x22B-Instruct-v0.1",
110
+ "Mistral-Nemo": "mistralai/Mistral-Nemo-Instruct-2407",
111
  }
112
 
113
+ # Optimized timeouts for different model sizes
114
+ def get_model_timeout(model_id: str) -> httpx.Timeout:
115
+ """Get optimized timeout for specific model."""
116
+ config = MODEL_CONFIGS.get(model_id, {})
117
+ read_timeout = config.get("timeout", 120.0)
118
+ return httpx.Timeout(connect=10.0, read=read_timeout, write=30.0, pool=60.0)
119
 
120
  # Keep the same default key you provided
121
  DEFAULT_NEBIUS_API_KEY = (
122
  "eyJhbGciOiJIUzI1NiIsImtpZCI6IlV6SXJWd1h0dnprLVRvdzlLZWstc0M1akptWXBvX1VaVkxUZlpnMDRlOFUiLCJ0eXAiOiJKV1QifQ.eyJzdWIiOiJnb29nbGUtb2F1dGgyfDEwNTA1MTQzMDg2MDMwMzIxNDEwMiIsInNjb3BlIjoib3BlbmlkIG9mZmxpbmVfYWNjZXNzIiwiaXNzIjoiYXBpX2tleV9pc3N1ZXIiLCJhdWQiOlsiaHR0cHM6Ly9uZWJpdXMtaW5mZXJlbmNlLmV1LmF1dGgwLmNvbS9hcGkvdjIvIl0sImV4cCI6MTkwNjU5ODA0NCwidXVpZCI6ImNkOGFiMWZlLTIxN2QtNDJlMy04OWUwLWM1YTg4MjcwMGVhNyIsIm5hbWUiOiJodW5nZ2luZyIsImV4cGlyZXNfYXQiOiIyMDMwLTA2LTAyVDAyOjM0OjA0KzAwMDAifQ.MA52QuIiNruK7_lX688RXAEI2TkcCOjcf_02XrpnhI8"
123
  )
124
 
 
125
  # =========================
126
  # Helpers
127
  # =========================
 
151
  model_lower = model_id.lower()
152
  if "llama" in model_lower:
153
  return f"meta-llama/{model_id}"
154
+ elif "mistral" in model_lower or "mixtral" in model_lower or "nemo" in model_lower:
155
  return f"mistralai/{model_id}"
156
  elif "qwen" in model_lower or "qwq" in model_lower:
157
  return f"Qwen/{model_id}"
 
169
  temperature: float = 0.7,
170
  ) -> str:
171
  """
172
+ Calls the Nebius chat/completions endpoint with optimized settings for each model.
173
  Returns the assistant text content.
174
  """
175
  if not api_key:
 
178
  # Normalize the model ID
179
  model = normalize_model_id(model)
180
 
181
+ # Get model-specific configuration
182
+ model_config = MODEL_CONFIGS.get(model, {})
183
+ if model_config:
184
+ max_tokens = min(max_tokens, model_config.get("max_tokens", max_tokens))
185
+
186
  headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
187
+
188
+ # Get optimized timeout for this model
189
+ timeout = get_model_timeout(model)
190
+ transport = httpx.HTTPTransport(retries=2)
191
 
192
  # Use chat/completions endpoint
193
  chat_url = f"{NEBIUS_BASE_URL}chat/completions"
 
196
  "messages": messages,
197
  "max_tokens": max_tokens,
198
  "temperature": temperature,
199
+ "stream": False, # Disable streaming for stability
200
  }
201
 
202
+ # Log the request for debugging
203
+ print(f"[{CURRENT_DATETIME}] User: {CURRENT_USER} - Calling model: {model}")
204
+
205
  try:
206
+ with httpx.Client(timeout=timeout, transport=transport) as client:
207
  resp = client.post(chat_url, headers=headers, json=chat_payload)
208
 
209
  if resp.status_code == 404:
210
+ # Model not found - try fallback models
211
+ fallback_models = [
212
+ "meta-llama/Meta-Llama-3.1-70B-Instruct",
213
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
214
+ "Qwen/Qwen2.5-72B-Instruct",
215
+ ]
216
 
217
+ for fallback in fallback_models:
218
+ if fallback != model:
219
+ print(f"[{CURRENT_DATETIME}] Trying fallback model: {fallback}")
220
+ chat_payload["model"] = fallback
221
  try:
222
  resp = client.post(chat_url, headers=headers, json=chat_payload)
223
  if resp.status_code == 200:
224
+ model = fallback
225
  break
226
  except:
227
  continue
 
240
 
241
  raise RuntimeError("Unable to parse response from API")
242
 
243
+ except httpx.ReadTimeout:
244
+ raise RuntimeError(
245
+ f"Request timed out after {timeout.read}s. Model '{model}' may be too large. "
246
+ f"Try a smaller model like 'meta-llama/Meta-Llama-3.1-8B-Instruct' or 'mistralai/Mistral-7B-Instruct-v0.3'"
247
+ )
248
  except httpx.HTTPStatusError as e:
249
  status = e.response.status_code
250
  detail = e.response.text
251
 
252
  if status == 404:
253
+ # Provide helpful error message with working alternatives
254
  available_models = [
255
+ "meta-llama/Meta-Llama-3.1-70B-Instruct (Recommended)",
256
+ "mistralai/Mixtral-8x7B-Instruct-v0.1 (Fast)",
257
+ "meta-llama/Meta-Llama-3.1-8B-Instruct (Lightweight)",
258
  ]
259
  raise RuntimeError(
260
+ f"Model '{model}' not found on Nebius. "
261
+ f"Try one of these confirmed working models: {', '.join(available_models)}"
262
  )
263
  raise RuntimeError(f"HTTP {status} error: {detail}")
264
  except Exception as e:
 
344
  vision_model: str = DEFAULT_VISION_MODEL,
345
  ) -> str:
346
  """
347
+ Analyze an uploaded image and provide a detailed description of its content and layout.
348
+ Note: Only Qwen VL models support vision on Nebius.
349
  """
350
  if image is None:
351
  return "Error: No image provided."
 
354
  if not api_key:
355
  return "Error: Nebius API key not provided."
356
 
357
+ # Force use of Qwen VL model for vision tasks
358
+ if not vision_model.startswith("Qwen") or "VL" not in vision_model:
359
+ print(f"[{CURRENT_DATETIME}] Warning: {vision_model} doesn't support vision. Using {DEFAULT_VISION_MODEL}")
360
+ vision_model = DEFAULT_VISION_MODEL
361
+
362
  try:
363
  buffered = io.BytesIO()
364
  image.save(buffered, format="PNG")
365
  img_b64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
366
 
367
  prompt = (
368
+ "Analyze this image and provide a detailed description. "
369
  "Describe the main elements, colors, layout, and UI components. "
370
  "Identify what type of website or application this resembles. "
371
+ "Focus on structural and visual elements that would be important for recreating the design. "
372
+ "Be specific about typography, spacing, alignment, and color schemes."
373
  )
374
 
375
  messages = [
 
386
  model=vision_model,
387
  messages=messages,
388
  api_key=api_key,
389
+ max_tokens=1500,
390
  temperature=0.7,
391
  )
392
  return content
 
403
  ) -> str:
404
  """
405
  Generate HTML/CSS/JavaScript code based on a website description.
406
+ Uses non-Chinese models (Meta Llama or Mistral) by default.
407
  """
408
  if not description or description.startswith("Error"):
409
  return "Error: Invalid or missing description."
 
412
  if not api_key:
413
  return "Error: Nebius API key not provided."
414
 
415
+ # Enhanced prompt for better code generation
416
  prompt = f"""
417
+ You are an expert web developer. Generate a complete, production-ready webpage based on this description:
418
 
419
  {description}
420
 
421
  Requirements:
422
+ - Create a single HTML file with all CSS and JavaScript inline
423
+ - Use modern HTML5, CSS3, and vanilla JavaScript
424
+ - Include TailwindCSS via CDN for utility-first styling
425
+ - Make it fully responsive (mobile, tablet, desktop)
426
+ - Use semantic HTML elements for accessibility
427
+ - Add smooth animations and transitions where appropriate
428
+ - Include placeholder content that matches the description
429
+ - Use high-quality placeholder images from Unsplash (https://source.unsplash.com/)
430
+ - Ensure proper color contrast for readability
431
+ - Add interactive elements (hover effects, smooth scrolling, etc.)
432
+ - Include meta tags for SEO and viewport
433
+
434
+ Technical requirements:
435
+ - Start with <!DOCTYPE html>
436
+ - Include complete <head> section with meta tags
437
+ - All CSS in <style> tags
438
+ - All JavaScript in <script> tags at the end of body
439
+ - Use CSS Grid or Flexbox for layouts
440
+ - Include CSS custom properties for theming
441
+
442
+ Generated for: {CURRENT_USER} on {CURRENT_DATETIME}
443
+
444
+ Return only the complete HTML code. Do not include any explanations or markdown formatting.
445
  """.strip()
446
 
447
+ # Prefer non-Chinese models
448
+ preferred_models = [
449
+ code_model, # User's selection
450
+ "meta-llama/Meta-Llama-3.1-70B-Instruct",
451
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
452
+ "meta-llama/Meta-Llama-3.1-8B-Instruct",
453
+ "mistralai/Mistral-7B-Instruct-v0.3",
454
+ ]
455
 
456
+ # Only use Chinese models as last resort
457
  fallback_models = [
 
458
  "Qwen/Qwen2.5-72B-Instruct",
459
+ "deepseek-ai/DeepSeek-V3-0324",
460
  ]
461
 
462
+ models_to_try = []
463
+ for model in preferred_models:
464
+ if model not in models_to_try:
465
+ models_to_try.append(model)
466
+ for model in fallback_models:
467
+ if model not in models_to_try:
468
+ models_to_try.append(model)
469
 
470
  last_error = None
471
  for model in models_to_try:
472
  try:
473
+ print(f"[{CURRENT_DATETIME}] Attempting code generation with: {model}")
474
+
475
+ # Adjust max_tokens based on model
476
+ model_config = MODEL_CONFIGS.get(model, {})
477
+ adjusted_max_tokens = min(code_max_tokens, model_config.get("max_tokens", code_max_tokens))
478
+
479
  messages = [{"role": "user", "content": prompt}]
480
  content = call_chat_completions(
481
  model=model,
482
  messages=messages,
483
  api_key=api_key,
484
+ max_tokens=adjusted_max_tokens,
485
  temperature=code_temperature,
486
  )
487
  html_code = _strip_fenced_code(content)
488
 
489
+ # Validate HTML structure
490
+ if "<!DOCTYPE html>" in html_code.upper() and "</html>" in html_code.lower():
491
+ start = html_code.upper().find("<!DOCTYPE HTML")
492
+ end = html_code.lower().rfind("</html>") + len("</html>")
493
+ final_code = html_code[start:end]
494
+
495
+ # Add generation metadata
496
+ if "<!-- Generated by" not in final_code:
497
+ final_code = final_code.replace(
498
+ "<head>",
499
+ f"<head>\n <!-- Generated by AI Website Generator for {CURRENT_USER} on {CURRENT_DATETIME} using {model} -->"
500
+ )
501
+
502
+ return final_code
503
  return html_code
504
 
505
  except Exception as e:
506
  last_error = str(e)
507
+ print(f"[{CURRENT_DATETIME}] Failed with {model}: {last_error}")
508
+
509
+ # Don't retry on non-404 errors unless it's a timeout
510
+ if "timeout" not in str(e).lower() and "not found" not in str(e).lower() and "404" not in str(e):
511
  break
512
  continue
513
 
514
  # All models failed
515
  return (
516
+ f"Error generating HTML code: {last_error}\n\n"
517
+ f"Troubleshooting tips:\n"
518
+ f"1. Try using a smaller model (e.g., Meta-Llama-3.1-8B-Instruct)\n"
519
+ f"2. Reduce the max tokens setting\n"
520
+ f"3. Check your API key is valid\n"
521
+ f"4. Confirmed working models: Meta Llama 3.1 and Mistral models"
522
  )
523
 
524
 
525
  def create_codesandbox(html_code: str) -> str:
526
  """
527
+ Create a CodeSandbox project from HTML code.
 
528
  """
529
  if not html_code or html_code.startswith("Error"):
530
  return "Error: No valid HTML code provided."
 
545
  {
546
  "name": "ai-generated-website",
547
  "version": "1.0.0",
548
+ "description": f"Website generated from image analysis by {CURRENT_USER} on {CURRENT_DATETIME}",
549
  "main": "index.html",
550
  "scripts": {"start": "serve .", "build": "echo 'No build required'"},
551
  "devDependencies": {"serve": "^14.0.0"},
 
561
  lz = LZString()
562
  compressed = lz.compressToBase64(json_str)
563
  compressed = compressed.replace("+", "-").replace("/", "_").rstrip("=")
564
+
565
+ # Create both editor and preview URLs
566
  prefill_base = "https://codesandbox.io/api/v1/sandboxes/define"
567
+
 
 
 
568
  url = "https://codesandbox.io/api/v1/sandboxes/define"
569
+ transport = httpx.HTTPTransport(retries=2)
570
+ timeout = httpx.Timeout(connect=10.0, read=30.0, write=30.0, pool=60.0)
571
+
572
+ with httpx.Client(timeout=timeout, transport=transport) as client:
573
  resp = client.post(url, json=parameters)
574
  if resp.status_code == 200:
575
  data = resp.json()
576
  sandbox_id = data.get("sandbox_id")
577
  if sandbox_id:
578
+ editor_url = f"https://codesandbox.io/s/{sandbox_id}"
579
+ preview_url = f"https://{sandbox_id}.csb.app/"
580
+ return f"""### CodeSandbox Links
581
+
582
+ **Editor:** {editor_url}
583
+ **Live Preview:** {preview_url}
584
+
585
+ Files included:
586
+ - index.html
587
+ {('- style.css' if css_text else '')}
588
+ {('- script.js' if js_text else '')}"""
589
+
590
+ # Fallback to define URL
591
+ define_url = f"{prefill_base}?parameters={compressed}"
592
+ return f"""### CodeSandbox Links
593
+
594
+ **Open in Editor:** {define_url}
595
+
596
+ Files included:
597
+ - index.html
598
+ {('- style.css' if css_text else '')}
599
+ {('- script.js' if js_text else '')}"""
600
 
601
  except Exception as e:
602
  return f"Error creating CodeSandbox: {str(e)}"
 
617
  description = analyze_image(image, nebius_api_key, vision_model)
618
  if description.startswith("Error"):
619
  return description, "Error: Cannot generate code due to image analysis failure."
620
+
621
  html_code = generate_html_code(
622
  description,
623
  nebius_api_key,
 
644
 
645
 
646
# =========================
# Gradio UI - Clean, modern design
# =========================
# Color palette for the custom CSS theme (Tailwind-inspired hex values).
# These feed the :root CSS custom properties and inline styles below.
MODERN_PRIMARY = "#6366F1"    # Indigo
MODERN_SECONDARY = "#8B5CF6"  # Purple
MODERN_SUCCESS = "#10B981"    # Green
MODERN_WARNING = "#F59E0B"    # Amber
MODERN_BG = "#FAFAFA"         # Light background
MODERN_SURFACE = "#FFFFFF"    # Surface
MODERN_TEXT = "#111827"       # Dark text
MODERN_MUTED = "#6B7280"      # Muted text
MODERN_BORDER = "#E5E7EB"     # Border
# Shared diagonal gradient used for the header banner and primary buttons.
MODERN_GRADIENT = f"linear-gradient(135deg, {MODERN_PRIMARY} 0%, {MODERN_SECONDARY} 100%)"
659
 
660
  with gr.Blocks(
661
  theme=gr.themes.Soft(),
662
+ title=f"AI Website Generator - Nebius ({CURRENT_USER})",
663
  css=f"""
664
  :root {{
665
+ --primary: {MODERN_PRIMARY};
666
+ --secondary: {MODERN_SECONDARY};
667
+ --success: {MODERN_SUCCESS};
668
+ --warning: {MODERN_WARNING};
669
+ --bg: {MODERN_BG};
670
+ --surface: {MODERN_SURFACE};
671
+ --text: {MODERN_TEXT};
672
+ --muted: {MODERN_MUTED};
673
+ --border: {MODERN_BORDER};
674
  }}
 
675
  body {{
676
+ background: var(--bg);
677
+ color: var(--text);
678
+ font-family: 'Inter', -apple-system, system-ui, sans-serif;
679
  }}
680
+ .header {{
681
+ background: {MODERN_GRADIENT};
682
+ color: white;
683
+ padding: 2rem;
684
  border-radius: 12px;
685
+ margin-bottom: 2rem;
 
 
 
 
 
 
 
 
 
 
686
  text-align: center;
 
 
687
  }}
688
+ .header h1 {{
689
+ font-size: 2.5rem;
 
 
 
 
690
  font-weight: 800;
691
+ margin-bottom: 0.5rem;
692
  }}
693
+ .header p {{
694
+ opacity: 0.95;
695
+ font-size: 1.1rem;
 
 
 
 
 
696
  }}
697
+ .section {{
698
+ background: var(--surface);
699
+ border: 1px solid var(--border);
700
+ border-radius: 12px;
701
+ padding: 1.5rem;
702
+ margin: 1rem 0;
703
+ box-shadow: 0 1px 3px rgba(0,0,0,0.1);
704
  }}
705
+ .model-info {{
706
+ background: #EEF2FF;
707
+ border: 1px solid {MODERN_PRIMARY};
708
+ border-radius: 8px;
709
+ padding: 1rem;
710
+ margin: 1rem 0;
711
  }}
 
712
  .warning-box {{
713
  background: #FEF3C7;
714
+ border: 1px solid {MODERN_WARNING};
715
  border-radius: 8px;
716
+ padding: 1rem;
717
+ margin: 1rem 0;
718
  color: #92400E;
719
  }}
720
+ .success-box {{
721
+ background: #D1FAE5;
722
+ border: 1px solid {MODERN_SUCCESS};
723
+ border-radius: 8px;
724
+ padding: 1rem;
725
+ margin: 1rem 0;
726
+ color: #065F46;
727
  }}
728
+ .primary-btn {{
729
+ background: {MODERN_GRADIENT} !important;
730
+ color: white !important;
731
+ border: none !important;
732
+ font-weight: 600 !important;
733
  }}
734
+ .secondary-btn {{
735
+ background: var(--surface) !important;
736
+ color: var(--primary) !important;
737
+ border: 2px solid var(--primary) !important;
738
+ font-weight: 600 !important;
739
+ }}
740
+ .footer {{
741
+ text-align: center;
742
+ padding: 2rem;
743
+ color: var(--muted);
744
+ border-top: 1px solid var(--border);
745
+ margin-top: 3rem;
746
+ }}
747
+ .model-tag {{
748
+ display: inline-block;
749
+ padding: 0.25rem 0.75rem;
750
+ background: var(--primary);
751
+ color: white;
752
+ border-radius: 4px;
753
+ font-size: 0.875rem;
754
+ font-weight: 500;
755
+ margin: 0.25rem;
756
  }}
757
  """,
758
  ) as app:
759
+ gr.HTML(
760
+ f"""
761
+ <div class="header">
762
+ <h1>AI Website Generator</h1>
763
+ <p>Transform screenshots into production-ready websites using Nebius AI</p>
764
+ <p style="font-size: 0.9rem; opacity: 0.9;">User: {CURRENT_USER} | Session: {CURRENT_DATETIME}</p>
765
+ </div>
 
 
766
  """,
 
767
  )
768
 
769
+ with gr.Accordion("Configuration & Models", open=True):
770
+ gr.Markdown(
771
+ """
772
+ ### API Configuration
773
+ Provide your Nebius API key or use the default configured key.
774
+ """,
775
+ elem_classes=["section"]
776
+ )
777
+
778
  nebius_key = gr.Textbox(
779
  label="Nebius API Key",
780
  type="password",
781
+ placeholder="Enter your Nebius API key (or use default)",
782
  value=DEFAULT_NEBIUS_API_KEY,
783
  )
784
 
785
+ gr.HTML(
786
  """
787
+ <div class="success-box">
788
+ <strong>✓ Recommended Non-Chinese Models:</strong><br>
789
+ • <span class="model-tag">Meta Llama 3.1</span> - Best overall performance<br>
790
+ • <span class="model-tag">Mistral/Mixtral</span> - European alternative, fast<br>
791
+ • Vision: Only Qwen VL models support image analysis
792
  </div>
793
  """,
 
794
  )
795
 
796
  with gr.Row():
797
  vision_model_dd = gr.Dropdown(
798
+ label="Vision Model (Only Qwen VL works)",
799
  choices=VISION_MODELS,
800
  value=DEFAULT_VISION_MODEL,
801
+ info="Note: Only Qwen VL models support vision on Nebius",
 
802
  )
803
  code_model_dd = gr.Dropdown(
804
+ label="Code Generation Model",
805
  choices=CODE_MODELS,
806
  value=DEFAULT_CODE_MODEL,
807
+ info="Recommended: Meta Llama 3.1 or Mistral models",
 
808
  )
809
+
810
  with gr.Row():
811
  code_max_tokens = gr.Slider(
812
+ label="Max Tokens",
813
+ minimum=1000,
814
  maximum=8000,
815
+ step=500,
816
  value=4000,
817
+ info="Lower values = faster generation, higher = more detailed code",
818
  )
819
  code_temperature = gr.Slider(
820
  label="Temperature",
821
  minimum=0.0,
822
+ maximum=1.0,
823
  step=0.1,
824
  value=0.7,
825
+ info="0.7 is optimal for code generation",
826
  )
827
 
828
  with gr.Tab("Quick Generate"):
829
  with gr.Row():
830
  with gr.Column(scale=1):
831
+ gr.Markdown("### Input", elem_classes=["section"])
832
  image_input = gr.Image(
833
  type="pil",
834
+ label="Upload Screenshot",
835
  sources=["upload", "clipboard"],
836
+ height=300,
837
+ )
838
+ generate_btn = gr.Button(
839
+ "Generate Website",
840
+ variant="primary",
841
+ elem_classes=["primary-btn"]
842
  )
 
843
 
844
  with gr.Column(scale=2):
845
+ gr.Markdown("### Output", elem_classes=["section"])
846
  description_output = gr.Textbox(
847
  label="Image Analysis",
848
  lines=6,
849
  interactive=False,
850
  )
851
  html_output = gr.Code(
852
+ label="Generated HTML Code",
853
  language="html",
854
+ lines=20,
855
  )
856
 
857
  with gr.Row():
858
+ codesandbox_btn = gr.Button(
859
+ "Deploy to CodeSandbox",
860
+ elem_classes=["secondary-btn"]
861
+ )
862
+ download_btn = gr.Button(
863
+ "Download HTML",
864
+ elem_classes=["secondary-btn"]
865
+ )
866
 
867
  codesandbox_links = gr.Markdown(value="")
868
  download_file = gr.File(
869
+ label="Download",
870
  interactive=False,
871
  visible=False,
872
  )
 
874
  with gr.Tab("Individual Tools"):
875
  with gr.Row():
876
  with gr.Column():
877
+ gr.Markdown("### Image Analysis", elem_classes=["section"])
878
  img_tool = gr.Image(type="pil", label="Image")
879
+ analyze_btn = gr.Button("Analyze", elem_classes=["secondary-btn"])
880
+ analysis_result = gr.Textbox(label="Analysis", lines=8)
881
 
882
  with gr.Column():
883
+ gr.Markdown("### Code Generation", elem_classes=["section"])
884
+ desc_input = gr.Textbox(
885
+ label="Description",
886
+ lines=4,
887
+ placeholder="Describe the website you want to generate..."
888
+ )
889
+ code_btn = gr.Button("Generate", elem_classes=["secondary-btn"])
890
  code_result = gr.Code(label="Generated Code", language="html")
891
 
892
+ with gr.Tab("Model Information"):
893
+ gr.Markdown(
894
+ f"""
895
+ ### Available Models on Nebius
896
+
897
+ #### Recommended Non-Chinese Models
898
+
899
+ **Meta (Facebook) Models:**
900
+ - `meta-llama/Meta-Llama-3.1-70B-Instruct` - Best overall, 70B parameters
901
+ - `meta-llama/Meta-Llama-3.1-8B-Instruct` - Fast, lightweight option
902
+ - `meta-llama/Meta-Llama-3.1-405B-Instruct-FP8` - Most powerful (slower)
903
+
904
+ **Mistral AI Models (European):**
905
+ - `mistralai/Mixtral-8x7B-Instruct-v0.1` - MoE architecture, efficient
906
+ - `mistralai/Mixtral-8x22B-Instruct-v0.1` - Larger MoE model
907
+ - `mistralai/Mistral-7B-Instruct-v0.3` - Fast and efficient
908
+ - `mistralai/Mistral-Nemo-Instruct-2407` - Latest Mistral model
909
+
910
+ #### Vision Models (Required for Image Analysis)
911
+ - `Qwen/Qwen2.5-VL-72B-Instruct` - Best vision model
912
+ - `Qwen/Qwen2.5-VL-7B-Instruct` - Faster alternative
913
+
914
+ #### Session Information
915
+ - **Current User:** {CURRENT_USER}
916
+ - **Session Started:** {CURRENT_DATETIME}
917
+ - **API Endpoint:** {NEBIUS_BASE_URL}
918
+ """,
919
+ elem_classes=["section"]
920
+ )
921
+
922
+ gr.HTML(
923
+ f"""
924
+ <div class="footer">
925
+ <p>Built with Gradio | Powered by Nebius AI | User: {CURRENT_USER}</p>
926
+ <p>Optimized for Meta Llama 3.1 and Mistral models</p>
927
+ </div>
928
+ """
929
+ )
930
 
931
  # Event bindings
932
  generate_btn.click(
 
935
  outputs=[description_output, html_output],
936
  )
937
 
 
 
 
 
 
 
 
938
  codesandbox_btn.click(
939
+ fn=create_codesandbox,
940
  inputs=[html_output],
941
  outputs=[codesandbox_links],
942
  )