# myscreencoder/app.py — screenshot-to-HTML generator (Gradio + Nebius AI).
# (Hugging Face Space header retained as a comment: "natabrizy's picture /
#  Update app.py / 3a18d19 verified")
import base64
import io
import json
import os
import re
import tempfile
from typing import Tuple, Optional, Dict, List
import gradio as gr
import httpx
from PIL import Image
from lzstring import LZString
# =========================
# Configuration
# =========================
# Base URL of the Nebius OpenAI-compatible REST API (trailing slash required
# because endpoint paths are appended directly).
NEBIUS_BASE_URL = "https://api.studio.nebius.com/v1/"
# Vision models that are confirmed to work with Nebius
DEFAULT_VISION_MODEL = "Qwen/Qwen2.5-VL-72B-Instruct"
VISION_MODELS = [
    DEFAULT_VISION_MODEL,
    "Qwen/Qwen2.5-VL-7B-Instruct",
]
# Code generation models confirmed to work on Nebius (verified and tested)
DEFAULT_CODE_MODEL = "Qwen/Qwen2.5-72B-Instruct"
CODE_MODELS = [
    # Qwen 2.5 Models (Latest generation - all verified working)
    "Qwen/Qwen2.5-72B-Instruct",
    "Qwen/Qwen2.5-Coder-32B-Instruct",
    "Qwen/Qwen2.5-32B-Instruct",
    "Qwen/Qwen2.5-14B-Instruct",
    "Qwen/Qwen2.5-7B-Instruct",
    "Qwen/Qwen2.5-3B-Instruct",
    "Qwen/Qwen2.5-1.5B-Instruct",
    "Qwen/Qwen2.5-0.5B-Instruct",
    # QwQ Model (Reasoning specialized)
    "Qwen/QwQ-32B-Preview",
    # DeepSeek V3 (Latest)
    "deepseek-ai/DeepSeek-V3",
    # DeepSeek R1 Distill Models (All working)
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
]
# Model recommendations for different use cases.
# Each list is ordered by preference; the UI preset buttons pick element [0].
MODEL_RECOMMENDATIONS = {
    "fast": [
        "Qwen/Qwen2.5-0.5B-Instruct",
        "Qwen/Qwen2.5-1.5B-Instruct",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
        "Qwen/Qwen2.5-3B-Instruct",
    ],
    "balanced": [
        "Qwen/Qwen2.5-14B-Instruct",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    ],
    "quality": [
        "Qwen/Qwen2.5-72B-Instruct",
        "deepseek-ai/DeepSeek-V3",
        "Qwen/QwQ-32B-Preview",
    ],
    "code_specialized": [
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "deepseek-ai/DeepSeek-V3",
        "Qwen/QwQ-32B-Preview",
    ]
}
# Timeouts and retries.
# The read timeout is generous (180 s) because large-model completions can
# take a long time to produce a full HTML document.
HTTP_TIMEOUTS = httpx.Timeout(connect=15.0, read=180.0, write=30.0, pool=60.0)
HTTP_RETRIES = 3
# Default API key.
# SECURITY NOTE(review): a live-looking bearer token is hard-coded here and
# pre-filled into the UI. Anyone with this source can spend on this account.
# Prefer requiring the NEBIUS_API_KEY environment variable and rotating this
# key — TODO confirm with the Space owner before removing.
DEFAULT_NEBIUS_API_KEY = (
    "eyJhbGciOiJIUzI1NiIsImtpZCI6IlV6SXJWd1h0dnprLVRvdzlLZWstc0M1akptWXBvX1VaVkxUZlpnMDRlOFUiLCJ0eXAiOiJKV1QifQ.eyJzdWIiOiJnb29nbGUtb2F1dGgyfDEwNTA1MTQzMDg2MDMwMzIxNDEwMiIsInNjb3BlIjoib3BlbmlkIG9mZmxpbmVfYWNjZXNzIiwiaXNzIjoiYXBpX2tleV9pc3N1ZXIiLCJhdWQiOlsiaHR0cHM6Ly9uZWJpdXMtaW5mZXJlbmNlLmV1LmF1dGgwLmNvbS9hcGkvdjIvIl0sImV4cCI6MTkwNjU5ODA0NCwidXVpZCI6ImNkOGFiMWZlLTIxN2QtNDJlMy04OWUwLWM1YTg4MjcwMGVhNyIsIm5hbWUiOiJodW5nZ2luZyIsImV4cGlyZXNfYXQiOiIyMDMwLTA2LTAyVDAyOjM0OjA0KzAwMDAifQ.MA52QuIiNruK7_lX688RXAEI2TkcCOjcf_02XrpnhI8"
)
# =========================
# Helpers
# =========================
def get_api_key(user_key: str = "") -> str:
    """
    Resolve the Nebius API key, preferring in order:

    1) the user-supplied ``user_key`` field,
    2) the ``NEBIUS_API_KEY`` environment variable,
    3) the built-in ``DEFAULT_NEBIUS_API_KEY`` fallback.
    """
    candidates = (
        (user_key or "").strip(),
        os.getenv("NEBIUS_API_KEY", "").strip(),
    )
    for candidate in candidates:
        if candidate:
            return candidate
    return DEFAULT_NEBIUS_API_KEY
def test_model_availability(model: str, api_key: str) -> bool:
    """
    Probe whether `model` is usable on Nebius by sending a minimal
    one-message completion request (5 tokens max).

    Returns True only when the API answers HTTP 200; any transport error,
    timeout, or non-200 status yields False. Never raises.
    """
    try:
        url = f"{NEBIUS_BASE_URL}chat/completions"
        payload = {
            "model": model,
            "messages": [{"role": "user", "content": "Hi"}],
            "max_tokens": 5,
            "temperature": 0.1,
        }
        headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
        with httpx.Client(timeout=httpx.Timeout(10.0)) as client:
            resp = client.post(url, headers=headers, json=payload)
            return resp.status_code == 200
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any request failure means "unavailable".
        return False
def call_chat_completions(
    model: str,
    messages: list,
    api_key: str,
    max_tokens: int = 2000,
    temperature: float = 0.7,
    retry_with_fallback: bool = True,
) -> str:
    """
    Call the Nebius OpenAI-compatible chat completions endpoint via HTTP.

    Args:
        model: Model identifier to request.
        messages: OpenAI-style message dicts (may include image_url parts).
        api_key: Bearer token for the Nebius API.
        max_tokens: Completion budget; shrunk by 30% (floor 500) on each
            timeout retry to improve the odds of finishing in time.
        temperature: Sampling temperature.
        retry_with_fallback: On a 404/400 during the first attempt, retry
            with known-good fallback models of the same modality.

    Returns:
        The assistant message content string.

    Raises:
        ValueError: If no API key was supplied.
        RuntimeError: If the API returns no choices or empty content.
        httpx errors: The last transport/status error, re-raised after
            HTTP_RETRIES attempts are exhausted.
    """
    if not api_key:
        raise ValueError("Nebius API key is required.")
    url = f"{NEBIUS_BASE_URL}chat/completions"
    payload = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
    # Try with the requested model first
    last_error = None
    for attempt in range(HTTP_RETRIES):
        try:
            transport = httpx.HTTPTransport(retries=1)
            with httpx.Client(timeout=HTTP_TIMEOUTS, transport=transport) as client:
                resp = client.post(url, headers=headers, json=payload)
                if resp.status_code == 404 or resp.status_code == 400:
                    # Model not found or bad request
                    if retry_with_fallback and attempt == 0:
                        # Try a fallback model
                        fallback_models = {
                            "vision": ["Qwen/Qwen2.5-VL-7B-Instruct"],
                            "code": ["Qwen/Qwen2.5-7B-Instruct", "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"],
                        }
                        # Detect model type and use appropriate fallback
                        model_type = "vision" if any(v in model.lower() for v in ["vision", "vl", "pixtral"]) else "code"
                        # NOTE(review): payload["model"] is mutated here, so any
                        # later retry attempts also use the fallback model, not
                        # the originally requested `model` — confirm intended.
                        for fallback in fallback_models.get(model_type, []):
                            payload["model"] = fallback
                            try:
                                resp = client.post(url, headers=headers, json=payload)
                                if resp.status_code == 200:
                                    break
                            except Exception:
                                continue
                # If the (possibly fallback) response is still an error,
                # raise and let the retry loop / caller handle it.
                resp.raise_for_status()
                data = resp.json()
                choices = data.get("choices", [])
                if not choices:
                    raise RuntimeError("No choices returned from the API.")
                content = choices[0].get("message", {}).get("content", "")
                if not content:
                    raise RuntimeError("Empty content returned from the API.")
                return content
        except (httpx.ReadTimeout, httpx.TimeoutException) as e:
            last_error = e
            # Reduce token count for retry
            payload["max_tokens"] = max(500, int(payload["max_tokens"] * 0.7))
            continue
        except Exception as e:
            last_error = e
            if attempt < HTTP_RETRIES - 1:
                continue
            break
    if last_error:
        raise last_error
    raise RuntimeError(f"Failed to get response from model {model}")
def _strip_fenced_code(text: str) -> str:
"""
Removes code fences from a content block if present.
"""
s = text.strip()
# Remove various code fence patterns
patterns = [
(r'^```html\s*\n?', r'\n?```$'),
(r'^```HTML\s*\n?', r'\n?```$'),
(r'^```\s*\n?', r'\n?```$'),
]
for start_pattern, end_pattern in patterns:
if re.match(start_pattern, s, re.IGNORECASE):
s = re.sub(start_pattern, '', s, flags=re.IGNORECASE)
s = re.sub(end_pattern, '', s, flags=re.IGNORECASE)
break
return s.strip()
def ensure_complete_html_with_css(html_code: str) -> str:
    """
    Ensure `html_code` is a complete, styled HTML document.

    - Already-complete documents (doctype + <html> tag + some styling) are
      returned untouched.
    - Fragments are wrapped in a full HTML5 skeleton with a base CSS reset,
      hoisting any <style> blocks found in the fragment into the new <head>.
    - A TailwindCSS CDN <script> is injected when missing.
    """
    lowered = html_code.lower()
    # Bug fix: the old check was `"<!DOCTYPE html>" in html_code.upper()` —
    # a mixed-case needle can never occur in upper-cased text, so complete
    # documents were never detected and always got re-wrapped.
    has_doctype = "<!doctype html>" in lowered
    has_html_tag = "<html" in lowered
    has_style = "<style" in lowered or "style=" in lowered
    # If it's complete and has styles, return as is
    if has_doctype and has_html_tag and has_style:
        return html_code
    # If it's missing structure, create a complete document
    if not has_doctype or not has_html_tag:
        # Hoist any existing <style> blocks so they end up in the new <head>
        existing_styles = ""
        style_matches = re.findall(r'<style[^>]*>(.*?)</style>', html_code, re.IGNORECASE | re.DOTALL)
        if style_matches:
            existing_styles = '\n'.join(style_matches)
        # Remove style tags from body content
        body_content = re.sub(r'<style[^>]*>.*?</style>', '', html_code, flags=re.IGNORECASE | re.DOTALL)
        html_code = f"""<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Generated Website</title>
    <script src="https://cdn.tailwindcss.com"></script>
    <style>
        * {{
            margin: 0;
            padding: 0;
            box-sizing: border-box;
        }}
        body {{
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
            line-height: 1.6;
            color: #1a202c;
        }}
        .container {{
            max-width: 1200px;
            margin: 0 auto;
            padding: 0 20px;
        }}
        {existing_styles}
    </style>
</head>
<body>
{body_content}
</body>
</html>"""
    # Ensure TailwindCSS is included
    if "tailwindcss" not in html_code.lower():
        html_code = html_code.replace(
            "</head>",
            ' <script src="https://cdn.tailwindcss.com"></script>\n</head>'
        )
    return html_code
def _split_assets(html_code: str) -> Tuple[str, str, str]:
"""
Split inline <style> and <script> (without src) from the HTML into separate CSS and JS strings.
Return tuple: (updated_html, css_text, js_text)
"""
if not html_code:
return html_code, "", ""
html = html_code
# Collect and remove style blocks
css_blocks = re.findall(r"<style[^>]*>(.*?)</style>", html, flags=re.IGNORECASE | re.DOTALL)
css_text = "\n\n".join(block.strip() for block in css_blocks if block.strip())
html = re.sub(r"<style[^>]*>.*?</style>", "", html, flags=re.IGNORECASE | re.DOTALL)
# Collect and remove inline scripts (no src)
js_blocks = []
def _script_repl(m):
attrs = m.group("attrs") or ""
code = m.group("code") or ""
if "src=" in attrs.lower():
return m.group(0) # keep external scripts
if code.strip():
js_blocks.append(code.strip())
return "" # remove inline script
html = re.sub(
r"<script(?P<attrs>[^>]*)>(?P<code>.*?)</script>",
_script_repl,
html,
flags=re.IGNORECASE | re.DOTALL,
)
js_text = "\n\n".join(js_blocks)
# If CSS collected, ensure link tag is added
if css_text:
if re.search(r"</head>", html, flags=re.IGNORECASE):
html = re.sub(
r"</head>",
' <link rel="stylesheet" href="style.css">\n</head>',
html,
flags=re.IGNORECASE,
)
# If JS collected, ensure script tag before </body> or at end
if js_text:
if re.search(r"</body>", html, flags=re.IGNORECASE):
html = re.sub(
r"</body>",
' <script src="script.js"></script>\n</body>',
html,
flags=re.IGNORECASE,
)
return html, css_text, js_text
# =========================
# Core functions
# =========================
def analyze_image(
    image: Optional[Image.Image],
    nebius_api_key: str = "",
    vision_model: str = DEFAULT_VISION_MODEL,
) -> str:
    """
    Analyze a screenshot with a Nebius vision model and return a detailed
    textual description suitable for website recreation.

    Args:
        image: The screenshot to analyze (PIL image), or None.
        nebius_api_key: Optional user-supplied API key.
        vision_model: Nebius vision-model identifier.

    Returns:
        The model's description, or an "Error: ..." string (never raises).
    """
    if image is None:
        return "Error: No image provided."
    api_key = get_api_key(nebius_api_key)
    if not api_key:
        return "Error: Nebius API key not provided."
    try:
        # Bug fix: thumbnail() resizes in place, which mutated the caller's
        # PIL image (the same object the Gradio UI keeps); work on a copy.
        image = image.copy()
        # Resize image if too large to avoid timeouts
        max_dimension = 1024
        if image.width > max_dimension or image.height > max_dimension:
            image.thumbnail((max_dimension, max_dimension), Image.Resampling.LANCZOS)
        # Encode image to base64 for the data-URL message part
        buffered = io.BytesIO()
        image.save(buffered, format="PNG", optimize=True)
        img_b64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
        prompt = (
            "Analyze this image in detail for website recreation. Provide:\n"
            "1. Layout structure with specific measurements and grid systems\n"
            "2. Exact color values (hex codes) for all elements\n"
            "3. Typography details including font families, sizes, weights\n"
            "4. All UI components and their states (hover, active, disabled)\n"
            "5. Content organization and hierarchy\n"
            "6. Image and icon placements with approximate dimensions\n"
            "7. Spacing, padding, margins in pixels or rem units\n"
            "8. Interactive elements and animations\n"
            "9. Responsive breakpoints if evident\n"
            "10. Special effects like shadows, gradients, borders\n"
            "Be extremely specific about every visual detail."
        )
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{img_b64}"}},
                ],
            }
        ]
        # Use vision model
        content = call_chat_completions(
            model=vision_model,
            messages=messages,
            api_key=api_key,
            max_tokens=2500,
            temperature=0.7,
            retry_with_fallback=True,
        )
        return content
    except Exception as e:
        error_msg = str(e)
        if "404" in error_msg or "not found" in error_msg.lower():
            return f"Error: Model '{vision_model}' not available. Try using: {', '.join(VISION_MODELS)}"
        return f"Error analyzing image: {error_msg}"
def generate_html_code(
    description: str,
    nebius_api_key: str = "",
    code_model: str = DEFAULT_CODE_MODEL,
    code_max_tokens: int = 4000,
    code_temperature: float = 0.7,
) -> str:
    """
    Generate a complete single-file HTML/CSS/JS page from a text description.

    Args:
        description: Website description (typically from analyze_image).
        nebius_api_key: Optional user-supplied API key.
        code_model: Nebius code-generation model identifier.
        code_max_tokens: Completion budget for the code model.
        code_temperature: Sampling temperature.

    Returns:
        HTML source, or an "Error: ..." string on failure (never raises).
    """
    if not description or description.startswith("Error"):
        return "Error: Invalid or missing description."
    api_key = get_api_key(nebius_api_key)
    if not api_key:
        return "Error: Nebius API key not provided."
    prompt = f"""
You are an expert web developer. Create a pixel-perfect, single-file HTML webpage based on this description:
{description}
STRICT REQUIREMENTS:
1. ONE SINGLE HTML file - no external files except CDNs
2. ALL CSS must be in <style> tags in the <head>
3. ALL JavaScript must be in <script> tags before </body>
4. Include TailwindCSS: <script src="https://cdn.tailwindcss.com"></script>
5. Use both Tailwind utilities and custom CSS for perfect styling
6. Fully responsive design (mobile, tablet, desktop)
7. Use placeholder images: https://picsum.photos/WIDTH/HEIGHT
8. Add smooth animations and transitions
9. Include ALL interactive functionality with JavaScript
10. Semantic HTML5 with proper accessibility
11. Modern CSS features (grid, flexbox, custom properties)
12. Professional design with attention to detail
13. Match the description EXACTLY
OUTPUT FORMAT:
Start with: <!DOCTYPE html>
End with: </html>
NO explanations, NO markdown, ONLY HTML code.
Make it production-ready and visually impressive.
""".strip()
    try:
        messages = [{"role": "user", "content": prompt}]
        content = call_chat_completions(
            model=code_model,
            messages=messages,
            api_key=api_key,
            max_tokens=code_max_tokens,
            temperature=code_temperature,
            retry_with_fallback=True,
        )
        # Clean and validate the HTML
        html_code = _strip_fenced_code(content)
        html_code = ensure_complete_html_with_css(html_code)
        # Trim stray model chatter around the document.
        # Bug fix: the old guard tested `"<!DOCTYPE html>" in html_code.upper()`,
        # which can never match upper-cased text, so this trimming never ran.
        upper = html_code.upper()
        start_idx = upper.find("<!DOCTYPE HTML")
        if start_idx == -1:
            start_idx = upper.find("<!DOCTYPE")
        if start_idx != -1:
            html_code = html_code[start_idx:]
        end_idx = html_code.rfind("</html>")
        if end_idx != -1:
            html_code = html_code[:end_idx + 7]  # keep the closing tag itself
        return html_code
    except Exception as e:
        error_msg = str(e)
        if "404" in error_msg or "not found" in error_msg.lower():
            # Provide working alternatives
            working_models = ["Qwen/Qwen2.5-7B-Instruct", "Qwen/Qwen2.5-14B-Instruct", "deepseek-ai/DeepSeek-V3"]
            return f"Error: Model '{code_model}' not available. Working alternatives: {', '.join(working_models)}"
        if "timeout" in error_msg.lower():
            return f"Error: Request timed out. Try reducing max tokens or using: {', '.join(MODEL_RECOMMENDATIONS['fast'])}"
        return f"Error generating HTML code: {error_msg}"
def create_codesandbox(html_code: str) -> str:
    """
    Deploy the generated HTML to CodeSandbox.

    Splits inline CSS/JS into separate files, POSTs to the define API to
    obtain a sandbox id, and falls back to a compressed "prefill" URL when
    the API call does not yield an id.

    Args:
        html_code: Complete HTML document (may embed <style>/<script>).

    Returns:
        Markdown with deployment links, or an "Error: ..." string.
    """
    if not html_code or html_code.startswith("Error"):
        return "Error: No valid HTML code provided."
    try:
        updated_html, css_text, js_text = _split_assets(html_code)
        files = {
            "index.html": {"content": updated_html, "isBinary": False},
        }
        if css_text:
            files["style.css"] = {"content": css_text, "isBinary": False}
        if js_text:
            files["script.js"] = {"content": js_text, "isBinary": False}
        # package.json for static template
        files["package.json"] = {
            "content": json.dumps(
                {
                    "name": "ai-generated-website",
                    "version": "1.0.0",
                    "description": "Website generated from image analysis",
                    "main": "index.html",
                    "scripts": {"start": "serve .", "build": "echo 'No build required'"},
                    "devDependencies": {"serve": "^14.0.0"},
                },
                indent=2,
            ),
            "isBinary": False,
        }
        parameters = {"files": files, "template": "static"}
        # Create compressed URL parameters: LZString base64 made URL-safe
        # (+ -> -, / -> _, padding stripped), the format the GET endpoint expects.
        json_str = json.dumps(parameters, separators=(",", ":"))
        lz = LZString()
        compressed = lz.compressToBase64(json_str)
        compressed = compressed.replace("+", "-").replace("/", "_").rstrip("=")
        prefill_base = "https://codesandbox.io/api/v1/sandboxes/define"
        prefill_index = f"{prefill_base}?parameters={compressed}&file=/index.html"
        # Try POST API to get a sandbox_id
        url = "https://codesandbox.io/api/v1/sandboxes/define"
        transport = httpx.HTTPTransport(retries=2)
        with httpx.Client(timeout=httpx.Timeout(15.0), transport=transport) as client:
            resp = client.post(url, json=parameters)
            if resp.status_code == 200:
                data = resp.json()
                sandbox_id = data.get("sandbox_id")
                if sandbox_id:
                    editor_base = f"https://codesandbox.io/p/sandbox/{sandbox_id}"
                    preview_base = f"https://codesandbox.io/s/{sandbox_id}"
                    lines = [
                        f"**Successfully deployed to CodeSandbox!**\n",
                        f"- Editor: [{editor_base}]({editor_base}?file=/index.html)",
                        f"- Live Preview: [{preview_base}]({preview_base})",
                    ]
                    return "\n".join(lines)
        # Fallback to prefill URL
        return f"**Click to deploy to CodeSandbox:**\n[Open in CodeSandbox]({prefill_index})"
    except Exception as e:
        return f"Error creating CodeSandbox: {str(e)}"
def screenshot_to_code(
    image: Optional[Image.Image],
    nebius_api_key: str = "",
    vision_model: str = DEFAULT_VISION_MODEL,
    code_model: str = DEFAULT_CODE_MODEL,
    code_max_tokens: int = 4000,
    code_temperature: float = 0.7,
    progress=gr.Progress(track_tqdm=True),
) -> Tuple[str, str]:
    """
    Full pipeline: describe the screenshot, then generate matching HTML.

    Returns a (description, html_code) pair; either element may be an
    "Error: ..." string when the corresponding stage fails.
    """
    progress(0, desc="Starting image analysis...")
    analysis = analyze_image(image, nebius_api_key, vision_model)
    if analysis.startswith("Error"):
        return analysis, "Error: Cannot generate code due to image analysis failure."
    progress(0.5, desc="Generating HTML code...")
    generated = generate_html_code(
        analysis,
        nebius_api_key,
        code_model=code_model,
        code_max_tokens=code_max_tokens,
        code_temperature=code_temperature,
    )
    progress(1.0, desc="Complete!")
    return analysis, generated
def export_html_to_file(html_code: str) -> Optional[str]:
    """
    Write `html_code` to a temporary .html file and return its path for
    download.

    Returns None when the code is empty, is an "Error..." string, or the
    write fails. The file is created with delete=False so it outlives this
    call for the download widget.
    """
    if not html_code or html_code.startswith("Error"):
        return None
    try:
        # Bug fix: the old code re-opened the NamedTemporaryFile by name
        # while the original handle was still open — that leaks a handle
        # and fails outright on Windows. Write through the handle directly.
        with tempfile.NamedTemporaryFile(
            "w", delete=False, suffix=".html", encoding="utf-8"
        ) as tmp:
            tmp.write(html_code)
            return tmp.name
    except Exception:
        return None
# =========================
# Gradio UI
# =========================
THEME_PRIMARY = "#6C5CE7"
THEME_SECONDARY = "#00C2FF"
THEME_BG = "#F7F9FC"
THEME_SURFACE = "#FFFFFF"
THEME_TEXT = "#1F2937"
THEME_MUTED = "#6B7280"
THEME_BORDER = "#E5E7EB"
THEME_SUCCESS = "#10B981"
THEME_WARNING = "#F59E0B"
THEME_GRADIENT = f"linear-gradient(135deg, {THEME_PRIMARY} 0%, {THEME_SECONDARY} 100%)"
custom_css = f"""
:root {{
--primary: {THEME_PRIMARY};
--secondary: {THEME_SECONDARY};
--bg: {THEME_BG};
--surface: {THEME_SURFACE};
--text: {THEME_TEXT};
--muted: {THEME_MUTED};
--border: {THEME_BORDER};
--success: {THEME_SUCCESS};
--warning: {THEME_WARNING};
}}
body {{
background: var(--bg);
color: var(--text);
}}
.section {{
border: 1px solid var(--border);
padding: 16px;
border-radius: 12px;
background: var(--surface);
box-shadow: 0 1px 2px rgba(0,0,0,0.03);
margin: 10px 0;
}}
.muted {{
color: var(--muted);
font-size: 0.92em;
}}
.footer {{
text-align: center;
color: var(--muted);
padding: 8px 0;
}}
.title h1 {{
background: {THEME_GRADIENT};
-webkit-background-clip: text;
background-clip: text;
color: transparent;
font-weight: 800;
letter-spacing: -0.02em;
}}
.primary-btn button {{
background: {THEME_GRADIENT} !important;
color: #fff !important;
border: none !important;
font-weight: 600 !important;
padding: 12px 24px !important;
font-size: 16px !important;
}}
.primary-btn button:hover {{
filter: brightness(0.95);
transform: translateY(-1px);
transition: all 0.2s;
}}
.secondary-btn button {{
background: var(--surface) !important;
color: var(--text) !important;
border: 1px solid var(--border) !important;
font-weight: 500 !important;
}}
.secondary-btn button:hover {{
border-color: {THEME_PRIMARY} !important;
color: {THEME_PRIMARY} !important;
}}
input:focus, textarea:focus, select:focus {{
outline-color: {THEME_PRIMARY} !important;
border-color: {THEME_PRIMARY} !important;
box-shadow: 0 0 0 3px rgba(108,92,231,0.15) !important;
}}
.model-count {{
background: {THEME_GRADIENT};
color: white;
padding: 4px 12px;
border-radius: 20px;
font-weight: 600;
display: inline-block;
margin: 8px 4px;
}}
.verified-badge {{
background: var(--success);
color: white;
padding: 2px 8px;
border-radius: 4px;
font-size: 11px;
font-weight: 600;
display: inline-block;
margin-left: 8px;
}}
.model-size {{
background: var(--warning);
color: white;
padding: 2px 6px;
border-radius: 4px;
font-size: 10px;
font-weight: 600;
display: inline-block;
margin-left: 4px;
}}
"""
# Top-level UI definition: building components inside this context registers
# them with the Blocks app; event wiring happens at the bottom of the block.
with gr.Blocks(
    theme=gr.themes.Soft(),
    title="AI Website Generator (Nebius)",
    css=custom_css,
) as app:
    # ---- Header ----
    gr.Markdown(
        """
        # AI Website Generator (Nebius)
        Transform website screenshots into functional HTML code using verified Nebius AI models.
        ### Key Features:
        - Vision Analysis with Qwen VL models
        - Code Generation with 14 verified working models
        - Single-file HTML output with inline CSS
        - Direct CodeSandbox deployment
        - Automatic fallback for reliability
        """,
        elem_classes=["title"],
    )
    # ---- Shared configuration (API key, models, sampling) ----
    with gr.Accordion("Configuration", open=True):
        gr.Markdown(
            f"""
            Configure your API settings and model preferences.
            <span class="model-count">{len(CODE_MODELS)} Code Models</span>
            <span class="model-count">{len(VISION_MODELS)} Vision Models</span>
            <span class="verified-badge">All Verified Working</span>
            """,
            elem_classes=["muted"]
        )
        # SECURITY NOTE(review): the built-in API key is pre-filled into this
        # (password-masked) field; consider defaulting to empty and relying
        # on the NEBIUS_API_KEY environment variable instead.
        nebius_key = gr.Textbox(
            label="Nebius API Key",
            type="password",
            placeholder="Enter your Nebius API key or use default",
            value=DEFAULT_NEBIUS_API_KEY,
        )
        with gr.Row():
            vision_model_dd = gr.Dropdown(
                label="Vision Model",
                choices=VISION_MODELS,
                value=DEFAULT_VISION_MODEL,
                allow_custom_value=False,
                info="Qwen VL models for image analysis",
            )
            code_model_dd = gr.Dropdown(
                label="Code Model",
                choices=CODE_MODELS,
                value=DEFAULT_CODE_MODEL,
                allow_custom_value=False,
                info="All models verified and working",
            )
        with gr.Row():
            code_max_tokens = gr.Slider(
                label="Max Tokens",
                minimum=1000,
                maximum=8000,
                step=500,
                value=4000,
                info="Lower if experiencing timeouts",
            )
            code_temperature = gr.Slider(
                label="Temperature",
                minimum=0.1,
                maximum=1.0,
                step=0.1,
                value=0.7,
                info="Creativity level (0.7 recommended)",
            )
    # Quick model selection buttons
    gr.Markdown("**Quick Select by Performance:**")
    with gr.Row():
        fast_btn = gr.Button("Ultra Fast (0.5B-3B)", size="sm", elem_classes=["secondary-btn"])
        balanced_btn = gr.Button("Balanced (14B-32B)", size="sm", elem_classes=["secondary-btn"])
        quality_btn = gr.Button("Maximum Quality (72B)", size="sm", elem_classes=["secondary-btn"])
        code_spec_btn = gr.Button("Code Optimized", size="sm", elem_classes=["secondary-btn"])
    # Each preset swaps the code-model dropdown to the first recommendation
    # of its tier (MODEL_RECOMMENDATIONS lists are ordered by preference).
    def set_fast_models():
        return MODEL_RECOMMENDATIONS["fast"][0]
    def set_balanced_models():
        return MODEL_RECOMMENDATIONS["balanced"][0]
    def set_quality_models():
        return MODEL_RECOMMENDATIONS["quality"][0]
    def set_code_models():
        return MODEL_RECOMMENDATIONS["code_specialized"][0]
    fast_btn.click(fn=set_fast_models, outputs=[code_model_dd])
    balanced_btn.click(fn=set_balanced_models, outputs=[code_model_dd])
    quality_btn.click(fn=set_quality_models, outputs=[code_model_dd])
    code_spec_btn.click(fn=set_code_models, outputs=[code_model_dd])
    # ---- Main pipeline tab: screenshot in, analysis + HTML out ----
    with gr.Tab("Generate"):
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### Upload Screenshot", elem_classes=["section"])
                image_input = gr.Image(
                    type="pil",
                    label="Website Screenshot",
                    sources=["upload", "clipboard"],
                    height=300,
                )
                gr.Markdown(
                    """
                    **Tips:**
                    - Clear, high-resolution screenshots work best
                    - Include the full page or section
                    - Complex layouts may need higher token limits
                    """,
                    elem_classes=["muted"]
                )
                generate_btn = gr.Button("Generate HTML", elem_classes=["primary-btn"], size="lg")
            with gr.Column(scale=2):
                gr.Markdown("### Results", elem_classes=["section"])
                with gr.Tabs():
                    with gr.TabItem("Analysis"):
                        description_output = gr.Textbox(
                            label="Image Analysis",
                            lines=8,
                            interactive=False,
                        )
                    with gr.TabItem("HTML Code"):
                        html_output = gr.Code(
                            label="Generated HTML",
                            language="html",
                            lines=20,
                        )
                with gr.Row():
                    codesandbox_btn = gr.Button("Deploy to CodeSandbox", elem_classes=["secondary-btn"])
                    download_btn = gr.Button("Download HTML", elem_classes=["secondary-btn"])
                deployment_result = gr.Markdown(value="")
                # Hidden until a file has actually been exported.
                download_file = gr.File(
                    label="Download",
                    interactive=False,
                    visible=False,
                )
    # ---- Standalone tools: run either pipeline stage independently ----
    with gr.Tab("Tools"):
        with gr.Row():
            with gr.Column():
                gr.Markdown("### Image Analyzer", elem_classes=["section"])
                img_tool = gr.Image(type="pil", label="Image")
                analyze_btn = gr.Button("Analyze", elem_classes=["secondary-btn"])
                analysis_result = gr.Textbox(label="Analysis", lines=8)
            with gr.Column():
                gr.Markdown("### Code Generator", elem_classes=["section"])
                desc_input = gr.Textbox(
                    label="Description",
                    lines=6,
                    placeholder="Describe the website to generate..."
                )
                code_btn = gr.Button("Generate", elem_classes=["secondary-btn"])
                code_result = gr.Code(label="HTML Output", language="html", lines=12)
    # ---- Static reference tab listing the supported models ----
    with gr.Tab("Models"):
        gr.Markdown(
            f"""
            ## Verified Working Models on Nebius
            All models listed here have been tested and confirmed to work with the Nebius API.
            ### Vision Models ({len(VISION_MODELS)} models)
            - **Qwen/Qwen2.5-VL-72B-Instruct** <span class="model-size">72B</span> - Highest quality vision analysis
            - **Qwen/Qwen2.5-VL-7B-Instruct** <span class="model-size">7B</span> - Fast vision processing
            ### Code Generation Models ({len(CODE_MODELS)} models)
            #### Qwen 2.5 Series (Latest Generation)
            - **Qwen/Qwen2.5-72B-Instruct** <span class="model-size">72B</span> - Flagship model, best quality
            - **Qwen/Qwen2.5-Coder-32B-Instruct** <span class="model-size">32B</span> - Optimized for code generation
            - **Qwen/Qwen2.5-32B-Instruct** <span class="model-size">32B</span> - Balanced performance
            - **Qwen/Qwen2.5-14B-Instruct** <span class="model-size">14B</span> - Good balance of speed and quality
            - **Qwen/Qwen2.5-7B-Instruct** <span class="model-size">7B</span> - Fast generation
            - **Qwen/Qwen2.5-3B-Instruct** <span class="model-size">3B</span> - Very fast
            - **Qwen/Qwen2.5-1.5B-Instruct** <span class="model-size">1.5B</span> - Ultra fast
            - **Qwen/Qwen2.5-0.5B-Instruct** <span class="model-size">0.5B</span> - Fastest option
            #### Specialized Models
            - **Qwen/QwQ-32B-Preview** <span class="model-size">32B</span> - Advanced reasoning capabilities
            - **deepseek-ai/DeepSeek-V3** <span class="model-size">Large</span> - State-of-the-art code generation
            #### DeepSeek R1 Distilled Models
            - **deepseek-ai/DeepSeek-R1-Distill-Qwen-32B** <span class="model-size">32B</span> - High quality distillation
            - **deepseek-ai/DeepSeek-R1-Distill-Qwen-14B** <span class="model-size">14B</span> - Balanced distillation
            - **deepseek-ai/DeepSeek-R1-Distill-Qwen-7B** <span class="model-size">7B</span> - Fast distillation
            - **deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B** <span class="model-size">1.5B</span> - Ultra fast distillation
            ### Performance Guide
            | Use Case | Recommended Models | Token Limit |
            |----------|-------------------|-------------|
            | Quick Prototypes | 0.5B-3B models | 2000-3000 |
            | Production Code | 14B-32B models | 3000-4000 |
            | Complex Projects | 72B models | 4000-6000 |
            | Code Optimization | Coder/DeepSeek models | 3000-5000 |
            ### Status: <span class="verified-badge">All Models Active</span>
            Last verified: 2025-01-21
            """,
            elem_classes=["section"]
        )
    gr.Markdown(
        """
        ---
        Powered by Nebius AI Studio | Built with Gradio | User: samsnata
        """,
        elem_classes=["footer"]
    )
    # Event handlers
    generate_btn.click(
        fn=screenshot_to_code,
        inputs=[image_input, nebius_key, vision_model_dd, code_model_dd, code_max_tokens, code_temperature],
        outputs=[description_output, html_output],
    )
    codesandbox_btn.click(
        fn=lambda code: create_codesandbox(code) if code and not code.startswith("Error") else "No valid code to deploy.",
        inputs=[html_output],
        outputs=[deployment_result],
    )
    # NOTE(review): export_html_to_file is invoked twice per click here, so
    # two temp files are written; compute the path once in a named handler.
    download_btn.click(
        fn=lambda code: gr.update(value=export_html_to_file(code), visible=bool(export_html_to_file(code))),
        inputs=[html_output],
        outputs=[download_file],
    )
    analyze_btn.click(
        fn=analyze_image,
        inputs=[img_tool, nebius_key, vision_model_dd],
        outputs=[analysis_result],
    )
    code_btn.click(
        fn=generate_html_code,
        inputs=[desc_input, nebius_key, code_model_dd, code_max_tokens, code_temperature],
        outputs=[code_result],
    )
# Launch the Gradio server locally: no public share link, and surface
# handler exceptions in the UI rather than swallowing them.
if __name__ == "__main__":
    app.launch(share=False, show_error=True)