Update app.py
Browse files
app.py
CHANGED
|
@@ -16,9 +16,12 @@ from lzstring import LZString
|
|
| 16 |
NEBIUS_BASE_URL = "https://api.studio.nebius.com/v1/"
|
| 17 |
DEFAULT_VISION_MODEL = "Qwen/Qwen2.5-VL-72B-Instruct"
|
| 18 |
DEFAULT_CODE_MODEL = "deepseek-ai/DeepSeek-V3-0324"
|
| 19 |
-
HTTP_TIMEOUT = 60.0 # seconds
|
| 20 |
|
| 21 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
DEFAULT_NEBIUS_API_KEY = (
|
| 23 |
"eyJhbGciOiJIUzI1NiIsImtpZCI6IlV6SXJWd1h0dnprLVRvdzlLZWstc0M1akptWXBvX1VaVkxUZlpnMDRlOFUiLCJ0eXAiOiJKV1QifQ.eyJzdWIiOiJnb29nbGUtb2F1dGgyfDEwNTA1MTQzMDg2MDMwMzIxNDEwMiIsInNjb3BlIjoib3BlbmlkIG9mZmxpbmVfYWNjZXNzIiwiaXNzIjoiYXBpX2tleV9pc3N1ZXIiLCJhdWQiOlsiaHR0cHM6Ly9uZWJpdXMtaW5mZXJlbmNlLmV1LmF1dGgwLmNvbS9hcGkvdjIvIl0sImV4cCI6MTkwNjU5ODA0NCwidXVpZCI6ImNkOGFiMWZlLTIxN2QtNDJlMy04OWUwLWM1YTg4MjcwMGVhNyIsIm5hbWUiOiJodW5nZ2luZyIsImV4cGlyZXNfYXQiOiIyMDMwLTA2LTAyVDAyOjM0OjA0KzAwMDAifQ.MA52QuIiNruK7_lX688RXAEI2TkcCOjcf_02XrpnhI8"
|
| 24 |
)
|
|
@@ -32,7 +35,7 @@ def get_api_key(user_key: str = "") -> str:
|
|
| 32 |
Resolve the Nebius API key from:
|
| 33 |
1) The provided user_key field
|
| 34 |
2) The NEBIUS_API_KEY environment variable
|
| 35 |
-
3) The built-in DEFAULT_NEBIUS_API_KEY
|
| 36 |
"""
|
| 37 |
return (user_key or "").strip() or os.getenv("NEBIUS_API_KEY", "").strip() or DEFAULT_NEBIUS_API_KEY
|
| 38 |
|
|
@@ -46,7 +49,8 @@ def call_chat_completions(
|
|
| 46 |
) -> str:
|
| 47 |
"""
|
| 48 |
Calls the Nebius OpenAI-compatible chat completions endpoint via HTTP.
|
| 49 |
-
Returns the message content string.
|
|
|
|
| 50 |
"""
|
| 51 |
if not api_key:
|
| 52 |
raise ValueError("Nebius API key is required.")
|
|
@@ -60,7 +64,10 @@ def call_chat_completions(
|
|
| 60 |
}
|
| 61 |
headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
|
| 62 |
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
| 64 |
resp = client.post(url, headers=headers, json=payload)
|
| 65 |
resp.raise_for_status()
|
| 66 |
data = resp.json()
|
|
@@ -143,9 +150,12 @@ def generate_html_code(
|
|
| 143 |
description: str,
|
| 144 |
nebius_api_key: str = "",
|
| 145 |
code_model: str = DEFAULT_CODE_MODEL,
|
|
|
|
|
|
|
| 146 |
) -> str:
|
| 147 |
"""
|
| 148 |
Generate HTML/CSS/JavaScript code based on a website description.
|
|
|
|
| 149 |
"""
|
| 150 |
if not description or description.startswith("Error"):
|
| 151 |
return "Error: Invalid or missing description."
|
|
@@ -177,17 +187,36 @@ Return only the complete HTML code starting with <!DOCTYPE html> and ending with
|
|
| 177 |
model=code_model,
|
| 178 |
messages=messages,
|
| 179 |
api_key=api_key,
|
| 180 |
-
max_tokens=
|
| 181 |
-
temperature=
|
| 182 |
)
|
| 183 |
html_code = _strip_fenced_code(content)
|
| 184 |
|
| 185 |
-
# Ensure HTML document bounds if present
|
| 186 |
if "<!DOCTYPE html>" in html_code and "</html>" in html_code:
|
| 187 |
start = html_code.find("<!DOCTYPE html>")
|
| 188 |
end = html_code.rfind("</html>") + len("</html>")
|
| 189 |
return html_code[start:end]
|
| 190 |
return html_code
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 191 |
except Exception as e:
|
| 192 |
return f"Error generating HTML code: {str(e)}"
|
| 193 |
|
|
@@ -230,7 +259,8 @@ def create_codesandbox(html_code: str) -> str:
|
|
| 230 |
|
| 231 |
# Try POST API to get a sandbox_id
|
| 232 |
url = "https://codesandbox.io/api/v1/sandboxes/define"
|
| 233 |
-
|
|
|
|
| 234 |
resp = client.post(url, json=parameters)
|
| 235 |
if resp.status_code == 200:
|
| 236 |
data = resp.json()
|
|
@@ -249,6 +279,8 @@ def screenshot_to_code(
|
|
| 249 |
nebius_api_key: str = "",
|
| 250 |
vision_model: str = DEFAULT_VISION_MODEL,
|
| 251 |
code_model: str = DEFAULT_CODE_MODEL,
|
|
|
|
|
|
|
| 252 |
) -> Tuple[str, str]:
|
| 253 |
"""
|
| 254 |
Complete pipeline: analyze image and generate corresponding HTML code.
|
|
@@ -257,7 +289,13 @@ def screenshot_to_code(
|
|
| 257 |
description = analyze_image(image, nebius_api_key, vision_model)
|
| 258 |
if description.startswith("Error"):
|
| 259 |
return description, "Error: Cannot generate code due to image analysis failure."
|
| 260 |
-
html_code = generate_html_code(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 261 |
return description, html_code
|
| 262 |
|
| 263 |
|
|
@@ -277,7 +315,7 @@ def export_html_to_file(html_code: str) -> Optional[str]:
|
|
| 277 |
|
| 278 |
|
| 279 |
# =========================
|
| 280 |
-
# Gradio UI (
|
| 281 |
# =========================
|
| 282 |
with gr.Blocks(
|
| 283 |
theme=gr.themes.Soft(),
|
|
@@ -296,7 +334,7 @@ Turn website screenshots into functional HTML using Nebius models.
|
|
| 296 |
- 📸 Image analysis (default: Qwen2.5-VL-72B-Instruct)
|
| 297 |
- 💻 Code generation (default: DeepSeek-V3-0324)
|
| 298 |
- 🚀 One-click CodeSandbox deployment
|
| 299 |
-
- ๐
|
| 300 |
"""
|
| 301 |
)
|
| 302 |
|
|
@@ -321,6 +359,23 @@ Turn website screenshots into functional HTML using Nebius models.
|
|
| 321 |
value=DEFAULT_CODE_MODEL,
|
| 322 |
allow_custom_value=True,
|
| 323 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 324 |
|
| 325 |
with gr.Tab("๐ฏ Quick Generate"):
|
| 326 |
with gr.Row():
|
|
@@ -377,7 +432,7 @@ Turn website screenshots into functional HTML using Nebius models.
|
|
| 377 |
# Event bindings
|
| 378 |
generate_btn.click(
|
| 379 |
fn=screenshot_to_code,
|
| 380 |
-
inputs=[image_input, nebius_key, vision_model_dd, code_model_dd],
|
| 381 |
outputs=[description_output, html_output],
|
| 382 |
)
|
| 383 |
|
|
@@ -410,8 +465,10 @@ Turn website screenshots into functional HTML using Nebius models.
|
|
| 410 |
)
|
| 411 |
|
| 412 |
code_btn.click(
|
| 413 |
-
fn=lambda desc, key, cmod: generate_html_code(
|
| 414 |
-
|
|
|
|
|
|
|
| 415 |
outputs=[code_result],
|
| 416 |
)
|
| 417 |
|
|
|
|
| 16 |
NEBIUS_BASE_URL = "https://api.studio.nebius.com/v1/"
|
| 17 |
DEFAULT_VISION_MODEL = "Qwen/Qwen2.5-VL-72B-Instruct"
|
| 18 |
DEFAULT_CODE_MODEL = "deepseek-ai/DeepSeek-V3-0324"
|
|
|
|
| 19 |
|
| 20 |
+
# More forgiving timeouts and built-in retries to reduce "read operation timed out"
|
| 21 |
+
HTTP_TIMEOUTS = httpx.Timeout(connect=10.0, read=120.0, write=30.0, pool=60.0)
|
| 22 |
+
HTTP_RETRIES = 2 # extra attempts on transient failures
|
| 23 |
+
|
| 24 |
+
# Use the same default key you provided
|
| 25 |
DEFAULT_NEBIUS_API_KEY = (
|
| 26 |
"eyJhbGciOiJIUzI1NiIsImtpZCI6IlV6SXJWd1h0dnprLVRvdzlLZWstc0M1akptWXBvX1VaVkxUZlpnMDRlOFUiLCJ0eXAiOiJKV1QifQ.eyJzdWIiOiJnb29nbGUtb2F1dGgyfDEwNTA1MTQzMDg2MDMwMzIxNDEwMiIsInNjb3BlIjoib3BlbmlkIG9mZmxpbmVfYWNjZXNzIiwiaXNzIjoiYXBpX2tleV9pc3N1ZXIiLCJhdWQiOlsiaHR0cHM6Ly9uZWJpdXMtaW5mZXJlbmNlLmV1LmF1dGgwLmNvbS9hcGkvdjIvIl0sImV4cCI6MTkwNjU5ODA0NCwidXVpZCI6ImNkOGFiMWZlLTIxN2QtNDJlMy04OWUwLWM1YTg4MjcwMGVhNyIsIm5hbWUiOiJodW5nZ2luZyIsImV4cGlyZXNfYXQiOiIyMDMwLTA2LTAyVDAyOjM0OjA0KzAwMDAifQ.MA52QuIiNruK7_lX688RXAEI2TkcCOjcf_02XrpnhI8"
|
| 27 |
)
|
|
|
|
| 35 |
Resolve the Nebius API key from:
|
| 36 |
1) The provided user_key field
|
| 37 |
2) The NEBIUS_API_KEY environment variable
|
| 38 |
+
3) The built-in DEFAULT_NEBIUS_API_KEY
|
| 39 |
"""
|
| 40 |
return (user_key or "").strip() or os.getenv("NEBIUS_API_KEY", "").strip() or DEFAULT_NEBIUS_API_KEY
|
| 41 |
|
|
|
|
| 49 |
) -> str:
|
| 50 |
"""
|
| 51 |
Calls the Nebius OpenAI-compatible chat completions endpoint via HTTP.
|
| 52 |
+
Returns the assistant message content string.
|
| 53 |
+
Includes retries and increased read timeout to mitigate timeouts.
|
| 54 |
"""
|
| 55 |
if not api_key:
|
| 56 |
raise ValueError("Nebius API key is required.")
|
|
|
|
| 64 |
}
|
| 65 |
headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
|
| 66 |
|
| 67 |
+
# HTTPX transport with retries
|
| 68 |
+
transport = httpx.HTTPTransport(retries=HTTP_RETRIES)
|
| 69 |
+
|
| 70 |
+
with httpx.Client(timeout=HTTP_TIMEOUTS, transport=transport) as client:
|
| 71 |
resp = client.post(url, headers=headers, json=payload)
|
| 72 |
resp.raise_for_status()
|
| 73 |
data = resp.json()
|
|
|
|
| 150 |
description: str,
|
| 151 |
nebius_api_key: str = "",
|
| 152 |
code_model: str = DEFAULT_CODE_MODEL,
|
| 153 |
+
code_max_tokens: int = 4000,
|
| 154 |
+
code_temperature: float = 0.7,
|
| 155 |
) -> str:
|
| 156 |
"""
|
| 157 |
Generate HTML/CSS/JavaScript code based on a website description.
|
| 158 |
+
Adds timeout-aware retry with a reduced token budget on second attempt.
|
| 159 |
"""
|
| 160 |
if not description or description.startswith("Error"):
|
| 161 |
return "Error: Invalid or missing description."
|
|
|
|
| 187 |
model=code_model,
|
| 188 |
messages=messages,
|
| 189 |
api_key=api_key,
|
| 190 |
+
max_tokens=code_max_tokens,
|
| 191 |
+
temperature=code_temperature,
|
| 192 |
)
|
| 193 |
html_code = _strip_fenced_code(content)
|
| 194 |
|
|
|
|
| 195 |
if "<!DOCTYPE html>" in html_code and "</html>" in html_code:
|
| 196 |
start = html_code.find("<!DOCTYPE html>")
|
| 197 |
end = html_code.rfind("</html>") + len("</html>")
|
| 198 |
return html_code[start:end]
|
| 199 |
return html_code
|
| 200 |
+
except (httpx.ReadTimeout, httpx.TimeoutException):
|
| 201 |
+
# Retry once with a reduced token budget to improve latency
|
| 202 |
+
try:
|
| 203 |
+
reduced_tokens = max(1000, int(code_max_tokens * 0.6))
|
| 204 |
+
messages = [{"role": "user", "content": prompt}]
|
| 205 |
+
content = call_chat_completions(
|
| 206 |
+
model=code_model,
|
| 207 |
+
messages=messages,
|
| 208 |
+
api_key=api_key,
|
| 209 |
+
max_tokens=reduced_tokens,
|
| 210 |
+
temperature=code_temperature,
|
| 211 |
+
)
|
| 212 |
+
html_code = _strip_fenced_code(content)
|
| 213 |
+
if "<!DOCTYPE html>" in html_code and "</html>" in html_code:
|
| 214 |
+
start = html_code.find("<!DOCTYPE html>")
|
| 215 |
+
end = html_code.rfind("</html>") + len("</html>")
|
| 216 |
+
return html_code[start:end]
|
| 217 |
+
return html_code
|
| 218 |
+
except Exception as e2:
|
| 219 |
+
return f"Error generating HTML code after retry: {str(e2)}. Tips: lower Max tokens, pick a smaller/faster model, or try again."
|
| 220 |
except Exception as e:
|
| 221 |
return f"Error generating HTML code: {str(e)}"
|
| 222 |
|
|
|
|
| 259 |
|
| 260 |
# Try POST API to get a sandbox_id
|
| 261 |
url = "https://codesandbox.io/api/v1/sandboxes/define"
|
| 262 |
+
transport = httpx.HTTPTransport(retries=HTTP_RETRIES)
|
| 263 |
+
with httpx.Client(timeout=HTTP_TIMEOUTS, transport=transport) as client:
|
| 264 |
resp = client.post(url, json=parameters)
|
| 265 |
if resp.status_code == 200:
|
| 266 |
data = resp.json()
|
|
|
|
| 279 |
nebius_api_key: str = "",
|
| 280 |
vision_model: str = DEFAULT_VISION_MODEL,
|
| 281 |
code_model: str = DEFAULT_CODE_MODEL,
|
| 282 |
+
code_max_tokens: int = 4000,
|
| 283 |
+
code_temperature: float = 0.7,
|
| 284 |
) -> Tuple[str, str]:
|
| 285 |
"""
|
| 286 |
Complete pipeline: analyze image and generate corresponding HTML code.
|
|
|
|
| 289 |
description = analyze_image(image, nebius_api_key, vision_model)
|
| 290 |
if description.startswith("Error"):
|
| 291 |
return description, "Error: Cannot generate code due to image analysis failure."
|
| 292 |
+
html_code = generate_html_code(
|
| 293 |
+
description,
|
| 294 |
+
nebius_api_key,
|
| 295 |
+
code_model=code_model,
|
| 296 |
+
code_max_tokens=code_max_tokens,
|
| 297 |
+
code_temperature=code_temperature,
|
| 298 |
+
)
|
| 299 |
return description, html_code
|
| 300 |
|
| 301 |
|
|
|
|
| 315 |
|
| 316 |
|
| 317 |
# =========================
|
| 318 |
+
# Gradio UI (English-only, user-friendly)
|
| 319 |
# =========================
|
| 320 |
with gr.Blocks(
|
| 321 |
theme=gr.themes.Soft(),
|
|
|
|
| 334 |
- 📸 Image analysis (default: Qwen2.5-VL-72B-Instruct)
|
| 335 |
- 💻 Code generation (default: DeepSeek-V3-0324)
|
| 336 |
- 🚀 One-click CodeSandbox deployment
|
| 337 |
+
- 🔒 Your key is used at runtime only
|
| 338 |
"""
|
| 339 |
)
|
| 340 |
|
|
|
|
| 359 |
value=DEFAULT_CODE_MODEL,
|
| 360 |
allow_custom_value=True,
|
| 361 |
)
|
| 362 |
+
with gr.Row():
|
| 363 |
+
code_max_tokens = gr.Slider(
|
| 364 |
+
label="Max tokens (code generation)",
|
| 365 |
+
minimum=500,
|
| 366 |
+
maximum=8000,
|
| 367 |
+
step=100,
|
| 368 |
+
value=4000,
|
| 369 |
+
info="Lower this if you see timeouts; higher values may take longer.",
|
| 370 |
+
)
|
| 371 |
+
code_temperature = gr.Slider(
|
| 372 |
+
label="Temperature",
|
| 373 |
+
minimum=0.0,
|
| 374 |
+
maximum=1.5,
|
| 375 |
+
step=0.1,
|
| 376 |
+
value=0.7,
|
| 377 |
+
info="Higher is more creative; lower is more deterministic.",
|
| 378 |
+
)
|
| 379 |
|
| 380 |
with gr.Tab("๐ฏ Quick Generate"):
|
| 381 |
with gr.Row():
|
|
|
|
| 432 |
# Event bindings
|
| 433 |
generate_btn.click(
|
| 434 |
fn=screenshot_to_code,
|
| 435 |
+
inputs=[image_input, nebius_key, vision_model_dd, code_model_dd, code_max_tokens, code_temperature],
|
| 436 |
outputs=[description_output, html_output],
|
| 437 |
)
|
| 438 |
|
|
|
|
| 465 |
)
|
| 466 |
|
| 467 |
code_btn.click(
|
| 468 |
+
fn=lambda desc, key, cmod, mtoks, temp: generate_html_code(
|
| 469 |
+
desc, key, code_model=cmod, code_max_tokens=mtoks, code_temperature=temp
|
| 470 |
+
),
|
| 471 |
+
inputs=[desc_input, nebius_key, code_model_dd, code_max_tokens, code_temperature],
|
| 472 |
outputs=[code_result],
|
| 473 |
)
|
| 474 |
|