# venice-2 / app.py
# Updated via anycoder (commit 3db82fa, verified)
import os
import time
import datetime
import gradio as gr

# --- Safe Imports to Prevent Runtime Crashes ---
# We wrap imports in try-except blocks so the Gradio UI loads even if dependencies are missing.
# This allows the user to see an error message in the UI instead of a generic 500 error.
try:
    from openai import OpenAI
except ImportError:
    OpenAI = None  # Sentinel checked below and in real_llm_generate().
try:
    from tavily import TavilyClient
except ImportError:
    TavilyClient = None  # Sentinel checked below and in real_web_search().

# --- Configuration & Initialization ---
# In production, these are set as environment variables
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

# Initialize Clients
# Each client stays None unless its library imported AND its key is set;
# downstream functions branch on these globals to surface readable errors
# in the UI instead of crashing.
openai_client = None
tavily_client = None
if OpenAI and OPENAI_API_KEY:
    try:
        openai_client = OpenAI(api_key=OPENAI_API_KEY)
    except Exception as e:
        # Constructor failure (e.g. malformed key) is logged, not fatal.
        print(f"OpenAI Initialization Error: {e}")
if TavilyClient and TAVILY_API_KEY:
    try:
        tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
    except Exception as e:
        print(f"Tavily Initialization Error: {e}")
# --- Production-Grade Backend Logic ---
def get_system_prompt():
    """Build the fixed system prompt framing the LLM as a front-end expert.

    Returns:
        str: Multi-line instruction block sent as the "system" message.
    """
    instructions = """
You are an expert Senior Front-End Engineer and AI Architect.
Your goal is to generate clean, semantic, modern HTML/CSS (inline styles) based on user requests.
Guidelines:
1. Use semantic HTML5 tags.
2. Use inline CSS for simplicity in this standalone preview context.
3. Ensure responsive design (flexbox/grid).
4. Use modern color palettes (avoid default blue/red).
5. No JavaScript unless explicitly requested. If JS is needed, keep it vanilla and inline.
6. Output ONLY the HTML code block. Do not include markdown formatting like ```html.
"""
    return instructions
def log_event(message, current_logs):
    """Return *current_logs* with *message* appended on a new, timestamped line."""
    stamp = datetime.datetime.now().strftime("%H:%M:%S")
    return "{}\n[{}] {}".format(current_logs, stamp, message)
def real_web_search(query, current_logs):
    """
    Performs a real-time web search using Tavily API.

    Args:
        query: Natural-language search query string.
        current_logs: Existing log transcript to append to.

    Returns:
        tuple[list[str], str]: ("title: content" context snippets, updated logs).
        On any failure the context list is empty and the error is logged.
    """
    if not TavilyClient:
        return [], log_event("ERROR: Tavily library not installed. Please add 'tavily-python' to requirements.txt.", current_logs)
    if not tavily_client:
        return [], log_event("ERROR: TAVILY_API_KEY not found. Search disabled.", current_logs)
    try:
        logs = log_event(f"Initiating search for: '{query}'...", current_logs)
        # Execute search with production parameters
        response = tavily_client.search(
            query=query,
            search_depth="advanced",
            max_results=5,
            include_answer=False,
            include_raw_content=False
        )
        results = response.get("results", [])
        logs = log_event(f"Search completed. Found {len(results)} relevant sources.", logs)
        # Format results for context injection.
        # Bug fix: use .get() — a single result missing 'title' or 'content'
        # previously raised KeyError, which the broad except below turned into
        # discarding the ENTIRE result set.
        context = [f"{res.get('title', '')}: {res.get('content', '')}" for res in results]
        return context, logs
    except Exception as e:
        error_msg = f"Search API Error: {str(e)}"
        return [], log_event(error_msg, current_logs)
def real_llm_generate(prompt, search_context, current_logs):
    """
    Generate HTML code for *prompt* via the configured OpenAI chat model.

    Args:
        prompt: The user's request text.
        search_context: List of context snippets from web search (may be empty).
        current_logs: Existing log transcript to append to.

    Returns:
        tuple[str, str]: (generated HTML or an inline error <div>, updated logs).
    """
    # Guard clauses: surface missing dependency / key as visible HTML errors.
    if not OpenAI:
        error_html = "<div style='color:red; padding:20px;'>Error: OpenAI library not installed. Please add 'openai' to requirements.txt.</div>"
        return error_html, log_event("ERROR: OpenAI library not installed.", current_logs)
    if not openai_client:
        error_html = "<div style='color:red; padding:20px;'>Error: OPENAI_API_KEY missing.</div>"
        return error_html, log_event("ERROR: OPENAI_API_KEY not found. Generation disabled.", current_logs)
    try:
        run_log = log_event("Constructing prompt with context...", current_logs)
        # Fold the search snippets into a single user message.
        ctx_block = "\n".join(search_context) if search_context else "No specific context found."
        composed = f"User Request: {prompt}\n\nContext from Search:\n{ctx_block}"
        run_log = log_event("Sending request to LLM (GPT-4o)...", run_log)
        chat_messages = [
            {"role": "system", "content": get_system_prompt()},
            {"role": "user", "content": composed},
        ]
        # API Call
        resp = openai_client.chat.completions.create(
            model="gpt-4o",  # Production grade model
            messages=chat_messages,
            temperature=0.7,
            max_tokens=2048
        )
        html_out = resp.choices[0].message.content
        run_log = log_event(f"Generation successful. Tokens used: {resp.usage.total_tokens}", run_log)
        return html_out, run_log
    except Exception as exc:
        failure = f"LLM Generation Error: {str(exc)}"
        return f"<div style='color:red; padding:20px;'>{failure}</div>", log_event(failure, current_logs)
def process_message(message, history, code_state, use_search, current_logs):
    """
    Main pipeline: State Management -> Search -> Generate -> Update UI

    Args:
        message: The user's prompt text.
        history: Chatbot history in "messages" format (list of role/content dicts).
        code_state: Latest code from the hidden gr.Code state (unused here;
            kept for interface compatibility with the event wiring).
        use_search: Whether to ground generation with a Tavily web search.
        current_logs: Existing console log text.

    Returns:
        tuple: (history, code, code, code, logs) — the generated code is fanned
        out to the editor, the live preview, and the full-code tab.
    """
    logs = current_logs
    logs = log_event(f"--- New Request ---", logs)
    # 1. Web Search Step (Conditional)
    search_context = []
    if use_search:
        search_context, logs = real_web_search(message, logs)
    else:
        logs = log_event("Search skipped by user setting.", logs)
    # 2. Generation Step
    generated_code, logs = real_llm_generate(message, search_context, logs)
    # 3. Update Chat History
    # Bug fix: the user's own message was never appended, so the chat showed
    # only assistant turns. Record the user turn before the assistant reply.
    history.append({"role": "user", "content": message})
    assistant_response = "I've generated the code based on your request and the retrieved context."
    history.append({"role": "assistant", "content": assistant_response})
    return history, generated_code, generated_code, generated_code, logs
def update_preview_from_editor(code):
    """Mirror manual code-editor edits into the preview pane (identity pass-through)."""
    return code
# --- Gradio Application (Gradio 6 Syntax) ---
# Custom CSS for "Upscaled UI"
# Passed to demo.launch(css=...); the class names below are attached to
# components via elem_classes in the layout.
custom_css = """
/* Browser Frame Simulation */
.browser-frame {
border: 1px solid #e5e7eb;
border-radius: 8px;
overflow: hidden;
background: white;
height: 100%;
display: flex;
flex-direction: column;
}
.browser-header {
background: #f3f4f6;
padding: 8px 12px;
display: flex;
align-items: center;
border-bottom: 1px solid #e5e7eb;
gap: 8px;
}
.browser-dots {
display: flex;
gap: 6px;
}
.dot {
width: 10px;
height: 10px;
border-radius: 50%;
}
.dot-red { background: #ef4444; }
.dot-yellow { background: #f59e0b; }
.dot-green { background: #10b981; }
.url-bar {
flex: 1;
background: white;
border-radius: 4px;
padding: 4px 12px;
font-size: 12px;
color: #6b7280;
text-align: center;
border: 1px solid #d1d5db;
}
.preview-content {
flex: 1;
overflow: auto;
padding: 0;
}
/* Terminal/Console Styling */
.console-output textarea {
font-family: 'Menlo', 'Monaco', 'Courier New', monospace;
background-color: #1e1e1e;
color: #d4d4d4;
}
"""

# Define the theme using Gradio 6 syntax
custom_theme = gr.themes.Soft(
    primary_hue="indigo",
    secondary_hue="cyan",
    neutral_hue="slate",
    font=gr.themes.GoogleFont("Inter"),
    text_size="lg",
    spacing_size="lg",
    radius_size="md"
).set(
    # .set() overrides individual theme variables; "*primary_600" etc. refer
    # to the theme's own generated color scale.
    button_primary_background_fill="*primary_600",
    button_primary_background_fill_hover="*primary_700",
    block_title_text_weight="600",
    block_border_width="1px",
    block_border_color="*neutral_200",
)
# GRADIO 6: NO parameters in gr.Blocks() constructor!
with gr.Blocks() as demo:
    # Header with branding
    gr.HTML("""
<div style="display: flex; justify-content: space-between; align-items: center; padding: 10px 20px; border-bottom: 1px solid #e5e7eb; margin-bottom: 20px;">
<div style="display: flex; align-items: center; gap: 10px;">
<h1 style="margin: 0; font-size: 1.5rem; font-weight: 700; color: #4f46e5;">Bolt.new <span style="color: #1f2937; font-weight: 300;">Production</span></h1>
</div>
<div>
<a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="text-decoration: none; color: #4b5563; font-size: 0.9rem; display: flex; align-items: center; gap: 5px;">
Built with anycoder ↗
</a>
</div>
</div>
""")
    # Main State
    # Hidden gr.Code used as cross-event state for the latest generated code.
    current_code = gr.Code(value="", visible=False, language="html")

    with gr.Row(equal_height=True):
        # --- LEFT COLUMN: Chat & Controls ---
        with gr.Column(scale=3, min_width=350):
            gr.Markdown("### 💬 AI Architect (Production)")
            gr.Markdown("Describe your web app. Connected to GPT-4o & Tavily Search.")
            chatbot = gr.Chatbot(
                label="Conversation",
                height=500,
                show_copy_button=True,
                type="messages",  # OpenAI-style {"role": ..., "content": ...} dicts
                avatar_images=(None, "https://huggingface.co/spaces/friuns/bolt.new/resolve/main/logo.png")
            )
            with gr.Row():
                with gr.Accordion("⚙️ Production Settings", open=False):
                    use_search = gr.Checkbox(
                        label="Enable Tavily Search",
                        value=True,
                        info="Ground the AI generation with real-time web data."
                    )
                    # Note: In a real app, this might dynamically change the model client
                    model_display = gr.Textbox(
                        value="gpt-4o (OpenAI)",
                        label="Active Model",
                        interactive=False
                    )
            with gr.Row():
                msg_input = gr.Textbox(
                    label="Prompt",
                    placeholder="e.g., Create a responsive dashboard layout...",
                    lines=2,
                    scale=4,
                    autofocus=True
                )
                submit_btn = gr.Button("Generate", variant="primary", scale=1, size="lg")
            gr.Examples(
                examples=[
                    ["Create a modern pricing table with 3 tiers and hover effects."],
                    ["Build a hero section for a SaaS analytics platform."],
                    ["Design a dark-themed login form with validation styles."],
                    ["Generate a clean footer with social links and newsletter signup."]
                ],
                inputs=msg_input
            )
        # --- RIGHT COLUMN: IDE & Preview ---
        with gr.Column(scale=7):
            gr.Markdown("### 🛠️ Workspace")
            with gr.Tabs() as workspace_tabs:
                # Tab 1: Split View (Default)
                with gr.Tab("Split View", id="split"):
                    with gr.Row():
                        # Code Editor
                        with gr.Column(scale=1):
                            gr.Markdown("**Code Editor**")
                            code_editor = gr.Code(
                                label="index.html",
                                language="html",
                                lines=30,
                                interactive=True,
                                value="<!-- AI generated code will appear here -->"
                            )
                        # Live Preview
                        with gr.Column(scale=1):
                            gr.Markdown("**Live Preview**")
                            with gr.Group(elem_classes="browser-frame"):
                                # Fake browser chrome: traffic-light dots + URL
                                # bar, styled by the classes in custom_css.
                                with gr.Row(elem_classes="browser-header"):
                                    with gr.Column(scale=0, min_width=40):
                                        with gr.Row(elem_classes="browser-dots"):
                                            gr.HTML('<div class="dot dot-red"></div>')
                                            gr.HTML('<div class="dot dot-yellow"></div>')
                                            gr.HTML('<div class="dot dot-green"></div>')
                                    url_display = gr.Textbox(
                                        value="http://localhost:3000",
                                        interactive=False,
                                        container=False,
                                        scale=1,
                                        elem_classes="url-bar"
                                    )
                                preview_html = gr.HTML(
                                    value="<div style='display:flex;justify-content:center;align-items:center;height:100%;color:#9ca3af;'>Preview will appear here</div>",
                                    elem_classes="preview-content"
                                )
                # Tab 2: Full Code
                with gr.Tab("Full Code", id="code"):
                    full_code_editor = gr.Code(
                        label="Source Code",
                        language="html",
                        lines=40,
                        interactive=True
                    )
                # Tab 3: Console/Logs
                with gr.Tab("Console", id="console"):
                    console_output = gr.Textbox(
                        label="Execution Logs",
                        lines=10,
                        interactive=False,
                        container=True,
                        elem_classes="console-output",
                        value=f"[System] Ready to build.\n[System] OpenAI Client: {'Connected' if openai_client else 'Not Found (Check OPENAI_API_KEY)'}\n[System] Tavily Client: {'Connected' if tavily_client else 'Not Found (Check TAVILY_API_KEY)'}"
                    )
    # Bottom status bar + simulated deploy action.
    with gr.Row():
        status_bar = gr.Textbox(
            label="Status",
            value="Ready",
            interactive=False,
            scale=4,
            container=False
        )
        deploy_btn = gr.Button("Simulate Deploy", variant="stop", scale=1)

    # --- Event Listeners ---
    # 1. Main Generation Flow
    submit_btn.click(
        fn=process_message,
        inputs=[msg_input, chatbot, current_code, use_search, console_output],
        outputs=[chatbot, code_editor, preview_html, full_code_editor, console_output]
    ).then(
        # Clear the prompt box after the round-trip completes.
        lambda: gr.Textbox(value=""),
        None,
        msg_input
    )
    # Enter key in the textbox mirrors the Generate button.
    msg_input.submit(
        fn=process_message,
        inputs=[msg_input, chatbot, current_code, use_search, console_output],
        outputs=[chatbot, code_editor, preview_html, full_code_editor, console_output]
    ).then(
        lambda: gr.Textbox(value=""),
        None,
        msg_input
    )
    # 2. Real-time Preview Updates (Editor -> Preview)
    code_editor.change(
        fn=update_preview_from_editor,
        inputs=code_editor,
        outputs=preview_html
    )
    # Sync full code editor
    code_editor.change(
        fn=lambda x: x,
        inputs=code_editor,
        outputs=full_code_editor
    )
    # Editing in the Full Code tab writes back to the split editor AND preview.
    full_code_editor.change(
        fn=lambda x: x,
        inputs=full_code_editor,
        outputs=[code_editor, preview_html]
    )
    # 3. Mock Deploy Action (Updated Log)
    def mock_deploy(logs):
        """Append a fake three-step deploy transcript to *logs* and set status.

        NOTE: the timestamp is captured once, so all three lines share the same
        time; the sleeps only pace the overall event, they do not stream lines.
        """
        timestamp = datetime.datetime.now().strftime("%H:%M:%S")
        new_logs = f"{logs}\n[{timestamp}] Triggering build pipeline..."
        time.sleep(0.5)
        new_logs = f"{new_logs}\n[{timestamp}] Optimizing assets..."
        time.sleep(0.5)
        new_logs = f"{new_logs}\n[{timestamp}] Deployment to Vercel successful! 🚀"
        return "Deployed", new_logs
    deploy_btn.click(
        fn=mock_deploy,
        inputs=console_output,
        outputs=[status_bar, console_output]
    )
# --- Launch Configuration ---
# GRADIO 6: All app-level params go in launch()!
# NOTE(review): passing `theme`, `css`, and `footer_links` to launch() is
# nonstandard for historical Gradio releases, where theme/css belong to the
# gr.Blocks(...) constructor — verify against the installed Gradio version's
# launch() signature before relying on this.
demo.launch(
    theme=custom_theme,
    css=custom_css,
    footer_links=[
        {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}
    ]
)