# NOTE(review): removed non-Python scrape artifacts that were prepended to this
# file (a "File size" banner, git commit hashes, and a column of file-viewer
# line numbers). They were not part of the program and made the module
# unparseable; the real source begins at the imports below.
import os
import time
import datetime
import gradio as gr

# --- Safe Imports to Prevent Runtime Crashes ---
# We wrap imports in try-except blocks so the Gradio UI loads even if dependencies are missing.
# This allows the user to see an error message in the UI instead of a generic 500 error.
try:
    from openai import OpenAI
except ImportError:
    OpenAI = None

try:
    from tavily import TavilyClient
except ImportError:
    TavilyClient = None

# --- Configuration & Initialization ---
# In production, these are set as environment variables
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

# Initialize Clients
# Both clients deliberately stay None when their library or API key is
# missing; the backend functions below check for None and surface a readable
# error in the UI instead of crashing at import time.
openai_client = None
tavily_client = None

if OpenAI and OPENAI_API_KEY:
    try:
        openai_client = OpenAI(api_key=OPENAI_API_KEY)
    except Exception as e:
        # Constructor failure (e.g. malformed key) is logged, not fatal.
        print(f"OpenAI Initialization Error: {e}")

if TavilyClient and TAVILY_API_KEY:
    try:
        tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
    except Exception as e:
        # Same best-effort policy as the OpenAI client above.
        print(f"Tavily Initialization Error: {e}")

# --- Production-Grade Backend Logic ---

def get_system_prompt():
    """Return the fixed system prompt that frames the LLM as a senior
    front-end engineer emitting standalone, inline-styled HTML only."""
    # Kept in one constant so the generation contract lives in a single place.
    prompt = """
    You are an expert Senior Front-End Engineer and AI Architect. 
    Your goal is to generate clean, semantic, modern HTML/CSS (inline styles) based on user requests.
    
    Guidelines:
    1. Use semantic HTML5 tags.
    2. Use inline CSS for simplicity in this standalone preview context.
    3. Ensure responsive design (flexbox/grid).
    4. Use modern color palettes (avoid default blue/red).
    5. No JavaScript unless explicitly requested. If JS is needed, keep it vanilla and inline.
    6. Output ONLY the HTML code block. Do not include markdown formatting like ```html.
    """
    return prompt

def log_event(message, current_logs):
    """Return *current_logs* with *message* appended on a new line,
    prefixed by the current wall-clock time as [HH:MM:SS]."""
    stamp = datetime.datetime.now().strftime("%H:%M:%S")
    return "\n".join([current_logs, f"[{stamp}] {message}"])

def real_web_search(query, current_logs):
    """
    Performs a real-time web search using the Tavily API.

    Args:
        query: The user's search query string.
        current_logs: Accumulated log text; every outcome is appended to it.

    Returns:
        (context, logs): a list of "title: content" strings for prompt
        injection (empty on any failure), plus the updated log text.
    """
    # Library never imported (see guarded import at the top of the file).
    if not TavilyClient:
        return [], log_event("ERROR: Tavily library not installed. Please add 'tavily-python' to requirements.txt.", current_logs)
    
    # Library present but no API key / client construction failed.
    if not tavily_client:
        return [], log_event("ERROR: TAVILY_API_KEY not found. Search disabled.", current_logs)
    
    try:
        logs = log_event(f"Initiating search for: '{query}'...", current_logs)
        
        # Execute search with production parameters
        response = tavily_client.search(
            query=query, 
            search_depth="advanced", 
            max_results=5,
            include_answer=False,
            include_raw_content=False
        )
        
        results = response.get("results", [])
        logs = log_event(f"Search completed. Found {len(results)} relevant sources.", logs)
        
        # Format results for context injection. Use .get() so one malformed
        # result (missing 'title'/'content') cannot abort the whole request
        # with a KeyError — previously this indexed the keys directly.
        context = [
            f"{res.get('title', 'Untitled')}: {res.get('content', '')}"
            for res in results
        ]
        return context, logs
        
    except Exception as e:
        # Network/API errors are reported through the log pane, not raised.
        error_msg = f"Search API Error: {str(e)}"
        return [], log_event(error_msg, current_logs)

def real_llm_generate(prompt, search_context, current_logs):
    """
    Generates code using OpenAI GPT-4o (or configured model).
    Returns generated HTML and updated logs.

    Args:
        prompt: The user's natural-language request.
        search_context: List of context strings from real_web_search (may be empty).
        current_logs: Accumulated log text; every outcome is appended to it.

    Returns:
        (html, logs): the generated HTML string — or an inline red error
        <div> on any failure — plus the updated log text.
    """
    # Library never imported (see guarded import at the top of the file).
    if not OpenAI:
        error_html = "<div style='color:red; padding:20px;'>Error: OpenAI library not installed. Please add 'openai' to requirements.txt.</div>"
        return error_html, log_event("ERROR: OpenAI library not installed.", current_logs)

    # Library present but no API key / client construction failed.
    if not openai_client:
        error_html = "<div style='color:red; padding:20px;'>Error: OPENAI_API_KEY missing.</div>"
        return error_html, log_event("ERROR: OPENAI_API_KEY not found. Generation disabled.", current_logs)

    try:
        logs = log_event("Constructing prompt with context...", current_logs)
        
        # Construct the full prompt: user request plus flattened search context.
        context_str = "\n".join(search_context) if search_context else "No specific context found."
        full_prompt = f"User Request: {prompt}\n\nContext from Search:\n{context_str}"
        
        logs = log_event("Sending request to LLM (GPT-4o)...", logs)
        
        # API Call
        completion = openai_client.chat.completions.create(
            model="gpt-4o", # Production grade model
            messages=[
                {"role": "system", "content": get_system_prompt()},
                {"role": "user", "content": full_prompt}
            ],
            temperature=0.7,
            max_tokens=2048
        )
        
        # System prompt instructs the model to emit raw HTML with no markdown
        # fences, so the content is used as-is for the preview pane.
        generated_code = completion.choices[0].message.content
        logs = log_event(f"Generation successful. Tokens used: {completion.usage.total_tokens}", logs)
        
        return generated_code, logs

    except Exception as e:
        # API failures become a visible error panel instead of a crash.
        error_msg = f"LLM Generation Error: {str(e)}"
        return f"<div style='color:red; padding:20px;'>{error_msg}</div>", log_event(error_msg, current_logs)

def process_message(message, history, code_state, use_search, current_logs):
    """
    Main pipeline: State Management -> Search -> Generate -> Update UI

    Args:
        message: The user's prompt text.
        history: Chatbot history in "messages" format (list of role/content dicts).
        code_state: Last generated code (hidden gr.Code state; currently unused here).
        use_search: Whether to ground generation with a Tavily search.
        current_logs: Accumulated console log text.

    Returns:
        (history, code, code, code, logs) — the same generated code feeds the
        split-view editor, the preview HTML, and the full-code editor.
    """
    logs = current_logs
    logs = log_event("--- New Request ---", logs)
    
    # Record the user's prompt in the chat history. Without this, the
    # submitted message never appears in the Chatbot component — only the
    # assistant reply was being appended.
    history.append({"role": "user", "content": message})
    
    # 1. Web Search Step (Conditional)
    search_context = []
    if use_search:
        search_context, logs = real_web_search(message, logs)
    else:
        logs = log_event("Search skipped by user setting.", logs)
    
    # 2. Generation Step
    generated_code, logs = real_llm_generate(message, search_context, logs)
    
    # 3. Update Chat History
    assistant_response = "I've generated the code based on your request and the retrieved context."
    history.append({"role": "assistant", "content": assistant_response})
    
    return history, generated_code, generated_code, generated_code, logs

def update_preview_from_editor(code):
    """Mirror manual editor changes straight into the preview pane.

    Gradio invokes this on every editor change event; the raw HTML string is
    passed through unmodified so the gr.HTML component re-renders it.
    """
    return code

# --- Gradio Application (Gradio 6 Syntax) ---

# Custom CSS for "Upscaled UI": a faux browser chrome around the live preview
# (frame, traffic-light dots, URL bar) plus a dark terminal look for the
# Console tab. Injected app-wide via the `css=` argument at launch.
custom_css = """
/* Browser Frame Simulation */
.browser-frame {
    border: 1px solid #e5e7eb;
    border-radius: 8px;
    overflow: hidden;
    background: white;
    height: 100%;
    display: flex;
    flex-direction: column;
}

.browser-header {
    background: #f3f4f6;
    padding: 8px 12px;
    display: flex;
    align-items: center;
    border-bottom: 1px solid #e5e7eb;
    gap: 8px;
}

.browser-dots {
    display: flex;
    gap: 6px;
}

.dot {
    width: 10px;
    height: 10px;
    border-radius: 50%;
}

.dot-red { background: #ef4444; }
.dot-yellow { background: #f59e0b; }
.dot-green { background: #10b981; }

.url-bar {
    flex: 1;
    background: white;
    border-radius: 4px;
    padding: 4px 12px;
    font-size: 12px;
    color: #6b7280;
    text-align: center;
    border: 1px solid #d1d5db;
}

.preview-content {
    flex: 1;
    overflow: auto;
    padding: 0;
}

/* Terminal/Console Styling */
.console-output textarea {
    font-family: 'Menlo', 'Monaco', 'Courier New', monospace;
    background-color: #1e1e1e;
    color: #d4d4d4;
}
"""

# Define the theme using Gradio 6 syntax
# Soft base theme with indigo/cyan accents; .set() overrides individual
# design tokens (the "*name" strings reference built-in theme variables).
custom_theme = gr.themes.Soft(
    primary_hue="indigo",
    secondary_hue="cyan",
    neutral_hue="slate",
    font=gr.themes.GoogleFont("Inter"),
    text_size="lg",
    spacing_size="lg",
    radius_size="md"
).set(
    button_primary_background_fill="*primary_600",
    button_primary_background_fill_hover="*primary_700",
    block_title_text_weight="600",
    block_border_width="1px",
    block_border_color="*neutral_200",
)

# GRADIO 6: NO parameters in gr.Blocks() constructor!
# NOTE(review): classic Gradio releases put theme=/css= on gr.Blocks(), not
# launch() — confirm this split against the installed Gradio version.
with gr.Blocks() as demo:
    
    # Header with branding
    gr.HTML("""
    <div style="display: flex; justify-content: space-between; align-items: center; padding: 10px 20px; border-bottom: 1px solid #e5e7eb; margin-bottom: 20px;">
        <div style="display: flex; align-items: center; gap: 10px;">
            <h1 style="margin: 0; font-size: 1.5rem; font-weight: 700; color: #4f46e5;">Bolt.new <span style="color: #1f2937; font-weight: 300;">Production</span></h1>
        </div>
        <div>
            <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="text-decoration: none; color: #4b5563; font-size: 0.9rem; display: flex; align-items: center; gap: 5px;">
                Built with anycoder ↗
            </a>
        </div>
    </div>
    """)

    # Main State
    # Hidden Code component acting as persistent state for the latest
    # generated source; fed into process_message as `code_state`.
    current_code = gr.Code(value="", visible=False, language="html")
    
    with gr.Row(equal_height=True):
        # --- LEFT COLUMN: Chat & Controls ---
        with gr.Column(scale=3, min_width=350):
            gr.Markdown("### 💬 AI Architect (Production)")
            gr.Markdown("Describe your web app. Connected to GPT-4o & Tavily Search.")
            
            # "messages" type expects a list of {"role": ..., "content": ...}
            # dicts — matching what process_message appends.
            chatbot = gr.Chatbot(
                label="Conversation",
                height=500,
                show_copy_button=True,
                type="messages",
                avatar_images=(None, "https://huggingface.co/spaces/friuns/bolt.new/resolve/main/logo.png")
            )
            
            with gr.Row():
                with gr.Accordion("⚙️ Production Settings", open=False):
                    use_search = gr.Checkbox(
                        label="Enable Tavily Search",
                        value=True,
                        info="Ground the AI generation with real-time web data."
                    )
                    # Note: In a real app, this might dynamically change the model client
                    model_display = gr.Textbox(
                        value="gpt-4o (OpenAI)",
                        label="Active Model",
                        interactive=False
                    )

            with gr.Row():
                msg_input = gr.Textbox(
                    label="Prompt",
                    placeholder="e.g., Create a responsive dashboard layout...",
                    lines=2,
                    scale=4,
                    autofocus=True
                )
                submit_btn = gr.Button("Generate", variant="primary", scale=1, size="lg")
            
            gr.Examples(
                examples=[
                    ["Create a modern pricing table with 3 tiers and hover effects."],
                    ["Build a hero section for a SaaS analytics platform."],
                    ["Design a dark-themed login form with validation styles."],
                    ["Generate a clean footer with social links and newsletter signup."]
                ],
                inputs=msg_input
            )

        # --- RIGHT COLUMN: IDE & Preview ---
        with gr.Column(scale=7):
            gr.Markdown("### 🛠️ Workspace")
            
            with gr.Tabs() as workspace_tabs:
                # Tab 1: Split View (Default)
                with gr.Tab("Split View", id="split"):
                    with gr.Row():
                        # Code Editor
                        with gr.Column(scale=1):
                            gr.Markdown("**Code Editor**")
                            code_editor = gr.Code(
                                label="index.html",
                                language="html",
                                lines=30,
                                interactive=True,
                                value="<!-- AI generated code will appear here -->"
                            )
                        
                        # Live Preview — wrapped in the faux browser chrome
                        # styled by custom_css (.browser-frame etc.).
                        with gr.Column(scale=1):
                            gr.Markdown("**Live Preview**")
                            with gr.Group(elem_classes="browser-frame"):
                                with gr.Row(elem_classes="browser-header"):
                                    with gr.Column(scale=0, min_width=40):
                                        with gr.Row(elem_classes="browser-dots"):
                                            gr.HTML('<div class="dot dot-red"></div>')
                                            gr.HTML('<div class="dot dot-yellow"></div>')
                                            gr.HTML('<div class="dot dot-green"></div>')
                                    # Decorative only — no navigation happens.
                                    url_display = gr.Textbox(
                                        value="http://localhost:3000",
                                        interactive=False,
                                        container=False,
                                        scale=1,
                                        elem_classes="url-bar"
                                    )
                                preview_html = gr.HTML(
                                    value="<div style='display:flex;justify-content:center;align-items:center;height:100%;color:#9ca3af;'>Preview will appear here</div>",
                                    elem_classes="preview-content"
                                )

                # Tab 2: Full Code
                with gr.Tab("Full Code", id="code"):
                    full_code_editor = gr.Code(
                        label="Source Code",
                        language="html",
                        lines=40,
                        interactive=True
                    )

                # Tab 3: Console/Logs
                with gr.Tab("Console", id="console"):
                    # Initial value reports client connectivity determined at
                    # module import time (openai_client / tavily_client).
                    console_output = gr.Textbox(
                        label="Execution Logs",
                        lines=10,
                        interactive=False,
                        container=True,
                        elem_classes="console-output",
                        value=f"[System] Ready to build.\n[System] OpenAI Client: {'Connected' if openai_client else 'Not Found (Check OPENAI_API_KEY)'}\n[System] Tavily Client: {'Connected' if tavily_client else 'Not Found (Check TAVILY_API_KEY)'}"
                    )

            with gr.Row():
                status_bar = gr.Textbox(
                    label="Status",
                    value="Ready",
                    interactive=False,
                    scale=4,
                    container=False
                )
                deploy_btn = gr.Button("Simulate Deploy", variant="stop", scale=1)

    # --- Event Listeners ---
    
    # 1. Main Generation Flow
    # Both the button click and the textbox Enter-submit run the same
    # pipeline, then clear the prompt box via a chained .then().
    submit_btn.click(
        fn=process_message,
        inputs=[msg_input, chatbot, current_code, use_search, console_output],
        outputs=[chatbot, code_editor, preview_html, full_code_editor, console_output]
    ).then(
        lambda: gr.Textbox(value=""),
        None,
        msg_input
    )

    msg_input.submit(
        fn=process_message,
        inputs=[msg_input, chatbot, current_code, use_search, console_output],
        outputs=[chatbot, code_editor, preview_html, full_code_editor, console_output]
    ).then(
        lambda: gr.Textbox(value=""),
        None,
        msg_input
    )

    # 2. Real-time Preview Updates (Editor -> Preview)
    code_editor.change(
        fn=update_preview_from_editor,
        inputs=code_editor,
        outputs=preview_html
    )
    
    # Sync full code editor
    code_editor.change(
        fn=lambda x: x,
        inputs=code_editor,
        outputs=full_code_editor
    )
    
    # Reverse sync: edits in the Full Code tab update both the split-view
    # editor and the preview (the single return value fans out to both).
    full_code_editor.change(
        fn=lambda x: x,
        inputs=full_code_editor,
        outputs=[code_editor, preview_html]
    )

    # 3. Mock Deploy Action (Updated Log)
    def mock_deploy(logs):
        """Simulate a build/deploy pipeline by appending staged log lines.

        Purely cosmetic — no deployment happens. The sleeps only delay the
        single final response; Gradio shows all lines at once.
        NOTE(review): `timestamp` is captured once before the sleeps, so all
        three log lines share the same time — recompute per line if accurate
        timing matters.
        """
        timestamp = datetime.datetime.now().strftime("%H:%M:%S")
        new_logs = f"{logs}\n[{timestamp}] Triggering build pipeline..."
        time.sleep(0.5)
        new_logs = f"{new_logs}\n[{timestamp}] Optimizing assets..."
        time.sleep(0.5)
        new_logs = f"{new_logs}\n[{timestamp}] Deployment to Vercel successful! 🚀"
        return "Deployed", new_logs

    deploy_btn.click(
        fn=mock_deploy,
        inputs=console_output,
        outputs=[status_bar, console_output]
    )

# --- Launch Configuration ---
# GRADIO 6: All app-level params go in launch()!
# NOTE(review): in classic Gradio releases `theme`/`css` belong to the
# gr.Blocks() constructor and `footer_links` is not a launch() parameter —
# confirm these kwargs against the Gradio version pinned in requirements.
demo.launch(
    theme=custom_theme,
    css=custom_css,
    footer_links=[
        {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}
    ]
)