"""
Production FastAPI Backend for Image2Code
Wraps the VLM agent into a REST API for HuggingFace Spaces deployment
"""
from fastapi import FastAPI, File, UploadFile, Form, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
import uvicorn
import tempfile
import os
import shutil
from typing import Optional
from uuid import uuid4

# Import agent components
from agent_azure_vlm_tools import ModelManager, azure_vlm_app, CodeRefineState

app = FastAPI(
    title="Image2Code VLM API",
    description="Backend API for Image2Code using Qwen2.5-VL-7B-Instruct",
    version="1.0.0"
)

# CORS configuration for production.
# Note: Starlette's CORSMiddleware does not expand wildcards inside
# allow_origins, so patterns like "https://*.vercel.app" must be matched
# via allow_origin_regex instead.
app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        "http://localhost:5173",  # Local dev
        "http://localhost:3000",
        # Add your specific production domain here
    ],
    allow_origin_regex=r"https://.*\.vercel\.app",  # Vercel deployments
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global model manager (singleton)
models = None


@app.on_event("startup")
async def startup_event():
    """Load models on server startup"""
    global models
    print("🚀 Starting Image2Code VLM API...")
    print("📦 Loading Qwen2.5-VL-7B-Instruct model... This may take 2-3 minutes.")
    models = ModelManager()
    print("✅ Models loaded successfully!")


@app.get("/")
async def root():
    """Health check endpoint"""
    return {
        "status": "healthy",
        "service": "Image2Code VLM API",
        "model": "Qwen2.5-VL-7B-Instruct",
        "version": "1.0.0",
        "ready": models is not None
    }


@app.get("/health")
async def health_check():
    """Detailed health check"""
    return {
        "status": "healthy" if models is not None else "starting",
        "models_loaded": models is not None,
        "gpu_available": True,  # Update based on actual check
    }


@app.post("/api/generate")
async def generate_ui(
    prompt: str = Form(..., description="Text prompt describing the UI to generate"),
    image: UploadFile = File(..., description="Wireframe/screenshot image"),
):
    """
    Generate UI code from a prompt and wireframe image using Qwen VLM
    
    Args:
        prompt: Description of the desired UI
        image: Uploaded image file (PNG, JPG, etc.)
    
    Returns:
        JSON response with generated code, plan, and reasoning
    """
    if not models:
        raise HTTPException(
            status_code=503,
            detail="Models are still loading. Please wait a moment and try again."
        )
    
    # Validate image type (content_type may be None for some clients)
    if not image.content_type or not image.content_type.startswith("image/"):
        raise HTTPException(
            status_code=400,
            detail=f"Invalid file type: {image.content_type}. Please upload an image."
        )
    
    # Create temporary directory for this request
    temp_dir = tempfile.mkdtemp()
    
    try:
        # Save uploaded image
        image_path = os.path.join(temp_dir, "input_wireframe.png")
        contents = await image.read()
        with open(image_path, "wb") as f:
            f.write(contents)
        
        # Define output paths
        out_html = os.path.join(temp_dir, "generated_output.html")
        out_brief = os.path.join(temp_dir, "generated_brief.txt")
        out_reldesc = os.path.join(temp_dir, "generated_reldesc.txt")
        
        # Build state for agent
        # (note: the `prompt` form field is accepted for API compatibility
        # but is not currently forwarded into the agent state)
        state = CodeRefineState(
            image_path=image_path,
            out_rel_desc=out_reldesc,
            out_brief=out_brief,
            out_html=out_html,
            vision_deployment="gpt-4.1-mini",
            text_deployment="gpt-4.1-mini",
            reldesc_tokens=700,
            brief_tokens=1100,
            code_tokens=2200,
            judge_tokens=900,
            temp=0.12,
            refine_max_iters=3,
            refine_threshold=8,
            shot_width=1536,
            shot_height=900
        )
        
        # Run the agent
        run_id = f"api-{uuid4()}"
        config = {"configurable": {"thread_id": run_id}}
        
        print(f"🎨 Processing wireframe with run_id: {run_id}")
        agent_result = azure_vlm_app.invoke(state, config=config)
        print(f"✅ Agent completed for run_id: {run_id}")
        
        # Read generated code
        generated_code = ""
        if os.path.exists(out_html):
            with open(out_html, 'r', encoding='utf-8') as f:
                generated_code = f.read()
        
        # Build response messages
        messages = []
        
        messages.append({
            "id": f"msg-{uuid4()}",
            "role": "assistant",
            "variant": "accent",
            "content": f"**Plan:** Generated UI from your wireframe using Qwen2.5-VL-7B-Instruct"
        })
        
        messages.append({
            "id": f"msg-{uuid4()}",
            "role": "assistant",
            "variant": "subtle",
            "content": """**Process:**
• Loaded and analyzed wireframe structure
• Identified UI components and layout
• Generated semantic HTML/CSS code
• Applied multi-stage refinement pipeline"""
        })
        
        if generated_code:
            messages.append({
                "id": f"msg-{uuid4()}",
                "role": "assistant",
                "variant": "accent",
                "content": f"```html\n{generated_code}\n```"
            })
        else:
            messages.append({
                "id": f"msg-{uuid4()}",
                "role": "assistant",
                "variant": "subtle",
                "content": "⚠️ Code generation completed but output file was empty."
            })
        
        return JSONResponse(content={
            "messages": messages,
            "status": {
                "kind": "success",
                "text": "UI code generated successfully",
                "detail": f"Run ID: {run_id}"
            },
            "usedFallback": False
        })
        
    except Exception as e:
        import traceback
        error_details = traceback.format_exc()
        print(f"❌ Error processing request: {error_details}")
        
        raise HTTPException(
            status_code=500,
            detail=f"Error generating UI: {str(e)}"
        )
    
    finally:
        # Cleanup temporary files
        try:
            shutil.rmtree(temp_dir)
        except Exception as e:
            print(f"⚠️ Failed to cleanup temp dir: {e}")


@app.post("/api/chat")
async def chat_with_vlm(
    prompt: str = Form(..., description="Chat message/question"),
    image: Optional[UploadFile] = File(None, description="Optional image for vision tasks"),
):
    """
    Simple chat endpoint for VLM queries
    """
    if not models:
        raise HTTPException(
            status_code=503,
            detail="Models not loaded yet"
        )
    
    return JSONResponse(content={
        "response": "Chat endpoint is available but not fully implemented yet.",
        "status": "success"
    })


if __name__ == "__main__":
    # For local testing
    port = int(os.environ.get("PORT", 7860))  # HF Spaces uses 7860
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=port,
        timeout_keep_alive=300  # 5 minutes for long-running requests
    )
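
# On HuggingFace Spaces the container typically runs this module directly
# (python <this file>); to launch via the uvicorn CLI instead, point it at
# this module's `app` object, e.g. (module name is hypothetical):
#   uvicorn server:app --host 0.0.0.0 --port 7860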