Spaces:
Paused
Fix: Gradio UI image display for text-to-LEGO generation
Problem: Rendered LEGO images weren't showing after successful rendering
Root cause: Return value mismatch between unified_input_handler() and Gradio outputs
Changes:
- unified_input_handler() now returns 6 values (was 5)
- Added LDR text output as 5th return value
- Added rendered_image as 6th return value
- Updated Gradio output mapping to include render_result (Image component)
Affected functions:
- unified_input_handler(): Returns (renderings, part_list, status, ldr_data, ldr_text, rendered_image)
- process_ldr_file(): Returns (..., None, None) for file upload flow
- process_ldr_from_path(): Returns (..., None, None) for path-based flow
UI Flow:
1. User enters "red car" → CLIP retrieval → GPT generation → Blender render
2. Image now displays correctly in "Rendering Result" section
3. LDR text displays in "Generated LDR Content" textbox
This completes the text-to-LEGO visualization pipeline.
- code/demo.py +16 -12
|
@@ -152,10 +152,10 @@ def process_ldr_file(file, process_for_model=True):
|
|
| 152 |
If False, skip numerical conversion (only extract parts for visualization).
|
| 153 |
|
| 154 |
Returns:
|
| 155 |
-
Tuple of (renderings, part_list, status, process_ldr_data, None)
|
| 156 |
"""
|
| 157 |
if not file:
|
| 158 |
-
return None, None, "Please upload an LDR file", None, None
|
| 159 |
|
| 160 |
# Read LDR content
|
| 161 |
with open(file.name, 'r') as f:
|
|
@@ -202,7 +202,7 @@ def process_ldr_file(file, process_for_model=True):
|
|
| 202 |
print(f"🔍 [DEBUG] Skipping numerical conversion (process_for_model=False)")
|
| 203 |
|
| 204 |
print(f"🔍 [DEBUG] Final process_ldr_data: {'None' if process_ldr_data is None else 'has data'}")
|
| 205 |
-
return renderings, part_list, f"File loaded, {len(part_names)} valid parts identified", process_ldr_data, None
|
| 206 |
|
| 207 |
# except Exception as e:
|
| 208 |
# return None, None, f"File processing failed: {str(e)}", None, None
|
|
@@ -218,10 +218,10 @@ def process_ldr_from_path(ldr_path, process_for_model=False):
|
|
| 218 |
If False (default), skip numerical conversion for visualization-only.
|
| 219 |
|
| 220 |
Returns:
|
| 221 |
-
Tuple of (renderings, part_list, status, process_ldr_data, None)
|
| 222 |
"""
|
| 223 |
if not os.path.exists(ldr_path):
|
| 224 |
-
return None, None, f"LDR file not found: {ldr_path}", None, None
|
| 225 |
|
| 226 |
# Create a mock file object to reuse process_ldr_file logic
|
| 227 |
class MockFile:
|
|
@@ -249,7 +249,7 @@ def unified_input_handler(file, text_query):
|
|
| 249 |
# Case 2: Text query (neural generation)
|
| 250 |
elif text_query and text_query.strip():
|
| 251 |
if not CLIP_AVAILABLE:
|
| 252 |
-
return None, None, "❌ Text-to-LEGO feature is not available (generation module not loaded)", None, None
|
| 253 |
|
| 254 |
try:
|
| 255 |
# Generate LDR design from text
|
|
@@ -261,7 +261,7 @@ def unified_input_handler(file, text_query):
|
|
| 261 |
result = retriever.get_best_match(query)
|
| 262 |
|
| 263 |
if result is None or not result.get("ldr_exists", True):
|
| 264 |
-
return None, None, f"❌ Could not generate design for '{query}'", None, None
|
| 265 |
|
| 266 |
ldr_path = result["ldr_path"]
|
| 267 |
confidence = result["similarity"]
|
|
@@ -277,7 +277,7 @@ def unified_input_handler(file, text_query):
|
|
| 277 |
|
| 278 |
# Check if numerical conversion succeeded
|
| 279 |
if process_ldr_data is None:
|
| 280 |
-
return None, None, f"❌ Failed to convert LDR to model format (missing label mappings)", None, None
|
| 281 |
|
| 282 |
# Generate new LDR using GPT model (GPU-accelerated)
|
| 283 |
new_ldr_filename = f"generated_{uuid.uuid4()}.ldr"
|
|
@@ -295,17 +295,21 @@ def unified_input_handler(file, text_query):
|
|
| 295 |
# Update status message with generation info
|
| 296 |
enhanced_status = f"✨ Generated from car_{car_id} (confidence: {confidence*100:.1f}%)\n🤖 GPT model created new assembly sequence\n{status}"
|
| 297 |
|
| 298 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 299 |
|
| 300 |
except Exception as e:
|
| 301 |
import traceback
|
| 302 |
error_msg = f"❌ Design generation failed: {str(e)}\n{traceback.format_exc()}"
|
| 303 |
print(error_msg)
|
| 304 |
-
return None, None, error_msg, None, None
|
| 305 |
|
| 306 |
# Case 3: No input
|
| 307 |
else:
|
| 308 |
-
return None, None, "⚠️ Please upload an LDR file OR enter a text description", None, None
|
| 309 |
|
| 310 |
|
| 311 |
import traceback # 导入traceback,用于打印完整堆栈
|
|
@@ -474,7 +478,7 @@ with gr.Blocks(
|
|
| 474 |
upload_btn.click(
|
| 475 |
fn=unified_input_handler,
|
| 476 |
inputs=[ldr_file, text_input],
|
| 477 |
-
outputs=[part_renderings, part_list, status_msg, original_ldr, predicted_ldr]
|
| 478 |
)
|
| 479 |
|
| 480 |
predict_btn.click(
|
|
|
|
| 152 |
If False, skip numerical conversion (only extract parts for visualization).
|
| 153 |
|
| 154 |
Returns:
|
| 155 |
+
Tuple of (renderings, part_list, status, process_ldr_data, None, None)
|
| 156 |
"""
|
| 157 |
if not file:
|
| 158 |
+
return None, None, "Please upload an LDR file", None, None, None
|
| 159 |
|
| 160 |
# Read LDR content
|
| 161 |
with open(file.name, 'r') as f:
|
|
|
|
| 202 |
print(f"🔍 [DEBUG] Skipping numerical conversion (process_for_model=False)")
|
| 203 |
|
| 204 |
print(f"🔍 [DEBUG] Final process_ldr_data: {'None' if process_ldr_data is None else 'has data'}")
|
| 205 |
+
return renderings, part_list, f"File loaded, {len(part_names)} valid parts identified", process_ldr_data, None, None
|
| 206 |
|
| 207 |
# except Exception as e:
|
| 208 |
# return None, None, f"File processing failed: {str(e)}", None, None
|
|
|
|
| 218 |
If False (default), skip numerical conversion for visualization-only.
|
| 219 |
|
| 220 |
Returns:
|
| 221 |
+
Tuple of (renderings, part_list, status, process_ldr_data, None, None)
|
| 222 |
"""
|
| 223 |
if not os.path.exists(ldr_path):
|
| 224 |
+
return None, None, f"LDR file not found: {ldr_path}", None, None, None
|
| 225 |
|
| 226 |
# Create a mock file object to reuse process_ldr_file logic
|
| 227 |
class MockFile:
|
|
|
|
| 249 |
# Case 2: Text query (neural generation)
|
| 250 |
elif text_query and text_query.strip():
|
| 251 |
if not CLIP_AVAILABLE:
|
| 252 |
+
return None, None, "❌ Text-to-LEGO feature is not available (generation module not loaded)", None, None, None
|
| 253 |
|
| 254 |
try:
|
| 255 |
# Generate LDR design from text
|
|
|
|
| 261 |
result = retriever.get_best_match(query)
|
| 262 |
|
| 263 |
if result is None or not result.get("ldr_exists", True):
|
| 264 |
+
return None, None, f"❌ Could not generate design for '{query}'", None, None, None
|
| 265 |
|
| 266 |
ldr_path = result["ldr_path"]
|
| 267 |
confidence = result["similarity"]
|
|
|
|
| 277 |
|
| 278 |
# Check if numerical conversion succeeded
|
| 279 |
if process_ldr_data is None:
|
| 280 |
+
return None, None, f"❌ Failed to convert LDR to model format (missing label mappings)", None, None, None
|
| 281 |
|
| 282 |
# Generate new LDR using GPT model (GPU-accelerated)
|
| 283 |
new_ldr_filename = f"generated_{uuid.uuid4()}.ldr"
|
|
|
|
| 295 |
# Update status message with generation info
|
| 296 |
enhanced_status = f"✨ Generated from car_{car_id} (confidence: {confidence*100:.1f}%)\n🤖 GPT model created new assembly sequence\n{status}"
|
| 297 |
|
| 298 |
+
# Read generated LDR content for display
|
| 299 |
+
with open(new_ldr_path, 'r', encoding='utf-8') as f:
|
| 300 |
+
ldr_text = f.read()
|
| 301 |
+
|
| 302 |
+
return renderings, part_list, enhanced_status, process_ldr_data, ldr_text, rendered_image
|
| 303 |
|
| 304 |
except Exception as e:
|
| 305 |
import traceback
|
| 306 |
error_msg = f"❌ Design generation failed: {str(e)}\n{traceback.format_exc()}"
|
| 307 |
print(error_msg)
|
| 308 |
+
return None, None, error_msg, None, None, None
|
| 309 |
|
| 310 |
# Case 3: No input
|
| 311 |
else:
|
| 312 |
+
return None, None, "⚠️ Please upload an LDR file OR enter a text description", None, None, None
|
| 313 |
|
| 314 |
|
| 315 |
import traceback # 导入traceback,用于打印完整堆栈
|
|
|
|
| 478 |
upload_btn.click(
|
| 479 |
fn=unified_input_handler,
|
| 480 |
inputs=[ldr_file, text_input],
|
| 481 |
+
outputs=[part_renderings, part_list, status_msg, original_ldr, predicted_ldr, render_result]
|
| 482 |
)
|
| 483 |
|
| 484 |
predict_btn.click(
|