0xZohar commited on
Commit
a27d916
·
verified ·
1 Parent(s): 534a3ae

Add @spaces.GPU decorators and lazy loading for ZeroGPU

Browse files
Files changed (1) hide show
  1. code/demo.py +562 -0
code/demo.py ADDED
@@ -0,0 +1,562 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import uuid
4
+ import shutil
5
+ import functools
6
+ from PIL import Image, ImageDraw, ImageFont
7
+ import numpy as np
8
+ import torch
9
+
10
# ZeroGPU Support - CRITICAL for HuggingFace Spaces
try:
    import spaces
except ImportError:
    print("⚠️ ZeroGPU not available - running in standard mode")
    ZEROGPU_AVAILABLE = False

    # Local-development stand-in: same attribute surface as the real
    # `spaces` module, but `GPU(...)` is an identity decorator factory.
    class spaces:
        @staticmethod
        def GPU(duration=60):
            def decorator(func):
                return func
            return decorator
else:
    ZEROGPU_AVAILABLE = True
    print("✅ ZeroGPU support enabled")
25
+
26
+ #from cube3d.render.render_bricks import render_bricks
27
+ from cube3d.render.render_bricks_safe import render_bricks_safe
28
+ from cube3d.training.engine import Engine, EngineFast
29
+ from cube3d.training.bert_infer import generate_tokens
30
+ from cube3d.training.utils import normalize_bboxs
31
+ from cube3d.training.process_single_ldr import process_ldr_data, process_ldr_flatten, logits2botldrpr
32
+ from cube3d.config import HF_CACHE_DIR
33
+
34
# Neural design generation for text-to-LEGO functionality
try:
    from clip_retrieval import get_retriever
except ImportError:
    # Retrieval module missing: the UI keeps working, text input is disabled.
    print("⚠️ Text-to-design module not available. Text input feature will be disabled.")
    CLIP_AVAILABLE = False
else:
    CLIP_AVAILABLE = True
41
+
42
# Lazy loading for GPU models (ZeroGPU requirement)
# NOTE(review): these module-level handles appear unused — the
# functools.lru_cache wrappers below hold the singleton instances
# instead. Kept for now; confirm no external code reads them before removing.
_retriever = None
_gpt_engine = None
45
+
46
@functools.lru_cache(maxsize=1)
def get_clip_retriever_cached():
    """Build the CLIP retriever on first call and memoize it thereafter.

    Returns:
        The retriever object produced by clip_retrieval.get_retriever.
    """
    print("🔧 Initializing CLIP retriever (one-time setup)...")
    clip_retriever = get_retriever(data_root="data/1313个筛选车结构和对照渲染图")
    print(f"✅ CLIP retriever loaded ({clip_retriever.features.shape[0]} designs)")
    return clip_retriever
53
+
54
@functools.lru_cache(maxsize=1)
def get_gpt_engine_cached():
    """Construct the GPT inference engine on first call and memoize it.

    Checkpoint resolution depends on the environment: on Hugging Face
    Spaces (SPACE_ID set) the weights come from the Model Hub cache,
    otherwise from the local model_weights/ directory.

    Returns:
        An EngineFast instance in 'test' mode, targeting CUDA.
    """
    print("🔧 Initializing GPT engine (one-time setup)...")

    config_path = 'cube3d/configs/open_model_v0.5.yaml'

    # SPACE_ID is only defined inside a Hugging Face Space.
    if os.getenv("SPACE_ID") is not None:
        from huggingface_hub import hf_hub_download
        print("Loading GPT model from HuggingFace Model Hub...")
        # local_files_only: weights were pre-fetched into HF_CACHE_DIR at build time.
        shape_ckpt_path = hf_hub_download(
            repo_id="0xZohar/object-assembler-models",
            filename="save_shape_cars_whole_p_rot_scratch_4mask_randp.safetensors",
            cache_dir=HF_CACHE_DIR,
            local_files_only=True,
        )
        print(f"✅ GPT model loaded from cache: {shape_ckpt_path}")
    else:
        shape_ckpt_path = 'model_weights/save_shape_cars_whole_p_rot_scratch_4mask_randp.safetensors'

    # The "save" checkpoint is the same file as the shape checkpoint.
    save_gpt_ckpt_path = shape_ckpt_path

    # ZeroGPU: fixed device='cuda'; actual GPU allocation happens inside
    # @spaces.GPU-decorated functions. gpt_ckpt arg is None in test mode.
    engine = EngineFast(
        config_path,
        None,
        shape_ckpt_path,
        save_gpt_ckpt_path,
        device=torch.device('cuda'),
        mode='test',
    )
    print("✅ GPT engine initialized")
    return engine
88
+
89
# Make sure the temporary working directory exists (remote-server path).
TMP_DIR = "./tmp/ldr_processor_demo"
os.makedirs(TMP_DIR, exist_ok=True)
92
+
93
class MockFileStorage:
    """Minimal stand-in for a Gradio upload object.

    Exposes only `.name` (the file path), which is the single attribute
    the processing functions read from real Gradio uploads.
    """

    def __init__(self, file_path):
        # Mirror Gradio's file-object interface: the path lives in .name.
        self.name = file_path
96
+
97
# Mock model predictor (kept from the original logic).
def model_predict(ldr_content):
    """Produce a mock LDR assembly: one type-1 line per input part.

    Each non-empty input line is treated as a part name and placed at one
    of four cycling positions with a fixed color; parts are separated by
    "0 STEP" markers (between parts only, not after the last one).

    Args:
        ldr_content: Raw text, one part name per line.

    Returns:
        The assembled LDR text as a single newline-joined string.
    """
    names = [ln.strip() for ln in ldr_content.splitlines() if ln.strip()]
    placements = [(120.0, 0, 180.0), (90.0, 0, 210.0), (90.0, 0, 180.0), (70.0, 0, 170.0)]
    color_code = 115

    out_lines = []
    for idx, name in enumerate(names):
        x, y, z = placements[idx % len(placements)]
        out_lines.append(f"1 {color_code} {x} {y} {z} 0 0 1 0 1 0 -1 0 0 {name}")
        out_lines.append("0 STEP")
    # Drop the trailing STEP so steps only separate consecutive parts.
    return "\n".join(out_lines[:-1]) if out_lines else ""
112
+
113
# Directory of pre-rendered preview images for individual parts
# (one PNG per part, named after the part without its .dat suffix).
DEFAULT_PART_RENDER_PATH = "../data/car_1k/demos/example/part_ldr_1k_render/"
os.makedirs(DEFAULT_PART_RENDER_PATH, exist_ok=True)
115
def get_part_renderings(part_names):
    """Map each part name to its pre-rendered preview image.

    Args:
        part_names: Iterable of part file names (e.g. "3001.dat").

    Returns:
        List of (image_path, part_name) tuples suitable for gr.Gallery.
        Parts with no rendering fall back to "unknown_part.png".
    """
    renderings = []
    for part in part_names:
        # Preview image is the part name with the ".dat" extension stripped,
        # e.g. "3001.dat" -> "<render dir>/3001.png".
        # Strip only a *trailing* ".dat": the previous replace(".dat", "")
        # removed every occurrence and corrupted names containing ".dat"
        # mid-string.
        part_base = part[:-4] if part.endswith(".dat") else part
        part_render_path = os.path.join(DEFAULT_PART_RENDER_PATH, f"{part_base}.png")
        # Fall back to a placeholder image when no rendering exists.
        if not os.path.exists(part_render_path):
            part_render_path = os.path.join(DEFAULT_PART_RENDER_PATH, "unknown_part.png")

        renderings.append((part_render_path, part))
    return renderings
129
+
130
+
131
def process_data(data):
    """Pad a per-part token array to the fixed model length (410 rows).

    Args:
        data: 2-D numpy array of shape (num_parts, F), num_parts <= 410.

    Returns:
        One-element list containing the (410, F) padded array. Padding rows
        are filled with -1, then their last column (pad flag) is set to 1
        and their second-to-last column to 0.
    """
    max_num_tokens = 410
    processed_data = []

    def padding(data, max_len=300):
        # Append (max_len - num_rows) rows of -1.
        pad_data = np.pad(data, ((0, max_len - data.shape[0]), (0, 0)), 'constant', constant_values=-1)
        # Mark ONLY the padding rows. Using data.shape[0]: is equivalent to
        # the old data.shape[0]-max_len: for partial inputs, but is an empty
        # slice when the input already has max_len rows — the old negative
        # offset became [0:] in that case and clobbered every real row.
        pad_data[data.shape[0]:, -1] = 1  # flag label
        pad_data[data.shape[0]:, -2] = 0
        return pad_data

    processed_data.append(padding(data, max_num_tokens))
    return processed_data
143
# Handle an uploaded LDR file (original logic, with clearer structure).
def process_ldr_file(file, process_for_model=True):
    """Process an LDR file for display and optionally for model inference.

    Args:
        file: Object exposing .name, the path to the LDR file (a Gradio
            upload or a MockFileStorage).
        process_for_model: When True, also convert the file to the numerical
            token format the ML model needs (requires label mapping). When
            False, only part names are extracted for visualization.

    Returns:
        Tuple (renderings, part_list, status, model_tokens_or_None, None).
    """
    if not file:
        return None, None, "Please upload an LDR file", None, None

    with open(file.name, 'r') as fh:
        ldr_content = fh.read()

    # Collect part file names from type-1 (sub-file reference) lines.
    # A valid type-1 line has at least 12 whitespace-separated fields;
    # the part name is the last field, lowercased.
    part_names = [
        fields[-1].lower()
        for fields in (line.split() for line in ldr_content.splitlines())
        if len(fields) >= 12 and fields[0] == '1'
    ]

    renderings = get_part_renderings(part_names)
    part_list = "\n".join(part_names)

    model_tokens = None
    if process_for_model:
        # Re-read as raw lines for the numerical converter.
        with open(file.name, 'r') as fh:
            raw_lines = fh.readlines()
        ldr_data, _ = process_ldr_flatten(raw_lines)

        # Sort rows by columns (-4, -5, -3); lexsort's last key is primary.
        order = np.lexsort((ldr_data[:, -3], ldr_data[:, -5], ldr_data[:, -4]))
        model_tokens = process_data(ldr_data[order])

    return renderings, part_list, f"File loaded, {len(part_names)} valid parts identified", model_tokens, None
193
+
194
+ # except Exception as e:
195
+ # return None, None, f"File processing failed: {str(e)}", None, None
196
+
197
# Process an LDR file addressed by filesystem path (text-generated designs).
def process_ldr_from_path(ldr_path, process_for_model=False):
    """Process an LDR file from a filesystem path (not a Gradio upload).

    Args:
        ldr_path: Path to the LDR file.
        process_for_model: Forwarded to process_ldr_file. Defaults to False
            (visualization-only; skips the numerical conversion).

    Returns:
        Same 5-tuple as process_ldr_file.
    """
    if not os.path.exists(ldr_path):
        return None, None, f"LDR file not found: {ldr_path}", None, None

    # Wrap the path in a tiny object exposing .name so the Gradio-upload
    # code path in process_ldr_file can be reused unchanged.
    class _PathHolder:
        def __init__(self, path):
            self.name = path

    return process_ldr_file(_PathHolder(ldr_path), process_for_model=process_for_model)
220
+
221
+
222
# Unified input handler: supports both file upload and text query.
def unified_input_handler(file, text_query):
    """Dispatch between the two input modes of the demo.

    Precedence:
        1. An uploaded file always wins.
        2. Otherwise a non-blank text query triggers CLIP retrieval + GPT
           generation.
        3. Otherwise a warning is returned.

    Returns:
        (renderings, part_list, status, model_tokens, rendered_image_or_None)
    """
    # Mode 1: file upload (original flow).
    if file is not None:
        return process_ldr_file(file)

    # Mode 3: nothing supplied at all.
    if not (text_query and text_query.strip()):
        return None, None, "⚠️ Please upload an LDR file OR enter a text description", None, None

    # Mode 2: text-driven generation.
    if not CLIP_AVAILABLE:
        return None, None, "❌ Text-to-LEGO feature is not available (generation module not loaded)", None, None

    try:
        query = text_query.strip()
        print(f"🎨 Generating design from: {query}")

        # Retrieve the closest reference design (retriever is lazily cached).
        retriever = get_clip_retriever_cached()
        match = retriever.get_best_match(query)

        if match is None or not match.get("ldr_exists", True):
            return None, None, f"❌ Could not generate design for '{query}'", None, None

        ldr_path = match["ldr_path"]
        confidence = match["similarity"]
        car_id = match["car_id"]
        print(f"✅ Found reference design: car_{car_id} (confidence: {confidence:.3f})")

        # Convert the reference design to model tokens (label mapping on).
        renderings, part_list, status, model_tokens, _ = process_ldr_from_path(
            ldr_path,
            process_for_model=True,
        )
        if model_tokens is None:
            return None, None, f"❌ Failed to convert LDR to model format (missing label mappings)", None, None

        # Run the GPT model (GPU) to write a fresh assembly sequence.
        new_ldr_path = os.path.join(TMP_DIR, f"generated_{uuid.uuid4()}.ldr")
        generate_ldr_gpu(model_tokens, new_ldr_path)

        # Render the GPT-generated LDR file.
        print(f"🎨 Rendering GPT-generated LEGO design...")
        rendered_image = render_bricks_safe(new_ldr_path)

        enhanced_status = f"✨ Generated from car_{car_id} (confidence: {confidence*100:.1f}%)\n🤖 GPT model created new assembly sequence\n{status}"
        return renderings, part_list, enhanced_status, model_tokens, rendered_image

    except Exception as e:
        import traceback
        error_msg = f"❌ Design generation failed: {str(e)}\n{traceback.format_exc()}"
        print(error_msg)
        return None, None, error_msg, None, None
293
+
294
+
295
+ import traceback # 导入traceback,用于打印完整堆栈
296
+
297
@spaces.GPU(duration=120)  # GPT generation can take up to 120 seconds
def generate_ldr_gpu(ldr_content, ldr_path):
    """
    Generate LDR file using GPT model (GPU-accelerated)

    This function is decorated with @spaces.GPU to enable GPU allocation
    on HuggingFace ZeroGPU Spaces. The engine is loaded lazily and cached.

    Runs generate_tokens twice: the first pass produces rotation logits,
    whose argmax classes are written back into the inputs for a second,
    rotation-conditioned pass; pass-1 rotation logits are then copied over
    the pass-2 logits before decoding to LDraw lines.

    Args:
        ldr_content: Numerical LDR data — a list whose first element is the
            padded (max_tokens, F) numpy array from process_data.
        ldr_path: Output path for generated LDR file

    Returns:
        List of predicted LDR lines
    """
    print("🤖 Running GPT model to generate new assembly sequence...")
    print(" Using CUDA graphs (this will take some time to warmup)")

    # Token-layout constants. NOTE(review): presumably each part occupies
    # `stride` token slots and rot_num is the number of discrete rotation
    # classes (+1 below for a null/extra class) — confirm against
    # process_single_ldr / bert_infer.
    stride = 5
    rot_num = 24
    bert_shift = 1
    shift = 0

    # Lazy load GPT engine (cached, initialized only once)
    engine = get_gpt_engine_cached()

    # ZeroGPU: Device is always 'cuda' inside @spaces.GPU decorated functions
    device = 'cuda'

    print(" Graph compiled, starting generation...")

    # Batch dimension added: (1, max_tokens, F) on the GPU.
    targets_source = torch.from_numpy(ldr_content[0]).to(device).unsqueeze(0)
    targets = targets_source.clone()
    # Pass 1. NOTE(review): the positional args ('', None, None, False, 0.9,
    # None, 1, 'test') mirror generate_tokens' signature; the 1 vs 0 flag is
    # what differs between the two passes — verify its meaning in bert_infer.
    logits, inputs_ids, strategy, mask, cut_idx = generate_tokens(
        engine,
        '',
        targets,
        None,
        None,
        False,
        0.9,
        None,
        1,
        'test'
    )
    # Fresh copy of the untouched inputs for the second pass.
    targets = targets_source.clone()

    # Write pass-1 argmax rotation classes into feature column -7; the
    # strided slice [1:-3:stride] selects one logit row per part.
    targets[:,shift:,-7] = logits[:,1:-3:stride,:rot_num+1].permute(0, 2, 1).argmax(dim=1)

    # Pass 2: rotation-conditioned generation (flag 0).
    logits_x, inputs_ids, strategy, mask, cut_idx = generate_tokens(
        engine,
        '',
        targets,
        None,
        None,
        False,
        0.9,
        None,
        0,
        'test'
    )

    # Overwrite pass-2 rotation logits with pass-1's so the decoded rotations
    # match what pass 2 was conditioned on.
    logits_x[:,1+bert_shift:-3:stride,:rot_num+1] = logits[:,1+bert_shift:-3:stride,:rot_num+1]

    # Decode logits + token ids into LDraw lines; also writes ldr_path.
    predict_ldr = logits2botldrpr(logits_x[0].cpu().detach().numpy(), inputs_ids[0].cpu().detach().numpy(), stride, 0, output_file=ldr_path)

    print(f"✅ GPT generated {len(predict_ldr)} parts")
    return predict_ldr
365
+
366
# CPU-side orchestrator for prediction + rendering (non-GPU operations).
def predict_and_render(ldr_content):
    """Generate a new LDR with the GPT model, then render it to an image.

    Handles the CPU work (path allocation, rendering) and delegates the
    GPU-accelerated generation to generate_ldr_gpu.

    Args:
        ldr_content: Numerical token data from process_ldr_file (held in
            the original_ldr gr.State).

    Returns:
        (predicted_ldr_or_message, ldr_file_path_or_None, render_path_or_None)
    """
    if not ldr_content:
        return "Please upload an LDR file first", None, None

    # Unique output paths under the temp dir for this run.
    ldr_path = os.path.join(TMP_DIR, f"{uuid.uuid4()}.ldr")
    render_path = os.path.join(TMP_DIR, f"{uuid.uuid4()}.png")

    # GPU-accelerated generation writes ldr_path and returns its lines.
    predicted_ldr = generate_ldr_gpu(ldr_content, ldr_path)

    # Render the freshly generated LDR to a PNG.
    render_bricks_safe(ldr_path, render_path)

    return predicted_ldr, ldr_path, render_path
389
+
390
+ #except Exception as e:
391
+ # error_msg = f"类型: {type(e).__name__}, 信息: {str(e)}, 堆栈: {traceback.format_exc()}"
392
+ # return f"Prediction failed: {error_msg}", None, None
393
+
394
# Clear temporary files (original logic preserved).
def clean_temp_files():
    """Wipe and recreate the temp directory.

    Returns:
        A (Chinese) status string: success, or the failure reason.
    """
    try:
        shutil.rmtree(TMP_DIR)
        os.makedirs(TMP_DIR, exist_ok=True)
    except Exception as e:
        return f"清理失败: {str(e)}"
    else:
        return "临时文件已清理"
402
+
403
#gr.Blocks.set_language("en")
# Markdown blurb shown at the top of the demo UI.
_DESCRIPTION = '''
* **Option 1**: Upload an LDR file with part names
* **Option 2**: Describe your desired LEGO design in text (e.g., "red sports car")
* Generate a 3D assembly plan in LDR format
'''
409
# Gradio UI: left column = inputs and controls, right column = previews
# and results. NOTE(review): component nesting was reconstructed from a
# whitespace-mangled source — verify column membership against the live UI.
with gr.Blocks(
    title="ObjectAssembler: Assemble Your Object with Diverse Components",
) as demo:

    gr.Markdown("ObjectAssembler: Assemble Your Object with Diverse Components")
    gr.Markdown(_DESCRIPTION)

    # Holds the numerical token data of the loaded design between callbacks.
    original_ldr = gr.State("")

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Input Method")
            ldr_file = gr.File(
                label="Upload LDR File",
                file_types=[".ldr"],
            )
            gr.Markdown("**— OR —**")
            text_input = gr.Textbox(
                label="Describe Your Design",
                placeholder="e.g., red sports car, blue police car, yellow construction vehicle...",
                lines=2
            )
            upload_btn = gr.Button("Load Input", variant="secondary")
            predict_btn = gr.Button("Generate New LDR & Render", variant="primary")
            clean_btn = gr.Button("Clean Temporary Files", variant="stop")
            status_msg = gr.Textbox(label="Status Info", interactive=False)

            gr.Markdown("### Original Part List")
            part_list = gr.Textbox(lines=6, label="Part Names", interactive=False)

        with gr.Column(scale=2):
            gr.Markdown("### Part Preview")
            part_renderings = gr.Gallery(
                label="Part List Visualization",
                columns=[6],
                rows=[2],
                object_fit="contain",
                height="auto"
            )

            gr.Markdown("### Generated LDR Content")
            predicted_ldr = gr.Textbox(lines=8, label="New LDR Format", interactive=False)

            gr.Markdown("### Rendering Result")
            render_result = gr.Image(label="Part Assembly Visualization", height=300)

            ldr_download = gr.File(label="Download New LDR File")

    # Event bindings.
    upload_btn.click(
        fn=unified_input_handler,
        inputs=[ldr_file, text_input],
        outputs=[part_renderings, part_list, status_msg, original_ldr, predicted_ldr]
    )

    predict_btn.click(
        fn=predict_and_render,
        inputs=[original_ldr],
        outputs=[predicted_ldr, ldr_download, render_result]
    )

    clean_btn.click(
        fn=clean_temp_files,
        inputs=[],
        outputs=[status_msg]
    )
475
+
476
# Launch configuration for remote servers (Hugging Face Spaces compatible).
if __name__ == "__main__":
    import os

    # Detect whether we are running inside a Hugging Face Space.
    is_hf_space = os.getenv("SPACE_ID") is not None

    print("\n" + "="*50)
    print("🚀 LEGO 3D建模序列生成系统启动中...")
    print("="*50)

    # ZeroGPU: Models are loaded lazily (on first use) to avoid CUDA initialization at startup
    if CLIP_AVAILABLE:
        print("✅ CLIP text-to-design feature enabled (lazy loading)")
        print(" Models will be initialized on first use")
    else:
        print("⚠️ CLIP module not available - text-to-LEGO disabled")

    if ZEROGPU_AVAILABLE:
        print("✅ ZeroGPU support enabled - GPU allocation on demand")
    else:
        print("⚠️ Running in standard mode (no ZeroGPU)")

    if is_hf_space:
        print("🌐 运行环境: Hugging Face Spaces")
        # Hugging Face Spaces handles the port and public access itself.
        demo.queue()
        demo.launch(
            show_error=True,
            allowed_paths=[os.path.abspath(DEFAULT_PART_RENDER_PATH)]
        )
    else:
        import threading
        import time

        print("💻 运行环境: 本地服务器")

        # Launch Gradio in a background thread so the main thread is not blocked.
        def launch_gradio():
            try:
                demo.queue()  # enable request queueing
                demo.launch(
                    server_name="0.0.0.0",  # accept connections from any host
                    server_port=8080,  # use port 8080 to avoid conflicts
                    share=False,  # no public share link
                    quiet=False,  # keep logs for debugging
                    show_error=True,  # surface errors for debugging
                    debug=False,  # debug mode off
                    inbrowser=False,  # do not auto-open a browser
                    prevent_thread_lock=True,  # keep this thread unblocked
                    allowed_paths=[
                        os.path.abspath(DEFAULT_PART_RENDER_PATH)  # must be absolute
                    ]
                )
            except Exception as e:
                print(f"启动时出现警告(可忽略): {e}")
                print("服务器已在 http://0.0.0.0:8080 上运行")

        # Start Gradio.
        thread = threading.Thread(target=launch_gradio, daemon=False)
        thread.start()

        # Keep the main thread alive until Ctrl+C.
        print(f"📍 访问地址: http://localhost:8080")
        print(f"🔧 Blender: 已安装 (3.6.18)")
        print(f"🤖 模型权重: 已加载 (1.6GB)")
        print(f"📁 示例文件: examples/ldr_file/")
        print("="*50)
        print("\n按 Ctrl+C 停止服务器\n")

        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            print("\n正在关闭服务器...")
            exit(0)
552
+
553
+ # test_ldr_path = "../data/car_1k/demos/example/ldr_filter_truck_abnormal_rot_expand_trans_mid_final/modified_car_1_rot.ldr"
554
+
555
+ # mock_file = MockFileStorage(test_ldr_path)
556
+ # renderings, part_list, _, ldr_content, _ = process_ldr_file(mock_file)
557
+ # # if result:
558
+ # # print(f"调试结果:{result}")
559
+ # # else:
560
+ # # print("调试失败")
561
+
562
+ # predict_and_render(ldr_content)