notenoughram committed on
Commit
3bc70a3
·
verified ·
1 Parent(s): ab535c4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -37
app.py CHANGED
@@ -1,35 +1,32 @@
1
  import os
2
  import sys
3
  import subprocess
 
 
 
4
 
5
- # [AUTO-INSTALL] accelerate ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ ์ž๋™ ์„ค์น˜
6
  try:
7
  import accelerate
8
  except ImportError:
9
- print("โš ๏ธ accelerate not found. Installing now...")
10
  subprocess.check_call([sys.executable, "-m", "pip", "install", "accelerate"])
11
- print("โœ… accelerate installed.")
12
 
13
- # [์ค‘์š”] OOM ๋ฐฉ์ง€ ์„ค์ •
14
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
15
  os.environ['SPCONV_ALGO'] = 'native'
16
 
17
- import gradio as gr
18
- from gradio_litmodel3d import LitModel3D
19
- import shutil
20
- from typing import *
21
  import torch
22
  import torch.nn as nn
23
  import numpy as np
24
  import imageio
25
- import gc
26
  from easydict import EasyDict as edict
27
  from PIL import Image
 
 
 
28
  from trellis.pipelines import TrellisVGGTTo3DPipeline
29
  from trellis.representations import Gaussian, MeshExtractResult
30
  from trellis.utils import render_utils, postprocessing_utils
31
-
32
- # [์ˆ˜์ •] infer_auto_device_map ์ถ”๊ฐ€ ์ž„ํฌํŠธ
33
  from accelerate import dispatch_model, infer_auto_device_map
34
 
35
  MAX_SEED = np.iinfo(np.int32).max
@@ -88,7 +85,6 @@ def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
88
 
89
  def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
90
  device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
91
-
92
  gs = Gaussian(
93
  aabb=state['gaussian']['aabb'],
94
  sh_degree=state['gaussian']['sh_degree'],
@@ -150,7 +146,7 @@ def generate_and_extract_glb(
150
  )
151
  except Exception as e:
152
  torch.cuda.empty_cache()
153
- raise RuntimeError(f"Generation Failed: {str(e)}\n(Try reducing image size or restart space)")
154
 
155
  video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
156
  video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
@@ -177,7 +173,6 @@ def extract_gaussian(state: dict, req: gr.Request) -> Tuple[str, str]:
177
  gs, _ = unpack_state(state)
178
  gaussian_path = os.path.join(user_dir, 'sample.ply')
179
  gs.save_ply(gaussian_path)
180
-
181
  del gs
182
  torch.cuda.empty_cache()
183
  return gaussian_path, gaussian_path
@@ -222,9 +217,7 @@ demo = gr.Blocks(
222
  """
223
  )
224
  with demo:
225
- gr.Markdown("""
226
- # ๐Ÿ’ป ReconViaGen (Fixed Multi-GPU)
227
- """)
228
 
229
  with gr.Row():
230
  with gr.Column():
@@ -311,34 +304,41 @@ with demo:
311
  if __name__ == "__main__":
312
  print("๐Ÿš€ Initializing Pipeline...")
313
  pipeline = TrellisVGGTTo3DPipeline.from_pretrained("esther11/trellis-vggt-v0-2")
 
 
 
 
 
314
 
315
  gpu_count = torch.cuda.device_count()
316
  print(f"โšก Detected {gpu_count} GPUs.")
317
 
318
  if gpu_count > 1:
319
- print("โšก Multi-GPU Mode Activated.")
320
 
321
- # [์ˆ˜์ •] slat_model ๊ด€๋ จ ์ฝ”๋“œ ์‚ญ์ œ (์กด์žฌํ•˜์ง€ ์•Š์Œ)
322
- # VGGT_model๋งŒ ๋ถ„์‚ฐ ์ฒ˜๋ฆฌ (5GB ๋Œ€์šฉ๋Ÿ‰ ๋ชจ๋ธ)
323
- print(" - Calculating Device Map for VGGT Model...")
324
- try:
325
- # infer_auto_device_map์„ ํ†ตํ•ด VGGT ๋ชจ๋ธ์˜ ๋ ˆ์ด์–ด๋ฅผ 4๊ฐœ GPU์— ๋‚˜๋ˆŒ ๊ณ„ํšํ‘œ(Map)๋ฅผ ์งญ๋‹ˆ๋‹ค.
326
- vggt_map = infer_auto_device_map(pipeline.VGGT_model, max_memory={i: "22GiB" for i in range(gpu_count)})
327
-
328
- # ๊ณ„ํšํ‘œ๋Œ€๋กœ ๋ชจ๋ธ์„ ์ฐข์–ด์„œ ๊ฐ GPU์— ์˜ฌ๋ฆฝ๋‹ˆ๋‹ค.
329
- pipeline.VGGT_model = dispatch_model(pipeline.VGGT_model, device_map=vggt_map)
330
- print("โœ… VGGT Model dispatched successfully.")
331
-
332
- except Exception as e:
333
- print(f"โš ๏ธ Failed to dispatch VGGT model: {e}")
334
- print("Falling back to single GPU for VGGT (Risky).")
335
- pipeline.VGGT_model.cuda()
336
-
337
- # ๊ฐ€๋ฒผ์šด ๋ชจ๋ธ์€ 0๋ฒˆ ๊ณ ์ •
338
- pipeline.birefnet_model.to("cuda:0")
 
 
339
 
340
  else:
341
  print("โš ๏ธ Warning: Only 1 GPU detected.")
342
- pipeline.cuda()
343
 
344
  demo.launch()
 
1
  import os
2
  import sys
3
  import subprocess
4
+ import gc
5
+ import shutil
6
+ from typing import *
7
 
8
+ # [AUTO-INSTALL] accelerate
9
  try:
10
  import accelerate
11
  except ImportError:
 
12
  subprocess.check_call([sys.executable, "-m", "pip", "install", "accelerate"])
 
13
 
14
+ # [์ค‘์š”] OOM ๋ฐฉ์ง€ ๋ฐ ์•Œ๊ณ ๋ฆฌ์ฆ˜ ์„ค์ •
15
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
16
  os.environ['SPCONV_ALGO'] = 'native'
17
 
 
 
 
 
18
  import torch
19
  import torch.nn as nn
20
  import numpy as np
21
  import imageio
 
22
  from easydict import EasyDict as edict
23
  from PIL import Image
24
+ import gradio as gr
25
+ from gradio_litmodel3d import LitModel3D
26
+
27
  from trellis.pipelines import TrellisVGGTTo3DPipeline
28
  from trellis.representations import Gaussian, MeshExtractResult
29
  from trellis.utils import render_utils, postprocessing_utils
 
 
30
  from accelerate import dispatch_model, infer_auto_device_map
31
 
32
  MAX_SEED = np.iinfo(np.int32).max
 
85
 
86
  def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
87
  device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
 
88
  gs = Gaussian(
89
  aabb=state['gaussian']['aabb'],
90
  sh_degree=state['gaussian']['sh_degree'],
 
146
  )
147
  except Exception as e:
148
  torch.cuda.empty_cache()
149
+ raise RuntimeError(f"Generation Failed: {str(e)}")
150
 
151
  video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
152
  video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
 
173
  gs, _ = unpack_state(state)
174
  gaussian_path = os.path.join(user_dir, 'sample.ply')
175
  gs.save_ply(gaussian_path)
 
176
  del gs
177
  torch.cuda.empty_cache()
178
  return gaussian_path, gaussian_path
 
217
  """
218
  )
219
  with demo:
220
+ gr.Markdown("# ๐Ÿ’ป ReconViaGen (Corrected Multi-GPU)")
 
 
221
 
222
  with gr.Row():
223
  with gr.Column():
 
304
  if __name__ == "__main__":
305
  print("๐Ÿš€ Initializing Pipeline...")
306
  pipeline = TrellisVGGTTo3DPipeline.from_pretrained("esther11/trellis-vggt-v0-2")
307
+
308
+ # [ํ•ต์‹ฌ ์ˆ˜์ •] ํŒŒ์ดํ”„๋ผ์ธ ์ „์ฒด๋ฅผ ๋จผ์ € CUDA:0์œผ๋กœ ์ด๋™
309
+ # ์ด๋ ‡๊ฒŒ ํ•ด์•ผ pipeline.device๊ฐ€ "cuda:0"์œผ๋กœ ์„ค์ •๋˜๊ณ ,
310
+ # preprocess_image ํ˜ธ์ถœ ์‹œ ์ž…๋ ฅ ํ…์„œ๊ฐ€ ์ž๋™์œผ๋กœ GPU๋กœ ์ด๋™ํ•˜์—ฌ birefnet(GPU0)๊ณผ ๋งŒ๋‚ฉ๋‹ˆ๋‹ค.
311
+ pipeline.cuda()
312
 
313
  gpu_count = torch.cuda.device_count()
314
  print(f"โšก Detected {gpu_count} GPUs.")
315
 
316
  if gpu_count > 1:
317
+ print("โšก Multi-GPU Mode: Distributing VGGT model.")
318
 
319
+ # [ํ•ต์‹ฌ ์ˆ˜์ •] VGGT ๋ชจ๋ธ๋งŒ ๋‹ค์‹œ CPU๋กœ ๋‚ด๋ ค์„œ ๋งต์„ ๊ณ„์‚ฐํ•˜๊ฑฐ๋‚˜,
320
+ # ๊ทธ๋ƒฅ infer_auto_device_map์— ๋งก๊น๋‹ˆ๋‹ค.
321
+ # ์ด๋ฏธ GPU0์— ๋‹ค ์˜ฌ๋ผ๊ฐ€ ์žˆ์œผ๋ฏ€๋กœ, 'max_memory' ์ œํ•œ์„ ์ฃผ์ง€ ์•Š์œผ๋ฉด "GPU0์— ๋‹ค ๋“ค์–ด๊ฐ€๋„ค?" ํ•˜๊ณ  ๋ถ„์‚ฐ์„ ์•ˆ ํ•  ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.
322
+ # ๋”ฐ๋ผ์„œ VGGT๋ฅผ ๊ฐ•์ œ๋กœ ์ฐข๊ธฐ ์œ„ํ•ด max_memory๋ฅผ ์„ค์ •ํ•ฉ๋‹ˆ๋‹ค.
323
+
324
+ # ์˜ˆ: ๊ฐ GPU๋‹น 12GB ์ •๋„๋งŒ ์“ฐ๋„๋ก ์œ ๋„ํ•˜์—ฌ ๋ถ„์‚ฐ์„ ๊ฐ•์ œํ•จ (VGGT 5GB + ํ™œ์„ฑ ๋ฉ”๋ชจ๋ฆฌ ๊ณ ๋ ค)
325
+ # ํ˜น์€ infer_auto_device_map์ด ํ˜„์žฌ ํ• ๋‹น๋œ ๋ฉ”๋ชจ๋ฆฌ๋ฅผ ๊ณ ๋ คํ•˜๋„๋ก ํ•ฉ๋‹ˆ๋‹ค.
326
+
327
+ pipeline.VGGT_model.cpu() # ๋งต ๊ณ„์‚ฐ์„ ์œ„ํ•ด ์ž ์‹œ CPU๋กœ ์ด๋™ (ํ™•์‹คํ•œ ๋ถ„์‚ฐ์„ ์œ„ํ•จ)
328
+
329
+ print(" - Calculating Device Map for VGGT...")
330
+ # ๋ชจ๋“  GPU๋ฅผ ๊ณจ๊ณ ๋ฃจ ์“ฐ๋„๋ก ์œ ๋„
331
+ device_map = infer_auto_device_map(
332
+ pipeline.VGGT_model,
333
+ max_memory={i: "10GiB" for i in range(gpu_count)}, # ๊ฐ•์ œ ๋ถ„ํ• ์„ ์œ„ํ•œ ๋ฉ”๋ชจ๋ฆฌ ์ œํ•œ
334
+ no_split_module_classes=["Block", "ResnetBlock"] # ๋ ˆ์ด์–ด ์ค‘๊ฐ„์ด ์งค๋ฆฌ์ง€ ์•Š๋„๋ก ๋ณดํ˜ธ
335
+ )
336
+
337
+ pipeline.VGGT_model = dispatch_model(pipeline.VGGT_model, device_map=device_map)
338
+ print("โœ… VGGT Model dispatched.")
339
 
340
  else:
341
  print("โš ๏ธ Warning: Only 1 GPU detected.")
342
+ # ์ด๋ฏธ ์œ„์—์„œ pipeline.cuda() ํ–ˆ์œผ๋ฏ€๋กœ ์ถ”๊ฐ€ ์ž‘์—… ๋ถˆํ•„์š”
343
 
344
  demo.launch()