notenoughram committed on
Commit
ab535c4
·
verified ·
1 Parent(s): 067e49b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -16
app.py CHANGED
@@ -87,7 +87,6 @@ def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
87
  }
88
 
89
  def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
90
- # 결과 수집용 디바이스
91
  device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
92
 
93
  gs = Gaussian(
@@ -151,10 +150,8 @@ def generate_and_extract_glb(
151
  )
152
  except Exception as e:
153
  torch.cuda.empty_cache()
154
- # 에러 메시지에 메모리 팁 추가
155
  raise RuntimeError(f"Generation Failed: {str(e)}\n(Try reducing image size or restart space)")
156
 
157
- # ๋ Œ๋”๋ง์€ CPU ํ˜น์€ 0๋ฒˆ GPU์—์„œ ์ˆ˜ํ–‰ (๋ฉ”๋ชจ๋ฆฌ ์ ˆ์•ฝ)
158
  video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
159
  video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
160
  video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
@@ -321,23 +318,25 @@ if __name__ == "__main__":
321
  if gpu_count > 1:
322
  print("⚡ Multi-GPU Mode Activated.")
323
 
324
- # [수정] infer_auto_device_map으로 맵(Dictionary)을 먼저 생성해야 함
325
- # "balanced"는 문자열이 아니라 내부 동작 방식이므로, infer_auto_device_map을 통해
326
- # ์‹ค์ œ ๋ ˆ์ด์–ด๋ณ„ GPU ํ• ๋‹นํ‘œ(dict)๋ฅผ ๋ฐ›์•„์™€์•ผ dispatch_model์ด ์•Œ์•„๋จน์Šต๋‹ˆ๋‹ค.
327
-
328
  print(" - Calculating Device Map for VGGT Model...")
329
- vggt_map = infer_auto_device_map(pipeline.VGGT_model)
330
- pipeline.VGGT_model = dispatch_model(pipeline.VGGT_model, device_map=vggt_map)
331
-
332
- print(" - Calculating Device Map for SLAT Model...")
333
- slat_map = infer_auto_device_map(pipeline.slat_model)
334
- pipeline.slat_model = dispatch_model(pipeline.slat_model, device_map=slat_map)
335
-
 
 
 
 
 
 
336
  # ๊ฐ€๋ฒผ์šด ๋ชจ๋ธ์€ 0๋ฒˆ ๊ณ ์ •
337
  pipeline.birefnet_model.to("cuda:0")
338
 
339
- print("✅ Models dispatched successfully.")
340
-
341
  else:
342
  print("โš ๏ธ Warning: Only 1 GPU detected.")
343
  pipeline.cuda()
 
87
  }
88
 
89
  def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
 
90
  device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
91
 
92
  gs = Gaussian(
 
150
  )
151
  except Exception as e:
152
  torch.cuda.empty_cache()
 
153
  raise RuntimeError(f"Generation Failed: {str(e)}\n(Try reducing image size or restart space)")
154
 
 
155
  video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
156
  video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
157
  video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
 
318
  if gpu_count > 1:
319
  print("⚡ Multi-GPU Mode Activated.")
320
 
321
+ # [수정] slat_model 관련 코드 삭제 (존재하지 않음)
322
+ # VGGT_model만 분산 처리 (5GB 대용량 모델)
 
 
323
  print(" - Calculating Device Map for VGGT Model...")
324
+ try:
325
+ # infer_auto_device_map์„ ํ†ตํ•ด VGGT ๋ชจ๋ธ์˜ ๋ ˆ์ด์–ด๋ฅผ 4๊ฐœ GPU์— ๋‚˜๋ˆŒ ๊ณ„ํšํ‘œ(Map)๋ฅผ ์งญ๋‹ˆ๋‹ค.
326
+ vggt_map = infer_auto_device_map(pipeline.VGGT_model, max_memory={i: "22GiB" for i in range(gpu_count)})
327
+
328
+ # ๊ณ„ํšํ‘œ๋Œ€๋กœ ๋ชจ๋ธ์„ ์ฐข์–ด์„œ ๊ฐ GPU์— ์˜ฌ๋ฆฝ๋‹ˆ๋‹ค.
329
+ pipeline.VGGT_model = dispatch_model(pipeline.VGGT_model, device_map=vggt_map)
330
+ print("✅ VGGT Model dispatched successfully.")
331
+
332
+ except Exception as e:
333
+ print(f"โš ๏ธ Failed to dispatch VGGT model: {e}")
334
+ print("Falling back to single GPU for VGGT (Risky).")
335
+ pipeline.VGGT_model.cuda()
336
+
337
  # ๊ฐ€๋ฒผ์šด ๋ชจ๋ธ์€ 0๋ฒˆ ๊ณ ์ •
338
  pipeline.birefnet_model.to("cuda:0")
339
 
 
 
340
  else:
341
  print("โš ๏ธ Warning: Only 1 GPU detected.")
342
  pipeline.cuda()