Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
|
@@ -29,8 +29,8 @@ from trellis.pipelines import TrellisVGGTTo3DPipeline
|
|
| 29 |
from trellis.representations import Gaussian, MeshExtractResult
|
| 30 |
from trellis.utils import render_utils, postprocessing_utils
|
| 31 |
|
| 32 |
-
#
|
| 33 |
-
from accelerate import dispatch_model
|
| 34 |
|
| 35 |
MAX_SEED = np.iinfo(np.int32).max
|
| 36 |
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
|
|
@@ -87,7 +87,9 @@ def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
|
|
| 87 |
}
|
| 88 |
|
| 89 |
def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
|
|
|
|
| 90 |
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
|
|
|
|
| 91 |
gs = Gaussian(
|
| 92 |
aabb=state['gaussian']['aabb'],
|
| 93 |
sh_degree=state['gaussian']['sh_degree'],
|
|
@@ -149,8 +151,10 @@ def generate_and_extract_glb(
|
|
| 149 |
)
|
| 150 |
except Exception as e:
|
| 151 |
torch.cuda.empty_cache()
|
| 152 |
-
|
|
|
|
| 153 |
|
|
|
|
| 154 |
video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
|
| 155 |
video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
|
| 156 |
video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
|
|
@@ -222,7 +226,7 @@ demo = gr.Blocks(
|
|
| 222 |
)
|
| 223 |
with demo:
|
| 224 |
gr.Markdown("""
|
| 225 |
-
# 💻 ReconViaGen (Multi-GPU
|
| 226 |
""")
|
| 227 |
|
| 228 |
with gr.Row():
|
|
@@ -315,21 +319,24 @@ if __name__ == "__main__":
|
|
| 315 |
print(f"⚡ Detected {gpu_count} GPUs.")
|
| 316 |
|
| 317 |
if gpu_count > 1:
|
| 318 |
-
print("⚡ Multi-GPU Mode Activated
|
| 319 |
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
)
|
| 324 |
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
|
|
|
|
|
|
|
|
|
| 329 |
|
|
|
|
| 330 |
pipeline.birefnet_model.to("cuda:0")
|
| 331 |
|
| 332 |
-
print("✅
|
| 333 |
|
| 334 |
else:
|
| 335 |
print("⚠️ Warning: Only 1 GPU detected.")
|
|
|
|
| 29 |
from trellis.representations import Gaussian, MeshExtractResult
|
| 30 |
from trellis.utils import render_utils, postprocessing_utils
|
| 31 |
|
| 32 |
+
# [์์ ] infer_auto_device_map ์ถ๊ฐ ์ํฌํธ
|
| 33 |
+
from accelerate import dispatch_model, infer_auto_device_map
|
| 34 |
|
| 35 |
MAX_SEED = np.iinfo(np.int32).max
|
| 36 |
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
|
|
|
|
| 87 |
}
|
| 88 |
|
| 89 |
def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
|
| 90 |
+
# ๊ฒฐ๊ณผ ์์ง์ฉ ๋๋ฐ์ด์ค
|
| 91 |
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
|
| 92 |
+
|
| 93 |
gs = Gaussian(
|
| 94 |
aabb=state['gaussian']['aabb'],
|
| 95 |
sh_degree=state['gaussian']['sh_degree'],
|
|
|
|
| 151 |
)
|
| 152 |
except Exception as e:
|
| 153 |
torch.cuda.empty_cache()
|
| 154 |
+
# ์๋ฌ ๋ฉ์์ง์ ๋ฉ๋ชจ๋ฆฌ ํ ์ถ๊ฐ
|
| 155 |
+
raise RuntimeError(f"Generation Failed: {str(e)}\n(Try reducing image size or restart space)")
|
| 156 |
|
| 157 |
+
# ๋ ๋๋ง์ CPU ํน์ 0๋ฒ GPU์์ ์ํ (๋ฉ๋ชจ๋ฆฌ ์ ์ฝ)
|
| 158 |
video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
|
| 159 |
video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
|
| 160 |
video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
|
|
|
|
| 226 |
)
|
| 227 |
with demo:
|
| 228 |
gr.Markdown("""
|
| 229 |
+
# 💻 ReconViaGen (Fixed Multi-GPU)
|
| 230 |
""")
|
| 231 |
|
| 232 |
with gr.Row():
|
|
|
|
| 319 |
print(f"⚡ Detected {gpu_count} GPUs.")
|
| 320 |
|
| 321 |
if gpu_count > 1:
|
| 322 |
+
print("⚡ Multi-GPU Mode Activated.")
|
| 323 |
|
| 324 |
+
# [์์ ] infer_auto_device_map์ผ๋ก ๋งต(Dictionary)์ ๋จผ์ ์์ฑํด์ผ ํจ
|
| 325 |
+
# "balanced"๋ ๋ฌธ์์ด์ด ์๋๋ผ ๋ด๋ถ ๋์ ๋ฐฉ์์ด๋ฏ๋ก, infer_auto_device_map์ ํตํด
|
| 326 |
+
# ์ค์ ๋ ์ด์ด๋ณ GPU ํ ๋นํ(dict)๋ฅผ ๋ฐ์์์ผ dispatch_model์ด ์์๋จน์ต๋๋ค.
|
|
|
|
| 327 |
|
| 328 |
+
print(" - Calculating Device Map for VGGT Model...")
|
| 329 |
+
vggt_map = infer_auto_device_map(pipeline.VGGT_model)
|
| 330 |
+
pipeline.VGGT_model = dispatch_model(pipeline.VGGT_model, device_map=vggt_map)
|
| 331 |
+
|
| 332 |
+
print(" - Calculating Device Map for SLAT Model...")
|
| 333 |
+
slat_map = infer_auto_device_map(pipeline.slat_model)
|
| 334 |
+
pipeline.slat_model = dispatch_model(pipeline.slat_model, device_map=slat_map)
|
| 335 |
|
| 336 |
+
# ๊ฐ๋ฒผ์ด ๋ชจ๋ธ์ 0๋ฒ ๊ณ ์
|
| 337 |
pipeline.birefnet_model.to("cuda:0")
|
| 338 |
|
| 339 |
+
print("✅ Models dispatched successfully.")
|
| 340 |
|
| 341 |
else:
|
| 342 |
print("⚠️ Warning: Only 1 GPU detected.")
|