Spaces:
Running
on
Zero
Running
on
Zero
Ruining Li
committed on
Commit
·
656937d
1
Parent(s):
8a56ebd
Adapt to HF ZeroGPU
Browse files
- app.py +41 -25
- infer_asset.py +12 -18
app.py
CHANGED
|
@@ -9,8 +9,9 @@ from pathlib import Path
|
|
| 9 |
from huggingface_hub import hf_hub_download
|
| 10 |
import zipfile
|
| 11 |
from datetime import datetime
|
|
|
|
| 12 |
|
| 13 |
-
from infer_asset import infer_single_asset
|
| 14 |
from particulate.models import Articulate3D_B
|
| 15 |
from particulate.data_utils import load_obj_raw_preserve
|
| 16 |
from particulate.export_utils import export_urdf, export_mjcf
|
|
@@ -31,15 +32,11 @@ class ParticulateApp:
|
|
| 31 |
self.model = Articulate3D_B(**self.model_config)
|
| 32 |
self.model.eval()
|
| 33 |
|
| 34 |
-
|
| 35 |
-
if device == "cpu":
|
| 36 |
-
print("WARNING: CUDA is not available. This application requires CUDA for full functionality as infer_asset.py assumes CUDA.")
|
| 37 |
-
# We attempt to use CUDA anyway because infer_asset.py hardcodes it in prepare_inputs
|
| 38 |
-
|
| 39 |
print("Downloading/Loading model from Hugging Face...")
|
| 40 |
self.model_checkpoint = hf_hub_download(repo_id="rayli/Particulate", filename="model.pt")
|
| 41 |
-
self.model.load_state_dict(torch.load(self.model_checkpoint, map_location="
|
| 42 |
-
|
| 43 |
|
| 44 |
model_dir = os.path.join("PartField", "model")
|
| 45 |
os.makedirs(model_dir, exist_ok=True)
|
|
@@ -78,9 +75,16 @@ class ParticulateApp:
|
|
| 78 |
):
|
| 79 |
if mesh is None:
|
| 80 |
return None, "Please upload a 3D model."
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
(
|
| 85 |
mesh_parts_original,
|
| 86 |
unique_part_ids,
|
|
@@ -91,15 +95,11 @@ class ParticulateApp:
|
|
| 91 |
revolute_range,
|
| 92 |
prismatic_axis,
|
| 93 |
prismatic_range
|
| 94 |
-
) =
|
| 95 |
-
mesh
|
| 96 |
-
up_dir=up_dir,
|
| 97 |
-
model=self.model,
|
| 98 |
-
num_points=int(num_points),
|
| 99 |
strict=strict,
|
| 100 |
-
output_path=temp_dir,
|
| 101 |
animation_frames=int(animation_frames),
|
| 102 |
-
|
| 103 |
)
|
| 104 |
|
| 105 |
animated_glb_file = os.path.join(temp_dir, "animated_textured.glb")
|
|
@@ -130,13 +130,29 @@ class ParticulateApp:
|
|
| 130 |
*[None] * 9
|
| 131 |
)
|
| 132 |
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
|
| 141 |
def export_urdf(
|
| 142 |
self,
|
|
|
|
| 9 |
from huggingface_hub import hf_hub_download
|
| 10 |
import zipfile
|
| 11 |
from datetime import datetime
|
| 12 |
+
import spaces
|
| 13 |
|
| 14 |
+
from infer_asset import infer_single_asset, save_articulated_meshes
|
| 15 |
from particulate.models import Articulate3D_B
|
| 16 |
from particulate.data_utils import load_obj_raw_preserve
|
| 17 |
from particulate.export_utils import export_urdf, export_mjcf
|
|
|
|
| 32 |
self.model = Articulate3D_B(**self.model_config)
|
| 33 |
self.model.eval()
|
| 34 |
|
| 35 |
+
# Always load to CPU initially to support Zero GPU and avoid VRAM usage when idle
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
print("Downloading/Loading model from Hugging Face...")
|
| 37 |
self.model_checkpoint = hf_hub_download(repo_id="rayli/Particulate", filename="model.pt")
|
| 38 |
+
self.model.load_state_dict(torch.load(self.model_checkpoint, map_location="cpu"))
|
| 39 |
+
# Model stays on CPU until inference
|
| 40 |
|
| 41 |
model_dir = os.path.join("PartField", "model")
|
| 42 |
os.makedirs(model_dir, exist_ok=True)
|
|
|
|
| 75 |
):
|
| 76 |
if mesh is None:
|
| 77 |
return None, "Please upload a 3D model."
|
| 78 |
+
|
| 79 |
+
try:
|
| 80 |
+
outputs, face_indices = self._predict_impl(
|
| 81 |
+
mesh,
|
| 82 |
+
min_part_confidence,
|
| 83 |
+
num_points,
|
| 84 |
+
up_dir
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
with tempfile.TemporaryDirectory() as temp_dir:
|
| 88 |
(
|
| 89 |
mesh_parts_original,
|
| 90 |
unique_part_ids,
|
|
|
|
| 95 |
revolute_range,
|
| 96 |
prismatic_axis,
|
| 97 |
prismatic_range
|
| 98 |
+
) = save_articulated_meshes(
|
| 99 |
+
mesh, face_indices, outputs,
|
|
|
|
|
|
|
|
|
|
| 100 |
strict=strict,
|
|
|
|
| 101 |
animation_frames=int(animation_frames),
|
| 102 |
+
output_path=temp_dir
|
| 103 |
)
|
| 104 |
|
| 105 |
animated_glb_file = os.path.join(temp_dir, "animated_textured.glb")
|
|
|
|
| 130 |
*[None] * 9
|
| 131 |
)
|
| 132 |
|
| 133 |
+
except Exception as e:
|
| 134 |
+
import traceback
|
| 135 |
+
traceback.print_exc()
|
| 136 |
+
return (
|
| 137 |
+
None, None, f"Error: {str(e)}",
|
| 138 |
+
*[None] * 9
|
| 139 |
+
)
|
| 140 |
+
|
| 141 |
+
@spaces.GPU
|
| 142 |
+
def _predict_impl(
|
| 143 |
+
self,
|
| 144 |
+
mesh,
|
| 145 |
+
min_part_confidence,
|
| 146 |
+
num_points,
|
| 147 |
+
up_dir
|
| 148 |
+
):
|
| 149 |
+
return infer_single_asset(
|
| 150 |
+
mesh=mesh,
|
| 151 |
+
up_dir=up_dir,
|
| 152 |
+
model=self.model.to('cuda'),
|
| 153 |
+
num_points=int(num_points),
|
| 154 |
+
min_part_confidence=min_part_confidence,
|
| 155 |
+
)
|
| 156 |
|
| 157 |
def export_urdf(
|
| 158 |
self,
|
infer_asset.py
CHANGED
|
@@ -445,9 +445,6 @@ def infer_single_asset(
|
|
| 445 |
up_dir,
|
| 446 |
model,
|
| 447 |
num_points,
|
| 448 |
-
strict,
|
| 449 |
-
output_path,
|
| 450 |
-
animation_frames,
|
| 451 |
min_part_confidence=0.0
|
| 452 |
):
|
| 453 |
if up_dir is ["x", "X"]:
|
|
@@ -491,14 +488,18 @@ def infer_single_asset(
|
|
| 491 |
min_part_confidence=min_part_confidence
|
| 492 |
)
|
| 493 |
|
| 494 |
-
|
| 495 |
-
|
| 496 |
-
|
| 497 |
-
|
| 498 |
-
|
| 499 |
-
|
| 500 |
-
|
| 501 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 502 |
|
| 503 |
_, face_part_ids_refined_strict, face_part_ids_refined = find_part_ids_for_faces(
|
| 504 |
mesh,
|
|
@@ -510,13 +511,6 @@ def infer_single_asset(
|
|
| 510 |
num_parts = len(unique_part_ids)
|
| 511 |
print(f"Found {num_parts} unique parts")
|
| 512 |
|
| 513 |
-
# Check if original mesh has texture/UV coordinates
|
| 514 |
-
has_original_texture = (
|
| 515 |
-
hasattr(mesh.visual, 'uv') and
|
| 516 |
-
mesh.visual.uv is not None and
|
| 517 |
-
len(mesh.visual.uv) > 0
|
| 518 |
-
)
|
| 519 |
-
|
| 520 |
mesh_parts_original = [mesh.submesh([face_part_ids == part_id], append=True) for part_id in unique_part_ids]
|
| 521 |
mesh_parts_segmented = create_textured_mesh_parts([mp.copy() for mp in mesh_parts_original])
|
| 522 |
|
|
|
|
| 445 |
up_dir,
|
| 446 |
model,
|
| 447 |
num_points,
|
|
|
|
|
|
|
|
|
|
| 448 |
min_part_confidence=0.0
|
| 449 |
):
|
| 450 |
if up_dir is ["x", "X"]:
|
|
|
|
| 488 |
min_part_confidence=min_part_confidence
|
| 489 |
)
|
| 490 |
|
| 491 |
+
return outputs, face_indices
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
def save_articulated_meshes(mesh, face_indices, outputs, output_path, strict, animation_frames: int = 50, hyp_idx: int = 0):
|
| 495 |
+
part_ids = outputs[hyp_idx]['part_ids']
|
| 496 |
+
motion_hierarchy = outputs[hyp_idx]['motion_hierarchy']
|
| 497 |
+
is_part_revolute = outputs[hyp_idx]['is_part_revolute']
|
| 498 |
+
is_part_prismatic = outputs[hyp_idx]['is_part_prismatic']
|
| 499 |
+
revolute_plucker = outputs[hyp_idx]['revolute_plucker']
|
| 500 |
+
revolute_range = outputs[hyp_idx]['revolute_range']
|
| 501 |
+
prismatic_axis = outputs[hyp_idx]['prismatic_axis']
|
| 502 |
+
prismatic_range = outputs[hyp_idx]['prismatic_range']
|
| 503 |
|
| 504 |
_, face_part_ids_refined_strict, face_part_ids_refined = find_part_ids_for_faces(
|
| 505 |
mesh,
|
|
|
|
| 511 |
num_parts = len(unique_part_ids)
|
| 512 |
print(f"Found {num_parts} unique parts")
|
| 513 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 514 |
mesh_parts_original = [mesh.submesh([face_part_ids == part_id], append=True) for part_id in unique_part_ids]
|
| 515 |
mesh_parts_segmented = create_textured_mesh_parts([mp.copy() for mp in mesh_parts_original])
|
| 516 |
|