Commit e88bb31
Parent(s): da5e325
Refactor inference functions in app.py for improved structure and clarity; unify skeleton and skin inference processes, streamline imports, and enhance output handling.
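At a glance: the two per-stage entry points (run_skeleton_inference_python, run_skin_inference_python) are folded into a single function. Its signature, as introduced in the diff below:

    def run_inference_python(
        input_file: str,        # 3D model for skeleton inference, skeleton FBX for skin
        output_file: str,
        inference_type: str,    # either "skeleton" or "skin"
        seed: int = 12345,      # used by the skeleton stage for reproducible results
        npz_dir: str = None     # NPZ directory, used for skeleton inference
    ) -> str:
        ...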
app.py CHANGED
@@ -25,17 +25,6 @@ else:
     subprocess.run(f'pip install spconv{spconv_version}', shell=True)
     subprocess.run(f'pip install torch_scatter torch_cluster -f https://data.pyg.org/whl/torch-{torch_version}+{cuda_version}.html --no-cache-dir', shell=True)
 
-from src.data.datapath import Datapath
-from src.data.dataset import DatasetConfig, UniRigDatasetModule
-from src.data.extract import extract_builtin, get_files
-from src.data.transform import TransformConfig
-from src.inference.download import download
-from src.model.parse import get_model
-from src.system.parse import get_system, get_writer
-from src.tokenizer.parse import get_tokenizer
-from src.tokenizer.spec import TokenizerConfig
-
-
 # Helper functions
 def validate_input_file(file_path: str) -> bool:
     """Validate if the input file format is supported."""
@@ -52,6 +41,7 @@ def extract_mesh_python(input_file: str, output_dir: str) -> str:
     Returns path to generated .npz file
     """
     # Import required modules
+    from src.data.extract import extract_builtin, get_files
 
     # Create extraction parameters
     files = get_files(
@@ -87,45 +77,95 @@ def extract_mesh_python(input_file: str, output_dir: str) -> str:
 
     return expected_npz_dir  # Return the directory containing raw_data.npz
 
-def run_skeleton_inference_python(input_file: str, output_file: str, seed: int = 12345) -> str:
-    """
+def run_inference_python(
+    input_file: str,
+    output_file: str,
+    inference_type: str,
+    seed: int = 12345,
+    npz_dir: str = None
+) -> str:
     """
+    Unified inference function for both skeleton and skin inference.
 
+    Args:
+        input_file: Path to input file (3D model for skeleton, skeleton FBX for skin)
+        output_file: Path to output file
+        inference_type: Either "skeleton" or "skin"
+        seed: Random seed for reproducible results
+        npz_dir: Directory for NPZ files (used for skeleton inference)
+
+    Returns:
+        Path to generated file
+    """
+    from src.data.datapath import Datapath
+    from src.data.dataset import DatasetConfig, UniRigDatasetModule
+    from src.data.transform import TransformConfig
+    from src.inference.download import download
+    from src.model.parse import get_model
+    from src.system.parse import get_system, get_writer
+    from src.tokenizer.parse import get_tokenizer
+    from src.tokenizer.spec import TokenizerConfig
+
+    # Set random seed for skeleton inference
+    if inference_type == "skeleton":
+        L.seed_everything(seed, workers=True)
+
+    # Load task and model configurations based on inference type
+    if inference_type == "skeleton":
+        task_config_path = "configs/task/quick_inference_skeleton_articulationxl_ar_256.yaml"
+        transform_config_path = "configs/transform/inference_ar_transform.yaml"
+        model_config_path = "configs/model/unirig_ar_350m_1024_81920_float32.yaml"
+        system_config_path = "configs/system/ar_inference_articulationxl.yaml"
+        tokenizer_config_path = "configs/tokenizer/tokenizer_parts_articulationxl_256.yaml"
+        data_name = "raw_data.npz"
+    else:  # skin
+        task_config_path = "configs/task/quick_inference_unirig_skin.yaml"
+        transform_config_path = "configs/transform/inference_skin_transform.yaml"
+        model_config_path = "configs/model/unirig_skin.yaml"
+        system_config_path = "configs/system/skin.yaml"
+        tokenizer_config_path = None
+        data_name = "predict_skeleton.npz"
 
     # Load task configuration
-    task_config_path = "configs/task/quick_inference_skeleton_articulationxl_ar_256.yaml"
     if not Path(task_config_path).exists():
         raise FileNotFoundError(f"Task configuration file not found: {task_config_path}")
 
-    # Load the task configuration
     with open(task_config_path, 'r') as f:
         task = Box(yaml.safe_load(f))
 
+    # Setup data directory and datapath
+    if inference_type == "skeleton":
+        # Create temporary npz directory and extract mesh data
+        if npz_dir is None:
+            npz_dir = Path(output_file).parent / "npz"
+        npz_dir = Path(npz_dir)
+        npz_dir.mkdir(exist_ok=True)
+        npz_data_dir = extract_mesh_python(input_file, npz_dir)
+        datapath = Datapath(files=[npz_data_dir], cls=None)
+    else:  # skin
+        # Look for NPZ files from previous skeleton inference
+        skeleton_work_dir = Path(input_file).parent
+        all_npz_files = list(skeleton_work_dir.rglob("**/*.npz"))
+        if not all_npz_files:
+            raise RuntimeError(f"No NPZ files found for skin inference in {skeleton_work_dir}")
+        skeleton_npz_dir = all_npz_files[0].parent
+        datapath = Datapath(files=[str(skeleton_npz_dir)], cls=None)
+
+    # Load common configurations
     data_config = Box(yaml.safe_load(open("configs/data/quick_inference.yaml", 'r')))
-    transform_config = Box(yaml.safe_load(open(
+    transform_config = Box(yaml.safe_load(open(transform_config_path, 'r')))
+
+    # Setup tokenizer and model
+    if inference_type == "skeleton":
+        tokenizer_config = TokenizerConfig.parse(config=Box(yaml.safe_load(open(tokenizer_config_path, 'r'))))
+        tokenizer = get_tokenizer(config=tokenizer_config)
+        model_config = Box(yaml.safe_load(open(model_config_path, 'r')))
+        model = get_model(tokenizer=tokenizer, **model_config)
+    else:  # skin
+        tokenizer_config = None
+        tokenizer = None
+        model_config = Box(yaml.safe_load(open(model_config_path, 'r')))
+        model = get_model(tokenizer=None, **model_config)
 
     # Setup datasets and transforms
     predict_dataset_config = DatasetConfig.parse(config=data_config.predict_dataset_config).split_by_cls()
@@ -138,7 +178,7 @@ def run_skeleton_inference_python(input_file: str, output_file: str, seed: int = 12345) -> str:
         predict_transform_config=predict_transform_config,
         tokenizer_config=tokenizer_config,
         debug=False,
-        data_name=
+        data_name=data_name,
         datapath=datapath,
         cls=None,
     )
@@ -146,107 +186,23 @@ def run_skeleton_inference_python(input_file: str, output_file: str, seed: int = 12345) -> str:
     # Setup callbacks and writer
     callbacks = []
     writer_config = task.writer.copy()
-    writer_config['npz_dir'] = str(npz_dir)
-    writer_config['output_dir'] = str(Path(output_file).parent)
-    writer_config['output_name'] = Path(output_file).name
-    writer_config['user_mode'] = False  # Set to False to enable NPZ export
-    print(f"Writer config: {writer_config}")
-    # But we want the FBX to go to our specified location when in user mode for FBX
-    callbacks.append(get_writer(**writer_config, order_config=predict_transform_config.order_config))
-
-    # Get system
-    system_config = Box(yaml.safe_load(open("configs/system/ar_inference_articulationxl.yaml", 'r')))
-    system = get_system(**system_config, model=model, steps_per_epoch=1)
-
-    # Setup trainer
-    trainer_config = task.trainer
-    resume_from_checkpoint = download(task.resume_from_checkpoint)
-
-    trainer = L.Trainer(callbacks=callbacks, logger=None, **trainer_config)
-
-    # Run prediction
-    trainer.predict(system, datamodule=data, ckpt_path=resume_from_checkpoint, return_predictions=False)
-
-    # The actual output file will be in a subdirectory named after the input file
-    # Look for the generated skeleton.fbx file
-    input_name_stem = Path(input_file).stem
-    actual_output_dir = Path(output_file).parent / input_name_stem
-    actual_output_file = actual_output_dir / "skeleton.fbx"
-
-    if not actual_output_file.exists():
-        # Try alternative locations - look for any skeleton.fbx file in the output directory
-        alt_files = list(Path(output_file).parent.rglob("skeleton.fbx"))
-        if alt_files:
-            actual_output_file = alt_files[0]
-            print(f"Found skeleton at alternative location: {actual_output_file}")
-        else:
-            # List all files for debugging
-            all_files = list(Path(output_file).parent.rglob("*"))
-            print(f"Available files: {[str(f) for f in all_files]}")
-            raise RuntimeError(f"Skeleton FBX file not found. Expected at: {actual_output_file}")
-
-    # Copy to the expected output location
-    if actual_output_file != Path(output_file):
-        shutil.copy2(actual_output_file, output_file)
-        print(f"Copied skeleton from {actual_output_file} to {output_file}")
-
-    print(f"Generated skeleton at: {output_file}")
-    return str(output_file)
-
-def run_skin_inference_python(skeleton_file: str, output_file: str) -> str:
-    """
-    Run skin inference using Python (replaces skin part of generate_skin.sh)
-    Returns path to skin FBX file
-    """
 
-    # Load configurations
-    data_config = Box(yaml.safe_load(open("configs/data/quick_inference.yaml", 'r')))
-    transform_config = Box(yaml.safe_load(open("configs/transform/inference_skin_transform.yaml", 'r')))
-
-    # Get model
-    model_config = Box(yaml.safe_load(open("configs/model/unirig_skin.yaml", 'r')))
-    model = get_model(tokenizer=None, **model_config)
-
-    # Setup datasets and transforms
-    predict_dataset_config = DatasetConfig.parse(config=data_config.predict_dataset_config).split_by_cls()
-    predict_transform_config = TransformConfig.parse(config=transform_config.predict_transform_config)
-
-    # Create data module
-    data = UniRigDatasetModule(
-        process_fn=model._process_fn,
-        predict_dataset_config=predict_dataset_config,
-        predict_transform_config=predict_transform_config,
-        tokenizer_config=None,
-        debug=False,
-        data_name="predict_skeleton.npz",
-        datapath=datapath,
-        cls=None,
-    )
-
-    # Setup callbacks and writer
-    callbacks = []
-    writer_config = task.writer.copy()
-    writer_config['npz_dir'] = str(skeleton_npz_dir)
-    writer_config['output_name'] = str(output_file)
-    writer_config['user_mode'] = True
-    writer_config['export_fbx'] = True  # Enable FBX export
+    if inference_type == "skeleton":
+        writer_config['npz_dir'] = str(npz_dir)
+        writer_config['output_dir'] = str(Path(output_file).parent)
+        writer_config['output_name'] = Path(output_file).name
+        writer_config['user_mode'] = False  # Enable NPZ export for skeleton
+    else:  # skin
+        writer_config['npz_dir'] = str(skeleton_npz_dir)
+        writer_config['output_name'] = str(output_file)
+        writer_config['user_mode'] = True
+        writer_config['export_fbx'] = True
+
+    print(f"Writer config for {inference_type}: {writer_config}")
     callbacks.append(get_writer(**writer_config, order_config=predict_transform_config.order_config))
 
     # Get system
-    system_config = Box(yaml.safe_load(open(
+    system_config = Box(yaml.safe_load(open(system_config_path, 'r')))
     system = get_system(**system_config, model=model, steps_per_epoch=1)
 
     # Setup trainer
@@ -258,18 +214,41 @@ def run_skin_inference_python(skeleton_file: str, output_file: str) -> str:
     # Run prediction
     trainer.predict(system, datamodule=data, ckpt_path=resume_from_checkpoint, return_predictions=False)
 
+    # Handle output file location and validation
+    if inference_type == "skeleton":
+        # Look for the generated skeleton.fbx file
+        input_name_stem = Path(input_file).stem
+        actual_output_dir = Path(output_file).parent / input_name_stem
+        actual_output_file = actual_output_dir / "skeleton.fbx"
+
+        if not actual_output_file.exists():
+            # Try alternative locations
+            alt_files = list(Path(output_file).parent.rglob("skeleton.fbx"))
+            if alt_files:
+                actual_output_file = alt_files[0]
+                print(f"Found skeleton at alternative location: {actual_output_file}")
+            else:
+                all_files = list(Path(output_file).parent.rglob("*"))
+                print(f"Available files: {[str(f) for f in all_files]}")
+                raise RuntimeError(f"Skeleton FBX file not found. Expected at: {actual_output_file}")
+
+        # Copy to the expected output location
+        if actual_output_file != Path(output_file):
             shutil.copy2(actual_output_file, output_file)
+            print(f"Copied skeleton from {actual_output_file} to {output_file}")
+
+    else:  # skin
+        # Check if skin FBX file was generated
+        if not Path(output_file).exists():
+            # Look for generated skin FBX files
+            skin_files = list(Path(output_file).parent.rglob("*skin*.fbx"))
+            if skin_files:
+                actual_output_file = skin_files[0]
+                shutil.copy2(actual_output_file, output_file)
+            else:
+                raise RuntimeError(f"Skin FBX file not found. Expected at: {output_file}")
+
+    print(f"Generated {inference_type} at: {output_file}")
     return str(output_file)
 
 def merge_results_python(source_file: str, target_file: str, output_file: str) -> str:
@@ -302,9 +281,9 @@ def merge_results_python(source_file: str, target_file: str, output_file: str) -> str:
     return str(output_path.resolve())
 
 @spaces.GPU()
-def complete_pipeline(input_file: str, seed: int = 12345) -> Tuple[str, list]:
+def main(input_file: str, seed: int = 12345) -> Tuple[str, list]:
     """
-    Run the
+    Run the rigging pipeline based on selected mode.
 
     Args:
         input_file: Path to the input 3D model file
@@ -336,20 +315,29 @@ def complete_pipeline(input_file: str, seed: int = 12345) -> Tuple[str, list]:
     input_file = input_model_dir / input_file.name
     print(f"New input file path: {input_file}")
 
-    # Step
+    # Initialize file paths and output list
+    output_files = []
+    final_file = None
+
+    # Step 1: Generate skeleton
+    intermediate_skeleton_file = input_model_dir / f"{file_stem}_skeleton.fbx"
+    final_skeleton_file = input_model_dir / f"{file_stem}_skeleton_only{input_file.suffix}"
+    run_inference_python(input_file, intermediate_skeleton_file, "skeleton", seed)
+    merge_results_python(intermediate_skeleton_file, input_file, final_skeleton_file)
+
+    # Step 2: Generate skinning and Merge everything together
+    intermediate_skin_file = input_model_dir / f"{file_stem}_skin.fbx"
+    final_skin_file = input_model_dir / f"{file_stem}_skeleton_and_skinning{input_file.suffix}"
+    run_inference_python(intermediate_skeleton_file, intermediate_skin_file, "skin")
+    merge_results_python(intermediate_skin_file, input_file, final_skin_file)
 
-    merge_results_python(output_skin_file, input_file, final_file)
+    final_file = str(final_skin_file)
+    output_files = [str(final_skeleton_file), str(final_skin_file)]
 
-    return
+    return final_file, output_files
 
+# main("/home/morashad/projects/UniRig/UniRig/tmp/base_basic_pbr_12345/base_basic_pbr_skeleton.glb", "Skinning Only", 1234)
+# exit()
 
 def create_app():
     """Create and configure the Gradio interface."""
@@ -370,9 +358,13 @@ def create_app():
     gr.Markdown("""
    ## 📋 How to Use ?
    1. **Upload your 3D model** - Drop your .obj, .fbx, or .glb file in the upload area
-    2. **
+    2. **Choose processing mode**:
+       - **Skeleton Only**: Generate just the bone structure for your model
+       - **Skinning Only**: Apply skinning weights (requires existing skeleton data)
+       - **Complete Pipeline**: Full automated rigging (skeleton + skinning + merge)
+    3. **Set random seed** (optional) - Use the same seed for reproducible results
+    4. **Click "Start Processing"** - The AI will process your model based on the selected mode
+    5. **Download results** - Different files will be generated based on your selected mode
 
    **Supported File Formats:** .obj, .fbx, .glb
    **Note:** The process may take a few minutes depending on the model complexity and server load.
@@ -384,16 +376,16 @@ def create_app():
 
         with gr.Row(equal_height=True):
             seed = gr.Number(
-                value=
+                value=int(torch.randint(0, 100000, (1,)).item()),
                 label="Random Seed (for reproducible results)",
                 scale=4,
             )
             random_btn = gr.Button("🔄 Random Seed", variant="secondary", scale=1)
 
-        pipeline_btn = gr.Button("🎯 Start
+        pipeline_btn = gr.Button("🎯 Start Processing", variant="primary", size="lg")
 
         with gr.Column():
-            pipeline_skeleton_out = gr.Model3D(label="Final
+            pipeline_skeleton_out = gr.Model3D(label="Final Result", scale=4)
             files_to_download = gr.Files(label="Download Files", scale=1)
 
         random_btn.click(
@@ -402,7 +394,7 @@ def create_app():
         )
 
         pipeline_btn.click(
-            fn=
+            fn=main,
             inputs=[input_3d_model, seed],
             outputs=[pipeline_skeleton_out, files_to_download]
         )
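For reference, a minimal sketch of how the refactored entry points chain together, mirroring main() above (the input path and working directory here are illustrative assumptions, not part of the commit):

    from pathlib import Path

    # Hypothetical input; any supported .obj/.fbx/.glb path works the same way.
    model_path = Path("tmp/example/model.glb")
    work_dir = model_path.parent
    stem = model_path.stem

    # Stage 1: predict a skeleton, then merge it back into the original mesh.
    skeleton_fbx = work_dir / f"{stem}_skeleton.fbx"
    run_inference_python(str(model_path), str(skeleton_fbx), "skeleton", seed=12345)
    merge_results_python(str(skeleton_fbx), str(model_path),
                         str(work_dir / f"{stem}_skeleton_only{model_path.suffix}"))

    # Stage 2: predict skinning weights from the skeleton FBX, then merge everything.
    skin_fbx = work_dir / f"{stem}_skin.fbx"
    run_inference_python(str(skeleton_fbx), str(skin_fbx), "skin")
    merge_results_python(str(skin_fbx), str(model_path),
                         str(work_dir / f"{stem}_skeleton_and_skinning{model_path.suffix}"))

Passing the skeleton FBX as input_file in the skin stage is what lets the unified function find the predict_skeleton.npz written by the first stage: the skin branch searches the parent directory of input_file for NPZ files.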
|