Spaces:
Running
on
Zero
Running
on
Zero
daidedou
committed on
Commit
·
ef92111
1
Parent(s):
f5df033
Adapt to ZeroGPU?
Browse files
app.py
CHANGED
|
@@ -193,7 +193,7 @@ def init_clicked(mesh1_path, mesh2_path,
|
|
| 193 |
diff_model_cuda.net.cuda()
|
| 194 |
C12_pred_init, C21_pred_init, feat1, feat2, evecs_trans1, evecs_trans2 = fmap_model_cuda({"shape1": shape_dict, "shape2": target_dict}, diff_model=diff_model_cuda, scale=matcher.fmap_cfg.diffusion.time)
|
| 195 |
C12_pred, C12_obj, mask_12 = C12_pred_init
|
| 196 |
-
p2p_init, _ = extract_p2p_torch_fmap(C12_obj,
|
| 197 |
return build_outputs(datadicts.shape_surf, datadicts.target_surf, datadicts.cmap1, p2p_init, tag="init")
|
| 198 |
|
| 199 |
@spaces.GPU
|
|
@@ -218,14 +218,15 @@ def run_clicked(mesh1_path, mesh2_path, yaml_path, lambda_val, zoomout_val, time
|
|
| 218 |
if not (datadicts.shape_path == mesh1_path and datadicts.target_path == mesh2_path):
|
| 219 |
datadicts = Datadicts(mesh1_path, mesh2_path)
|
| 220 |
|
|
|
|
| 221 |
target_normals = torch.from_numpy(datadicts.target_surf.surfel/np.linalg.norm(datadicts.target_surf.surfel, axis=-1, keepdims=True)).float().to("cuda")
|
| 222 |
|
| 223 |
-
C12_new, p2p, p2p_init, _, loss_save = matcher.optimize(
|
| 224 |
-
evecs1, evecs2 =
|
| 225 |
-
evecs_2trans = evecs2.t() @ torch.diag(
|
| 226 |
with torch.no_grad():
|
| 227 |
C12_end_zo = torch_zoomout(evecs1, evecs2, evecs_2trans, C12_new.squeeze(), matcher.cfg.sds_conf.zoomout)
|
| 228 |
-
p2p_zo, _ = extract_p2p_torch_fmap(C12_end_zo,
|
| 229 |
return build_outputs(datadicts.shape_surf, datadicts.target_surf, datadicts.cmap1, p2p_zo, tag="run")
|
| 230 |
|
| 231 |
|
|
@@ -319,7 +320,7 @@ if __name__ == "__main__":
|
|
| 319 |
print("Making matcher")
|
| 320 |
matcher = zero_shot.Matcher(cfg)
|
| 321 |
print("Matcher ready")
|
| 322 |
-
shutil.rmtree("tmp")
|
| 323 |
os.makedirs("tmp", exist_ok=True)
|
| 324 |
datadicts = None
|
| 325 |
demo.launch(share=args.share)
|
|
|
|
| 193 |
diff_model_cuda.net.cuda()
|
| 194 |
C12_pred_init, C21_pred_init, feat1, feat2, evecs_trans1, evecs_trans2 = fmap_model_cuda({"shape1": shape_dict, "shape2": target_dict}, diff_model=diff_model_cuda, scale=matcher.fmap_cfg.diffusion.time)
|
| 195 |
C12_pred, C12_obj, mask_12 = C12_pred_init
|
| 196 |
+
p2p_init, _ = extract_p2p_torch_fmap(C12_obj, shape_dict["evecs"], target_dict["evecs"])
|
| 197 |
return build_outputs(datadicts.shape_surf, datadicts.target_surf, datadicts.cmap1, p2p_init, tag="init")
|
| 198 |
|
| 199 |
@spaces.GPU
|
|
|
|
| 218 |
if not (datadicts.shape_path == mesh1_path and datadicts.target_path == mesh2_path):
|
| 219 |
datadicts = Datadicts(mesh1_path, mesh2_path)
|
| 220 |
|
| 221 |
+
shape_dict, target_dict = convert_dict(datadicts.shape_dict, 'cuda'), convert_dict(datadicts.target_dict, 'cuda')
|
| 222 |
target_normals = torch.from_numpy(datadicts.target_surf.surfel/np.linalg.norm(datadicts.target_surf.surfel, axis=-1, keepdims=True)).float().to("cuda")
|
| 223 |
|
| 224 |
+
C12_new, p2p, p2p_init, _, loss_save = matcher.optimize(shape_dict, target_dict, target_normals)
|
| 225 |
+
evecs1, evecs2 = shape_dict["evecs"], target_dict["evecs"]
|
| 226 |
+
evecs_2trans = evecs2.t() @ torch.diag(target_dict["mass"])
|
| 227 |
with torch.no_grad():
|
| 228 |
C12_end_zo = torch_zoomout(evecs1, evecs2, evecs_2trans, C12_new.squeeze(), matcher.cfg.sds_conf.zoomout)
|
| 229 |
+
p2p_zo, _ = extract_p2p_torch_fmap(C12_end_zo, shape_dict["evecs"], target_dict["evecs"])
|
| 230 |
return build_outputs(datadicts.shape_surf, datadicts.target_surf, datadicts.cmap1, p2p_zo, tag="run")
|
| 231 |
|
| 232 |
|
|
|
|
| 320 |
print("Making matcher")
|
| 321 |
matcher = zero_shot.Matcher(cfg)
|
| 322 |
print("Matcher ready")
|
| 323 |
+
#shutil.rmtree("tmp")
|
| 324 |
os.makedirs("tmp", exist_ok=True)
|
| 325 |
datadicts = None
|
| 326 |
demo.launch(share=args.share)
|