savoji commited on
Commit
4bfe3e0
·
verified ·
1 Parent(s): 56bf4a9

Add files using upload-large-folder tool

Browse files
Files changed (20) hide show
  1. msx_assets/object/052_extra_large_clamp/textured.mtl +3 -0
  2. msx_assets/object/052_extra_large_clamp/textured.obj +0 -0
  3. msx_assets/object/073-e_lego_duplo/textured.mtl +3 -0
  4. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/implicitron/models/base_model.rst +9 -0
  5. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/implicitron/models/generic_model.rst +9 -0
  6. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/implicitron/models/index.rst +15 -0
  7. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/implicitron/models/metrics.rst +9 -0
  8. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/implicitron/models/model_dbir.rst +9 -0
  9. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/renderer/fisheyecameras.rst +9 -0
  10. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/__init__.py +49 -0
  11. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/ball_query.py +142 -0
  12. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/cubify.py +275 -0
  13. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/graph_conv.py +176 -0
  14. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/interp_face_attrs.py +101 -0
  15. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/knn.py +250 -0
  16. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/packed_to_padded.py +198 -0
  17. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/points_normals.py +191 -0
  18. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/sample_farthest_points.py +202 -0
  19. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/sample_points_from_meshes.py +180 -0
  20. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/vert_align.py +107 -0
msx_assets/object/052_extra_large_clamp/textured.mtl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ newmtl material_0
2
+ # shader_type beckmann
3
+ map_Kd texture_map.png
msx_assets/object/052_extra_large_clamp/textured.obj ADDED
The diff for this file is too large to render. See raw diff
 
msx_assets/object/073-e_lego_duplo/textured.mtl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ newmtl material_0
2
+ # shader_type beckmann
3
+ map_Kd texture_map.png
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/implicitron/models/base_model.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.base_model
2
+ =======================================
3
+
4
+ base_model
5
+
6
+ .. automodule:: pytorch3d.implicitron.models.base_model
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/implicitron/models/generic_model.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.generic_model
2
+ ==========================================
3
+
4
+ generic_model
5
+
6
+ .. automodule:: pytorch3d.implicitron.models.generic_model
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/implicitron/models/index.rst ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models
2
+ ============================
3
+
4
+ .. toctree::
5
+
6
+ base_model
7
+ generic_model
8
+ metrics
9
+ model_dbir
10
+ feature_extractor/index
11
+ global_encoder/index
12
+ implicit_function/index
13
+ renderer/index
14
+ view_pooler/index
15
+ visualization/index
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/implicitron/models/metrics.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.metrics
2
+ ====================================
3
+
4
+ metrics
5
+
6
+ .. automodule:: pytorch3d.implicitron.models.metrics
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/implicitron/models/model_dbir.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.implicitron.models.model_dbir
2
+ =======================================
3
+
4
+ model_dbir
5
+
6
+ .. automodule:: pytorch3d.implicitron.models.model_dbir
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/docs/modules/renderer/fisheyecameras.rst ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ pytorch3d.renderer.fisheyecameras
2
+ =================================
3
+
4
+ fisheyecameras
5
+
6
+ .. automodule:: pytorch3d.renderer.fisheyecameras
7
+ :members:
8
+ :undoc-members:
9
+ :show-inheritance:
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/__init__.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from .ball_query import ball_query
10
+ from .cameras_alignment import corresponding_cameras_alignment
11
+
12
+ from .cubify import cubify
13
+ from .graph_conv import GraphConv
14
+ from .interp_face_attrs import interpolate_face_attributes
15
+ from .iou_box3d import box3d_overlap
16
+ from .knn import knn_gather, knn_points
17
+ from .laplacian_matrices import cot_laplacian, laplacian, norm_laplacian
18
+
19
+ from .mesh_face_areas_normals import mesh_face_areas_normals
20
+ from .mesh_filtering import taubin_smoothing
21
+
22
+ from .packed_to_padded import packed_to_padded, padded_to_packed
23
+ from .perspective_n_points import efficient_pnp
24
+ from .points_alignment import corresponding_points_alignment, iterative_closest_point
25
+ from .points_normals import (
26
+ estimate_pointcloud_local_coord_frames,
27
+ estimate_pointcloud_normals,
28
+ )
29
+ from .points_to_volumes import (
30
+ add_pointclouds_to_volumes,
31
+ add_points_features_to_volume_densities_features,
32
+ )
33
+
34
+ from .sample_farthest_points import sample_farthest_points
35
+
36
+ from .sample_points_from_meshes import sample_points_from_meshes
37
+ from .subdivide_meshes import SubdivideMeshes
38
+ from .utils import (
39
+ convert_pointclouds_to_tensor,
40
+ eyes,
41
+ get_point_covariances,
42
+ is_pointclouds,
43
+ wmean,
44
+ )
45
+
46
+ from .vert_align import vert_align
47
+
48
+
49
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/ball_query.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from typing import Union
10
+
11
+ import torch
12
+ from pytorch3d import _C
13
+ from torch.autograd import Function
14
+ from torch.autograd.function import once_differentiable
15
+
16
+ from .knn import _KNN
17
+ from .utils import masked_gather
18
+
19
+
20
class _ball_query(Function):
    """
    Torch autograd Function wrapper for Ball Query C++/CUDA implementations.
    """

    @staticmethod
    def forward(ctx, p1, p2, lengths1, lengths2, K, radius):
        """
        Argument definitions are the same as in the ball_query function.
        """
        idx, dists = _C.ball_query(p1, p2, lengths1, lengths2, K, radius)
        ctx.save_for_backward(p1, p2, lengths1, lengths2, idx)
        # idx holds integer neighbor indices; it carries no gradient.
        ctx.mark_non_differentiable(idx)
        return dists, idx

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_dists, grad_idx):
        p1, p2, lengths1, lengths2, idx = ctx.saved_tensors
        # TODO(gkioxari) Change cast to floats once we add support for doubles.
        if grad_dists.dtype != torch.float32:
            grad_dists = grad_dists.float()
        if p1.dtype != torch.float32:
            p1 = p1.float()
        if p2.dtype != torch.float32:
            p2 = p2.float()

        # Reuse the KNN backward function
        # by default, norm is 2
        grad_p1, grad_p2 = _C.knn_points_backward(
            p1, p2, lengths1, lengths2, idx, 2, grad_dists
        )
        # lengths1, lengths2, K and radius are non-differentiable inputs.
        return grad_p1, grad_p2, None, None, None, None
53
+
54
+
55
def ball_query(
    p1: torch.Tensor,
    p2: torch.Tensor,
    lengths1: Union[torch.Tensor, None] = None,
    lengths2: Union[torch.Tensor, None] = None,
    K: int = 500,
    radius: float = 0.2,
    return_nn: bool = True,
):
    """
    Ball Query is an alternative to KNN. It can be
    used to find all points in p2 that are within a specified radius
    to the query point in p1 (with an upper limit of K neighbors).

    The neighbors returned are not necessarily the *nearest* to the
    point in p1, just the first K values in p2 which are within the
    specified radius.

    This method is faster than kNN when there are large numbers of points
    in p2 and the ordering of neighbors is not important compared to the
    distance being within the radius threshold.

    "Ball query's local neighborhood guarantees a fixed region scale thus
    making local region features more generalizable across space, which is
    preferred for tasks requiring local pattern recognition
    (e.g. semantic point labeling)" [1].

    [1] Charles R. Qi et al, "PointNet++: Deep Hierarchical Feature Learning
    on Point Sets in a Metric Space", NeurIPS 2017.

    Args:
        p1: Tensor of shape (N, P1, D) giving a batch of N point clouds, each
            containing up to P1 points of dimension D. These represent the centers of
            the ball queries.
        p2: Tensor of shape (N, P2, D) giving a batch of N point clouds, each
            containing up to P2 points of dimension D.
        lengths1: LongTensor of shape (N,) of values in the range [0, P1], giving the
            length of each pointcloud in p1. Or None to indicate that every cloud has
            length P1.
        lengths2: LongTensor of shape (N,) of values in the range [0, P2], giving the
            length of each pointcloud in p2. Or None to indicate that every cloud has
            length P2.
        K: Integer giving the upper bound on the number of samples to take
            within the radius
        radius: the radius around each point within which the neighbors need to be located
        return_nn: If set to True returns the K neighbor points in p2 for each point in p1.

    Returns:
        dists: Tensor of shape (N, P1, K) giving the squared distances to
            the neighbors. This is padded with zeros both where a cloud in p2
            has fewer than S points and where a cloud in p1 has fewer than P1 points
            and also if there are fewer than K points which satisfy the radius threshold.

        idx: LongTensor of shape (N, P1, K) giving the indices of the
            S neighbors in p2 for points in p1.
            Concretely, if `p1_idx[n, i, k] = j` then `p2[n, j]` is the k-th
            neighbor to `p1[n, i]` in `p2[n]`. This is padded with -1 both where a cloud
            in p2 has fewer than S points and where a cloud in p1 has fewer than P1
            points and also if there are fewer than K points which satisfy the radius threshold.

        nn: Tensor of shape (N, P1, K, D) giving the K neighbors in p2 for
            each point in p1. Concretely, `p2_nn[n, i, k]` gives the k-th neighbor
            for `p1[n, i]`. Returned if `return_nn` is True. The output is a tensor
            of shape (N, P1, K, D).

    """
    if p1.shape[0] != p2.shape[0]:
        raise ValueError("pts1 and pts2 must have the same batch dimension.")
    if p1.shape[2] != p2.shape[2]:
        raise ValueError("pts1 and pts2 must have the same point dimension.")

    p1 = p1.contiguous()
    p2 = p2.contiguous()
    P1 = p1.shape[1]
    P2 = p2.shape[1]
    N = p1.shape[0]

    # Missing lengths mean every cloud in the batch is full-size.
    if lengths1 is None:
        lengths1 = torch.full((N,), P1, dtype=torch.int64, device=p1.device)
    if lengths2 is None:
        lengths2 = torch.full((N,), P2, dtype=torch.int64, device=p1.device)

    dists, idx = _ball_query.apply(p1, p2, lengths1, lengths2, K, radius)

    # Gather the neighbors if needed
    points_nn = masked_gather(p2, idx) if return_nn else None

    return _KNN(dists=dists, idx=idx, knn=points_nn)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/cubify.py ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+
10
+ from typing import Optional
11
+
12
+ import torch
13
+ import torch.nn.functional as F
14
+
15
+ from pytorch3d.common.compat import meshgrid_ij
16
+
17
+ from pytorch3d.structures import Meshes
18
+
19
+
20
def unravel_index(idx, dims) -> torch.Tensor:
    r"""
    Equivalent to np.unravel_index: convert flat indices into an array of
    shape dims back to multi-dimensional coordinates.

    Args:
        idx: A LongTensor whose elements are indices into the
            flattened version of an array of dimensions dims.
        dims: The shape of the array to be indexed.

    Implemented only for dims=(N, H, W, D); returns a tensor of shape
    (len(idx), 4) whose columns are the (n, h, w, d) coordinates.
    """
    if len(dims) != 4:
        raise ValueError("Expects a 4-element list.")
    N, H, W, D = dims
    # Peel off one axis at a time with a floor-divmod chain,
    # starting from the innermost (fastest-varying) dimension.
    d = idx % D
    rest = idx // D
    w = rest % W
    rest = rest // W
    h = rest % H
    n = rest // H
    return torch.stack((n, h, w, d), dim=1)
37
+
38
+
39
def ravel_index(idx, dims) -> torch.Tensor:
    """
    Computes the linear index in an array of shape dims.
    It performs the reverse functionality of unravel_index.

    Args:
        idx: A LongTensor of shape (N, 3). Each row corresponds to indices into an
            array of dimensions dims.
        dims: The shape of the array to be indexed.

    Implemented only for dims=(H, W, D); returns a LongTensor of shape (N,).
    """
    if len(dims) != 3:
        raise ValueError("Expects a 3-element list")
    if idx.shape[1] != 3:
        raise ValueError("Expects an index tensor of shape Nx3")
    H, W, D = dims
    # Row-major strides for an (H, W, D) array: dot each (h, w, d) row
    # with (W*D, D, 1) to get the flat offset.
    strides = torch.tensor([W * D, D, 1], dtype=idx.dtype, device=idx.device)
    return (idx * strides).sum(dim=1)
56
+
57
+
58
@torch.no_grad()
def cubify(
    voxels: torch.Tensor,
    thresh: float,
    *,
    feats: Optional[torch.Tensor] = None,
    device=None,
    align: str = "topleft",
) -> Meshes:
    r"""
    Converts a voxel to a mesh by replacing each occupied voxel with a cube
    consisting of 12 faces and 8 vertices. Shared vertices are merged, and
    internal faces are removed.
    Args:
        voxels: A FloatTensor of shape (N, D, H, W) containing occupancy probabilities.
        thresh: A scalar threshold. If a voxel occupancy is larger than
            thresh, the voxel is considered occupied.
        feats: A FloatTensor of shape (N, K, D, H, W) containing the color information
            of each voxel. K is the number of channels. This is supported only when
            align == "center"
        device: The device of the output meshes
        align: Defines the alignment of the mesh vertices and the grid locations.
            Has to be one of {"topleft", "corner", "center"}. See below for explanation.
            Default is "topleft".
    Returns:
        meshes: A Meshes object of the corresponding meshes.


    The alignment between the vertices of the cubified mesh and the voxel locations (or pixels)
    is defined by the choice of `align`. We support three modes, as shown below for a 2x2 grid:

    X---X----         X-------X        ---------
    |   |   |         |   |   |        | X | X |
    X---X----         ---------        ---------
    |   |   |         |   |   |        | X | X |
    ---------         X-------X        ---------

     topleft           corner            center

    In the figure, X denote the grid locations and the squares represent the added cuboids.
    When `align="topleft"`, then the top left corner of each cuboid corresponds to the
    pixel coordinate of the input grid.
    When `align="corner"`, then the corners of the output mesh span the whole grid.
    When `align="center"`, then the grid locations form the center of the cuboids.
    """

    if device is None:
        device = voxels.device

    if align not in ["topleft", "corner", "center"]:
        raise ValueError("Align mode must be one of (topleft, corner, center).")

    # An empty batch produces an empty Meshes object.
    if len(voxels) == 0:
        return Meshes(verts=[], faces=[])

    N, D, H, W = voxels.size()
    # vertices corresponding to a unit cube: 8x3
    cube_verts = torch.tensor(
        [
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ],
        dtype=torch.int64,
        device=device,
    )

    # faces corresponding to a unit cube: 12x3
    cube_faces = torch.tensor(
        [
            [0, 1, 2],
            [1, 3, 2],  # left face: 0, 1
            [2, 3, 6],
            [3, 7, 6],  # bottom face: 2, 3
            [0, 2, 6],
            [0, 6, 4],  # front face: 4, 5
            [0, 5, 1],
            [0, 4, 5],  # up face: 6, 7
            [6, 7, 5],
            [6, 5, 4],  # right face: 8, 9
            [1, 7, 3],
            [1, 5, 7],  # back face: 10, 11
        ],
        dtype=torch.int64,
        device=device,
    )

    # Averaging kernels over pairs of adjacent voxels along each axis; used
    # below to detect pairs where both voxels are occupied (interior faces).
    wx = torch.tensor([0.5, 0.5], device=device).view(1, 1, 1, 1, 2)
    wy = torch.tensor([0.5, 0.5], device=device).view(1, 1, 1, 2, 1)
    wz = torch.tensor([0.5, 0.5], device=device).view(1, 1, 2, 1, 1)

    # Binarize occupancy at the threshold.
    voxelt = voxels.ge(thresh).float()
    # N x 1 x D x H x W
    voxelt = voxelt.view(N, 1, D, H, W)

    # N x 1 x (D-1) x (H-1) x (W-1)
    # gt(0.5) is true when at least one of the two adjacent voxels is occupied.
    voxelt_x = F.conv3d(voxelt, wx).gt(0.5).float()
    voxelt_y = F.conv3d(voxelt, wy).gt(0.5).float()
    voxelt_z = F.conv3d(voxelt, wz).gt(0.5).float()

    # 12 x N x 1 x D x H x W
    # faces_idx[f, ...] == 1 means face f of that voxel's cube is kept.
    faces_idx = torch.ones((cube_faces.size(0), N, 1, D, H, W), device=device)

    # Suppress faces shared between two occupied voxels (interior faces).
    # add left face
    faces_idx[0, :, :, :, :, 1:] = 1 - voxelt_x
    faces_idx[1, :, :, :, :, 1:] = 1 - voxelt_x
    # add bottom face
    faces_idx[2, :, :, :, :-1, :] = 1 - voxelt_y
    faces_idx[3, :, :, :, :-1, :] = 1 - voxelt_y
    # add front face
    faces_idx[4, :, :, 1:, :, :] = 1 - voxelt_z
    faces_idx[5, :, :, 1:, :, :] = 1 - voxelt_z
    # add up face
    faces_idx[6, :, :, :, 1:, :] = 1 - voxelt_y
    faces_idx[7, :, :, :, 1:, :] = 1 - voxelt_y
    # add right face
    faces_idx[8, :, :, :, :, :-1] = 1 - voxelt_x
    faces_idx[9, :, :, :, :, :-1] = 1 - voxelt_x
    # add back face
    faces_idx[10, :, :, :-1, :, :] = 1 - voxelt_z
    faces_idx[11, :, :, :-1, :, :] = 1 - voxelt_z

    # Unoccupied voxels contribute no faces at all.
    faces_idx *= voxelt

    # N x H x W x D x 12
    faces_idx = faces_idx.permute(1, 2, 4, 5, 3, 0).squeeze(1)
    # (NHWD) x 12
    faces_idx = faces_idx.contiguous()
    faces_idx = faces_idx.view(-1, cube_faces.size(0))

    # boolean to linear index
    # NF x 2
    linind = torch.nonzero(faces_idx, as_tuple=False)

    # NF x 4
    nyxz = unravel_index(linind[:, 0], (N, H, W, D))

    # NF x 3: faces
    faces = torch.index_select(cube_faces, 0, linind[:, 1])

    grid_faces = []
    for d in range(cube_faces.size(1)):
        # NF x 3
        xyz = torch.index_select(cube_verts, 0, faces[:, d])
        # Reorder unit-cube vertex coords from (x, y, z) to (y, x, z) to match
        # the (h, w, d) layout of nyxz before offsetting by the voxel position.
        permute_idx = torch.tensor([1, 0, 2], device=device)
        yxz = torch.index_select(xyz, 1, permute_idx)
        yxz += nyxz[:, 1:]
        # NF x 1
        temp = ravel_index(yxz, (H + 1, W + 1, D + 1))
        grid_faces.append(temp)
    # NF x 3
    grid_faces = torch.stack(grid_faces, dim=1)

    y, x, z = meshgrid_ij(torch.arange(H + 1), torch.arange(W + 1), torch.arange(D + 1))
    y = y.to(device=device, dtype=torch.float32)
    x = x.to(device=device, dtype=torch.float32)
    z = z.to(device=device, dtype=torch.float32)

    if align == "center":
        x = x - 0.5
        y = y - 0.5
        z = z - 0.5

    # Normalize vertex coordinates to [-1, 1]; "corner" spans the full grid.
    margin = 0.0 if align == "corner" else 1.0
    y = y * 2.0 / (H - margin) - 1.0
    x = x * 2.0 / (W - margin) - 1.0
    z = z * 2.0 / (D - margin) - 1.0

    # ((H+1)(W+1)(D+1)) x 3
    grid_verts = torch.stack((x, y, z), dim=3).view(-1, 3)

    # No occupied voxels: return N empty meshes.
    if len(nyxz) == 0:
        verts_list = [torch.tensor([], dtype=torch.float32, device=device)] * N
        faces_list = [torch.tensor([], dtype=torch.int64, device=device)] * N
        return Meshes(verts=verts_list, faces=faces_list)

    num_verts = grid_verts.size(0)
    # Temporarily shift each batch element's face indices into a disjoint
    # range so vertex usage can be marked in one flat buffer.
    grid_faces += nyxz[:, 0].view(-1, 1) * num_verts
    # idleverts[v] == 1 means grid vertex v is referenced by no face.
    idleverts = torch.ones(num_verts * N, dtype=torch.uint8, device=device)

    indices = grid_faces.flatten()
    if device.type == "cpu":
        # NOTE(review): dedup appears to be a CPU-only scatter workaround —
        # confirm against upstream pytorch3d.
        indices = torch.unique(indices)
    idleverts.scatter_(0, indices, 0)
    # Undo the per-batch shift.
    grid_faces -= nyxz[:, 0].view(-1, 1) * num_verts
    # Number of faces per batch element, used to split the packed tensors.
    split_size = torch.bincount(nyxz[:, 0], minlength=N)
    faces_list = list(torch.split(grid_faces, split_size.tolist(), 0))

    idleverts = idleverts.view(N, num_verts)
    # Running count of idle vertices before each index; used below to
    # renumber faces after idle vertices are dropped.
    idlenum = idleverts.cumsum(1)

    verts_list = [
        grid_verts.index_select(0, (idleverts[n] == 0).nonzero(as_tuple=False)[:, 0])
        for n in range(N)
    ]

    textures_list = None
    if feats is not None and align == "center":
        # We return a TexturesAtlas containing one color for each face
        # N x K x D x H x W -> N x H x W x D x K
        feats = feats.permute(0, 3, 4, 2, 1)

        # (NHWD) x K
        feats = feats.reshape(-1, feats.size(4))
        feats = torch.index_select(feats, 0, linind[:, 0])
        feats = feats.reshape(-1, 1, 1, feats.size(1))
        feats_list = list(torch.split(feats, split_size.tolist(), 0))
        from pytorch3d.renderer.mesh.textures import TexturesAtlas

        textures_list = TexturesAtlas(feats_list)

    # Shift face indices down to account for the removed idle vertices.
    faces_list = [nface - idlenum[n][nface] for n, nface in enumerate(faces_list)]
    return Meshes(verts=verts_list, faces=faces_list, textures=textures_list)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/graph_conv.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ from pytorch3d import _C
13
+ from torch.autograd import Function
14
+ from torch.autograd.function import once_differentiable
15
+
16
+
17
class GraphConv(nn.Module):
    """A single graph convolution layer."""

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        init: str = "normal",
        directed: bool = False,
    ) -> None:
        """
        Args:
            input_dim: Number of input features per vertex.
            output_dim: Number of output features per vertex.
            init: Weight initialization method. Can be one of ['zero', 'normal'].
            directed: Bool indicating if edges in the graph are directed.
        """
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.directed = directed
        # w0 transforms each vertex's own features; w1 transforms the
        # neighbor features before aggregation (see forward).
        self.w0 = nn.Linear(input_dim, output_dim)
        self.w1 = nn.Linear(input_dim, output_dim)

        if init == "normal":
            nn.init.normal_(self.w0.weight, mean=0, std=0.01)
            nn.init.normal_(self.w1.weight, mean=0, std=0.01)
            self.w0.bias.data.zero_()
            self.w1.bias.data.zero_()
        elif init == "zero":
            self.w0.weight.data.zero_()
            self.w1.weight.data.zero_()
        else:
            raise ValueError('Invalid GraphConv initialization "%s"' % init)

    def forward(self, verts, edges):
        """
        Args:
            verts: FloatTensor of shape (V, input_dim) where V is the number of
                vertices and input_dim is the number of input features
                per vertex. input_dim has to match the input_dim specified
                in __init__.
            edges: LongTensor of shape (E, 2) where E is the number of edges
                where each edge has the indices of the two vertices which
                form the edge.

        Returns:
            out: FloatTensor of shape (V, output_dim) where output_dim is the
                number of output features per vertex.
        """
        if verts.is_cuda != edges.is_cuda:
            raise ValueError("verts and edges tensors must be on the same device.")
        if verts.shape[0] == 0:
            # empty graph.
            # Multiplying by verts.sum() keeps the (empty) output connected to
            # verts in the autograd graph.
            return verts.new_zeros((0, self.output_dim)) * verts.sum()

        verts_w0 = self.w0(verts)  # (V, output_dim)
        verts_w1 = self.w1(verts)  # (V, output_dim)

        # Use the fused C++/CUDA kernel on GPU, the pure-python fallback on CPU.
        if torch.cuda.is_available() and verts.is_cuda and edges.is_cuda:
            neighbor_sums = gather_scatter(verts_w1, edges, self.directed)
        else:
            neighbor_sums = gather_scatter_python(
                verts_w1, edges, self.directed
            )  # (V, output_dim)

        # Add neighbor features to each vertex's features.
        out = verts_w0 + neighbor_sums
        return out

    def __repr__(self):
        Din, Dout, directed = self.input_dim, self.output_dim, self.directed
        return "GraphConv(%d -> %d, directed=%r)" % (Din, Dout, directed)
90
+
91
+
92
def gather_scatter_python(input, edges, directed: bool = False):
    """
    Python implementation of gather_scatter: for every edge (a, b), add the
    features of vertex b into the output row of vertex a — and, for
    undirected graphs, also add the features of a into the row of b.

    Given a directed graph: v0 -> v1 -> v2 the updated feature for v1 depends
    on v2 in order to be consistent with Morris et al. AAAI 2019
    (https://arxiv.org/abs/1810.02244). This only affects
    directed graphs; for undirected graphs v1 will depend on both v0 and v2,
    no matter which way the edges are physically stored.

    Args:
        input: Tensor of shape (num_vertices, input_dim).
        edges: Tensor of edge indices of shape (num_edges, 2).
        directed: bool indicating if edges are directed.

    Returns:
        output: Tensor of same shape as input.
    """
    if input.dim() != 2:
        raise ValueError("input can only have 2 dimensions.")
    if edges.dim() != 2:
        raise ValueError("edges can only have 2 dimensions.")
    if edges.shape[1] != 2:
        raise ValueError("edges must be of shape (num_edges, 2).")

    feat_dim = input.shape[1]
    # Broadcast each endpoint column to (num_edges, feat_dim) so it can be
    # used as an index for row-wise gather/scatter.
    src = edges[:, 0].unsqueeze(1).expand(-1, feat_dim)
    dst = edges[:, 1].unsqueeze(1).expand(-1, feat_dim)

    # Accumulate features of the second endpoint into the first.
    out = torch.zeros_like(input).scatter_add(0, src, input.gather(0, dst))
    if not directed:
        # Undirected: also accumulate in the opposite direction.
        out = out.scatter_add(0, dst, input.gather(0, src))
    return out
128
+
129
+
130
class GatherScatter(Function):
    """
    Torch autograd Function wrapper for gather_scatter C++/CUDA implementations.
    """

    @staticmethod
    def forward(ctx, input, edges, directed=False):
        """
        Args:
            ctx: Context object used to calculate gradients.
            input: Tensor of shape (num_vertices, input_dim)
            edges: Tensor of edge indices of shape (num_edges, 2)
            directed: Bool indicating if edges are directed.

        Returns:
            output: Tensor of same shape as input.
        """
        if not (input.dim() == 2):
            raise ValueError("input can only have 2 dimensions.")
        if not (edges.dim() == 2):
            raise ValueError("edges can only have 2 dimensions.")
        if not (edges.shape[1] == 2):
            raise ValueError("edges must be of shape (num_edges, 2).")
        if not (input.dtype == torch.float32):
            raise ValueError("input has to be of type torch.float32.")

        ctx.directed = directed
        input, edges = input.contiguous(), edges.contiguous()
        # Only edges are needed in backward; the gradient w.r.t. input is
        # computed from grad_output alone.
        ctx.save_for_backward(edges)
        # Flag passed to the kernel selecting forward vs backward aggregation.
        backward = False
        output = _C.gather_scatter(input, edges, directed, backward)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        grad_output = grad_output.contiguous()
        edges = ctx.saved_tensors[0]
        directed = ctx.directed
        # NOTE(review): backward=True presumably makes the kernel aggregate
        # along reversed edges (transpose of forward) — confirm in C++ source.
        backward = True
        grad_input = _C.gather_scatter(grad_output, edges, directed, backward)
        # Edge indices and the directed flag are non-differentiable inputs.
        grad_edges = None
        grad_directed = None
        return grad_input, grad_edges, grad_directed


# Convenience alias: call the autograd Function like a plain function.
gather_scatter = GatherScatter.apply
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/interp_face_attrs.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import torch
10
+ from pytorch3d import _C
11
+ from torch.autograd import Function
12
+ from torch.autograd.function import once_differentiable
13
+
14
+
15
def interpolate_face_attributes(
    pix_to_face: torch.Tensor,
    barycentric_coords: torch.Tensor,
    face_attributes: torch.Tensor,
) -> torch.Tensor:
    """
    Interpolate arbitrary face attributes using the barycentric coordinates
    for each pixel in the rasterized output.

    Args:
        pix_to_face: LongTensor of shape (...) specifying the indices
            of the faces (in the packed representation) which overlap each
            pixel in the image. A value < 0 indicates that the pixel does not
            overlap any face and should be skipped.
        barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
            the barycentric coordinates of each pixel
            relative to the faces (in the packed
            representation) which overlap the pixel.
        face_attributes: packed attributes of shape (total_faces, 3, D),
            specifying the value of the attribute for each
            vertex in the face.

    Returns:
        pixel_vals: tensor of shape (N, H, W, K, D) giving the interpolated
            value of the face attribute for each pixel.
    """
    # Check shapes
    F, FV, D = face_attributes.shape
    if FV != 3:
        raise ValueError("Faces can only have three vertices; got %r" % FV)
    N, H, W, K, _ = barycentric_coords.shape
    if pix_to_face.shape != (N, H, W, K):
        msg = "pix_to_face must have shape (batch_size, H, W, K); got %r"
        raise ValueError(msg % (pix_to_face.shape,))

    # On CPU use the python version
    # TODO: Implement a C++ version of this function
    if not pix_to_face.is_cuda:
        args = (pix_to_face, barycentric_coords, face_attributes)
        return interpolate_face_attributes_python(*args)

    # Otherwise flatten and call the custom autograd function
    # (the CUDA kernel operates on flat (N*H*W*K) pixel lists).
    N, H, W, K = pix_to_face.shape
    pix_to_face = pix_to_face.view(-1)
    barycentric_coords = barycentric_coords.view(N * H * W * K, 3)
    args = (pix_to_face, barycentric_coords, face_attributes)
    out = _InterpFaceAttrs.apply(*args)
    # Restore the (N, H, W, K, D) layout for the caller.
    out = out.view(N, H, W, K, -1)
    return out
+
65
+
66
class _InterpFaceAttrs(Function):
    """
    Torch autograd Function wrapping the C++/CUDA kernels for
    interpolate_face_attributes. Operates on flattened pixel lists.
    """

    @staticmethod
    def forward(ctx, pix_to_face, barycentric_coords, face_attrs):
        args = (pix_to_face, barycentric_coords, face_attrs)
        # All three inputs are needed again to compute gradients in backward.
        ctx.save_for_backward(*args)
        return _C.interp_face_attrs_forward(*args)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_pix_attrs):
        args = ctx.saved_tensors
        args = args + (grad_pix_attrs,)
        grads = _C.interp_face_attrs_backward(*args)
        # pix_to_face holds integer indices and is not differentiable.
        grad_pix_to_face = None
        grad_barycentric_coords = grads[0]
        grad_face_attrs = grads[1]
        return grad_pix_to_face, grad_barycentric_coords, grad_face_attrs
83
+
84
+
85
def interpolate_face_attributes_python(
    pix_to_face: torch.Tensor,
    barycentric_coords: torch.Tensor,
    face_attributes: torch.Tensor,
) -> torch.Tensor:
    """
    Pure-PyTorch implementation of barycentric face-attribute interpolation.
    Same contract as interpolate_face_attributes: returns a tensor of shape
    (N, H, W, K, D); pixels with pix_to_face < 0 are set to zero.
    """
    _, _, attr_dim = face_attributes.shape
    N, H, W, K, _ = barycentric_coords.shape
    total = N * H * W * K

    # Pixels not covered by any face have a negative index. Remember them,
    # then temporarily point them at face 0 so the gather below is valid.
    empty = pix_to_face < 0
    safe_faces = pix_to_face.clone()
    safe_faces[empty] = 0

    # Gather the three vertex attributes of each pixel's face.
    gather_idx = safe_faces.view(total, 1, 1).expand(total, 3, attr_dim)
    per_pixel = face_attributes.gather(0, gather_idx).view(N, H, W, K, 3, attr_dim)

    # Barycentric-weighted sum over the three vertices.
    out = (barycentric_coords.unsqueeze(-1) * per_pixel).sum(dim=-2)
    # Zero out the placeholder values for uncovered pixels.
    out[empty] = 0
    return out
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/knn.py ADDED
@@ -0,0 +1,250 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from collections import namedtuple
10
+ from typing import Union
11
+
12
+ import torch
13
+ from pytorch3d import _C
14
+ from torch.autograd import Function
15
+ from torch.autograd.function import once_differentiable
16
+
17
+
18
+ _KNN = namedtuple("KNN", "dists idx knn")
19
+
20
+
21
class _knn_points(Function):
    """
    Torch autograd Function wrapper for KNN C++/CUDA implementations.
    """

    @staticmethod
    # pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently.
    def forward(
        ctx,
        p1,
        p2,
        lengths1,
        lengths2,
        K,
        version,
        norm: int = 2,
        return_sorted: bool = True,
    ):
        """
        K-Nearest neighbors on point clouds.

        Args:
            p1: Tensor of shape (N, P1, D) giving a batch of N point clouds, each
                containing up to P1 points of dimension D.
            p2: Tensor of shape (N, P2, D) giving a batch of N point clouds, each
                containing up to P2 points of dimension D.
            lengths1: LongTensor of shape (N,) of values in the range [0, P1], giving the
                length of each pointcloud in p1. Or None to indicate that every cloud has
                length P1.
            lengths2: LongTensor of shape (N,) of values in the range [0, P2], giving the
                length of each pointcloud in p2. Or None to indicate that every cloud has
                length P2.
            K: Integer giving the number of nearest neighbors to return.
            version: Which KNN implementation to use in the backend. If version=-1,
                the correct implementation is selected based on the shapes of the inputs.
            norm: (int) indicating the norm. Only supports 1 (for L1) and 2 (for L2).
            return_sorted: (bool) whether to return the nearest neighbors sorted in
                ascending order of distance.

        Returns:
            p1_dists: Tensor of shape (N, P1, K) giving the squared distances to
                the nearest neighbors. This is padded with zeros both where a cloud in p2
                has fewer than K points and where a cloud in p1 has fewer than P1 points.

            p1_idx: LongTensor of shape (N, P1, K) giving the indices of the
                K nearest neighbors from points in p1 to points in p2.
                Concretely, if `p1_idx[n, i, k] = j` then `p2[n, j]` is the k-th nearest
                neighbors to `p1[n, i]` in `p2[n]`. This is padded with zeros both where a cloud
                in p2 has fewer than K points and where a cloud in p1 has fewer than P1 points.
        """
        if not ((norm == 1) or (norm == 2)):
            raise ValueError("Support for 1 or 2 norm.")

        idx, dists = _C.knn_points_idx(p1, p2, lengths1, lengths2, norm, K, version)

        # sort KNN in ascending order if K > 1
        if K > 1 and return_sorted:
            if lengths2.min() < K:
                # Some clouds in p2 have fewer than K points: their padded
                # (zero) distance slots must not win the sort. Temporarily
                # set them to inf so they sink to the end, sort, then
                # restore the zero padding.
                P1 = p1.shape[1]
                mask = lengths2[:, None] <= torch.arange(K, device=dists.device)[None]
                # mask has shape [N, K], true where dists irrelevant
                mask = mask[:, None].expand(-1, P1, -1)
                # mask has shape [N, P1, K], true where dists irrelevant
                dists[mask] = float("inf")
                dists, sort_idx = dists.sort(dim=2)
                dists[mask] = 0
            else:
                dists, sort_idx = dists.sort(dim=2)
            # Reorder the neighbor indices consistently with the sorted dists.
            idx = idx.gather(2, sort_idx)

        ctx.save_for_backward(p1, p2, lengths1, lengths2, idx)
        # idx is an integer index tensor: no gradient flows through it.
        ctx.mark_non_differentiable(idx)
        ctx.norm = norm
        return dists, idx

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_dists, grad_idx):
        p1, p2, lengths1, lengths2, idx = ctx.saved_tensors
        norm = ctx.norm
        # TODO(gkioxari) Change cast to floats once we add support for doubles.
        if not (grad_dists.dtype == torch.float32):
            grad_dists = grad_dists.float()
        if not (p1.dtype == torch.float32):
            p1 = p1.float()
        if not (p2.dtype == torch.float32):
            p2 = p2.float()
        grad_p1, grad_p2 = _C.knn_points_backward(
            p1, p2, lengths1, lengths2, idx, norm, grad_dists
        )
        # Only p1 and p2 are differentiable; the remaining six forward
        # arguments (lengths, K, version, norm, return_sorted) get None.
        return grad_p1, grad_p2, None, None, None, None, None, None
112
+
113
+
114
def knn_points(
    p1: torch.Tensor,
    p2: torch.Tensor,
    lengths1: Union[torch.Tensor, None] = None,
    lengths2: Union[torch.Tensor, None] = None,
    norm: int = 2,
    K: int = 1,
    version: int = -1,
    return_nn: bool = False,
    return_sorted: bool = True,
) -> _KNN:
    """
    K-Nearest neighbors on point clouds.

    Args:
        p1: (N, P1, D) batch of N point clouds with up to P1 points each.
        p2: (N, P2, D) batch of N point clouds with up to P2 points each.
        lengths1: (N,) LongTensor with the number of points of each cloud in
            p1, or None if every cloud has exactly P1 points.
        lengths2: (N,) LongTensor with the number of points of each cloud in
            p2, or None if every cloud has exactly P2 points.
        norm: distance norm; supports only 1 (L1) and 2 (L2).
        K: number of nearest neighbors to return.
        version: backend implementation selector; -1 picks automatically
            based on the input shapes.
        return_nn: if True, also gather and return the neighbor coordinates
            via `knn_gather`.
        return_sorted: if True, neighbors are sorted by ascending distance.

    Returns:
        A `_KNN` namedtuple `(dists, idx, knn)` where:
        dists: (N, P1, K) distances to the nearest neighbors, zero-padded
            where a cloud in p2 has fewer than K points or a cloud in p1 has
            fewer than P1 points.
        idx: (N, P1, K) LongTensor; `idx[n, i, k] = j` means `p2[n, j]` is
            the k-th nearest neighbor of `p1[n, i]`. Zero-padded like dists.
        knn: (N, P1, K, D) neighbor coordinates, or None when `return_nn`
            is False.
    """
    if p1.shape[0] != p2.shape[0]:
        raise ValueError("pts1 and pts2 must have the same batch dimension.")
    if p1.shape[2] != p2.shape[2]:
        raise ValueError("pts1 and pts2 must have the same point dimension.")

    # The backend kernels require contiguous inputs.
    p1 = p1.contiguous()
    p2 = p2.contiguous()

    batch = p1.shape[0]
    # Default lengths: every cloud is full.
    if lengths1 is None:
        lengths1 = torch.full(
            (batch,), p1.shape[1], dtype=torch.int64, device=p1.device
        )
    if lengths2 is None:
        lengths2 = torch.full(
            (batch,), p2.shape[1], dtype=torch.int64, device=p1.device
        )

    dists, idx = _knn_points.apply(
        p1, p2, lengths1, lengths2, K, version, norm, return_sorted
    )

    nn = knn_gather(p2, idx, lengths2) if return_nn else None
    return _KNN(dists=dists, idx=idx, knn=nn)
198
+
199
+
200
def knn_gather(
    x: torch.Tensor, idx: torch.Tensor, lengths: Union[torch.Tensor, None] = None
):
    """
    Index a tensor x with the neighbor indices produced by `knn_points`.

    Given `dists, idx = knn_points(p, x, lengths_p, lengths, K)` with
    p of shape (N, L, D) and x of shape (N, M, D), the K nearest neighbors
    of p are `knn_gather(x, idx, lengths)`. x may carry any feature size U,
    not just the coordinate dimension D.

    Args:
        x: (N, M, U) features to gather from.
        idx: (N, L, K) LongTensor of indices as returned by `knn_points`.
        lengths: (N,) LongTensor with the valid length of each batch element
            of x, or None if every element has length M.

    Returns:
        x_out: (N, L, K, U) with `x_out[n, l, k] = x[n, idx[n, l, k]]`;
        entries with `k >= lengths[n]` are filled with 0.0.
    """
    N, M, U = x.shape
    idx_N, L, K = idx.shape

    if N != idx_N:
        raise ValueError("x and idx must have same batch dimension.")

    if lengths is None:
        lengths = torch.full((N,), M, dtype=torch.int64, device=x.device)

    # Broadcast idx over the feature dim, then gather along the point dim.
    gather_idx = idx.unsqueeze(3).expand(-1, -1, -1, U)  # (N, L, K, U)
    x_out = x.unsqueeze(2).expand(-1, -1, K, -1).gather(1, gather_idx)

    if lengths.min() < K:
        # Slots with k >= lengths[n] refer to neighbors that do not exist
        # (the source cloud has fewer than K points) — zero them out.
        invalid = lengths[:, None] <= torch.arange(K, device=x.device)[None]
        invalid = invalid[:, None, :, None].expand(-1, L, -1, U)
        x_out[invalid] = 0.0

    return x_out
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/packed_to_padded.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import torch
10
+ from pytorch3d import _C
11
+ from torch.autograd import Function
12
+ from torch.autograd.function import once_differentiable
13
+
14
+
15
class _PackedToPadded(Function):
    """
    Torch autograd Function wrapper for packed_to_padded C++/CUDA implementations.
    """

    @staticmethod
    def forward(ctx, inputs, first_idxs, max_size):
        """
        Args:
            ctx: Context object used to calculate gradients.
            inputs: FloatTensor of shape (F, D), representing the packed batch tensor.
                e.g. areas for faces in a batch of meshes.
            first_idxs: LongTensor of shape (N,) where N is the number of
                elements in the batch and `first_idxs[i] = f`
                means that the inputs for batch element i begin at `inputs[f]`.
            max_size: Max length of an element in the batch.

        Returns:
            inputs_padded: FloatTensor of shape (N, max_size, D) where max_size is max
                of `sizes`. The values for batch element i which start at
                `inputs[first_idxs[i]]` will be copied to `inputs_padded[i, :]`,
                with zeros padding out the extra inputs.
        """
        # The C++/CUDA kernel only supports 2-D float32 input with int64
        # indices; validate up front to fail with a clear message.
        if not (inputs.dim() == 2):
            raise ValueError("input can only be 2-dimensional.")
        if not (first_idxs.dim() == 1):
            raise ValueError("first_idxs can only be 1-dimensional.")
        if not (inputs.dtype == torch.float32):
            raise ValueError("input has to be of type torch.float32.")
        if not (first_idxs.dtype == torch.int64):
            raise ValueError("first_idxs has to be of type torch.int64.")
        if not isinstance(max_size, int):
            raise ValueError("max_size has to be int.")

        ctx.save_for_backward(first_idxs)
        # Remember F so backward can rebuild the packed shape.
        ctx.num_inputs = int(inputs.shape[0])
        inputs, first_idxs = inputs.contiguous(), first_idxs.contiguous()
        inputs_padded = _C.packed_to_padded(inputs, first_idxs, max_size)
        return inputs_padded

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        grad_output = grad_output.contiguous()
        first_idxs = ctx.saved_tensors[0]
        num_inputs = ctx.num_inputs
        # The backward of packed->padded is padded->packed with the same
        # first_idxs; only `inputs` is differentiable.
        grad_input = _C.padded_to_packed(grad_output, first_idxs, num_inputs)
        return grad_input, None, None
63
+
64
+
65
def packed_to_padded(
    inputs: torch.Tensor, first_idxs: torch.LongTensor, max_size: int
) -> torch.Tensor:
    """
    Convert a packed batch tensor to its padded representation, handling
    arbitrary trailing feature dimensions.

    Args:
        inputs: FloatTensor of shape (F,) or (F, ...), the packed batch
            tensor, e.g. areas for faces in a batch of meshes.
        first_idxs: LongTensor of shape (N,); `first_idxs[i] = f` means the
            inputs for batch element i begin at `inputs[f]`.
        max_size: Max length of an element in the batch.

    Returns:
        FloatTensor of shape (N, max_size) or (N, max_size, ...). The values
        for batch element i, starting at `inputs[first_idxs[i]]`, are copied
        to row i of the output, with zeros padding the remainder.

    The underlying kernel wrapper only accepts 2-D input, so a (F,) tensor
    is lifted to (F, 1) and any trailing dims are flattened, then the
    output is reshaped back.
    """
    original_shape = inputs.shape
    original_ndim = inputs.dim()

    # Bring inputs to the (F, D) layout the kernel wrapper requires.
    if original_ndim == 1:
        flat = inputs.unsqueeze(1)
    else:
        flat = inputs.reshape(original_shape[0], -1)

    padded = _PackedToPadded.apply(flat, first_idxs, max_size)

    # Undo the flattening on the output.
    if original_ndim == 1:
        return padded.squeeze(2)
    if original_ndim == 2:
        return padded
    return padded.view(*padded.shape[:2], *original_shape[1:])
104
+
105
+
106
class _PaddedToPacked(Function):
    """
    Torch autograd Function wrapper for padded_to_packed C++/CUDA implementations.
    """

    @staticmethod
    def forward(ctx, inputs, first_idxs, num_inputs):
        """
        Args:
            ctx: Context object used to calculate gradients.
            inputs: FloatTensor of shape (N, max_size, D), representing
                the padded tensor, e.g. areas for faces in a batch of meshes.
            first_idxs: LongTensor of shape (N,) where N is the number of
                elements in the batch and `first_idxs[i] = f`
                means that the inputs for batch element i begin at `inputs_packed[f]`.
            num_inputs: Number of packed entries (= F)

        Returns:
            inputs_packed: FloatTensor of shape (F, D) where
                `inputs_packed[first_idx[i]:] = inputs[i, :]`.
        """
        # The C++/CUDA kernel only supports 3-D float32 input with int64
        # indices; validate up front to fail with a clear message.
        if not (inputs.dim() == 3):
            raise ValueError("input can only be 3-dimensional.")
        if not (first_idxs.dim() == 1):
            raise ValueError("first_idxs can only be 1-dimensional.")
        if not (inputs.dtype == torch.float32):
            raise ValueError("input has to be of type torch.float32.")
        if not (first_idxs.dtype == torch.int64):
            raise ValueError("first_idxs has to be of type torch.int64.")
        if not isinstance(num_inputs, int):
            raise ValueError("max_size has to be int.")

        ctx.save_for_backward(first_idxs)
        # Remember max_size so backward can rebuild the padded shape.
        ctx.max_size = inputs.shape[1]
        inputs, first_idxs = inputs.contiguous(), first_idxs.contiguous()
        inputs_packed = _C.padded_to_packed(inputs, first_idxs, num_inputs)
        return inputs_packed

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        grad_output = grad_output.contiguous()
        first_idxs = ctx.saved_tensors[0]
        max_size = ctx.max_size
        # The backward of padded->packed is packed->padded with the same
        # first_idxs; only `inputs` is differentiable.
        grad_input = _C.packed_to_padded(grad_output, first_idxs, max_size)
        return grad_input, None, None
152
+
153
+
154
def padded_to_packed(
    inputs: torch.Tensor,
    first_idxs: torch.LongTensor,
    num_inputs: int,
    max_size_dim: int = 1,
) -> torch.Tensor:
    """
    Convert a padded batch tensor to its packed representation, handling
    arbitrary extra feature dimensions and a configurable padded dimension.

    Args:
        inputs: FloatTensor of shape (N, ..., max_size) or
            (N, ..., max_size, ...), the padded tensor (e.g. areas for faces
            in a batch of meshes), with max_size at position `max_size_dim`.
        first_idxs: LongTensor of shape (N,); `first_idxs[i] = f` means the
            inputs for batch element i begin at `inputs_packed[f]`.
        num_inputs: Number of packed entries (= F)
        max_size_dim: the dimension to be packed

    Returns:
        FloatTensor of shape (F,) or (F, ...) where
        `inputs_packed[first_idx[i]:first_idx[i+1]] = inputs[i, ..., :delta[i]]`
        with `delta[i] = first_idx[i+1] - first_idx[i]`.

    The underlying kernel wrapper only accepts 3-D input, so the padded
    dimension is moved to position 1, a (N, max_size) tensor is lifted to
    (N, max_size, 1) and any remaining dims are flattened, then the output
    is reshaped back.
    """
    original_ndim = inputs.dim()
    # Place the padded dimension where the kernel wrapper expects it.
    moved = inputs.movedim(max_size_dim, 1)
    moved_shape = moved.shape

    # Bring the input to the (N, max_size, D) layout.
    if original_ndim == 2:
        flat = moved.unsqueeze(2)
    else:
        flat = moved.reshape(*moved_shape[:2], -1)

    packed = _PaddedToPacked.apply(flat, first_idxs, num_inputs)

    # Undo the flattening on the output.
    if original_ndim == 2:
        return packed.squeeze(1)
    return packed.view(-1, *moved_shape[2:])
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/points_normals.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from typing import Tuple, TYPE_CHECKING, Union
10
+
11
+ import torch
12
+ from pytorch3d.common.workaround import symeig3x3
13
+
14
+ from .utils import convert_pointclouds_to_tensor, get_point_covariances
15
+
16
+
17
+ if TYPE_CHECKING:
18
+ from ..structures import Pointclouds
19
+
20
+
21
def estimate_pointcloud_normals(
    pointclouds: Union[torch.Tensor, "Pointclouds"],
    neighborhood_size: int = 50,
    disambiguate_directions: bool = True,
    *,
    use_symeig_workaround: bool = True,
) -> torch.Tensor:
    """
    Estimates the normals of a batch of `pointclouds`.

    This is a thin convenience wrapper over
    `estimate_pointcloud_local_coord_frames`; see that function for the
    details of the estimation algorithm.

    Args:
        **pointclouds**: Batch of 3-dimensional points of shape
            `(minibatch, num_point, 3)` or a `Pointclouds` object.
        **neighborhood_size**: The size of the neighborhood used to estimate the
            geometry around each point.
        **disambiguate_directions**: If `True`, uses the algorithm from [1] to
            ensure sign consistency of the normals of neighboring points.
        **use_symeig_workaround**: If `True`, uses a custom eigenvalue
            calculation.

    Returns:
        **normals**: A tensor of normals for each input point
            of shape `(minibatch, num_point, 3)`.
            If `pointclouds` are of `Pointclouds` class, returns a padded tensor.

    References:
        [1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
        Local Surface Description, ECCV 2010.
    """
    _, frames = estimate_pointcloud_local_coord_frames(
        pointclouds,
        neighborhood_size=neighborhood_size,
        disambiguate_directions=disambiguate_directions,
        use_symeig_workaround=use_symeig_workaround,
    )
    # Column 0 of each local coordinate frame is the estimated normal.
    return frames[:, :, :, 0]
65
+
66
+
67
def estimate_pointcloud_local_coord_frames(
    pointclouds: Union[torch.Tensor, "Pointclouds"],
    neighborhood_size: int = 50,
    disambiguate_directions: bool = True,
    *,
    use_symeig_workaround: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Estimates the principal directions of curvature (which includes normals)
    of a batch of `pointclouds`.

    The algorithm first finds `neighborhood_size` nearest neighbors for each
    point of the point clouds, followed by obtaining principal vectors of
    covariance matrices of each of the point neighborhoods.
    The main principal vector corresponds to the normals, while the
    other 2 are the direction of the highest curvature and the 2nd highest
    curvature.

    Note that each principal direction is given up to a sign. Hence,
    the function implements `disambiguate_directions` switch that allows
    to ensure consistency of the sign of neighboring normals. The implementation
    follows the sign disabiguation from SHOT descriptors [1].

    The algorithm also returns the curvature values themselves.
    These are the eigenvalues of the estimated covariance matrices
    of each point neighborhood.

    Args:
        **pointclouds**: Batch of 3-dimensional points of shape
            `(minibatch, num_point, 3)` or a `Pointclouds` object.
        **neighborhood_size**: The size of the neighborhood used to estimate the
            geometry around each point.
        **disambiguate_directions**: If `True`, uses the algorithm from [1] to
            ensure sign consistency of the normals of neighboring points.
        **use_symeig_workaround**: If `True`, uses a custom eigenvalue
            calculation.

    Returns:
        **curvatures**: The three principal curvatures of each point
            of shape `(minibatch, num_point, 3)`.
            If `pointclouds` are of `Pointclouds` class, returns a padded tensor.
        **local_coord_frames**: The three principal directions of the curvature
            around each point of shape `(minibatch, num_point, 3, 3)`.
            The principal directions are stored in columns of the output.
            E.g. `local_coord_frames[i, j, :, 0]` is the normal of
            `j`-th point in the `i`-th pointcloud.
            If `pointclouds` are of `Pointclouds` class, returns a padded tensor.

    References:
        [1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
        Local Surface Description, ECCV 2010.
    """

    points_padded, num_points = convert_pointclouds_to_tensor(pointclouds)

    ba, N, dim = points_padded.shape
    if dim != 3:
        raise ValueError(
            "The pointclouds argument has to be of shape (minibatch, N, 3)"
        )

    if (num_points <= neighborhood_size).any():
        raise ValueError(
            "The neighborhood_size argument has to be"
            + " >= size of each of the point clouds."
        )

    # undo global mean for stability
    # TODO: replace with tutil.wmean once landed
    pcl_mean = points_padded.sum(1) / num_points[:, None]
    points_centered = points_padded - pcl_mean[:, None, :]

    # get the per-point covariance and nearest neighbors used to compute it
    cov, knns = get_point_covariances(points_centered, num_points, neighborhood_size)

    # get the local coord frames as principal directions of
    # the per-point covariance
    # this is done with torch.symeig / torch.linalg.eigh, which returns the
    # eigenvectors (=principal directions) in an ascending order of their
    # corresponding eigenvalues, and the smallest eigenvalue's eigenvector
    # corresponds to the normal direction; or with a custom equivalent.
    if use_symeig_workaround:
        curvatures, local_coord_frames = symeig3x3(cov, eigenvectors=True)
    else:
        curvatures, local_coord_frames = torch.linalg.eigh(cov)

    # disambiguate the directions of individual principal vectors
    if disambiguate_directions:
        # disambiguate normal (column 0 = smallest-eigenvalue direction)
        n = _disambiguate_vector_directions(
            points_centered, knns, local_coord_frames[:, :, :, 0]
        )
        # disambiguate the main curvature (column 2 = largest-eigenvalue direction)
        z = _disambiguate_vector_directions(
            points_centered, knns, local_coord_frames[:, :, :, 2]
        )
        # the secondary curvature is just a cross between n and z
        y = torch.cross(n, z, dim=2)
        # cat to form the set of principal directions
        local_coord_frames = torch.stack((n, y, z), dim=3)

    # NOTE: no un-centering is needed — the frames are built from offsets
    # between points, which are unchanged by the mean subtraction above.
    return curvatures, local_coord_frames
169
+
170
+
171
+ def _disambiguate_vector_directions(pcl, knns, vecs: torch.Tensor) -> torch.Tensor:
172
+ """
173
+ Disambiguates normal directions according to [1].
174
+
175
+ References:
176
+ [1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
177
+ Local Surface Description, ECCV 2010.
178
+ """
179
+ # parse out K from the shape of knns
180
+ K = knns.shape[2]
181
+ # the difference between the mean of each neighborhood and
182
+ # each element of the neighborhood
183
+ df = knns - pcl[:, :, None]
184
+ # projection of the difference on the principal direction
185
+ proj = (vecs[:, :, None] * df).sum(3)
186
+ # check how many projections are positive
187
+ n_pos = (proj > 0).type_as(knns).sum(2, keepdim=True)
188
+ # flip the principal directions where number of positive correlations
189
+ flip = (n_pos < (0.5 * K)).type_as(knns)
190
+ vecs = (1.0 - 2.0 * flip) * vecs
191
+ return vecs
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/sample_farthest_points.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from random import randint
10
+ from typing import List, Optional, Tuple, Union
11
+
12
+ import torch
13
+ from pytorch3d import _C
14
+
15
+ from .utils import masked_gather
16
+
17
+
18
def sample_farthest_points(
    points: torch.Tensor,
    lengths: Optional[torch.Tensor] = None,
    K: Union[int, List, torch.Tensor] = 50,
    random_start_point: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Iterative farthest point sampling algorithm [1] to subsample a set of
    K points from a given pointcloud. At each iteration, a point is selected
    which has the largest nearest neighbor distance to any of the
    already selected points.

    Farthest point sampling provides more uniform coverage of the input
    point cloud compared to uniform random sampling.

    [1] Charles R. Qi et al, "PointNet++: Deep Hierarchical Feature Learning
        on Point Sets in a Metric Space", NeurIPS 2017.

    Args:
        points: (N, P, D) array containing the batch of pointclouds
        lengths: (N,) number of points in each pointcloud (to support heterogeneous
            batches of pointclouds)
        K: samples required in each sampled point cloud (this is typically << P). If
            K is an int then the same number of samples are selected for each
            pointcloud in the batch. If K is a tensor is should be length (N,)
            giving the number of samples to select for each element in the batch
        random_start_point: bool, if True, a random point is selected as the starting
            point for iterative sampling.

    Returns:
        selected_points: (N, K, D), array of selected values from points. If the input
            K is a tensor, then the shape will be (N, max(K), D), and padded with
            0.0 for batch elements where k_i < max(K).
        selected_indices: (N, K) array of selected indices. If the input
            K is a tensor, then the shape will be (N, max(K), D), and padded with
            -1 for batch elements where k_i < max(K).
    """
    N, P, D = points.shape
    device = points.device

    constant_length = lengths is None
    # Validate inputs
    if lengths is None:
        lengths = torch.full((N,), P, dtype=torch.int64, device=device)
    else:
        if lengths.shape != (N,):
            raise ValueError("points and lengths must have same batch dimension.")
        if lengths.max() > P:
            raise ValueError("A value in lengths was too large.")

    # TODO: support providing K as a ratio of the total number of points instead of as an int
    max_K = -1
    if isinstance(K, int):
        max_K = K
        K = torch.full((N,), K, dtype=torch.int64, device=device)
    elif isinstance(K, list):
        K = torch.tensor(K, dtype=torch.int64, device=device)

    if K.shape[0] != N:
        raise ValueError("K and points must have the same batch dimension")

    # Check dtypes are correct and convert if necessary
    if not (points.dtype == torch.float32):
        points = points.to(torch.float32)
    if not (lengths.dtype == torch.int64):
        lengths = lengths.to(torch.int64)
    if not (K.dtype == torch.int64):
        K = K.to(torch.int64)

    # Generate the starting indices for sampling
    if random_start_point:
        if constant_length:
            start_idxs = torch.randint(high=P, size=(N,), device=device)
        else:
            # BUGFIX: draw the random fractions on the same device as
            # `lengths` — a default (CPU) tensor here raises a device
            # mismatch when the inputs live on the GPU. Since rand() < 1,
            # the truncated product is always a valid index < lengths[n].
            fracs = torch.rand(lengths.size(), device=device)
            start_idxs = (lengths * fracs).to(torch.int64)
    else:
        start_idxs = torch.zeros_like(lengths)

    with torch.no_grad():
        # pyre-fixme[16]: `pytorch3d_._C` has no attribute `sample_farthest_points`.
        idx = _C.sample_farthest_points(points, lengths, K, start_idxs, max_K)
    sampled_points = masked_gather(points, idx)

    return sampled_points, idx
102
+
103
+
104
def sample_farthest_points_naive(
    points: torch.Tensor,
    lengths: Optional[torch.Tensor] = None,
    K: Union[int, List, torch.Tensor] = 50,
    random_start_point: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Pure-Python reference implementation of iterative farthest point
    sampling (no C++/CUDA kernel).

    Same Args/Returns as sample_farthest_points
    """
    N, P, D = points.shape
    device = points.device

    # Validate inputs
    if lengths is None:
        lengths = torch.full((N,), P, dtype=torch.int64, device=device)
    else:
        if lengths.shape != (N,):
            raise ValueError("points and lengths must have same batch dimension.")
        if lengths.max() > P:
            raise ValueError("Invalid lengths.")

    # TODO: support providing K as a ratio of the total number of points instead of as an int
    if isinstance(K, int):
        K = torch.full((N,), K, dtype=torch.int64, device=device)
    elif isinstance(K, list):
        K = torch.tensor(K, dtype=torch.int64, device=device)

    if K.shape[0] != N:
        raise ValueError("K and points must have the same batch dimension")

    # Find max value of K
    max_K = torch.max(K)

    # List of selected indices from each batch element
    all_sampled_indices = []

    # Process each cloud independently; unused slots stay at -1 so the
    # output is padded consistently across the batch.
    for n in range(N):
        # Initialize an array for the sampled indices, shape: (max_K,)
        sample_idx_batch = torch.full(
            # pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
            #  typing.Tuple[int, ...]]` but got `Tuple[Tensor]`.
            (max_K,),
            fill_value=-1,
            dtype=torch.int64,
            device=device,
        )

        # Initialize closest distances to inf, shape: (P,)
        # This will be updated at each iteration to track the closest distance of the
        # remaining points to any of the selected points
        closest_dists = points.new_full(
            # pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
            #  typing.Tuple[int, ...]]` but got `Tuple[Tensor]`.
            (lengths[n],),
            float("inf"),
            dtype=torch.float32,
        )

        # Select a random point index and save it as the starting point
        # pyre-fixme[6]: For 2nd argument expected `int` but got `Tensor`.
        selected_idx = randint(0, lengths[n] - 1) if random_start_point else 0
        sample_idx_batch[0] = selected_idx

        # If the pointcloud has fewer than K points then only iterate over the min
        # pyre-fixme[6]: For 1st param expected `SupportsRichComparisonT` but got
        #  `Tensor`.
        # pyre-fixme[6]: For 2nd param expected `SupportsRichComparisonT` but got
        #  `Tensor`.
        k_n = min(lengths[n], K[n])

        # Iteratively select points for a maximum of k_n
        for i in range(1, k_n):
            # Find the distance between the last selected point
            # and all the other points. If a point has already been selected
            # it's distance will be 0.0 so it will not be selected again as the max.
            dist = points[n, selected_idx, :] - points[n, : lengths[n], :]
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            #  `int`.
            dist_to_last_selected = (dist**2).sum(-1)  # (P - i)

            # If closer than currently saved distance to one of the selected
            # points, then updated closest_dists
            closest_dists = torch.min(dist_to_last_selected, closest_dists)  # (P - i)

            # The aim is to pick the point that has the largest
            # nearest neighbour distance to any of the already selected points
            selected_idx = torch.argmax(closest_dists)
            sample_idx_batch[i] = selected_idx

        # Add the list of points for this batch to the final list
        all_sampled_indices.append(sample_idx_batch)

    all_sampled_indices = torch.stack(all_sampled_indices, dim=0)

    # Gather the points
    all_sampled_points = masked_gather(points, all_sampled_indices)

    # Return (N, max_K, D) subsampled points and indices
    return all_sampled_points, all_sampled_indices
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/sample_points_from_meshes.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+
10
+ """
11
+ This module implements utility functions for sampling points from
12
+ batches of meshes.
13
+ """
14
+
15
+ import sys
16
+ from typing import Tuple, Union
17
+
18
+ import torch
19
+
20
+ from pytorch3d.ops.mesh_face_areas_normals import mesh_face_areas_normals
21
+
22
+ from pytorch3d.ops.packed_to_padded import packed_to_padded
23
+ from pytorch3d.renderer.mesh.rasterizer import Fragments as MeshFragments
24
+
25
+
26
def sample_points_from_meshes(
    meshes,
    num_samples: int = 10000,
    return_normals: bool = False,
    return_textures: bool = False,
) -> Union[
    torch.Tensor,
    Tuple[torch.Tensor, torch.Tensor],
    Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
]:
    """
    Convert a batch of meshes to a batch of pointclouds by uniformly sampling
    points on the surface of the mesh with probability proportional to the
    face area.

    Args:
        meshes: A Meshes object with a batch of N meshes.
        num_samples: Integer giving the number of point samples per mesh.
        return_normals: If True, return normals for the sampled points.
        return_textures: If True, return textures for the sampled points.

    Returns:
        3-element tuple containing

        - **samples**: FloatTensor of shape (N, num_samples, 3) giving the
          coordinates of sampled points for each mesh in the batch. For empty
          meshes the corresponding row in the samples array will be filled with 0.
        - **normals**: FloatTensor of shape (N, num_samples, 3) giving a normal vector
          to each sampled point. Only returned if return_normals is True.
          For empty meshes the corresponding row in the normals array will
          be filled with 0.
        - **textures**: FloatTensor of shape (N, num_samples, C) giving a C-dimensional
          texture vector to each sampled point. Only returned if return_textures is True.
          For empty meshes the corresponding row in the textures array will
          be filled with 0.

    Raises:
        ValueError: if the batch is empty, if vertices contain nan/inf, or if
            textures are requested but the meshes carry none.

    Note that in a future releases, we will replace the 3-element tuple output
    with a `Pointclouds` datastructure, as follows

    .. code-block:: python

        Pointclouds(samples, normals=normals, features=textures)
    """
    if meshes.isempty():
        raise ValueError("Meshes are empty.")

    verts = meshes.verts_packed()
    if not torch.isfinite(verts).all():
        raise ValueError("Meshes contain nan or inf.")

    if return_textures and meshes.textures is None:
        raise ValueError("Meshes do not contain textures.")

    faces = meshes.faces_packed()
    mesh_to_face = meshes.mesh_to_faces_packed_first_idx()
    num_meshes = len(meshes)
    num_valid_meshes = torch.sum(meshes.valid)  # Non empty meshes.

    # Initialize samples tensor with fill value 0 for empty meshes.
    samples = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)

    # Only compute samples for non empty meshes.
    # Face selection is done under no_grad: the discrete multinomial draw is
    # non-differentiable, so gradients flow only through the barycentric
    # interpolation of the vertex positions below.
    with torch.no_grad():
        areas, _ = mesh_face_areas_normals(verts, faces)  # Face areas can be zero.
        max_faces = meshes.num_faces_per_mesh().max().item()
        areas_padded = packed_to_padded(
            areas, mesh_to_face[meshes.valid], max_faces
        )  # (N, F)

        # TODO (gkioxari) Confirm multinomial bug is not present with real data.
        sample_face_idxs = areas_padded.multinomial(
            num_samples, replacement=True
        )  # (N, num_samples)
        # Offset per-mesh face indices into the packed faces representation.
        sample_face_idxs += mesh_to_face[meshes.valid].view(num_valid_meshes, 1)

    # Get the vertex coordinates of the sampled faces.
    face_verts = verts[faces]
    v0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2]

    # Randomly generate barycentric coords.
    w0, w1, w2 = _rand_barycentric_coords(
        num_valid_meshes, num_samples, verts.dtype, verts.device
    )

    # Use the barycentric coords to get a point on each sampled face.
    a = v0[sample_face_idxs]  # (N, num_samples, 3)
    b = v1[sample_face_idxs]
    c = v2[sample_face_idxs]
    # Masked assignment: rows for invalid (empty) meshes stay zero.
    samples[meshes.valid] = w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c

    if return_normals:
        # Initialize normals tensor with fill value 0 for empty meshes.
        # Normals for the sampled points are face normals computed from
        # the vertices of the face in which the sampled point lies.
        normals = torch.zeros((num_meshes, num_samples, 3), device=meshes.device)
        # Cross product of two edges gives the (unnormalized) face normal.
        vert_normals = (v1 - v0).cross(v2 - v1, dim=1)
        # Clamp by machine epsilon to avoid division by zero for degenerate faces.
        vert_normals = vert_normals / vert_normals.norm(dim=1, p=2, keepdim=True).clamp(
            min=sys.float_info.epsilon
        )
        vert_normals = vert_normals[sample_face_idxs]
        normals[meshes.valid] = vert_normals

    if return_textures:
        # fragment data are of shape NxHxWxK. Here H=S, W=1 & K=1.
        pix_to_face = sample_face_idxs.view(len(meshes), num_samples, 1, 1)  # NxSx1x1
        bary = torch.stack((w0, w1, w2), dim=2).unsqueeze(2).unsqueeze(2)  # NxSx1x1x3
        # zbuf and dists are not used in `sample_textures` so we initialize them with dummy
        dummy = torch.zeros(
            (len(meshes), num_samples, 1, 1), device=meshes.device, dtype=torch.float32
        )  # NxSx1x1
        fragments = MeshFragments(
            pix_to_face=pix_to_face, zbuf=dummy, bary_coords=bary, dists=dummy
        )
        textures = meshes.sample_textures(fragments)  # NxSx1x1xC
        textures = textures[:, :, 0, 0, :]  # NxSxC

    # return
    # TODO(gkioxari) consider returning a Pointclouds instance [breaking]
    if return_normals and return_textures:
        # pyre-fixme[61]: `normals` may not be initialized here.
        # pyre-fixme[61]: `textures` may not be initialized here.
        return samples, normals, textures
    if return_normals:  # return_textures is False
        # pyre-fixme[61]: `normals` may not be initialized here.
        return samples, normals
    if return_textures:  # return_normals is False
        # pyre-fixme[61]: `textures` may not be initialized here.
        return samples, textures
    return samples
155
+
156
+
157
+ def _rand_barycentric_coords(
158
+ size1, size2, dtype: torch.dtype, device: torch.device
159
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
160
+ """
161
+ Helper function to generate random barycentric coordinates which are uniformly
162
+ distributed over a triangle.
163
+
164
+ Args:
165
+ size1, size2: The number of coordinates generated will be size1*size2.
166
+ Output tensors will each be of shape (size1, size2).
167
+ dtype: Datatype to generate.
168
+ device: A torch.device object on which the outputs will be allocated.
169
+
170
+ Returns:
171
+ w0, w1, w2: Tensors of shape (size1, size2) giving random barycentric
172
+ coordinates
173
+ """
174
+ uv = torch.rand(2, size1, size2, dtype=dtype, device=device)
175
+ u, v = uv[0], uv[1]
176
+ u_sqrt = u.sqrt()
177
+ w0 = 1.0 - u_sqrt
178
+ w1 = u_sqrt * (1.0 - v)
179
+ w2 = u_sqrt * v
180
+ return w0, w1, w2
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/ops/vert_align.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+
10
+ import torch
11
+ import torch.nn.functional as F
12
+
13
+
14
def vert_align(
    feats,
    verts,
    return_packed: bool = False,
    interp_mode: str = "bilinear",
    padding_mode: str = "zeros",
    align_corners: bool = True,
) -> torch.Tensor:
    """
    Sample vertex features from a feature map. This operation is called
    "perceptual feature pooling" in [1] or "vert align" in [2].

    [1] Wang et al, "Pixel2Mesh: Generating 3D Mesh Models from Single
    RGB Images", ECCV 2018.
    [2] Gkioxari et al, "Mesh R-CNN", ICCV 2019

    Args:
        feats: FloatTensor of shape (N, C, H, W) representing image features
            from which to sample or a list of features each with potentially
            different C, H or W dimensions.
        verts: FloatTensor of shape (N, V, 3) or an object (e.g. Meshes or Pointclouds)
            with `verts_padded` or `points_padded` as an attribute giving the (x, y, z)
            vertex positions for which to sample. (x, y) verts should be normalized such
            that (-1, -1) corresponds to top-left and (+1, +1) to bottom-right
            location in the input feature map.
        return_packed: (bool) Indicates whether to return packed features
        interp_mode: (str) Specifies how to interpolate features.
            ('bilinear' or 'nearest')
        padding_mode: (str) Specifies how to handle vertices outside of the
            [-1, 1] range. ('zeros', 'reflection', or 'border')
        align_corners (bool): Geometrically, we consider the pixels of the
            input as squares rather than points.
            If set to ``True``, the extrema (``-1`` and ``1``) are considered as
            referring to the center points of the input's corner pixels. If set
            to ``False``, they are instead considered as referring to the corner
            points of the input's corner pixels, making the sampling more
            resolution agnostic. Default: ``True``

    Returns:
        feats_sampled: FloatTensor of shape (N, V, C) giving sampled features for each
            vertex. If feats is a list, we return concatenated features in axis=2 of
            shape (N, V, sum(C_n)) where C_n = feats[n].shape[1].
            If return_packed = True, the features are transformed to a packed
            representation of shape (sum(V), C)

    Raises:
        ValueError: if `verts` is neither a 3D tensor nor an object exposing
            padded coordinates, or if `feats` shapes are inconsistent.
    """
    if torch.is_tensor(verts):
        if verts.dim() != 3:
            raise ValueError("verts tensor should be 3 dimensional")
        grid = verts
    elif hasattr(verts, "verts_padded"):
        grid = verts.verts_padded()
    elif hasattr(verts, "points_padded"):
        grid = verts.points_padded()
    else:
        # Fixed message: previously had mismatched quote characters and a
        # missing space ("`points_padded' or`verts_padded`").
        raise ValueError(
            "verts must be a tensor or have a "
            "`verts_padded` or `points_padded` attribute."
        )

    # grid_sample expects a (N, H_out, W_out, 2) sampling grid; use the (x, y)
    # components of the vertices as a single-row grid.
    grid = grid[:, None, :, :2]  # (N, 1, V, 2)

    if torch.is_tensor(feats):
        feats = [feats]
    for feat in feats:
        if feat.dim() != 4:
            raise ValueError("feats must have shape (N, C, H, W)")
        if grid.shape[0] != feat.shape[0]:
            raise ValueError("inconsistent batch dimension")

    feats_sampled = []
    for feat in feats:
        feat_sampled = F.grid_sample(
            feat,
            grid,
            mode=interp_mode,
            padding_mode=padding_mode,
            align_corners=align_corners,
        )  # (N, C, 1, V)
        feat_sampled = feat_sampled.squeeze(dim=2).transpose(1, 2)  # (N, V, C)
        feats_sampled.append(feat_sampled)
    # Concatenate per-map channels so each vertex gets one feature vector.
    feats_sampled = torch.cat(feats_sampled, dim=2)  # (N, V, sum(C))

    if return_packed:
        # flatten the first two dimensions: (N*V, C)
        feats_sampled = feats_sampled.view(-1, feats_sampled.shape[-1])
        if hasattr(verts, "verts_padded_to_packed_idx"):
            # Drop padded (invalid) rows by gathering only real vertices.
            idx = (
                verts.verts_padded_to_packed_idx()
                .view(-1, 1)
                .expand(-1, feats_sampled.shape[-1])
            )
            feats_sampled = feats_sampled.gather(0, idx)  # (sum(V), C)

    return feats_sampled