# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Optional
import torch
from actionmesh.model.utils.tensor_ops import merge_batch_time, split_batch_time
from pytorch3d.ops import sample_farthest_points
from pytorch3d.ops.utils import masked_gather
class SamplingType(str, Enum):
    """Supported point cloud sampling strategies."""

    # Uniform random sampling without replacement (per batch element).
    RANDOM = "random"
    # Farthest-point sampling using only the first 3 (XYZ) channels
    # for distance computation.
    FPS = "fps"
    # Farthest-point sampling using all D channels for distance computation.
    FPS_FULL = "fps_full"
# ---------------------------------------------------------------------------
# Low-level helpers
# ---------------------------------------------------------------------------
def _farthest_point_sample(
    points: torch.Tensor,
    n_samples: int,
    random_start_point: bool = True,
    sampling_type: SamplingType = SamplingType.FPS,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Run farthest-point sampling on a (B, N, D) point cloud.

    Uses the PyTorch3D GPU kernel when *points* lives on CUDA, otherwise falls
    back to the CPU ``fpsample.bucket_fps_kdline_sampling`` implementation.

    Args:
        points: (B, N, D) input point cloud.
        n_samples: number of points to select.
        random_start_point: randomise the FPS seed point.
        sampling_type: ``FPS`` uses only XYZ for distance computation,
            ``FPS_FULL`` uses all D channels.

    Returns:
        sampled_points: (B, n_samples, D)
        indices: (B, n_samples) or (1, n_samples)

    Raises:
        ValueError: if *points* is not 3-D, or on the CPU path when the
            batch size is not 1.
    """
    if points.ndim != 3:
        raise ValueError("Expected 3-D tensor (B, N, D), " f"got {points.ndim}-D")
    if points.is_cuda:
        # GPU path: the PyTorch3D kernel handles arbitrary batch sizes.
        if sampling_type == SamplingType.FPS:
            # Distances are computed on XYZ only; the gather below still
            # returns all D channels of the selected points.
            distance_input = points[..., :3]
        else:
            distance_input = points
        _, indices = sample_farthest_points(
            distance_input,
            K=n_samples,
            random_start_point=random_start_point,
        )
        sampled_points = masked_gather(points, indices)
    else:
        # CPU path: deferred import keeps fpsample optional for GPU-only use.
        from fpsample import fpsample
        if points.shape[0] != 1:
            raise ValueError("CPU FPS only supports batch size 1")
        # fpsample chooses its own random seed point when start_idx is None.
        start_idx = None if random_start_point else 0
        if sampling_type == SamplingType.FPS:
            distance_input = points[0, :, :3]
        else:
            distance_input = points[0]
        # NOTE(review): fpsample's API is documented for NumPy arrays; this
        # passes a torch CPU tensor directly — confirm it is accepted.
        # bucket_fps_kdline_sampling requires a "level" parameter controlling
        # the k-d tree bucket granularity. Lower = faster but less accurate.
        num_points = points.shape[1]
        kdline_level = 5 if num_points <= 25_000 else 7
        indices = fpsample.bucket_fps_kdline_sampling(
            distance_input,
            n_samples,
            kdline_level,
            start_idx=start_idx,
        )
        # fpsample returns a NumPy index array; convert to a (1, n_samples)
        # long tensor so both branches share the same gather call.
        indices = torch.from_numpy(indices)[None].long()
        sampled_points = masked_gather(points, indices)
    return sampled_points, indices
def sample_from_indices(
    points: torch.Tensor,
    indices: torch.Tensor,
) -> torch.Tensor:
    """Gather points from a batched cloud using pre-computed indices.

    Args:
        points: (B, N_PTS, D) point cloud to gather from.
        indices: (B, M) per-batch indices, or (1, M) shared indices that
            are broadcast to every batch element.

    Returns:
        (B, M, D) gathered points.

    Raises:
        ValueError: on a non-3-D *points*, a non-2-D *indices*, or a
            batch-size mismatch that broadcasting cannot resolve.
    """
    if points.ndim != 3:
        raise ValueError(f"Expected 3-D points, got {points.ndim}-D")
    if indices.ndim != 2:
        raise ValueError(f"Expected 2-D indices, got {indices.ndim}-D")

    n_batch = points.shape[0]
    # A single shared index row is broadcast across the whole batch.
    if indices.shape[0] == 1:
        indices = indices.expand(n_batch, -1)
    if indices.shape[0] != n_batch:
        raise ValueError(
            "Batch size mismatch: "
            f"points {n_batch} vs "
            f"indices {indices.shape[0]}"
        )
    return masked_gather(points, indices)
# ---------------------------------------------------------------------------
# Strategy helpers (one per sampling path)
# ---------------------------------------------------------------------------
def _sample_identity(
points: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
"""Not enough points to downsample — return everything unchanged."""
indices = torch.arange(
points.shape[1],
device=points.device,
).reshape(1, -1)
return points, indices
def _sample_random(
    points: torch.Tensor,
    n_samples: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Uniform random sampling without replacement, independent per batch row.

    Args:
        points: (B, N, D) point cloud.
        n_samples: number of points to keep per batch element.

    Returns:
        sampled_points: (B, n_samples, D)
        indices: (B, n_samples) indices into the point axis.
    """
    n_batch, n_pts = points.shape[0], points.shape[1]
    # One independent permutation per batch element; permutations are drawn
    # from the CPU RNG stream and only then moved to the points' device.
    per_batch = [torch.randperm(n_pts)[:n_samples] for _ in range(n_batch)]
    indices = torch.stack(per_batch).to(points.device)
    return sample_from_indices(points, indices), indices
def _sample_fps(
    points: torch.Tensor,
    n_samples: int,
    sampling_type: SamplingType,
    fps_max_points: Optional[int],
    fps_random: bool,
    fps_chunks: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Farthest-point sampling with optional random pre-sampling and chunking.

    When *fps_max_points* is set the input is first randomly reduced to at
    most that many points (but never fewer than *n_samples*) before FPS is
    applied; *fps_chunks* splits the (possibly pre-sampled) point cloud along
    the point axis and runs independent FPS on each chunk, concatenating the
    results. This can help when N is very large.

    Args:
        points: (B, N, D) point cloud.
        n_samples: number of points to keep. Must be divisible by
            *fps_chunks* (validated by :func:`sample_pc`).
        sampling_type: ``FPS`` (XYZ distances) or ``FPS_FULL`` (all channels).
        fps_max_points: optional cap on the number of points fed into FPS.
        fps_random: randomise the FPS starting point.
        fps_chunks: number of independent FPS chunks.

    Returns:
        sampled_points: (B, n_samples, D)
        indices: (B, n_samples) or (1, n_samples). NOTE: when pre-sampling
            triggers, the indices are relative to the pre-sampled cloud,
            not to the original *points*.
    """
    batch_size = points.shape[0]
    # Optional random pre-sampling to cap the FPS input size. Clamp n_pre to
    # the true point count so it always equals the width of points_pre even
    # when fps_max_points exceeds the number of available points.
    if fps_max_points is not None:
        n_pts = points.shape[1]
        n_pre = min(n_pts, max(fps_max_points, n_samples))
        pre_indices = torch.stack(
            [torch.randperm(n_pts)[:n_pre] for _ in range(batch_size)],
        ).to(points.device)
        points_pre = sample_from_indices(points, pre_indices)
    else:
        n_pre = points.shape[1]
        points_pre = points
    # If pre-sampling already gave us <= n_samples points, nothing left to do.
    if n_pre <= n_samples:
        indices = torch.arange(
            points_pre.shape[1],
            device=points.device,
        ).reshape(1, -1)
        return points_pre, indices
    # Chunked FPS: each chunk contributes an equal share of the samples.
    chunk_size = n_samples // fps_chunks
    points_list: list[torch.Tensor] = []
    indices_list: list[torch.Tensor] = []
    # BUGFIX: torch.chunk yields ceil(n_pre / fps_chunks)-sized chunks (the
    # last one may be smaller), so each chunk's index offset must accumulate
    # the actual chunk widths. The previous chunk_id * (n_pre // fps_chunks)
    # offset drifted whenever n_pre was not divisible by fps_chunks,
    # producing indices that pointed at the wrong points.
    offset = 0
    for chunk in points_pre.chunk(fps_chunks, dim=1):
        chunk_out, chunk_indices = _farthest_point_sample(
            chunk,
            n_samples=chunk_size,
            random_start_point=fps_random,
            sampling_type=sampling_type,
        )
        points_list.append(chunk_out)
        indices_list.append(chunk_indices + offset)
        offset += chunk.shape[1]
    indices = torch.cat(indices_list, dim=1)
    sampled_points = torch.cat(points_list, dim=1)
    return sampled_points, indices
# ---------------------------------------------------------------------------
# Main entry point
# ---------------------------------------------------------------------------
def sample_pc(
    points: torch.Tensor,
    n_samples: int,
    sampling_type: SamplingType | str = SamplingType.RANDOM,
    fps_max_points: Optional[int] = None,
    fps_random: bool = True,
    fps_chunks: int = 1,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Sample *n_samples* points from a batched point cloud.

    Args:
        points: (B, N_PTS, D) input point cloud.
        n_samples: number of points to keep.
        sampling_type: strategy — ``RANDOM``, ``FPS`` or ``FPS_FULL``.
        fps_max_points: if set, randomly pre-sample at most this many
            points before running FPS.
        fps_random: randomise the FPS starting point.
        fps_chunks: split the point cloud into this many chunks and run
            FPS independently on each.

    Returns:
        sampled_points: (B, n_samples, D)
        indices: (B, n_samples) or (1, n_samples)

    Raises:
        TypeError: if *points* is not a tensor.
        ValueError: on a non-3-D input, a non-divisible chunk count, or an
            unknown sampling type.
    """
    # ---- validation ------------------------------------------------------
    if not isinstance(points, torch.Tensor):
        raise TypeError(f"Expected torch.Tensor, got {type(points)}")
    if points.ndim != 3:
        raise ValueError("Expected 3-D (B, N_PTS, D), " f"got {points.ndim}-D")
    if n_samples % fps_chunks != 0:
        raise ValueError(
            f"n_samples ({n_samples}) must be "
            f"divisible by fps_chunks ({fps_chunks})"
        )
    # Accept plain strings for backward compatibility.
    if isinstance(sampling_type, str):
        sampling_type = SamplingType(sampling_type)

    # ---- dispatch --------------------------------------------------------
    # Nothing to reduce: hand the cloud back untouched.
    if points.shape[1] <= n_samples:
        return _sample_identity(points)
    if sampling_type is SamplingType.RANDOM:
        return _sample_random(points, n_samples)
    if sampling_type in (SamplingType.FPS, SamplingType.FPS_FULL):
        return _sample_fps(
            points,
            n_samples,
            sampling_type,
            fps_max_points,
            fps_random,
            fps_chunks,
        )
    raise ValueError(f"Unsupported sampling type: {sampling_type}")
def sample_pc_grouped(
    points: torch.Tensor,
    n_samples: int,
    n_grouped_frames: int,
    sampling_type: SamplingType | str = SamplingType.FPS,
    fps_max_points: Optional[int] = None,
    fps_random: bool = True,
    fps_chunks: int = 1,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Sample using the first frame, then broadcast indices across frames.

    Treats the batch dimension as ``(B * n_grouped_frames)`` and ensures
    every frame of each batch element shares the same sampled point indices.

    Args:
        points: (B*T, N_PTS, D) input point cloud where
            B*T = batch_size * n_grouped_frames.
        n_samples: number of points to keep per frame.
        n_grouped_frames: number of frames per batch element.
        sampling_type: forwarded to :func:`sample_pc`.
        fps_max_points: forwarded to :func:`sample_pc`.
        fps_random: forwarded to :func:`sample_pc`.
        fps_chunks: forwarded to :func:`sample_pc`.

    Returns:
        sampled_points: (B*T, n_samples, D)
        indices: (B*T, n_samples)
    """
    # Accept plain strings for backward compatibility.
    if isinstance(sampling_type, str):
        sampling_type = SamplingType(sampling_type)

    # View the flat batch as (B, T, N_PTS, D) so that frame 0 of every
    # batch element can be isolated.
    grouped = split_batch_time(points, n_grouped_frames)

    # Compute the sampling indices from the first frame only.
    # NOTE(review): sample_pc may return a (1, n_samples) index row on its
    # identity path; confirm downstream shapes when B > 1 in that case.
    _, frame0_indices = sample_pc(
        points=grouped[:, 0],
        n_samples=n_samples,
        sampling_type=sampling_type,
        fps_max_points=fps_max_points,
        fps_random=fps_random,
        fps_chunks=fps_chunks,
    )

    # Replicate the indices across the time axis, then flatten back to the
    # (B*T, n_samples) layout expected by the gather over the flat batch.
    tiled = frame0_indices.unsqueeze(1).repeat(1, n_grouped_frames, 1)
    flat_indices = merge_batch_time(tiled)
    sampled_points = masked_gather(points, flat_indices)
    return sampled_points, flat_indices