file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
lit/containers/scene.py | Python | from dataclasses import dataclass, field
from pathlib import Path
from typing import List
import camtools as ct
import numpy as np
import open3d as o3d
from tqdm import tqdm
from lit.containers.base_container import BaseContainer
from lit.containers.fg_box import FGBox
from lit.containers.frame import Frame
from lit.recon_utils import (
bboxes_to_lineset,
get_indices_inside_bboxes,
get_indices_outside_bboxes,
remove_statistical_outlier,
)
@dataclass
class Scene(BaseContainer):
    """
    Class to handle a scene of frames. This includes:

    - Collecting frame data into a scene.
    - Saving the scene to disk.
    - Loading the scene from disk.
    - Scene processing, e.g. filtering, reconstruction, etc.

    Saved as a .pth file with PyTorch (via BaseContainer).
    """

    # Name of the scene. Frames appended with check_valid=True must carry
    # the same scene_name.
    scene_name: str = None
    # Ordered list of frames in the scene.
    frames: List[Frame] = field(default_factory=list)
    # Backup normalizer pose (not used).
    _normalizer_pose: np.ndarray = None
    _is_pose_normalized: bool = False

    def __post_init__(self):
        super().__post_init__()
        # Guard against an explicit frames=None (e.g. from deserialization).
        if self.frames is None:
            self.frames = []

    def to_dict(self):
        """Serialize the scene (recursively) to a plain dict."""
        return {
            "scene_name": self.scene_name,
            "frames": [frame.to_dict() for frame in self.frames],
        }

    @classmethod
    def from_dict(cls, dict_data: dict):
        """Reconstruct a Scene from a dict produced by to_dict()."""
        return cls(
            scene_name=dict_data["scene_name"],
            frames=[Frame.from_dict(frame) for frame in dict_data["frames"]],
        )

    def normalize_poses(self):
        """
        Normalize frames' poses for better reconstruction, and save the
        normalizer pose to self._normalizer_pose.

        The middle frame is used as the reference pose; a 90-degree yaw is
        composed in so that the front direction becomes +y.
        """
        frame_poses = [frame.frame_pose for frame in self.frames]
        frame_poses = np.stack(frame_poses, axis=0)
        center_index = len(frame_poses) // 2
        center_pose = frame_poses[center_index]
        # Before: +x points to front.
        # After : +y points to front, +x points to right.
        rotate_x_y_R = ct.convert.euler_to_R(yaw=np.pi / 2, pitch=0, roll=0)
        rotate_x_y_pose = np.eye(4)
        rotate_x_y_pose[:3, :3] = rotate_x_y_R
        self._normalizer_pose = rotate_x_y_pose @ np.linalg.inv(center_pose)
        for frame in self.frames:
            frame.frame_pose = self._normalizer_pose @ frame.frame_pose
        self._is_pose_normalized = True

    def undo_normalize_poses(self):
        """
        Undo normalization of poses, by applying the inverse of the
        normalizer pose to all frames.

        Raises:
            ValueError: If normalize_poses() has not been called.
        """
        if not self._is_pose_normalized:
            raise ValueError("Poses are not normalized yet.")
        inv_normalizer_pose = np.linalg.inv(self._normalizer_pose)
        for frame in self.frames:
            frame.frame_pose = inv_normalizer_pose @ frame.frame_pose
        # Fix: reset the flag so a second undo (which would corrupt the
        # poses) is rejected, and normalize_poses() can be applied again.
        self._is_pose_normalized = False

    def __len__(self):
        return len(self.frames)

    def __getitem__(self, idx):
        return self.frames[idx]

    def __iter__(self):
        return iter(self.frames)

    def __str__(self) -> str:
        return f"Scene({self.scene_name}, {len(self.frames)} frames)"

    def __repr__(self) -> str:
        return self.__str__()

    def is_frame_valid(self, index):
        """
        Check if frame at index is valid (same scene name and index).
        """
        return (
            self.frames[index].scene_name == self.scene_name
            and self.frames[index].frame_index == index
        )

    def sample_by_indices(self, frame_indices):
        """
        Sample a subset of frames by indices. This modifies the Scene
        in-place; the resulting order follows frame_indices.
        """
        self.frames = [self.frames[i] for i in frame_indices]

    def append_frame(self, frame: Frame, check_valid=True):
        """
        Append a frame to the scene.

        Args:
            frame: Frame to append.
            check_valid: Check the frame's frame_index for sequential
                appending and check the scene name.

        Raises:
            ValueError: If check_valid is True and the frame's scene name
                or frame_index does not match this scene.
        """
        if check_valid:
            if frame.scene_name != self.scene_name:
                raise ValueError(
                    f"Scene name {frame.scene_name} does not match "
                    f"scene name {self.scene_name}"
                )
            if len(self.frames) != frame.frame_index:
                raise ValueError(
                    f"Frame index {frame.frame_index} does not match "
                    f"scene length {len(self.frames)}"
                )
        self.frames.append(frame)

    def extract_fg(
        self,
        select_labels: List[int] = None,
        verbose: bool = False,
    ):
        """
        Extract foreground from the scene.

        Args:
            select_labels: List of enabled labels. Set to None to enable all.
            verbose: Show a progress bar.

        Returns:
            [FGBox, FGBox, ...]  # A flat list of FGBoxes in all frames.
            Each FGBox already contains the scene_name and frame_index
            information so it knows which frame it belongs to. Boxes that
            contain no points are skipped.

        Notes on FGBox:
            FGBox(local_points, local_bbox, frame_pose, frame_index)
        """
        fg_boxes = []
        for frame in tqdm(
            self,
            desc="Extracting foreground points",
            disable=not verbose,
        ):
            # Hard-code: only the top lidar is used.
            top_lidar_start_idx = 0
            top_lidar_end_idx = frame.num_points_of_each_lidar[0]
            # Extract points (xyz only).
            local_points = frame.local_points[top_lidar_start_idx:top_lidar_end_idx][
                :, :3
            ]
            # Extract bboxes by label (bbox column 7 holds the label).
            if select_labels is None:
                # Fix: np.bool was removed in NumPy 1.24; use builtin bool.
                bbox_mask = np.ones_like(frame.local_bboxes[:, 7], dtype=bool)
            else:
                bbox_mask = np.isin(frame.local_bboxes[:, 7], select_labels)
            local_bboxes = frame.local_bboxes[bbox_mask]
            indices_inside_bboxes = get_indices_inside_bboxes(
                points=local_points, bboxes=local_bboxes
            )
            # Mask object_ids with the same bbox mask.
            assert len(frame.object_ids) == len(bbox_mask)
            object_ids = [
                object_id
                for (object_id, mask) in zip(frame.object_ids, bbox_mask)
                if mask
            ]
            # Prepare FGBoxes.
            assert len(local_bboxes) == len(indices_inside_bboxes) == len(object_ids)
            for (
                local_bbox,
                indices,
                object_id,
            ) in zip(
                local_bboxes,
                indices_inside_bboxes,
                object_ids,
            ):
                if len(indices) == 0:
                    continue
                fg_boxes.append(
                    FGBox(
                        scene_name=frame.scene_name,
                        frame_index=frame.frame_index,
                        frame_pose=frame.frame_pose,
                        object_id=object_id,
                        local_points=local_points[indices],
                        local_bbox=local_bbox,
                    )
                )
        return fg_boxes

    def extract_bg(
        self,
        enabled_lidars=(0,),
        remove_foreground=True,
        expand_box_ratio=1.0,
        raise_bbox=0.0,
        per_frame_rso_nb_neighbors=0,
        per_frame_rso_std_ratio=0.0,
        verbose=False,
    ):
        """
        Extract background from the scene by removing points within the
        annotated bounding boxes.

        Args:
            enabled_lidars: Indices of lidars to use.
                0: Top
                1: Front
                2: Side left
                3: Side right
                4: Rear
            remove_foreground: Remove foreground points within bboxes.
            expand_box_ratio: Expand the bbox by this ratio.
            raise_bbox: Raise the bbox z-axis by this amount (in meters) to
                keep points on the ground. This is useful only when
                remove_foreground is True.
            per_frame_rso_nb_neighbors:
                Number of neighbors to use for statistical outlier removal,
                per frame. 0 disables the filter.
            per_frame_rso_std_ratio:
                Standard deviation ratio for statistical outlier removal,
                per frame.
            verbose: Show a progress bar.

        Returns:
            {
                "points": (N, 3) background points in world coordinates.
                "lidar_poses": (N, 4, 4) per-point lidar poses.
                "lidar_centers": (N, 3) per-point lidar centers.
                "lineset": Single merged Open3D LineSet of all bboxes.
                "unique_lidar_poses": (M, 4, 4) one pose per (frame, lidar).
            }
        """
        if not isinstance(enabled_lidars, (list, tuple)):
            raise ValueError(
                f"enabled_lidars must be a list or tuple, got {enabled_lidars}"
            )
        all_points = []  # (N, 3) after concatenation.
        all_lidar_poses = []  # (N, 4, 4) after concatenation.
        all_linesets = []  # List of Open3D line sets. TODO: return all_bboxes.
        all_unique_lidar_poses = []  # (M, 4, 4) after concatenation.
        for frame in tqdm(
            self,
            desc="Extracting background points",
            disable=not verbose,
        ):
            # Compute lidar start/end indices. list() guards against
            # num_points_of_each_lidar being an ndarray, where `[0] + arr`
            # would broadcast-add instead of prepending.
            lidar_start_indices = np.cumsum(
                [0] + list(frame.num_points_of_each_lidar[:-1])
            )
            lidar_end_indices = np.cumsum(frame.num_points_of_each_lidar)
            frame_pose = frame.frame_pose
            local_points = []
            lidar_poses = []
            for lidar_idx in enabled_lidars:
                # Collect points (xyz only).
                start_idx = lidar_start_indices[lidar_idx]
                end_idx = lidar_end_indices[lidar_idx]
                lidar_local_points = frame.local_points[start_idx:end_idx][:, :3]
                # Remove foreground points.
                if remove_foreground:
                    local_bboxes = np.copy(frame.local_bboxes)
                    if expand_box_ratio != 1.0:
                        local_bboxes[:, 3:6] *= expand_box_ratio
                    if raise_bbox != 0:
                        local_bboxes[:, 2] += raise_bbox
                    outside_indices = get_indices_outside_bboxes(
                        lidar_local_points, local_bboxes
                    )
                    lidar_local_points = lidar_local_points[outside_indices]
                # Remove statistical outliers.
                if per_frame_rso_nb_neighbors != 0:
                    lidar_local_points = remove_statistical_outlier(
                        lidar_local_points,
                        nb_neighbors=per_frame_rso_nb_neighbors,
                        std_ratio=per_frame_rso_std_ratio,
                    )
                local_points.append(lidar_local_points)
                # Collect lidar poses (one copy per surviving point).
                lidar_pose = frame_pose @ frame.lidar_to_vehicle_poses[lidar_idx]
                lidar_poses.append(
                    np.tile(lidar_pose, (len(lidar_local_points), 1, 1)),
                )
                all_unique_lidar_poses.append(lidar_pose)
            local_points = np.concatenate(local_points, axis=0)
            points = ct.transform.transform_points(local_points, frame_pose)
            all_points.append(points)
            lidar_poses = np.concatenate(lidar_poses, axis=0)
            all_lidar_poses.append(lidar_poses)
            # Bboxes.
            linesets = bboxes_to_lineset(
                bboxes=frame.local_bboxes,
                frame_pose=frame_pose,
            )
            all_linesets.append(linesets)
        all_points = np.concatenate(all_points, axis=0).astype(np.float32)
        all_lidar_poses = np.concatenate(all_lidar_poses, axis=0).astype(np.float32)
        # Merge all per-frame linesets into a single LineSet.
        bbox_lineset = o3d.geometry.LineSet()
        for lineset in all_linesets:
            bbox_lineset += lineset
        all_lidar_centers = all_lidar_poses[:, :3, 3]
        all_unique_lidar_poses = np.array(all_unique_lidar_poses).astype(np.float32)
        return {
            "points": all_points,
            "lidar_poses": all_lidar_poses,
            "lidar_centers": all_lidar_centers,
            "lineset": bbox_lineset,
            "unique_lidar_poses": all_unique_lidar_poses,
        }

    def visualize(self):
        """Quick visual check: show the background points with Open3D."""
        # Extract background.
        bg_data = self.extract_bg(
            enabled_lidars=(0,),
            remove_foreground=True,
            raise_bbox=0.0,
        )
        # Extract foreground (currently not added to the visualization).
        fg_boxes = self.extract_fg(
            select_labels=(1,),
            verbose=False,
        )
        # Visualize.
        bg_points = bg_data["points"]
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(bg_points)
        o3d.visualization.draw_geometries([pcd])
def test_load_scene():
    """Smoke-test loading one pickled scene and print its summary."""
    scene_path = (
        Path("/media/data/projects/lit/lit_data/nuscenes/scene")
        / "0ac05652a4c44374998be876ba5cd6fd.pkl"
    )
    scene = Scene.load(scene_path)
    print(f"Loaded scene: {scene} of {len(scene)} frames")


if __name__ == "__main__":
    test_load_scene()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/containers/sim_frame.py | Python | from dataclasses import dataclass
import camtools as ct
import numpy as np
from lit.containers.base_container import BaseContainer
@dataclass
class SimFrame(BaseContainer):
    """
    Container for a single simulated LiDAR frame.
    """

    frame_index: int = None  # Frame index in the scene.
    frame_pose: np.ndarray = None  # (4, 4) pose of the frame (vehicle)
    local_points: np.ndarray = None  # (N, 3) points in local coordinates.
    local_bboxes: np.ndarray = None  # (M, 7) bboxes in local coordinates.
    incident_angles: np.ndarray = None  # (N, ) incident angles of the points.

    def __post_init__(self):
        super().__post_init__()

    def to_dict(self):
        """Serialize all fields into a plain dict."""
        field_names = (
            "frame_index",
            "frame_pose",
            "local_points",
            "local_bboxes",
            "incident_angles",
        )
        return {name: getattr(self, name) for name in field_names}

    @classmethod
    def from_dict(cls, dict_data: dict):
        """Rebuild a SimFrame from a dict (e.g. a loaded .npz archive)."""
        # frame_index may round-trip through numpy as a 0-d array.
        raw_index = dict_data["frame_index"]
        if isinstance(raw_index, np.ndarray):
            raw_index = raw_index.item()
        return cls(
            frame_index=raw_index,
            frame_pose=dict_data["frame_pose"],
            local_points=dict_data["local_points"],
            local_bboxes=dict_data["local_bboxes"],
            incident_angles=dict_data["incident_angles"],
        )

    def get_ray_keep_inputs_with_lidar_center(self, lidar_center: np.ndarray):
        """
        Computes inputs for raydrop given the lidar center.

        Parameters:
        - lidar_center: numpy.ndarray of shape (3,) representing the LiDAR
          sensor's position.

        Returns:
        - ray_drop_input: (N, 5) array consisting of direction
          (dir_x, dir_y, dir_z), distance, and incident angle for each point.
        """
        # Express points in lidar-centered coordinates.
        shifted_points = self.local_points - lidar_center
        # Rotate 90 degrees around the z-axis to align Waymo to nuScenes.
        z_rotation_90 = np.array(
            [[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
        )
        shifted_points = ct.transform.transform_points(
            shifted_points, z_rotation_90
        )
        # Per-point range and unit direction.
        ranges = np.linalg.norm(shifted_points, axis=1)
        unit_dirs = shifted_points / ranges[:, None]
        dir_x, dir_y, dir_z = unit_dirs.T
        # Assemble (N, 5): direction, distance, stored incident angle.
        return np.column_stack(
            (dir_x, dir_y, dir_z, ranges, self.incident_angles)
        )

    def get_ray_keep_inputs(self, lidar_to_vehicle_pose):
        """
        Returns (N, 5) array.
        dir_x, dir_y, dir_z, dist, incident_angle
        """
        lidar_center = ct.convert.pose_to_C(lidar_to_vehicle_pose)
        return self.get_ray_keep_inputs_with_lidar_center(lidar_center)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/containers/sim_scene.py | Python | from dataclasses import dataclass, field
from pathlib import Path
from typing import List
import numpy as np
from lit.containers.base_container import BaseContainer
from lit.containers.sim_frame import SimFrame
@dataclass
class SimScene(BaseContainer):
    """
    Storing data for a simulated scene.
    """

    # All simulated frames belonging to this scene.
    sim_frames: List[SimFrame] = field(default_factory=list)

    def __post_init__(self):
        super().__post_init__()
        # Normalize an explicit None into an empty list.
        if self.sim_frames is None:
            self.sim_frames = []

    def to_dict(self):
        """Serialize the scene (recursively) to a plain dict."""
        frame_dicts = [frame.to_dict() for frame in self.sim_frames]
        return {"sim_frames": frame_dicts}

    @classmethod
    def from_dict(cls, dict_data: dict):
        """Rebuild a SimScene from a dict produced by to_dict()."""
        frames = [SimFrame.from_dict(d) for d in dict_data["sim_frames"]]
        return cls(sim_frames=frames)

    def append_frame(self, sim_frame: SimFrame):
        """Append one SimFrame; any other type is rejected."""
        if not isinstance(sim_frame, SimFrame):
            raise ValueError(f"sim_frame must be SimFrame, got {type(sim_frame)}")
        self.sim_frames.append(sim_frame)

    def save_sim_frames(self, sim_scene_dir: Path):
        """
        Save each frame's points in local coordinates in e.g. 0000.npz.
        We save as .npz rather than .pkl for compatibility with OpenPCDet.

        Example directory structure:
        sim_nuscenes/               # <- sim_dir
        ├── scene_name_0000/        # <- sim_scene_dir
        │   ├── 0000.npz
        │   ├── 0001.npz
        │   ├── ...
        ├── scene_name_0001/        # <- sim_scene_dir
        │   ├── 0000.npz
        │   ├── 0001.npz
        │   ├── ...
        ├── ...
        """
        sim_scene_dir.mkdir(parents=True, exist_ok=True)
        for frame in self.sim_frames:
            target_path = sim_scene_dir / f"{frame.frame_index:04d}.npz"
            np.savez_compressed(target_path, **frame.to_dict())

    @classmethod
    def load_sim_frames(cls, sim_scene_dir: Path):
        """
        See the directory structure in SimScene.save_sim_frames().
        """
        sim_scene_dir = Path(sim_scene_dir)
        if not sim_scene_dir.is_dir():
            raise FileNotFoundError(f"Directory not found: {sim_scene_dir}")
        npz_paths = sorted(sim_scene_dir.glob("*.npz"), key=lambda p: p.stem)
        if not npz_paths:
            raise FileNotFoundError(f"No .npz files found in {sim_scene_dir}")
        loaded_frames = []
        for npz_path in npz_paths:
            payload = np.load(npz_path, allow_pickle=True)
            frame = SimFrame.from_dict(payload)
            # The file stem encodes the expected frame index; verify it.
            expected_index = int(npz_path.stem)
            if frame.frame_index != expected_index:
                raise ValueError(
                    f"Frame index mismatch for {npz_path}: "
                    f"expected {expected_index}, got {frame.frame_index}"
                )
            loaded_frames.append(frame)
        # Frame indices must be strictly increasing.
        indices = [frame.frame_index for frame in loaded_frames]
        if not all(a < b for a, b in zip(indices, indices[1:])):
            raise ValueError("Frame indices are not sorted in ascending order.")
        return cls(sim_frames=loaded_frames)

    def save(self, path: Path, verbose=False):
        raise RuntimeError("Use SimScene.save_sim_frames() instead.")

    @classmethod
    def load(cls, path: Path):
        raise RuntimeError("Use SimScene.load_sim_frames() instead.")

    def __len__(self):
        return len(self.sim_frames)

    def __getitem__(self, idx):
        return self.sim_frames[idx]

    def get_frame_by_frame_index(self, frame_index: int):
        """
        Get a frame by frame_index, as self.sim_frames may not be sequential.
        """
        for candidate in self.sim_frames:
            if candidate.frame_index == frame_index:
                return candidate
        raise ValueError(f"Cannot find frame with frame_index {frame_index}.")
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/copy_paste_utils.py | Python | from pathlib import Path
import camtools as ct
import numpy as np
import open3d as o3d
import torch
from lit.containers.fg_scene import FGScene
from lit.lidar import KITTILidarIntrinsics, Lidar, NuScenesLidarIntrinsics
from lit.path_utils import get_lit_paths
from lit.raycast_engine_gpu import RaycastEngineGPU
from lit.recon_utils import (
bbox_to_lineset,
get_indices_inside_bbox,
get_indices_outside_bboxes,
)
# Module-level singleton so the raycast engine is constructed once and
# reused across copy-paste calls.
# NOTE(review): instantiated at import time — presumably requires a GPU;
# confirm the import-time side effect is acceptable for all importers.
g_raycast_engine = RaycastEngineGPU()
def recompute_voxels(
    dataloader: torch.utils.data.DataLoader,
    batch_dict: dict,
) -> dict:
    """
    Given batch_dict, recompute voxels based on the points in batch_dict.

    This is useful if the points in batch_dict have been modified, e.g. by
    copy-pasting points from another dataset.

    This is essentially extracted steps in DatasetTemplate.prepare_data(),
    which is called by KittiDataset.__getitem__() and its friends.

    Args:
        dataloader: The dataloader object used to run feature encoder and
            data processor.
        batch_dict: dict. Must at least contain "points".
            - The batch_dict will be modified in-place!
            - Batch size must be 1, i.e. "points" must be (N, 4) and
              points[:, 0] must be all 0.
            - Must be in numpy. Call recompute_voxels before passing to GPU.

    Returns:
        batch_dict: dict. The modified batch_dict, with "voxels",
        "voxel_coords" and "voxel_num_points" recomputed, and the batch
        index column (all zeros) restored on "points" and "voxel_coords".

    Raises:
        ValueError: If "points" is not (N, 4) or its batch column is nonzero.
    """
    # Check the dimensions of points: (N, 4) with a leading batch column.
    if batch_dict["points"].ndim != 2 or batch_dict["points"].shape[1] != 4:
        raise ValueError("points must be (N, 4)")
    # Check that batch size is 1: every batch index must be 0.
    if not np.allclose(batch_dict["points"][:, 0], 0):
        raise ValueError("points[:, 0] must be all 0")
    # 1. Pop the keys that will be re-computed (tolerate missing keys so a
    #    batch_dict that never had voxels can also be processed).
    for key in ("voxels", "voxel_coords", "voxel_num_points"):
        batch_dict.pop(key, None)
    # 2. Strip the batch dimension.
    batch_dict["points"] = batch_dict["points"][:, 1:]
    # 3. Re-run feature encoder and data processor.
    batch_dict = dataloader.dataset.point_feature_encoder.forward(batch_dict)
    batch_dict = dataloader.dataset.data_processor.forward(batch_dict)
    # 4. Add back the batch dimension (batch index 0) on points/voxel_coords.
    batch_dict["points"] = np.concatenate(
        [np.zeros((len(batch_dict["points"]), 1)), batch_dict["points"]], axis=1
    )
    batch_dict["voxel_coords"] = np.concatenate(
        [np.zeros((len(batch_dict["voxel_coords"]), 1)), batch_dict["voxel_coords"]],
        axis=1,
    )
    return batch_dict
def compute_bbox_pseudo_pose(bbox: np.ndarray) -> np.ndarray:
    """
    Compute the bbox's pseudo pose: the 4x4 transform that maps the
    axis-aligned box centered at [0, 0, 0] onto the current bbox.
    """
    yaw = bbox[6]
    cos_t, sin_t = np.cos(yaw), np.sin(yaw)
    pseudo_pose = np.eye(4)
    # Rotation about +z by the bbox heading angle.
    pseudo_pose[:3, :3] = np.array(
        [
            [cos_t, -sin_t, 0.0],
            [sin_t, cos_t, 0.0],
            [0.0, 0.0, 1.0],
        ]
    )
    # Translation to the bbox center.
    pseudo_pose[:3, 3] = bbox[:3]
    return pseudo_pose
def copy_paste_nuscenes_to_kitti(
    dataloader: torch.utils.data.DataLoader,
    batch_dict: dict,
    src_domain: str,
    dst_style: str,
    dst_bbox_size: str,
):
    """
    Replace points in batch_dict with points from nuScenes dataset.

    Args:
        dataloader: The dataloader object used to run feature encoder and
            data processor.
        batch_dict: dict. Must at least contain "points".
            - The batch_dict will be modified in-place!
            - Must have "points".
            - Batch size must be 1, i.e. "points" must be (N, 4) and
              points[:, 0] must be all 0.
            - Must be in numpy.
        src_domain: str. {"kitti_pcd", "nuscenes_mesh", "nuscenes_pcd"}
            - "nuscenes_mesh": Use points from the reconstructed nuScenes mesh.
            - "nuscenes_pcd": Use points from the raw fg nuScenes point cloud.
        dst_style: str. {"kitti", "nuscenes"}
            - "kitti": Use KITTI lidar parameters.
            - "nuscenes": Use nuScenes lidar parameters.
        dst_bbox_size: str. {"kitti", "nuscenes"}
            - "kitti": Keep KITTI bbox size.
            - "nuscenes": Replace bbox to match nuScenes bbox size.

    Return:
        batch_dict: dict. The modified batch_dict.
    """
    # Sanity checks.
    if src_domain not in ["kitti_pcd", "nuscenes_mesh", "nuscenes_pcd"]:
        raise ValueError(f"Invalid src_domain: {src_domain}")
    if dst_style not in ["kitti", "nuscenes"]:
        raise ValueError(f"Invalid dst_style: {dst_style}")
    if dst_bbox_size not in ["kitti", "nuscenes"]:
        raise ValueError(f"Invalid dst_bbox_size: {dst_bbox_size}")
    if batch_dict["points"].ndim != 2 or batch_dict["points"].shape[1] != 4:
        raise ValueError("points must be (N, 4)")
    if not np.allclose(batch_dict["points"][:, 0], 0):
        raise ValueError("points[:, 0] must be all 0")
    # Strip the batch dimension.
    points = batch_dict["points"][:, 1:4]
    kitti_bboxes = batch_dict["gt_boxes"][0]  # (1, N, 8) -> (N, 8)
    # Copy and paste.
    lit_paths = get_lit_paths(
        data_version="v1",
        data_domain="nuscenes",
    )
    num_points_before = len(points)
    # NOTE(review): this assert rejects "nuscenes_pcd" even though the
    # validation above accepts it — presumably unimplemented; confirm.
    assert src_domain in {"kitti_pcd", "nuscenes_mesh"}
    fg_scene_paths = sorted(list(lit_paths.fg_dir.glob("*.pkl")))
    dst_bboxes = []
    # One cut-and-paste pass per ground-truth box.
    for kitti_bbox in kitti_bboxes:
        # Randomly pick an fg_scene, then randomly pick a fg_object (mesh).
        fg_scene_path = np.random.choice(fg_scene_paths)
        fg_scene = FGScene.load(fg_scene_path)
        fb_object = fg_scene[np.random.randint(len(fg_scene))]
        # Get vertices and triangles.
        # The vertices's bbox shall be centered according to fg_object.
        vertices = np.copy(fb_object.mesh_vertices)
        triangles = np.copy(fb_object.mesh_triangles)
        if not np.allclose(
            (np.min(vertices, axis=0) + np.max(vertices, axis=0)) / 2,
            [0, 0, 0],
            atol=1e-5,
            rtol=1e-5,
        ):
            raise ValueError("Vertices must be centered at [0, 0, 0].")
        # Lidar pose is only used if the src_domain is nuscenes_mesh and visualization.
        lidar_pose = np.eye(4)
        lidar_pose[:3, 3] = np.array(dataloader.dataset.dataset_cfg["SHIFT_COOR"])
        # Compute dst_bbox, and scale vertices to match dst_bbox size.
        # bbox layout: (x, y, z, dx, dy, dz, heading, label).
        _, _, _, kitti_dx, kitti_dy, kitti_dz, _, _ = kitti_bbox
        mesh_dx, mesh_dy, mesh_dz = np.max(vertices, axis=0) - np.min(vertices, axis=0)
        if dst_bbox_size == "kitti":
            dst_bbox = np.copy(kitti_bbox)
            # Scale the mesh so its extents exactly fill the KITTI bbox.
            vertices = vertices * np.array(
                [
                    kitti_dx / mesh_dx,
                    kitti_dy / mesh_dy,
                    kitti_dz / mesh_dz,
                ]
            )
        elif dst_bbox_size == "nuscenes":
            dst_bbox = np.copy(kitti_bbox)
            # Change the extents of the dst_bbox to match the mesh.
            dst_bbox[3:6] = np.array([mesh_dx, mesh_dy, mesh_dz])
            # Raise the center of the dst_bbox, such that its "ground plane"
            # still touches the ground.
            dst_bbox[2] = kitti_bbox[2] - kitti_dz / 2 + mesh_dz / 2
        else:
            raise ValueError(f"Invalid dst_bbox_size: {dst_bbox_size}")
        dst_bboxes.append(dst_bbox)
        # Compute new points.
        if src_domain == "kitti_pcd":
            if dst_style != "kitti":
                raise ValueError("Only dst_style=kitti is supported.")
            if dst_bbox_size != "nuscenes":
                raise ValueError("Only dst_bbox_size=nuscenes is supported.")
            inside_indices = get_indices_inside_bbox(points, kitti_bbox)
            if len(inside_indices) == 0:
                new_points = np.zeros((0, 3), dtype=np.float32)
            else:
                inside_points = points[inside_indices]
                # Center inside_points.
                pseudo_pose_kitti = compute_bbox_pseudo_pose(kitti_bbox)
                inside_points = ct.transform.transform_points(
                    inside_points, np.linalg.inv(pseudo_pose_kitti)
                )
                # Scale inside_points, while keeping it centered.
                inside_points = inside_points * np.array(
                    [
                        mesh_dx / kitti_dx,
                        mesh_dy / kitti_dy,
                        mesh_dz / kitti_dz,
                    ]
                )
                # Put the scaled inside_points back to the dst pose.
                pseudo_pose_dst = compute_bbox_pseudo_pose(dst_bbox)
                inside_points = ct.transform.transform_points(
                    inside_points, pseudo_pose_dst
                )
                new_points = inside_points
        elif src_domain == "nuscenes_mesh":
            # Compute the pseudo pose, which is the pose of transforming the axis
            # aligned box centered at [0, 0, 0] to the bbox.
            pseudo_pose = compute_bbox_pseudo_pose(dst_bbox)
            # Put the vertices to the bbox.
            vertices = ct.transform.transform_points(vertices, pseudo_pose)
            # Raycast the mesh to get the new points.
            if dst_style == "kitti":
                lidar_intrinsics = KITTILidarIntrinsics()
            elif dst_style == "nuscenes":
                lidar_intrinsics = NuScenesLidarIntrinsics()
            else:
                raise ValueError(f"Invalid dst_style: {dst_style}")
            lidar = Lidar(intrinsics=lidar_intrinsics, pose=lidar_pose)
            mesh = o3d.geometry.TriangleMesh()
            mesh.vertices = o3d.utility.Vector3dVector(vertices)
            mesh.triangles = o3d.utility.Vector3iVector(triangles)
            mesh.compute_vertex_normals()
            try:
                new_points, _ = g_raycast_engine.lidar_intersect_mesh(
                    lidar=lidar, mesh=mesh
                )
            except RuntimeError as e:
                # Dump the failing inputs for offline debugging, then drop
                # into the debugger (training is intentionally halted here).
                import pickle

                mesh_vertices = np.asarray(mesh.vertices)
                mesh_triangles = np.asarray(mesh.triangles)
                data_dict = {
                    "lidar": lidar,
                    "mesh_vertices": mesh_vertices,
                    "mesh_triangles": mesh_triangles,
                }
                pkl_path = Path("raycast_error_data.pkl")
                with open(pkl_path, "wb") as f:
                    pickle.dump(data_dict, f)
                print(f"RuntimeError: {e}")
                print(f"Saved debug data to: {pkl_path}")
                import ipdb

                ipdb.set_trace()
        else:
            raise ValueError(f"Unimplemented src_domain: {src_domain}")
        # Cut: Remove points inside KITTI bbox.
        outside_indices = get_indices_outside_bboxes(points, kitti_bbox[None])
        points = points[outside_indices]
        # Copy-Paste: Paste the new_points into points.
        points = np.concatenate([points, new_points], axis=0)
        # Visualize (debug switch, off by default).
        visualize = False
        if visualize:
            pcd = o3d.geometry.PointCloud()
            pcd.points = o3d.utility.Vector3dVector(points)
            kitti_bbox_ls = bbox_to_lineset(kitti_bbox).paint_uniform_color([0, 0, 1])
            dst_bbox_ls = bbox_to_lineset(dst_bbox).paint_uniform_color([1, 0, 0])
            lidar_frame = ct.camera.create_camera_frustums(
                Ks=None, Ts=[np.linalg.inv(lidar_pose)], size=1
            )
            axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2)
            o3d.visualization.draw_geometries(
                [pcd, kitti_bbox_ls, dst_bbox_ls, lidar_frame, axes]
            )
    num_points_after = len(points)
    print(f"Copy-pasted: {num_points_before} -> {num_points_after} points")
    # Update batch_dict.
    points = np.concatenate([np.zeros((len(points), 1)), points], axis=1)
    batch_dict["points"] = points
    dst_bboxes = np.array(dst_bboxes).reshape((-1, 8))  # (N, 8), N can be 0
    if dst_bboxes[None].shape != batch_dict["gt_boxes"].shape:
        raise ValueError(f"{dst_bboxes[None].shape} != {batch_dict['gt_boxes'].shape}")
    batch_dict["gt_boxes"] = dst_bboxes[None]
    # Recompute voxel features.
    batch_dict = recompute_voxels(dataloader=dataloader, batch_dict=batch_dict)
    return batch_dict
def main():
    """Entry point placeholder; nothing to run standalone."""
    pass


if __name__ == "__main__":
    main()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/ext/__init__.py | Python | # Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
from pathlib import Path
from torch.utils.cpp_extension import load
def p(rel_path):
    """Resolve *rel_path* against this package's directory, as a string."""
    return str(Path(__file__).parent / rel_path)
# JIT-compile and load the C++ extension at import time
# (torch.utils.cpp_extension.load caches the build after the first run).
lit_ext = load(
    name="lit_ext",
    sources=[
        p("lit_ext/bind.cpp"),
        p("lit_ext/lit_ext.cpp"),
    ],
    extra_cflags=["-O2"],
    # NOTE(review): both sources are plain C++; the cuda flags only apply if
    # .cu sources are ever added — confirm they are intentional.
    extra_cuda_cflags=["-O2", "-Xcompiler -fno-gnu-unique"],
    verbose=True,
)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/ext/lit_ext/bind.cpp | C++ | #include <torch/extension.h>
#include "lit_ext.h"
using namespace pybind11::literals;
// Register the native functions on the lit_ext Python module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("split_mesh_by_cc", &split_mesh_by_cc,
          "Split mesh into meshes by connected components.", "vertices"_a,
          "triangles"_a);
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/ext/lit_ext/lit_ext.cpp | C++ | #include <torch/extension.h>
#include <iostream>
#include <queue>
#include <unordered_map>
#include <unordered_set>
#include <vector>
/// Extract the sub-mesh referenced by `vertex_indices`.
///
/// Returns (new_vertices, new_triangles): `new_vertices` holds the selected
/// vertices in first-occurrence order (duplicates dropped); `new_triangles`
/// holds every input triangle whose three corners are all selected,
/// re-indexed into the compacted vertex numbering.
std::pair<torch::Tensor, torch::Tensor> select_by_index(
    const torch::Tensor &vertices,
    const torch::Tensor &triangles,
    const std::vector<int64_t> &vertex_indices) {
    // Map old vertex index -> new compacted index, de-duplicating while
    // preserving first-occurrence order.
    std::unordered_map<int64_t, int64_t> new_vertex_map;
    std::vector<int64_t> new_vertex_indices;
    for (size_t i = 0; i < vertex_indices.size(); ++i) {
        int64_t old_index = vertex_indices[i];
        if (new_vertex_map.find(old_index) == new_vertex_map.end()) {
            int64_t new_index = new_vertex_indices.size();
            new_vertex_indices.push_back(old_index);
            new_vertex_map[old_index] = new_index;
        }
    }
    // Gather the selected vertex rows.
    torch::Tensor new_vertices = vertices.index_select(
        0, torch::tensor(new_vertex_indices, torch::kInt64));
    // Keep only triangles whose three corners are all in the selection.
    std::vector<int64_t> new_triangle_indices;
    auto triangles_accessor = triangles.accessor<int64_t, 2>();
    for (int64_t i = 0; i < triangles.size(0); ++i) {
        int64_t v0 = triangles_accessor[i][0];
        int64_t v1 = triangles_accessor[i][1];
        int64_t v2 = triangles_accessor[i][2];
        if (new_vertex_map.find(v0) != new_vertex_map.end() &&
            new_vertex_map.find(v1) != new_vertex_map.end() &&
            new_vertex_map.find(v2) != new_vertex_map.end()) {
            new_triangle_indices.push_back(i);
        }
    }
    // Re-index the kept triangles into the new vertex numbering.
    torch::Tensor new_triangles =
        torch::empty({static_cast<int64_t>(new_triangle_indices.size()), 3},
                     triangles.options());
    auto new_triangles_accessor = new_triangles.accessor<int64_t, 2>();
    for (size_t i = 0; i < new_triangle_indices.size(); ++i) {
        int64_t idx = new_triangle_indices[i];
        new_triangles_accessor[i][0] =
            new_vertex_map[triangles_accessor[idx][0]];
        new_triangles_accessor[i][1] =
            new_vertex_map[triangles_accessor[idx][1]];
        new_triangles_accessor[i][2] =
            new_vertex_map[triangles_accessor[idx][2]];
    }
    return std::make_pair(new_vertices, new_triangles);
}
/// Split a mesh into sub-meshes, one per connected component of its
/// vertex-adjacency graph (vertices connected by shared triangle edges).
/// NOTE(review): a vertex referenced by no triangle forms its own
/// single-vertex component with an empty triangle tensor — confirm callers
/// expect that. AT_ASSERT is deprecated in newer PyTorch in favor of
/// TORCH_CHECK — confirm the targeted version.
std::vector<std::pair<torch::Tensor, torch::Tensor>> split_mesh_by_cc(
    const torch::Tensor &vertices, const torch::Tensor &triangles) {
    AT_ASSERT(vertices.dtype() == torch::kFloat32, "Vertices must be float32");
    AT_ASSERT(triangles.dtype() == torch::kInt64, "Triangles must be int64");
    AT_ASSERT(vertices.dim() == 2 && vertices.size(1) == 3,
              "Vertices must have a shape of (N, 3)");
    AT_ASSERT(triangles.dim() == 2 && triangles.size(1) == 3,
              "Triangles must have a shape of (M, 3)");
    // Ensure tensors are on CPU
    auto cpu_vertices = vertices.to(torch::kCPU);
    auto cpu_triangles = triangles.to(torch::kCPU);
    auto vertices_accessor = cpu_vertices.accessor<float, 2>();
    auto triangles_accessor = cpu_triangles.accessor<int64_t, 2>();
    int64_t n_vertices = cpu_vertices.size(0);
    int64_t n_triangles = cpu_triangles.size(0);
    // Build adjacency list: one directed edge per triangle side suffices
    // for connectivity (v0->v1, v1->v2, v2->v0 links all three corners).
    std::vector<std::unordered_set<int64_t>> adjacency(n_vertices);
    for (int64_t i = 0; i < n_triangles; ++i) {
        adjacency[triangles_accessor[i][0]].insert(triangles_accessor[i][1]);
        adjacency[triangles_accessor[i][1]].insert(triangles_accessor[i][2]);
        adjacency[triangles_accessor[i][2]].insert(triangles_accessor[i][0]);
    }
    // Find connected components using BFS
    std::vector<bool> visited(n_vertices, false);
    std::vector<std::vector<int64_t>> components;
    for (int64_t i = 0; i < n_vertices; ++i) {
        if (!visited[i]) {
            std::vector<int64_t> component;
            std::queue<int64_t> queue;
            queue.push(i);
            visited[i] = true;
            while (!queue.empty()) {
                int64_t v = queue.front();
                queue.pop();
                component.push_back(v);
                for (auto adj_v : adjacency[v]) {
                    if (!visited[adj_v]) {
                        queue.push(adj_v);
                        visited[adj_v] = true;
                    }
                }
            }
            components.push_back(component);
        }
    }
    // Split meshes: extract one sub-mesh per component.
    std::vector<std::pair<torch::Tensor, torch::Tensor>> split_meshes;
    for (const auto &comp : components) {
        auto split_mesh = select_by_index(cpu_vertices, cpu_triangles, comp);
        split_meshes.push_back(split_mesh);
    }
    return split_meshes;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/ext/lit_ext/lit_ext.h | C/C++ Header | #pragma once
#include <torch/extension.h>
/// @brief Split a mesh into multiple meshes based on connected components.
///
/// @param vertices Tensor of vertices. Shape: (num_vertices, 3).
/// @param triangles Tensor of triangle indices. Each row represents a triangle,
/// with each element being an index into the vertices tensor. Shape:
/// (num_triangles, 3).
/// @return A vector of pairs, where each pair contains the vertices and
/// triangles tensors of a connected component.
std::vector<std::pair<torch::Tensor, torch::Tensor>> split_mesh_by_cc(
const torch::Tensor &vertices, const torch::Tensor &triangles);
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/complete.py | Python | import argparse
import copy
import json
import shutil
import time
from pathlib import Path
import camtools as ct
import numpy as np
import open3d as o3d
import skimage.measure
import torch
import trimesh
from pycg import vis
from torch import nn, optim
import lit.extern.deepsdf.deep_sdf as deep_sdf
_script_dir = Path(__file__).resolve().absolute().parent
_deepsdf_root = _script_dir
def load_sv2_point_cloud(synset_id: str, object_id: str, data_root: Path) -> np.ndarray:
    """Load ShapeNetV2 surface-sample points for one object.

    Reads ``data_root/SurfaceSamples/ShapeNetV2/<synset_id>/<object_id>.ply``
    with Open3D and returns its vertices as an (N, 3) array.
    """
    surface_samples_path = (
        data_root / "SurfaceSamples" / "ShapeNetV2" / synset_id / f"{object_id}.ply"
    )
    mesh = o3d.io.read_triangle_mesh(str(surface_samples_path))
    points = np.asarray(mesh.vertices)
    return points
def _load_mean_latent(train_latent_dir):
    """Average all training latent codes stored under *train_latent_dir*.

    Every ``*.pth`` file is expected to hold a tensor reshapeable to
    (1, 256). Returns the element-wise mean as a (1, 256) float32 CUDA
    tensor.
    """
    latent_dir = Path(train_latent_dir)
    with torch.no_grad():
        latent_paths = sorted(latent_dir.glob("*.pth"))
        stacked = np.concatenate(
            [
                torch.load(str(path)).reshape((1, 256)).cpu().numpy()
                for path in latent_paths
            ],
            axis=0,
        )
        mean_np = stacked.mean(axis=0, keepdims=True)
        return torch.tensor(mean_np, dtype=torch.float32).cuda()
class DeepSDFEngine:
    def __init__(
        self,
        specs_path,
        ckpt_path,
        mean_latent_path,
        iters=500,
        lr=1e-4,
        num_samples=2048,
        voxel_resolution=256,
        max_batch=32**3,
        l2reg=True,
        clamp_dist=0.1,
        verbose=False,
    ):
        """
        Initialize the DeepSDF reconstruction engine.

        Args:
            specs_path: Path to the experiment specs.json.
            ckpt_path: Path to the decoder checkpoint (.pth).
            mean_latent_path: Path to the precomputed mean latent (.pth).
            iters: Latent-optimization iterations.
            lr: Adam learning rate for latent optimization.
            num_samples: Points sampled per optimization iteration.
            voxel_resolution: Marching-cubes grid resolution per axis.
            max_batch: Max SDF queries per decoder forward pass.
            l2reg: If True, add L2 regularization on the latent code.
            clamp_dist: SDF clamping distance used in the loss.
            verbose: If True, print optimization progress.
        """
        self.specs_path = Path(specs_path)
        self.ckpt_path = Path(ckpt_path)
        self.mean_latent_path = Path(mean_latent_path)
        self.iters = iters
        self.lr = lr
        self.num_samples = num_samples
        self.voxel_resolution = voxel_resolution
        self.max_batch = max_batch
        self.l2reg = l2reg
        self.clamp_dist = clamp_dist
        self.verbose = verbose
        # Decoder is loaded eagerly, in eval mode, on CUDA.
        self.decoder = self._load_network()
        # NOTE(review): torch.load without map_location assumes the tensor's
        # original device (CUDA) is available -- confirm for CPU-only runs.
        self.mean_latent = torch.load(self.mean_latent_path)
@staticmethod
def _compute_normalization(world_points, buffer=1.03):
"""
Normalize points, such that:
1. The center (avg of min max bounds, not centroid) of the object is at
the origin.
2. Max distance of a point from the origin is (1 * buffer).
Normalization does not change axes convention. Both ShapeNet axes and
Canonical axes points can be normalized.
"""
if len(world_points) == 0:
raise ValueError("Points array is empty.")
min_vals = np.min(world_points, axis=0)
max_vals = np.max(world_points, axis=0)
center = (min_vals + max_vals) / 2.0
offset = -center
centered_points = world_points - center
max_distance = np.max(np.linalg.norm(centered_points, axis=1))
max_distance *= buffer
scale = 1.0 / max_distance
# Handles nan scale (e.g. when len(world_points) == 1)
if not np.isfinite(scale):
scale = 1.0
return offset, scale
@staticmethod
def _normalize_points(world_points, offset, scale):
return (world_points + offset) * scale
@staticmethod
def _denormalize_points(deepsdf_points, offset, scale):
return deepsdf_points / scale - offset
    @staticmethod
    def _rotate_canonical_to_shapenet(points: np.ndarray):
        """
        Rotate points from our (Waymo) canonical convention to the ShapeNet
        convention.

        Args:
            points: (N, 3) array.

        Raises:
            ValueError: if *points* is not of shape (N, 3).
        """
        if points.ndim != 2 or points.shape[1] != 3:
            raise ValueError("Input points array must have shape (N, 3)")
        # Pure rotation; this R is the inverse (transpose) of the matrix in
        # _rotate_shapenet_to_canonical, so the two methods round-trip.
        R = np.array(
            [
                [0, -1, 0],
                [0, 0, 1],
                [-1, 0, 0],
            ]
        )
        # Embed into a 4x4 homogeneous transform (no translation).
        transform = np.eye(4)
        transform[:3, :3] = R
        return ct.transform.transform_points(points, transform)
    @staticmethod
    def _rotate_shapenet_to_canonical(points: np.ndarray):
        """
        Rotate points from ShapeNet convention back to our (Waymo) canonical
        convention.

        Args:
            points: (N, 3) array.

        Raises:
            ValueError: if *points* is not of shape (N, 3).
        """
        if points.ndim != 2 or points.shape[1] != 3:
            raise ValueError("Input points array must have shape (N, 3)")
        # Inverse (transpose) of the rotation in _rotate_canonical_to_shapenet.
        R = np.array(
            [
                [0, 0, -1],
                [-1, 0, 0],
                [0, 1, 0],
            ]
        )
        # Embed into a 4x4 homogeneous transform (no translation).
        transform = np.eye(4)
        transform[:3, :3] = R
        return ct.transform.transform_points(points, transform)
    def _load_network(self):
        """
        Build the decoder named in specs.json and load checkpoint weights.

        Returns:
            The decoder module on CUDA, unwrapped from DataParallel, in
            eval mode.
        """
        specs = json.load(open(self.specs_path))
        # Resolve the architecture module dynamically from the specs.
        arch = __import__(
            "lit.extern.deepsdf.networks." + specs["NetworkArch"], fromlist=["Decoder"]
        )
        latent_size = specs["CodeLength"]
        decoder = arch.Decoder(latent_size, **specs["NetworkSpecs"]).cuda()
        # Checkpoint keys presumably carry the "module." DataParallel prefix
        # (load succeeds only after wrapping) -- wrap, load, then unwrap.
        decoder = nn.DataParallel(decoder)
        checkpoint = torch.load(self.ckpt_path)
        decoder.load_state_dict(checkpoint["model_state_dict"])
        decoder = decoder.module
        decoder.eval()
        return decoder
    def _optimize_latent(self, points):
        """
        Optimizes the latent code to fit the given points. The points are assumed
        to be normalized and in ShapeNet axes.

        Starts from the mean latent and minimizes the L1 loss between the
        clamped predicted SDF at the surface points and zero.

        Return:
            (1, 256) latent code (still has requires_grad=True).
        """
        points = torch.tensor(points, dtype=torch.float32, device="cuda")
        latent = torch.clone(self.mean_latent).cuda().requires_grad_(True)
        optimizer = optim.Adam([latent], lr=self.lr)
        loss_l1 = nn.L1Loss()
        # Cannot draw more unique samples than there are points.
        actual_num_samples = min(len(points), self.num_samples)
        last_log_time = time.time()
        for iteration in range(self.iters):
            optimizer.zero_grad()
            # Fresh random subset (without replacement) each iteration.
            indices = torch.randperm(len(points))[:actual_num_samples]
            sampled_points = points[indices]
            latent_inputs = latent.expand(actual_num_samples, -1)
            inputs = torch.cat([latent_inputs, sampled_points], dim=1)
            pred_sdf = self.decoder(inputs)
            # Clamp predictions; surface target SDF is zero.
            pred_sdf = torch.clamp(pred_sdf, -self.clamp_dist, self.clamp_dist)
            sdf_target = torch.zeros_like(pred_sdf)
            loss = loss_l1(pred_sdf, sdf_target)
            if self.l2reg:
                # L2 prior keeping the code close to the latent-space origin.
                loss += 1e-4 * torch.mean(latent.pow(2))
            loss.backward()
            optimizer.step()
            if self.verbose and iteration % 100 == 0:
                current_time = time.time()
                elapsed_time = current_time - last_log_time
                last_log_time = current_time
                print(
                    f"[Iter {iteration}] "
                    f"Loss: {loss.item():.04f}, "
                    f"Elapsed: {elapsed_time:.04f} sec"
                )
        return latent
    def _optimize_mv_latents(self, mv_points):
        """
        Optimizes multiple latent codes to fit given sets of multi-view points
        independently by minimizing the L1 loss between the predicted SDF values
        and the target SDF values (zero at the surface). All B codes are
        optimized jointly through one batched decoder forward per iteration.

        Each point cloud in `mv_points` is sampled or duplicated to have
        exactly `self.num_samples` points within each iteration to ensure
        diversity in the sampled points over iterations.

        Args:
            mv_points (List[np.ndarray]): A list of numpy arrays, each of
                shape (N_i, 3), where N_i is the number of points in the i-th
                point cloud, and each point is a 3D coordinate (x, y, z). The
                points are assumed to be normalized and aligned with the
                ShapeNet coordinate axes.

        Returns:
            torch.Tensor: The optimized latent vectors of shape (B, L), where L
                is the latent size, referred to as mv_latents.
        """
        B = len(mv_points)
        device = "cuda"
        # Convert mv_points to a list of tensors on GPU
        mv_points_tensors = [
            torch.tensor(points, dtype=torch.float32, device=device)
            for points in mv_points
        ]
        # Replicate mean_latent for each item in the batch
        mv_latents = self.mean_latent.repeat(B, 1).requires_grad_(True)
        optimizer = optim.Adam([mv_latents], lr=self.lr)
        # reduction="none" keeps per-sample losses so per-cloud means can be
        # reported below.
        loss_l1 = nn.L1Loss(reduction="none")
        last_log_time = time.time()
        for iteration in range(self.iters):
            optimizer.zero_grad()
            sampled_points_tensors = []
            for points in mv_points_tensors:
                if len(points) < self.num_samples:
                    # Sample with replacement if there are not enough points
                    indices = torch.randint(
                        len(points), (self.num_samples,), device=device
                    )
                else:
                    # Sample without replacement if there are enough points
                    indices = torch.randperm(len(points), device=device)[
                        : self.num_samples
                    ]
                sampled_points = points[indices]
                sampled_points_tensors.append(sampled_points)
            # Stack all sampled points into a single tensor
            mv_points_tensor = torch.stack(sampled_points_tensors, dim=0)
            latent_inputs = mv_latents.unsqueeze(1).expand(-1, self.num_samples, -1)
            # Flatten (B, S, L+3) -> (B*S, L+3) for a single decoder forward.
            inputs = torch.cat([latent_inputs, mv_points_tensor], dim=2).view(
                -1, self.mean_latent.size(1) + 3
            )
            pred_sdf = self.decoder(inputs).view(B, self.num_samples, -1)
            pred_sdf = torch.clamp(pred_sdf, -self.clamp_dist, self.clamp_dist)
            sdf_target = torch.zeros_like(pred_sdf)
            loss = loss_l1(pred_sdf, sdf_target).mean(dim=2)
            total_loss = loss.mean()
            individual_losses = loss.mean(dim=1)
            total_loss.backward()
            optimizer.step()
            if self.verbose and iteration % 100 == 0:
                current_time = time.time()
                elapsed_time = current_time - last_log_time
                last_log_time = current_time
                individual_loss_str = ", ".join([f"{l:.4f}" for l in individual_losses])
                print(
                    f"[Iter {iteration}] Avg Loss: {total_loss:.4f}, Individual Losses: {individual_loss_str}, Elapsed: {elapsed_time:.2f} sec"
                )
        return mv_latents
def points_to_mesh(
self,
points: np.ndarray,
do_normalize: bool,
fallback_to_mean_latent: bool = True,
) -> o3d.geometry.TriangleMesh:
"""
Converts ShapeNet axes points to a ShapeNet axes mesh.
Args:
points: (N, 3) array of points in ShapeNet axes.
do_normalize: bool, if True, normalize the points before processing.
Returns:
mesh: Open3D mesh object in ShapeNet axes.
"""
# Keep original points for visualization
original_points = points.copy()
# Normalize
if do_normalize:
offset, scale = self._compute_normalization(points)
points = self._normalize_points(points, offset, scale)
# Mesh to latent
if len(points) < 10:
print(f"[WARNING] Falling back to mean latent with {len(points)} points.")
latent = self.mean_latent
# Conclusion: because of the small number of points, the
# optimization doesn't really optimize the latent code much, this
# makes the pred_mesh look like the mean_mesh. We just simply use
# the mean_latent to avoid numerical instability.
vis_mean_latent = False
if vis_mean_latent:
pred_latent = self._optimize_latent(points)
pred_mesh = self.latent_to_mesh(pred_latent)
pred_mesh.compute_vertex_normals()
pred_mesh.paint_uniform_color([1, 0, 0])
mean_latent = self.mean_latent
mean_mesh = self.latent_to_mesh(mean_latent)
mean_mesh.compute_vertex_normals()
mean_mesh.paint_uniform_color([0, 0, 1])
axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)
vis.show_3d(
[pred_mesh, axes],
[mean_mesh, axes],
)
else:
latent = self._optimize_latent(points)
# Latent to mesh
try:
mesh = self.latent_to_mesh(latent)
except:
if fallback_to_mean_latent:
print("[WARNING] Falling back to mean latent as latent_to_mesh fails")
latent = self.mean_latent
mesh = self.latent_to_mesh(latent)
else:
raise
# Denormalize
if do_normalize:
# Denormalize mesh if normalization was performed
mesh.vertices = o3d.utility.Vector3dVector(
self._denormalize_points(np.asarray(mesh.vertices), offset, scale)
)
visualize = False
if visualize:
# Original PCD is blue
original_pcd = o3d.geometry.PointCloud()
original_pcd.points = o3d.utility.Vector3dVector(original_points)
original_pcd.paint_uniform_color([0, 0, 1])
# Maybe-normalized PCD is red
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
pcd.paint_uniform_color([1, 0, 0])
# Mesh is always in the original scale
mesh.compute_vertex_normals()
axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)
geometries = [original_pcd, pcd, mesh, axes]
o3d.visualization.draw_geometries(geometries)
return mesh
    def mv_points_to_mesh(self, mv_points, do_normalize=True):
        """
        Processes multiple sets of points and generates the corresponding meshes.

        Args:
            mv_points (List[np.ndarray]): A list of arrays where each array
                represents a set of points in a point cloud (ShapeNet axes).
            do_normalize (bool): If True, normalizes each point cloud before
                processing and denormalizes each resulting mesh afterwards.

        Returns:
            List[o3d.geometry.TriangleMesh]: A list of Open3D mesh objects,
                one per input point cloud, in the same order.
        """
        normalization_params = []
        if do_normalize:
            # Each cloud gets its own (offset, scale), remembered for the
            # inverse transform applied to its mesh below.
            normalized_points = []
            for points in mv_points:
                offset, scale = self._compute_normalization(points)
                normalized_points.append(self._normalize_points(points, offset, scale))
                normalization_params.append((offset, scale))
            mv_points = normalized_points
        mv_latents = self._optimize_mv_latents(mv_points)
        meshes = [self.latent_to_mesh(latent.unsqueeze(0)) for latent in mv_latents]
        if do_normalize:
            for mesh, (offset, scale) in zip(meshes, normalization_params):
                mesh.vertices = o3d.utility.Vector3dVector(
                    self._denormalize_points(np.asarray(mesh.vertices), offset, scale)
                )
        return meshes
    def canonical_points_to_mesh(
        self,
        points: np.ndarray,
        do_normalize: bool,
    ) -> o3d.geometry.TriangleMesh:
        """
        Canonical axes points to canonical axes mesh.

        Wraps :meth:`points_to_mesh` with a rotation into ShapeNet axes
        before reconstruction and back to canonical axes afterwards.

        Args:
            points: (N, 3) array of points in canonical axes.
            do_normalize: bool, if True, normalize the points before processing.

        Returns:
            mesh: Open3D mesh object in canonical axes.
        """
        # Rotate canonical -> shapenet
        points = DeepSDFEngine._rotate_canonical_to_shapenet(points)
        # Recon
        mesh = self.points_to_mesh(points, do_normalize=do_normalize)
        # Rotate shapenet -> canonical
        mesh.vertices = o3d.utility.Vector3dVector(
            DeepSDFEngine._rotate_shapenet_to_canonical(np.asarray(mesh.vertices))
        )
        return mesh
    def np_latent_to_mesh(self, latent):
        """
        Decode a numpy latent code into a ShapeNet-axes mesh.

        Args:
            latent: (256,) numpy float32 array.
        """
        latent_vec = torch.tensor(latent, dtype=torch.float32).cuda().reshape(1, 256)
        return self.latent_to_mesh(latent_vec)
def latent_to_mesh(self, latent_vec):
"""
ShapeNet axes latent code to ShapeNet axes mesh.
latent_vec: (1, 256)? torch tensor in CUDA.
"""
self.decoder.eval()
voxel_origin = np.array([-1, -1, -1], dtype=np.float32)
voxel_size = 2.0 / (self.voxel_resolution - 1)
grid = np.meshgrid(
np.linspace(voxel_origin[0], voxel_origin[0] + 2, self.voxel_resolution),
np.linspace(voxel_origin[1], voxel_origin[1] + 2, self.voxel_resolution),
np.linspace(voxel_origin[2], voxel_origin[2] + 2, self.voxel_resolution),
indexing="ij",
)
grid = np.stack(grid, axis=-1).reshape(-1, 3)
grid_torch = torch.from_numpy(grid).float().cuda()
sdf_values = torch.zeros(grid_torch.shape[0], 1).cuda()
num_samples = grid_torch.shape[0]
head = 0
while head < num_samples:
sample_subset = grid_torch[
head : min(head + self.max_batch, num_samples), :
]
latent_inputs = latent_vec.expand(sample_subset.size(0), -1)
inputs = torch.cat([latent_inputs, sample_subset], dim=1)
sdf_values[head : min(head + self.max_batch, num_samples)] = self.decoder(
inputs
).detach()
head += self.max_batch
sdf_shape = (
self.voxel_resolution,
self.voxel_resolution,
self.voxel_resolution,
)
sdf_values_np = sdf_values.cpu().numpy().reshape(sdf_shape)
verts, faces, normals, _ = skimage.measure.marching_cubes(
sdf_values_np, level=0.0, spacing=[voxel_size] * 3
)
mesh = o3d.geometry.TriangleMesh()
mesh.vertices = o3d.utility.Vector3dVector(verts + voxel_origin)
mesh.triangles = o3d.utility.Vector3iVector(faces)
return mesh
def main():
    """CLI demo: package a trained DeepSDF model (specs, checkpoint, mean
    latent) under ``packaged/`` and reconstruct meshes from ShapeNetV2
    surface samples, both one-at-a-time and batched (multi-view).
    """
    parser = argparse.ArgumentParser(
        description="Reconstruct a shape from a partial point cloud using DeepSDF."
    )
    parser.add_argument(
        "--specs_path",
        "-s",
        default="examples/cars/specs.json",
        help="Path to the experiment specifications.",
    )
    parser.add_argument(
        "--ckpt_path",
        "-c",
        default="examples/cars/ModelParameters/2000.pth",
        help="Checkpoint path.",
    )
    parser.add_argument(
        "--train_latent_dir",
        "-t",
        default="examples/cars/Reconstructions/2000/Codes/ShapeNetV2/02958343",
        help="Directory containing the training set latent code .pth files.",
    )
    parser.add_argument(
        "--resolution",
        "-r",
        dest="voxel_resolution",
        default=256,
        type=int,
        help="Voxel resolution (vr) in a cube of size 2, voxel_size = 2.0 / (vr - 1)",
    )
    parser.add_argument(
        "--iters",
        type=int,
        default=500,
        help="Number of iterations for latent code optimization.",
    )
    parser.add_argument(
        "--lr", type=float, default=1e-4, help="Learning rate for optimizer."
    )
    deep_sdf.add_common_args(parser)
    args = parser.parse_args()
    deep_sdf.configure_logging(args)
    # Package specs, ckpt, mean_latent to _deepsdf_root/packaged
    packaged_dir = _deepsdf_root / "packaged"
    specs_path = packaged_dir / "specs.json"
    ckpt_path = packaged_dir / "ckpt.pth"
    mean_latent_path = packaged_dir / "mean_latent.pth"
    shutil.copy(args.specs_path, specs_path)
    shutil.copy(args.ckpt_path, ckpt_path)
    mean_latent = _load_mean_latent(args.train_latent_dir)
    torch.save(mean_latent, mean_latent_path)
    print(f"[packaged] {specs_path}")
    print(f"[packaged] {ckpt_path}")
    print(f"[packaged] {mean_latent_path}")
    # Initialize reconstructor from packaged files
    reconstructor = DeepSDFEngine(
        specs_path=specs_path,
        ckpt_path=ckpt_path,
        mean_latent_path=mean_latent_path,
        iters=args.iters,
        lr=args.lr,
        num_samples=2048,
        voxel_resolution=args.voxel_resolution,
        max_batch=32**3,
        l2reg=True,
        clamp_dist=0.1,
        verbose=True,
    )
    axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)
    # Reconstruct three cars individually (single-cloud path).
    print("\n# Processing points_00")
    points_00 = load_sv2_point_cloud(
        synset_id="02958343",
        object_id="ebc59fa7d366b486122181f48ecf7852",
        data_root=Path("data"),
    )
    mesh_00 = reconstructor.points_to_mesh(points_00, do_normalize=True)
    mesh_00.compute_vertex_normals()
    pcd_00 = o3d.geometry.PointCloud()
    pcd_00.points = o3d.utility.Vector3dVector(points_00)
    print("\n# Processing points_01")
    points_01 = load_sv2_point_cloud(
        synset_id="02958343",
        object_id="fd7741b7927726bda37f3fc191551700",
        data_root=Path("data"),
    )
    mesh_01 = reconstructor.points_to_mesh(points_01, do_normalize=True)
    mesh_01.compute_vertex_normals()
    pcd_01 = o3d.geometry.PointCloud()
    pcd_01.points = o3d.utility.Vector3dVector(points_01)
    print("\n# Processing points_02")
    points_02 = load_sv2_point_cloud(
        synset_id="02958343",
        object_id="fe3dc721f5026196d61b6a34f3fd808c",
        data_root=Path("data"),
    )
    mesh_02 = reconstructor.points_to_mesh(points_02, do_normalize=True)
    mesh_02.compute_vertex_normals()
    pcd_02 = o3d.geometry.PointCloud()
    pcd_02.points = o3d.utility.Vector3dVector(points_02)
    # Same three clouds through the batched (multi-view) path, then compare
    # the single and batched reconstructions side by side.
    print("\n# Processing 3 mv_points")
    mv_points = [points_00, points_01, points_02]
    meshes = reconstructor.mv_points_to_mesh(mv_points, do_normalize=True)
    for mesh in meshes:
        mesh.compute_vertex_normals()
    vis.show_3d(
        [mesh_00, pcd_00, axes],
        [meshes[0], pcd_00, axes],
        [mesh_01, pcd_01, axes],
        [meshes[1], pcd_01, axes],
        [mesh_02, pcd_02, axes],
        [meshes[2], pcd_02, axes],
    )
    # NOTE(review): message says 10 but the batch is points_00 * 50 --
    # confirm which size this throughput check intends.
    print("\n# Processing 10 mv_points")
    mv_points = [points_00] * 50
    meshes = reconstructor.mv_points_to_mesh(mv_points, do_normalize=True)
if __name__ == "__main__":
    # Compact float printing for the demo's console output.
    np.set_printoptions(precision=4, suppress=True)
    main()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/deep_sdf/__init__.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
from lit.extern.deepsdf.deep_sdf.data import *
from lit.extern.deepsdf.deep_sdf.mesh import *
from lit.extern.deepsdf.deep_sdf.metrics.chamfer import *
from lit.extern.deepsdf.deep_sdf.utils import *
from lit.extern.deepsdf.deep_sdf.workspace import *
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/deep_sdf/data.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import glob
import logging
import os
import random
import numpy as np
import torch
import torch.utils.data
import lit.extern.deepsdf.deep_sdf.workspace as ws
def get_instance_filenames(data_source, split):
    """Build the list of per-instance SDF-sample ``.npz`` relative paths.

    *split* is a nested mapping: dataset -> class -> [instance names].
    Returned paths are relative (``dataset/class/instance.npz``); existence
    is checked against ``data_source/<sdf_samples_subdir>/``.
    """
    npzfiles = []
    for dataset in split:
        for class_name in split[dataset]:
            for instance_name in split[dataset][class_name]:
                instance_filename = os.path.join(
                    dataset, class_name, instance_name + ".npz"
                )
                if not os.path.isfile(
                    os.path.join(data_source, ws.sdf_samples_subdir, instance_filename)
                ):
                    # raise RuntimeError(
                    #     'Requested non-existent file "' + instance_filename + "'"
                    # )
                    # NOTE(review): the raise was softened to a warning, yet
                    # the missing path is still appended below -- confirm this
                    # is intended rather than a missing `continue`.
                    logging.warning(
                        "Requested non-existent file '{}'".format(instance_filename)
                    )
                npzfiles += [instance_filename]
    return npzfiles
class NoMeshFileError(RuntimeError):
    """Raised when a mesh file is not found in a shape directory."""
    pass
class MultipleMeshFileError(RuntimeError):
    """Raised when there are multiple mesh files in a shape directory."""
    pass
def find_mesh_in_directory(shape_dir):
    """Locate exactly one ``.obj`` mesh in *shape_dir*.

    Both the directory itself and one subdirectory level are searched
    (``glob`` without ``recursive=True`` treats ``**`` like ``*``).

    Raises:
        NoMeshFileError: no ``.obj`` file found.
        MultipleMeshFileError: more than one ``.obj`` file found.
    """
    candidates = list(glob.iglob(shape_dir + "/**/*.obj"))
    candidates.extend(glob.iglob(shape_dir + "/*.obj"))
    if not candidates:
        raise NoMeshFileError()
    if len(candidates) > 1:
        raise MultipleMeshFileError()
    return candidates[0]
def remove_nans(tensor):
    """Drop rows whose SDF value (column 3) is NaN."""
    keep_mask = ~torch.isnan(tensor[:, 3])
    return tensor[keep_mask, :]
def read_sdf_samples_into_ram(filename):
    """Load an SDF-sample ``.npz`` file as ``[pos, neg]`` torch tensors."""
    archive = np.load(filename)
    return [torch.from_numpy(archive["pos"]), torch.from_numpy(archive["neg"])]
def unpack_sdf_samples(filename, subsample=None):
    """Load an SDF-sample ``.npz``; optionally draw a random subsample.

    With *subsample* set, ``subsample // 2`` rows are drawn (with
    replacement) from the NaN-filtered positive samples and the same number
    from the negative ones, concatenated pos-first. Without it, the raw npz
    archive is returned.
    """
    archive = np.load(filename)
    if subsample is None:
        return archive
    pos_tensor = torch.from_numpy(archive["pos"])
    neg_tensor = torch.from_numpy(archive["neg"])
    # Filter NaN SDF rows (column 3) before sampling.
    pos_tensor = pos_tensor[~torch.isnan(pos_tensor[:, 3]), :]
    neg_tensor = neg_tensor[~torch.isnan(neg_tensor[:, 3]), :]
    # split the sample into half; torch.rand is in [0, 1) so indices are valid
    half = int(subsample / 2)
    pos_idx = (torch.rand(half) * pos_tensor.shape[0]).long()
    neg_idx = (torch.rand(half) * neg_tensor.shape[0]).long()
    return torch.cat(
        [
            torch.index_select(pos_tensor, 0, pos_idx),
            torch.index_select(neg_tensor, 0, neg_idx),
        ],
        0,
    )
def unpack_sdf_samples_from_ram(data, subsample=None):
    """Subsample preloaded ``[pos, neg]`` tensors.

    Positive rows come from a random contiguous window of length
    ``subsample // 2`` (the preloading path shuffles rows up front);
    negative rows likewise, unless there are too few, in which case they
    are drawn randomly with replacement. Returns *data* unchanged when
    *subsample* is None.
    """
    if subsample is None:
        return data
    pos_tensor, neg_tensor = data[0], data[1]
    # split the sample into half
    half = int(subsample / 2)
    pos_size = pos_tensor.shape[0]
    neg_size = neg_tensor.shape[0]
    window_start = random.randint(0, pos_size - half)
    sample_pos = pos_tensor[window_start : window_start + half]
    if neg_size <= half:
        # Not enough negatives for a window: sample with replacement.
        neg_idx = (torch.rand(half) * neg_size).long()
        sample_neg = torch.index_select(neg_tensor, 0, neg_idx)
    else:
        window_start = random.randint(0, neg_size - half)
        sample_neg = neg_tensor[window_start : window_start + half]
    return torch.cat([sample_pos, sample_neg], 0)
class SDFSamples(torch.utils.data.Dataset):
    """Dataset of per-instance SDF sample files (``.npz`` with pos/neg rows).

    Each item is ``(samples, idx)`` where ``samples`` is either the raw npz
    archive (subsample=None) or a (subsample, 4) tensor drawn half from
    positive and half from negative samples.
    """
    def __init__(
        self,
        data_source,
        split,
        subsample,
        load_ram=False,
        print_filename=False,
        num_files=1000000,
    ):
        # NOTE(review): `print_filename` and `num_files` are accepted but not
        # referenced anywhere in this class -- confirm they are legacy args.
        self.subsample = subsample
        self.data_source = data_source
        self.npyfiles = get_instance_filenames(data_source, split)
        logging.debug(
            "using "
            + str(len(self.npyfiles))
            + " shapes from data source "
            + data_source
        )
        self.load_ram = load_ram
        if load_ram:
            # Preload every instance, dropping NaN rows and shuffling rows up
            # front, so the contiguous-window subsampling used by
            # unpack_sdf_samples_from_ram still yields randomized samples.
            self.loaded_data = []
            for f in self.npyfiles:
                filename = os.path.join(self.data_source, ws.sdf_samples_subdir, f)
                npz = np.load(filename)
                pos_tensor = remove_nans(torch.from_numpy(npz["pos"]))
                neg_tensor = remove_nans(torch.from_numpy(npz["neg"]))
                self.loaded_data.append(
                    [
                        pos_tensor[torch.randperm(pos_tensor.shape[0])],
                        neg_tensor[torch.randperm(neg_tensor.shape[0])],
                    ]
                )
    def __len__(self):
        return len(self.npyfiles)
    def __getitem__(self, idx):
        filename = os.path.join(
            self.data_source, ws.sdf_samples_subdir, self.npyfiles[idx]
        )
        if self.load_ram:
            return (
                unpack_sdf_samples_from_ram(self.loaded_data[idx], self.subsample),
                idx,
            )
        else:
            return unpack_sdf_samples(filename, self.subsample), idx
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/deep_sdf/mesh.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import logging
import time
import numpy as np
import plyfile
import skimage.measure
import torch
import lit.extern.deepsdf.deep_sdf.utils
def create_mesh(
    decoder, latent_vec, filename, N=256, max_batch=32**3, offset=None, scale=None
):
    """Sample the decoder's SDF on an N^3 grid and write a marching-cubes
    mesh to ``filename + ".ply"``.

    Args:
        decoder: DeepSDF decoder module (on CUDA).
        latent_vec: latent code forwarded to decode_sdf.
        filename: output path without the ``.ply`` extension.
        N: grid resolution per axis.
        max_batch: max SDF queries per decoder forward pass.
        offset, scale: optional denormalization applied to mesh vertices.
    """
    start = time.time()
    ply_filename = filename
    decoder.eval()
    # NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle
    voxel_origin = [-1, -1, -1]
    voxel_size = 2.0 / (N - 1)
    overall_index = torch.arange(0, N**3, 1, out=torch.LongTensor())
    samples = torch.zeros(N**3, 4)
    # transform first 3 columns to be the x, y, z index.
    # Bug fix: `/` is true division on modern torch and produced fractional
    # indices here; decode the linear index with explicit floor division.
    samples[:, 2] = overall_index % N
    samples[:, 1] = torch.div(overall_index, N, rounding_mode="floor") % N
    samples[:, 0] = torch.div(overall_index, N * N, rounding_mode="floor") % N
    # transform first 3 columns to be the x, y, z coordinate
    samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2]
    samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1]
    samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0]
    num_samples = N**3
    samples.requires_grad = False
    head = 0
    while head < num_samples:
        sample_subset = samples[head : min(head + max_batch, num_samples), 0:3].cuda()
        # Bug fix: the module is imported as `lit.extern.deepsdf.deep_sdf.utils`
        # with no `deep_sdf` alias bound, so the previous
        # `deep_sdf.utils.decode_sdf` reference raised NameError at runtime.
        samples[head : min(head + max_batch, num_samples), 3] = (
            lit.extern.deepsdf.deep_sdf.utils.decode_sdf(
                decoder, latent_vec, sample_subset
            )
            .squeeze(1)
            .detach()
            .cpu()
        )
        head += max_batch
    sdf_values = samples[:, 3]
    sdf_values = sdf_values.reshape(N, N, N)
    end = time.time()
    print("sampling takes: %f" % (end - start))
    convert_sdf_samples_to_ply(
        sdf_values.data.cpu(),
        voxel_origin,
        voxel_size,
        ply_filename + ".ply",
        offset,
        scale,
    )
def convert_sdf_samples_to_ply(
    pytorch_3d_sdf_tensor,
    voxel_grid_origin,
    voxel_size,
    ply_filename_out,
    offset=None,
    scale=None,
):
    """
    Convert sdf samples to .ply via marching cubes at the zero level set.

    :param pytorch_3d_sdf_tensor: a torch.FloatTensor of shape (n,n,n)
    :param voxel_grid_origin: a list of three floats: the bottom, left, down
        origin of the voxel grid
    :param voxel_size: float, the size of the voxels
    :param ply_filename_out: string, path of the filename to save to
    :param offset: optional translation subtracted after un-scaling
    :param scale: optional scale the vertices are divided by

    This function adapted from: https://github.com/RobotLocomotion/spartan
    """
    start_time = time.time()
    numpy_3d_sdf_tensor = pytorch_3d_sdf_tensor.numpy()
    # skimage>=0.18.0: marching_cubes_lewiner->marching_cubes
    verts, faces, normals, values = skimage.measure.marching_cubes(
        numpy_3d_sdf_tensor, level=0.0, spacing=[voxel_size] * 3
    )
    # transform from voxel coordinates to camera coordinates
    # note x and y are flipped in the output of marching_cubes
    mesh_points = np.zeros_like(verts)
    mesh_points[:, 0] = voxel_grid_origin[0] + verts[:, 0]
    mesh_points[:, 1] = voxel_grid_origin[1] + verts[:, 1]
    mesh_points[:, 2] = voxel_grid_origin[2] + verts[:, 2]
    # apply additional offset and scale (inverse of the normalization used
    # when the SDF samples were generated)
    if scale is not None:
        mesh_points = mesh_points / scale
    if offset is not None:
        mesh_points = mesh_points - offset
    # try writing to the ply file
    num_verts = verts.shape[0]
    num_faces = faces.shape[0]
    verts_tuple = np.zeros((num_verts,), dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")])
    for i in range(0, num_verts):
        verts_tuple[i] = tuple(mesh_points[i, :])
    faces_building = []
    for i in range(0, num_faces):
        faces_building.append(((faces[i, :].tolist(),)))
    faces_tuple = np.array(faces_building, dtype=[("vertex_indices", "i4", (3,))])
    el_verts = plyfile.PlyElement.describe(verts_tuple, "vertex")
    el_faces = plyfile.PlyElement.describe(faces_tuple, "face")
    ply_data = plyfile.PlyData([el_verts, el_faces])
    logging.debug("saving mesh to %s" % (ply_filename_out))
    ply_data.write(ply_filename_out)
    logging.debug(
        "converting to ply format and writing to file took {} s".format(
            time.time() - start_time
        )
    )
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/deep_sdf/metrics/chamfer.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import numpy as np
from scipy.spatial import cKDTree as KDTree
import trimesh
def compute_trimesh_chamfer(gt_points, gen_mesh, offset, scale, num_mesh_samples=30000):
    """
    This function computes a symmetric chamfer distance, i.e. the sum of both
    directed mean squared nearest-neighbor distances.

    gt_points: trimesh.points.PointCloud of just points, sampled from the
        surface (see compute_metrics.py for more documentation)
    gen_mesh: trimesh.base.Trimesh of output mesh from whichever autoencoding
        reconstruction method (see compute_metrics.py for more)
    offset, scale: denormalization applied to the sampled generated points
        (``p / scale - offset``) so both clouds share one frame.
    """
    gen_points_sampled = trimesh.sample.sample_surface(gen_mesh, num_mesh_samples)[0]
    gen_points_sampled = gen_points_sampled / scale - offset
    # only need numpy array of points
    # gt_points_np = gt_points.vertices
    gt_points_np = gt_points.vertices
    # one direction: gt -> generated
    gen_points_kd_tree = KDTree(gen_points_sampled)
    one_distances, one_vertex_ids = gen_points_kd_tree.query(gt_points_np)
    gt_to_gen_chamfer = np.mean(np.square(one_distances))
    # other direction: generated -> gt
    gt_points_kd_tree = KDTree(gt_points_np)
    two_distances, two_vertex_ids = gt_points_kd_tree.query(gen_points_sampled)
    gen_to_gt_chamfer = np.mean(np.square(two_distances))
    return gt_to_gen_chamfer + gen_to_gt_chamfer
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/deep_sdf/utils.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import logging
import torch
def add_common_args(arg_parser):
    """Attach the logging-related CLI flags shared by DeepSDF scripts."""
    flag_specs = [
        (
            ["--debug"],
            dict(
                dest="debug",
                default=False,
                action="store_true",
                help="If set, debugging messages will be printed",
            ),
        ),
        (
            ["--quiet", "-q"],
            dict(
                dest="quiet",
                default=False,
                action="store_true",
                help="If set, only warnings will be printed",
            ),
        ),
        (
            ["--log"],
            dict(
                dest="logfile",
                default=None,
                help="If set, the log will be saved using the specified filename.",
            ),
        ),
    ]
    for names, options in flag_specs:
        arg_parser.add_argument(*names, **options)
def configure_logging(args):
    """Configure the root logger from the flags added by add_common_args.

    Level: DEBUG with --debug, WARNING with --quiet, otherwise INFO. Adds a
    stream handler and, when --log is given, a file handler with the same
    formatter.
    """
    logger = logging.getLogger()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.quiet:
        logger.setLevel(logging.WARNING)
    else:
        logger.setLevel(logging.INFO)
    logger_handler = logging.StreamHandler()
    formatter = logging.Formatter("DeepSdf - %(levelname)s - %(message)s")
    logger_handler.setFormatter(formatter)
    # NOTE(review): handlers accumulate on the root logger if this is called
    # more than once, duplicating output -- confirm single-call usage.
    logger.addHandler(logger_handler)
    if args.logfile is not None:
        file_logger_handler = logging.FileHandler(args.logfile)
        file_logger_handler.setFormatter(formatter)
        logger.addHandler(file_logger_handler)
def decode_sdf(decoder, latent_vector, queries):
    """Evaluate *decoder* on *queries*, optionally prepending a latent code.

    When *latent_vector* is given it is broadcast across all query rows and
    concatenated in front of the query coordinates; otherwise the queries
    are passed through unchanged.
    """
    if latent_vector is None:
        return decoder(queries)
    n_queries = queries.shape[0]
    expanded_latent = latent_vector.expand(n_queries, -1)
    return decoder(torch.cat([expanded_latent, queries], 1))
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/deep_sdf/workspace.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import json
import os
import torch
model_params_subdir = "ModelParameters"
optimizer_params_subdir = "OptimizerParameters"
latent_codes_subdir = "LatentCodes"
logs_filename = "Logs.pth"
reconstructions_subdir = "Reconstructions"
reconstruction_meshes_subdir = "Meshes"
reconstruction_codes_subdir = "Codes"
specifications_filename = "specs.json"
data_source_map_filename = ".datasources.json"
evaluation_subdir = "Evaluation"
sdf_samples_subdir = "SdfSamples"
surface_samples_subdir = "SurfaceSamples"
normalization_param_subdir = "NormalizationParameters"
training_meshes_subdir = "TrainingMeshes"
def load_experiment_specifications(experiment_directory):
    """Load and return the experiment's ``specs.json`` as a dict.

    Args:
        experiment_directory: directory expected to contain specs.json.

    Raises:
        Exception: if the specs file is missing from the directory.
    """
    filename = os.path.join(experiment_directory, specifications_filename)

    if not os.path.isfile(filename):
        # Bug fix: .format() was previously applied to the '"specs.json"'
        # literal (which has no placeholder), so the directory name was
        # never interpolated into the message.
        raise Exception(
            'The experiment directory ({}) does not include specifications '
            'file "specs.json"'.format(experiment_directory)
        )

    # Bug fix: close the file handle instead of leaking it.
    with open(filename) as f:
        return json.load(f)
def load_model_parameters(experiment_directory, checkpoint, decoder):
    """Restore decoder weights from a saved checkpoint.

    Loads ``<experiment>/ModelParameters/<checkpoint>.pth`` into
    ``decoder`` (in place) and returns the epoch stored alongside it.
    """
    checkpoint_path = os.path.join(
        experiment_directory, model_params_subdir, checkpoint + ".pth"
    )

    if not os.path.isfile(checkpoint_path):
        raise Exception('model state dict "{}" does not exist'.format(checkpoint_path))

    saved = torch.load(checkpoint_path)
    decoder.load_state_dict(saved["model_state_dict"])

    return saved["epoch"]
def build_decoder(experiment_directory, experiment_specs):
    """Instantiate the CUDA decoder network described by the specs.

    The architecture module is resolved dynamically from
    ``experiment_specs["NetworkArch"]``.  ``experiment_directory`` is
    unused but kept for interface compatibility.
    """
    arch_module = __import__(
        "networks." + experiment_specs["NetworkArch"], fromlist=["Decoder"]
    )

    decoder = arch_module.Decoder(
        experiment_specs["CodeLength"], **experiment_specs["NetworkSpecs"]
    ).cuda()

    return decoder
def load_decoder(
    experiment_directory, experiment_specs, checkpoint, data_parallel=True
):
    """Build the decoder, optionally wrap it in DataParallel, and load the
    requested checkpoint.

    Returns:
        Tuple of (decoder, epoch stored in the checkpoint).
    """
    decoder = build_decoder(experiment_directory, experiment_specs)

    if data_parallel:
        decoder = torch.nn.DataParallel(decoder)

    epoch = load_model_parameters(experiment_directory, checkpoint, decoder)

    return decoder, epoch
def load_latent_vectors(experiment_directory, checkpoint):
    """Load the per-shape latent codes saved at the given checkpoint.

    Supports both storage layouts used across training versions: a raw
    tensor of codes, or an ``nn.Embedding`` state dict.

    Returns:
        A list of CUDA tensors (legacy tensor layout) or a detached
        (num_shapes, code_dim) tensor (embedding layout).
    """
    filename = os.path.join(
        experiment_directory, latent_codes_subdir, checkpoint + ".pth"
    )

    if not os.path.isfile(filename):
        # Bug fix: .format() previously applied only to the second string
        # fragment, so the first "{}" stayed a literal and the checkpoint
        # slot was filled with the experiment directory instead.
        raise Exception(
            "The experiment directory ({}) does not include a latent code"
            " file for checkpoint '{}'".format(experiment_directory, checkpoint)
        )

    data = torch.load(filename)

    if isinstance(data["latent_codes"], torch.Tensor):
        # Legacy layout: a (num_shapes, code_dim) tensor of codes.
        return [code.cuda() for code in data["latent_codes"]]
    else:
        # Embedding layout: restore the state dict, then return the weights.
        num_embeddings, embedding_dim = data["latent_codes"]["weight"].shape
        lat_vecs = torch.nn.Embedding(num_embeddings, embedding_dim)
        lat_vecs.load_state_dict(data["latent_codes"])
        return lat_vecs.weight.data.detach()
def get_data_source_map_filename(data_dir):
    """Return the path of the hidden data-source map inside ``data_dir``."""
    return os.path.join(data_dir, data_source_map_filename)
def get_reconstructed_mesh_filename(
    experiment_dir, epoch, dataset, class_name, instance_name
):
    """Path of a reconstructed mesh:
    <experiment>/Reconstructions/<epoch>/Meshes/<dataset>/<class>/<instance>.ply
    """
    path_parts = (
        experiment_dir,
        reconstructions_subdir,
        str(epoch),
        reconstruction_meshes_subdir,
        dataset,
        class_name,
        instance_name + ".ply",
    )
    return os.path.join(*path_parts)
def get_reconstructed_code_filename(
    experiment_dir, epoch, dataset, class_name, instance_name
):
    """Path of a reconstructed latent code:
    <experiment>/Reconstructions/<epoch>/Codes/<dataset>/<class>/<instance>.pth
    """
    path_parts = (
        experiment_dir,
        reconstructions_subdir,
        str(epoch),
        reconstruction_codes_subdir,
        dataset,
        class_name,
        instance_name + ".pth",
    )
    return os.path.join(*path_parts)
def get_evaluation_dir(experiment_dir, checkpoint, create_if_nonexistent=False):
    """Return (and optionally create) the evaluation dir for a checkpoint."""
    eval_dir = os.path.join(experiment_dir, evaluation_subdir, checkpoint)
    if create_if_nonexistent and not os.path.isdir(eval_dir):
        os.makedirs(eval_dir)
    return eval_dir
def get_model_params_dir(experiment_dir, create_if_nonexistent=False):
    """Return (and optionally create) the model parameters directory."""
    params_dir = os.path.join(experiment_dir, model_params_subdir)
    if create_if_nonexistent and not os.path.isdir(params_dir):
        os.makedirs(params_dir)
    return params_dir
def get_optimizer_params_dir(experiment_dir, create_if_nonexistent=False):
    """Return (and optionally create) the optimizer parameters directory."""
    params_dir = os.path.join(experiment_dir, optimizer_params_subdir)
    if create_if_nonexistent and not os.path.isdir(params_dir):
        os.makedirs(params_dir)
    return params_dir
def get_latent_codes_dir(experiment_dir, create_if_nonexistent=False):
    """Return (and optionally create) the latent codes directory."""
    codes_dir = os.path.join(experiment_dir, latent_codes_subdir)
    if create_if_nonexistent and not os.path.isdir(codes_dir):
        os.makedirs(codes_dir)
    return codes_dir
def get_normalization_params_filename(
    data_dir, dataset_name, class_name, instance_name
):
    """Path of a shape's normalization parameters (.npz) inside the data
    source directory."""
    path_parts = (
        data_dir,
        normalization_param_subdir,
        dataset_name,
        class_name,
        instance_name + ".npz",
    )
    return os.path.join(*path_parts)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/evaluate.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import argparse
import json
import logging
import os
import numpy as np
import trimesh
import lit.extern.deepsdf.deep_sdf
import lit.extern.deepsdf.deep_sdf.workspace as ws
def evaluate(experiment_directory, checkpoint, data_dir, split_filename):
    """Compute chamfer distances for every instance in a split.

    For each (dataset, class, instance) triple, compares the reconstructed
    mesh against the ground-truth surface samples (de-normalized via the
    stored normalization parameters) and appends the result to
    ``<experiment>/Evaluation/<checkpoint>/chamfer.csv``.
    """
    with open(split_filename, "r") as f:
        split = json.load(f)

    chamfer_results = []

    for dataset in split:
        for class_name in split[dataset]:
            for instance_name in split[dataset][class_name]:
                logging.debug(
                    "evaluating " + os.path.join(dataset, class_name, instance_name)
                )

                reconstructed_mesh_filename = ws.get_reconstructed_mesh_filename(
                    experiment_directory, checkpoint, dataset, class_name, instance_name
                )

                logging.debug(
                    'reconstructed mesh is "' + reconstructed_mesh_filename + '"'
                )

                ground_truth_samples_filename = os.path.join(
                    data_dir,
                    "SurfaceSamples",
                    dataset,
                    class_name,
                    instance_name + ".ply",
                )

                logging.debug(
                    "ground truth samples are " + ground_truth_samples_filename
                )

                normalization_params_filename = os.path.join(
                    data_dir,
                    "NormalizationParameters",
                    dataset,
                    class_name,
                    instance_name + ".npz",
                )

                # Bug fix: this debug line previously printed the ground
                # truth samples path instead of the normalization params.
                logging.debug(
                    "normalization params are " + normalization_params_filename
                )

                ground_truth_points = trimesh.load(ground_truth_samples_filename)
                reconstruction = trimesh.load(reconstructed_mesh_filename)

                normalization_params = np.load(normalization_params_filename)

                # NOTE(review): the module imports
                # `lit.extern.deepsdf.deep_sdf`, which does not bind the bare
                # name `deep_sdf` — confirm this resolves at runtime.
                chamfer_dist = deep_sdf.metrics.chamfer.compute_trimesh_chamfer(
                    ground_truth_points,
                    reconstruction,
                    normalization_params["offset"],
                    normalization_params["scale"],
                )

                logging.debug("chamfer distance: " + str(chamfer_dist))

                chamfer_results.append(
                    (os.path.join(dataset, class_name, instance_name), chamfer_dist)
                )

    with open(
        os.path.join(
            ws.get_evaluation_dir(experiment_directory, checkpoint, True), "chamfer.csv"
        ),
        "w",
    ) as f:
        f.write("shape, chamfer_dist\n")
        for result in chamfer_results:
            f.write("{}, {}\n".format(result[0], result[1]))
if __name__ == "__main__":
    # CLI wrapper around evaluate(): parse arguments, set up logging, run.
    arg_parser = argparse.ArgumentParser(description="Evaluate a DeepSDF autodecoder")
    arg_parser.add_argument(
        "--experiment",
        "-e",
        dest="experiment_directory",
        required=True,
        help="The experiment directory. This directory should include experiment specifications in "
        + '"specs.json", and logging will be done in this directory as well.',
    )
    arg_parser.add_argument(
        "--checkpoint",
        "-c",
        dest="checkpoint",
        default="latest",
        help="The checkpoint to test.",
    )
    arg_parser.add_argument(
        "--data",
        "-d",
        dest="data_source",
        required=True,
        help="The data source directory.",
    )
    arg_parser.add_argument(
        "--split",
        "-s",
        dest="split_filename",
        required=True,
        help="The split to evaluate.",
    )

    # NOTE(review): the module imports `lit.extern.deepsdf.deep_sdf`, which
    # does not bind the bare name `deep_sdf` used below — confirm this
    # resolves at runtime.
    deep_sdf.add_common_args(arg_parser)

    args = arg_parser.parse_args()

    deep_sdf.configure_logging(args)

    evaluate(
        args.experiment_directory,
        args.checkpoint,
        args.data_source,
        args.split_filename,
    )
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/explore_results.py | Python | import argparse
import copy
import json
import shutil
from pathlib import Path
import camtools as ct
import numpy as np
import open3d as o3d
import skimage.measure
import torch
import trimesh
from pycg import vis
from torch import nn, optim
from tqdm import tqdm
import lit.extern.deepsdf.deep_sdf
_script_dir = Path(__file__).resolve().absolute().parent
_deepsdf_root = _script_dir
import sys
import ipdb
# Drop into an ipdb post-mortem session on any unhandled exception, except
# for clean interrupts/exits which propagate normally.
sys.excepthook = lambda et, ev, tb: (
    None
    if issubclass(et, (KeyboardInterrupt, SystemExit))
    else (print(f"Unhandled exception: {ev}"), ipdb.post_mortem(tb))
)
class DeepSDFMesher:
    """
    Minimal DeepSDF Latent->Mesh converter.

    Loads a trained decoder from ``specs_path``/``ckpt_path`` and turns a
    latent code into an Open3D triangle mesh by densely evaluating the SDF
    on a regular grid and running marching cubes on the zero level set.
    """

    def __init__(
        self,
        specs_path,
        ckpt_path,
        voxel_resolution=128,
        max_batch=32**3,
    ):
        # voxel_resolution: grid points per axis; max_batch: SDF queries
        # evaluated per decoder forward pass (bounds device memory use).
        self.specs_path = Path(specs_path)
        self.ckpt_path = Path(ckpt_path)
        self.voxel_resolution = voxel_resolution
        self.max_batch = max_batch
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.decoder = self._load_network()

    def _load_network(self):
        """Build the decoder from the specs file and restore its weights."""
        specs = json.load(open(self.specs_path))
        arch = __import__(
            "lit.extern.deepsdf.networks." + specs["NetworkArch"], fromlist=["Decoder"]
        )
        latent_size = specs["CodeLength"]
        decoder = arch.Decoder(latent_size, **specs["NetworkSpecs"]).to(self.device)
        # Checkpoints were saved from a DataParallel-wrapped model, so wrap
        # before loading the state dict and unwrap (.module) afterwards.
        decoder = nn.DataParallel(decoder)
        # Bug fix: map_location lets CUDA-saved checkpoints load on a
        # CPU-only machine, consistent with self.device everywhere else.
        checkpoint = torch.load(self.ckpt_path, map_location=self.device)
        decoder.load_state_dict(checkpoint["model_state_dict"])
        decoder = decoder.module
        decoder.eval()
        return decoder

    def latent_to_mesh(self, latent):
        """Evaluate the SDF of ``latent`` on a [-1, 1]^3 grid and extract
        the zero level set as an Open3D TriangleMesh."""
        self.decoder.eval()
        latent = latent.to(self.device)

        voxel_origin = np.array([-1, -1, -1], dtype=np.float32)
        voxel_size = 2.0 / (self.voxel_resolution - 1)

        grid = np.meshgrid(
            np.linspace(voxel_origin[0], voxel_origin[0] + 2, self.voxel_resolution),
            np.linspace(voxel_origin[1], voxel_origin[1] + 2, self.voxel_resolution),
            np.linspace(voxel_origin[2], voxel_origin[2] + 2, self.voxel_resolution),
            indexing="ij",
        )
        grid = np.stack(grid, axis=-1).reshape(-1, 3)

        # Bug fix: use self.device rather than hard-coded .cuda() so the
        # mesher also runs on CPU-only machines.
        grid_torch = torch.from_numpy(grid).float().to(self.device)
        sdf_values = torch.zeros(grid_torch.shape[0], 1, device=self.device)

        # Evaluate the SDF in max_batch-sized chunks to bound memory.
        num_samples = grid_torch.shape[0]
        head = 0
        while head < num_samples:
            sample_subset = grid_torch[
                head : min(head + self.max_batch, num_samples), :
            ]
            latent_inputs = latent.expand(sample_subset.size(0), -1)
            inputs = torch.cat([latent_inputs, sample_subset], dim=1)
            sdf_values[head : min(head + self.max_batch, num_samples)] = self.decoder(
                inputs
            ).detach()
            head += self.max_batch

        sdf_shape = (
            self.voxel_resolution,
            self.voxel_resolution,
            self.voxel_resolution,
        )
        sdf_values_np = sdf_values.cpu().numpy().reshape(sdf_shape)

        # Marching cubes on the zero level set; `spacing` converts voxel
        # indices to metric units and adding voxel_origin shifts the
        # vertices back into the [-1, 1]^3 frame.
        verts, faces, normals, _ = skimage.measure.marching_cubes(
            sdf_values_np, level=0.0, spacing=[voxel_size] * 3
        )

        mesh = o3d.geometry.TriangleMesh()
        mesh.vertices = o3d.utility.Vector3dVector(verts + voxel_origin)
        mesh.triangles = o3d.utility.Vector3iVector(faces)
        mesh.compute_vertex_normals()
        return mesh
def explore(epoch):
    """Interactively compare train-time latents with re-optimized latents.

    For each training object, decodes both its latent from
    LatentCodes/<epoch>.pth (optimized jointly with the network) and its
    re-optimized reconstruction code, and shows the two meshes side by side.
    """
    # Latents saved by train_deep_sdf.py
    train_latents_path = Path(f"examples/cars/LatentCodes/{epoch}.pth")
    train_latents_dict = torch.load(train_latents_path)
    # Shape is (1608, 256)
    train_latents = train_latents_dict["latent_codes"]["weight"]

    # Load the split
    split_path = Path("examples/splits/sv2_cars_train.json")
    with open(split_path, "r") as f:
        split_data = json.load(f)
    object_ids = split_data["ShapeNetV2"]["02958343"]

    # Reconstructed latent codes of the training set
    recon_latent_dir = Path(
        f"examples/cars/Reconstructions/{epoch}/Codes/ShapeNetV2/02958343"
    )

    mesher = DeepSDFMesher(
        specs_path="examples/cars/specs.json",
        ckpt_path=f"examples/cars/ModelParameters/{epoch}.pth",
    )

    # Compare train latent and recon latent of training set
    for object_index, object_id in enumerate(object_ids):
        # Load recon latent.
        # Given pre-trained network parameters, we recover the latent codes for
        # each training samples. For each training sample, the recon_latent
        # shall be close to train_latent, as they are all coming from the
        # training set.
        recon_latent_path = recon_latent_dir / f"{object_id}.pth"
        recon_latent = torch.load(recon_latent_path).requires_grad_(False)
        recon_latent = recon_latent.cpu().flatten()
        recon_mesh = mesher.latent_to_mesh(recon_latent)
        recon_mesh.compute_vertex_normals()

        # Load train latent.
        # There are latent codes for all training samples, where the codes are
        # optimized together with the network parameters,
        train_latent = train_latents[object_index]
        train_mesh = mesher.latent_to_mesh(train_latent)
        train_mesh.compute_vertex_normals()

        # Visualize
        axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)
        # NOTE(review): `scenes` is unused and the trailing `pass` looks like
        # a debugger breakpoint anchor — harmless, but cleanup candidates.
        scenes = vis.show_3d(
            [recon_mesh, axes],
            [train_mesh, axes],
            use_new_api=False,
        )
        pass
def all_train_latents_to_mesh(epoch):
    """Convert every training latent code of ``epoch`` into a mesh file.

    Loads codes from examples/cars/LatentCodes/<epoch>.pth and writes one
    .ply per object id to examples/cars/LatentMeshes/<epoch>/.
    """
    latents_file = Path(f"examples/cars/LatentCodes/{epoch}.pth")
    # Stored as {"latent_codes": {"weight": tensor}}.
    latents = torch.load(latents_file)["latent_codes"]["weight"]

    with open(Path("examples/splits/sv2_cars_train.json"), "r") as f:
        object_ids = json.load(f)["ShapeNetV2"]["02958343"]

    # One latent per training object.
    assert len(latents) == len(object_ids)

    mesher = DeepSDFMesher(
        specs_path="examples/cars/specs.json",
        ckpt_path=f"examples/cars/ModelParameters/{epoch}.pth",
    )

    output_dir = Path(f"examples/cars/LatentMeshes/{epoch}")
    output_dir.mkdir(parents=True, exist_ok=True)

    progress = tqdm(
        zip(object_ids, latents),
        total=len(object_ids),
        desc="Generating Meshes",
    )
    for object_id, latent in progress:
        mesh = mesher.latent_to_mesh(latent.detach())
        mesh.compute_vertex_normals()
        mesh_path = output_dir / f"{object_id}.ply"
        o3d.io.write_triangle_mesh(str(mesh_path), mesh)
        print(f"Saved mesh for object_id {object_id} at {mesh_path}")
def render_geometries_with_default_camera(geometries, height, width, visible=False):
    """
    Render a list of geometries with the Open3D legacy visualizer and its
    default camera. This requires a display.

    Args:
        geometries: list of Open3D geometries (meshes, point clouds, ...).
        height: int image height.
        width: int image width.
        visible: bool whether to show the window. Your machine must have a monitor.

    Returns:
        image: (H, W, 3) float32 np.ndarray image.
    """
    vis = o3d.visualization.Visualizer()
    vis.create_window(width=width, height=height, visible=visible)
    for geometry in geometries:
        vis.add_geometry(geometry)
    # NOTE(review): calling update_geometry right after add_geometry looks
    # redundant — presumably kept to force a refresh; confirm before removing.
    for geometry in geometries:
        vis.update_geometry(geometry)
    vis.poll_events()
    vis.update_renderer()
    buffer = vis.capture_screen_float_buffer()
    vis.destroy_window()
    return np.array(buffer)
def render_meshes_to_images(
    epoch,
    image_height=480,
    image_width=640,
    visible=False,
):
    """
    Render saved meshes to images.

    Reads meshes from examples/cars/LatentMeshes/<epoch> and writes one PNG
    per mesh to examples/cars/LatentImages/<epoch>.

    Args:
        epoch: int, epoch number of the meshes.
        image_height: int, the height of the rendered images.
        image_width: int, the width of the rendered images.
        visible: bool, if True the Open3D window will be shown (requires a display).
    """
    mesh_dir = Path(f"examples/cars/LatentMeshes/{epoch}")
    output_image_dir = Path(f"examples/cars/LatentImages/{epoch}")
    output_image_dir.mkdir(parents=True, exist_ok=True)

    for mesh_path in tqdm(
        list(mesh_dir.glob("*.ply")), desc="Rendering meshes to images"
    ):
        mesh = o3d.io.read_triangle_mesh(str(mesh_path))
        mesh.compute_vertex_normals()
        axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)
        im_render = render_geometries_with_default_camera(
            geometries=[mesh, axes],
            height=image_height,
            width=image_width,
            visible=visible,
        )
        im_path = output_image_dir / f"{mesh_path.stem}.png"
        ct.io.imwrite(im_path, im_render)
        print(f"Saved rendered image to {im_path}")
def main():
    """CLI entry point: run exactly one of --explore / --convert / --render."""
    parser = argparse.ArgumentParser(
        description="Explore or convert train and recon latent codes to mesh."
    )
    parser.add_argument(
        "--explore",
        action="store_true",
        help="Explore train vs recon latent codes",
    )
    parser.add_argument(
        "--convert",
        action="store_true",
        help="Convert train latent codes to mesh",
    )
    parser.add_argument(
        "--render",
        action="store_true",
        help="Render the mesh to images",
    )
    parser.add_argument(
        "--epoch",
        type=int,
        default=2000,
        help="Epoch number for the files to use (default: 2000)",
    )
    args = parser.parse_args()

    # Exactly one of the mutually exclusive action flags must be set.
    action_flags = (args.explore, args.convert, args.render)
    if sum(action_flags) != 1:
        raise ValueError("Please specify exactly one action.")

    if args.explore:
        explore(args.epoch)
    elif args.convert:
        all_train_latents_to_mesh(args.epoch)
    elif args.render:
        render_meshes_to_images(args.epoch)
# Script entry point.
if __name__ == "__main__":
    main()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/gen_valid_splits.py | Python | from pathlib import Path
import json
import argparse
from explore_results import render_geometries_with_default_camera
import open3d as o3d
from tqdm import tqdm
import camtools as ct
import numpy as np
_data_root = Path("data")
_synset_id = "02958343"  # ShapeNetV2 synset id used throughout this script

# Results from pre-processing
_norm_params_dir = _data_root / "NormalizationParameters" / "ShapeNetV2" / _synset_id
_sdf_samples_dir = _data_root / "SdfSamples" / "ShapeNetV2" / _synset_id
_surface_samples_dir = _data_root / "SurfaceSamples" / "ShapeNetV2" / _synset_id

# Rendered point clouds
_legit_point_cloud_dir = Path("examples") / "cars" / "LegitPointClouds"
_select_point_cloud_dir = Path("examples") / "cars" / "SelectPointClouds"

# Output lists (the f-string prefixes were unnecessary: no placeholders)
_legit_json_path = Path("examples/splits/sv2_cars_legit.json")
_train_json_path = Path("examples/splits/sv2_cars_train.json")
_test_json_path = Path("examples/splits/sv2_cars_test.json")
def get_object_ids_from_dir(data_dir):
    """
    Collect object IDs from the files directly inside ``data_dir``.

    An object ID is a filename with its extension stripped; directories
    are ignored.
    """
    return {entry.stem for entry in data_dir.iterdir() if entry.is_file()}
def write_object_ids(synset_id, object_ids, output_file_path):
    """
    Save object IDs as ``{"ShapeNetV2": {synset_id: [...]}}`` JSON and
    report how many entries were written.
    """
    payload = {"ShapeNetV2": {synset_id: object_ids}}

    # Make sure the destination directory exists before writing.
    output_file_path.parent.mkdir(parents=True, exist_ok=True)

    with open(output_file_path, "w") as f:
        json.dump(payload, f, indent=2)

    print(f"{len(object_ids)} items written to {output_file_path}")
def gen_legit():
    """Write the object IDs present in all three pre-processing outputs to
    the legit split JSON."""
    norm_ids = get_object_ids_from_dir(_norm_params_dir)
    sdf_ids = get_object_ids_from_dir(_sdf_samples_dir)
    surface_ids = get_object_ids_from_dir(_surface_samples_dir)

    # Only IDs that survived every pre-processing stage count as legit.
    common_ids = sorted(norm_ids & sdf_ids & surface_ids)

    write_object_ids(
        synset_id=_synset_id,
        object_ids=common_ids,
        output_file_path=_legit_json_path,
    )
def render_legit():
    """Render every legit object's surface point cloud to a cropped PNG."""
    # Read json to get object ids.
    with open(_legit_json_path, "r") as f:
        object_ids = json.load(f)["ShapeNetV2"][_synset_id]

    for object_id in tqdm(object_ids, desc="Rendering point clouds"):
        ply_path = _surface_samples_dir / f"{object_id}.ply"
        pcd = o3d.io.read_point_cloud(str(ply_path))

        # Rotate pcd along y-axis by 90 degrees.
        rotation_y_90 = np.array(
            [
                [0, 0, 1, 0],
                [0, 1, 0, 0],
                [-1, 0, 0, 0],
                [0, 0, 0, 1],
            ]
        )
        pcd.transform(rotation_y_90)

        im_render = render_geometries_with_default_camera(
            geometries=[pcd], height=480, width=640, visible=False
        )
        im_render = ct.image.crop_white_boarders(im_render)

        _legit_point_cloud_dir.mkdir(parents=True, exist_ok=True)
        out_path = _legit_point_cloud_dir / f"{object_id}.png"
        ct.io.imwrite(out_path, im_render)
def gen_train_test_split():
    """Split the selected object IDs into train/test JSON lists.

    Bug fix: the original referenced undefined names ``object_ids`` and
    ``synset_id`` (NameError at runtime).  Per the --gen_train_test_split
    help text, the IDs are loaded from the manually curated
    SelectPointClouds directory — TODO confirm this is the intended
    source — and the module-level ``_synset_id`` is used.
    """
    # IDs of the manually selected point clouds, sorted for determinism.
    object_ids = sorted(get_object_ids_from_dir(_select_point_cloud_dir))

    # Split object IDs for train and test.
    split_index = int(len(object_ids) * 0.9)
    train_object_ids = object_ids[:split_index]
    test_object_ids = object_ids[split_index:]

    write_object_ids(
        synset_id=_synset_id,
        object_ids=train_object_ids,
        output_file_path=_train_json_path,
    )
    write_object_ids(
        synset_id=_synset_id,
        object_ids=test_object_ids,
        output_file_path=_test_json_path,
    )
def main():
    """CLI entry point: run exactly one of the three generation actions."""
    parser = argparse.ArgumentParser(
        description="Explore or convert train and recon latent codes to mesh."
    )
    parser.add_argument(
        "--gen_legit",
        action="store_true",
        help="Generate legit (can be pre-processed) sample list to sv2_cars_legit.json",
    )
    parser.add_argument(
        "--render_legit",
        action="store_true",
        help="Render all point clouds in sv2_cars_legit.json to examples/cars/LegitPointClouds",
    )
    parser.add_argument(
        "--gen_train_test_split",
        action="store_true",
        help="Generate train (0.95) and test (0.05) split from examples/cars/SelectPointClouds",
    )
    parser.add_argument(
        "--epoch",
        type=int,
        default=2000,
        help="Epoch number for the files to use (default: 2000)",
    )
    args = parser.parse_args()

    actions = (args.gen_legit, args.render_legit, args.gen_train_test_split)
    if sum(actions) != 1:
        raise ValueError("Exactly one action must be specified")

    if args.gen_legit:
        gen_legit()
    elif args.render_legit:
        render_legit()
    elif args.gen_train_test_split:
        gen_train_test_split()
# Script entry point.
if __name__ == "__main__":
    main()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/generate_training_meshes.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import argparse
import json
import os
import numpy as np
import torch
import lit.extern.deepsdf.deep_sdf
import lit.extern.deepsdf.deep_sdf.workspace as ws
def code_to_mesh(experiment_directory, checkpoint, keep_normalized=False):
    """Decode every training-set latent code of a checkpoint into a mesh.

    Loads the decoder and latent codes from ``experiment_directory``, then
    writes one mesh per training instance under TrainingMeshes/<epoch>/.
    When ``keep_normalized`` is False, each mesh is de-normalized using the
    per-instance offset/scale stored by pre-processing.
    """
    specs_filename = os.path.join(experiment_directory, "specs.json")

    if not os.path.isfile(specs_filename):
        raise Exception(
            'The experiment directory does not include specifications file "specs.json"'
        )

    specs = json.load(open(specs_filename))

    # Resolve the decoder architecture module named in the specs.
    arch = __import__(
        "lit.extern.deepsdf.networks." + specs["NetworkArch"], fromlist=["Decoder"]
    )

    latent_size = specs["CodeLength"]

    decoder = arch.Decoder(latent_size, **specs["NetworkSpecs"])

    # Checkpoint was saved from a DataParallel model: wrap, load, unwrap.
    decoder = torch.nn.DataParallel(decoder)

    saved_model_state = torch.load(
        os.path.join(experiment_directory, ws.model_params_subdir, checkpoint + ".pth")
    )
    saved_model_epoch = saved_model_state["epoch"]

    decoder.load_state_dict(saved_model_state["model_state_dict"])

    decoder = decoder.module.cuda()
    decoder.eval()

    latent_vectors = ws.load_latent_vectors(experiment_directory, checkpoint)

    train_split_file = specs["TrainSplit"]

    with open(train_split_file, "r") as f:
        train_split = json.load(f)

    data_source = specs["DataSource"]

    # NOTE(review): the module imports `lit.extern.deepsdf.deep_sdf`, which
    # does not bind the bare name `deep_sdf` used here and below — confirm
    # this resolves at runtime.
    instance_filenames = deep_sdf.data.get_instance_filenames(data_source, train_split)

    print(len(instance_filenames), " vs ", len(latent_vectors))

    for i, latent_vector in enumerate(latent_vectors):
        # Instance paths look like "<dataset>/<class>/<instance>.<ext>".
        dataset_name, class_name, instance_name = instance_filenames[i].split("/")
        instance_name = instance_name.split(".")[0]

        print("{} {} {}".format(dataset_name, class_name, instance_name))

        mesh_dir = os.path.join(
            experiment_directory,
            ws.training_meshes_subdir,
            str(saved_model_epoch),
            dataset_name,
            class_name,
        )

        print(mesh_dir)

        if not os.path.isdir(mesh_dir):
            os.makedirs(mesh_dir)

        mesh_filename = os.path.join(mesh_dir, instance_name)

        print(instance_filenames[i])

        # De-normalization parameters; left as None to keep the mesh in
        # the normalized scale when requested.
        offset = None
        scale = None

        if not keep_normalized:
            normalization_params = np.load(
                ws.get_normalization_params_filename(
                    data_source, dataset_name, class_name, instance_name
                )
            )
            offset = normalization_params["offset"]
            scale = normalization_params["scale"]

        with torch.no_grad():
            deep_sdf.mesh.create_mesh(
                decoder,
                latent_vector,
                mesh_filename,
                N=256,
                max_batch=int(2**18),
                offset=offset,
                scale=scale,
            )
if __name__ == "__main__":
    # CLI wrapper around code_to_mesh().
    arg_parser = argparse.ArgumentParser(
        description="Use a trained DeepSDF decoder to generate a mesh given a latent code."
    )
    arg_parser.add_argument(
        "--experiment",
        "-e",
        dest="experiment_directory",
        required=True,
        help="The experiment directory which includes specifications and saved model "
        + "files to use for reconstruction",
    )
    arg_parser.add_argument(
        "--checkpoint",
        "-c",
        dest="checkpoint",
        default="latest",
        help="The checkpoint weights to use. This can be a number indicated an epoch "
        + "or 'latest' for the latest weights (this is the default)",
    )
    arg_parser.add_argument(
        "--keep_normalization",
        dest="keep_normalized",
        default=False,
        action="store_true",
        help="If set, keep the meshes in the normalized scale.",
    )

    # NOTE(review): the module imports `lit.extern.deepsdf.deep_sdf`, which
    # does not bind the bare name `deep_sdf` used below — confirm this
    # resolves at runtime.
    deep_sdf.add_common_args(arg_parser)

    args = arg_parser.parse_args()

    deep_sdf.configure_logging(args)

    code_to_mesh(args.experiment_directory, args.checkpoint, args.keep_normalized)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/networks/deep_sdf_decoder.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
    """DeepSDF auto-decoder MLP.

    Maps a concatenated (latent code, xyz) input to a scalar SDF value.
    Supports optional weight/layer norm, dropout, re-injecting the network
    input at selected layers (``latent_in``), and appending xyz at every
    hidden layer (``xyz_in_all``).
    """

    def __init__(
        self,
        latent_size,
        dims,
        dropout=None,
        dropout_prob=0.0,
        norm_layers=(),
        latent_in=(),
        weight_norm=False,
        xyz_in_all=None,
        use_tanh=False,
        latent_dropout=False,
    ):
        """Build the MLP layer stack.

        Args:
            latent_size: length L of the shape code.
            dims: hidden widths; the full stack is [L+3] + dims + [1].
            dropout: iterable of layer indices that get dropout, or None.
            dropout_prob: dropout probability for those layers.
            norm_layers: layer indices that get LayerNorm (or weight norm
                when ``weight_norm`` is set).
            latent_in: layer indices whose input is re-concatenated with
                the original network input.
            weight_norm: use weight normalization on ``norm_layers``
                instead of LayerNorm.
            xyz_in_all: if truthy, append xyz to every hidden layer input.
            use_tanh: apply an extra tanh at the final linear layer.
            latent_dropout: apply dropout (p=0.2) to the latent part of
                the input.
        """
        super(Decoder, self).__init__()

        # NOTE(review): dead code — defined but never used; candidate for
        # removal.
        def make_scene():
            return []

        dims = [latent_size + 3] + dims + [1]

        self.num_layers = len(dims)
        self.norm_layers = norm_layers
        self.latent_in = latent_in
        self.latent_dropout = latent_dropout
        if self.latent_dropout:
            self.lat_dp = nn.Dropout(0.2)

        self.xyz_in_all = xyz_in_all
        self.weight_norm = weight_norm

        for layer in range(0, self.num_layers - 1):
            # Layers feeding a latent_in layer shrink their output so the
            # re-concatenated network input restores the expected width.
            if layer + 1 in latent_in:
                out_dim = dims[layer + 1] - dims[0]
            else:
                out_dim = dims[layer + 1]
                if self.xyz_in_all and layer != self.num_layers - 2:
                    out_dim -= 3
            if weight_norm and layer in self.norm_layers:
                setattr(
                    self,
                    "lin" + str(layer),
                    nn.utils.weight_norm(nn.Linear(dims[layer], out_dim)),
                )
            else:
                setattr(self, "lin" + str(layer), nn.Linear(dims[layer], out_dim))

            if (
                (not weight_norm)
                and self.norm_layers is not None
                and layer in self.norm_layers
            ):
                setattr(self, "bn" + str(layer), nn.LayerNorm(out_dim))

        self.use_tanh = use_tanh
        if use_tanh:
            self.tanh = nn.Tanh()
        self.relu = nn.ReLU()

        self.dropout_prob = dropout_prob
        self.dropout = dropout
        # Final tanh squashing the SDF output to (-1, 1).
        self.th = nn.Tanh()

    def forward_batched(self, input):
        """
        Handles batched input of shape (B, N, L+3)
        """
        B, N, _ = input.shape
        xyz = input[..., -3:]  # Shape: (B, N, 3)

        if input.shape[-1] > 3 and self.latent_dropout:
            latent_vecs = input[..., :-3]  # Shape: (B, N, L)
            latent_vecs = F.dropout(latent_vecs, p=0.2, training=self.training)
            x = torch.cat([latent_vecs, xyz], dim=-1)  # Shape: (B, N, L+3)
        else:
            x = input  # Shape: (B, N, L+3)

        for layer in range(0, self.num_layers - 1):
            lin = getattr(self, "lin" + str(layer))
            if layer in self.latent_in:
                # Shape: (B, N, 2*L+3) or similar
                x = torch.cat([x, input], dim=-1)
            elif layer != 0 and self.xyz_in_all:
                # Shape: (B, N, current_dim+3)
                x = torch.cat([x, xyz], dim=-1)
            x = lin(x)

            # Apply tanh on the last layer if use_tanh is True
            if layer == self.num_layers - 2 and self.use_tanh:
                x = self.tanh(x)

            # Apply ReLU and optionally BatchNorm on intermediate layers
            if layer < self.num_layers - 2:
                if (
                    self.norm_layers is not None
                    and layer in self.norm_layers
                    and not self.weight_norm
                ):
                    bn = getattr(self, "bn" + str(layer))
                    # BatchNorm1d expects (B, C, L) but we have (B, N, C), so we
                    # need to permute
                    # NOTE(review): `bn` is nn.LayerNorm, and after this
                    # permute it normalizes over the last axis (the N points
                    # axis), unlike forward_single which normalizes over
                    # channels — confirm this asymmetry is intended.
                    x = x.permute(0, 2, 1)  # Shape: (B, C, N)
                    x = bn(x)
                    x = x.permute(0, 2, 1)  # Shape: (B, N, C)
                x = self.relu(x)
                if self.dropout is not None and layer in self.dropout:
                    x = F.dropout(x, p=self.dropout_prob, training=self.training)

        if hasattr(self, "th"):
            x = self.th(x)

        return x

    def forward_batched_naive(self, input):
        """
        Handles batched input of shape (B, N, L+3) by calling forward_single()
        for each item in the batch.
        """
        B, N, _ = input.shape
        outputs = []
        for b in range(B):
            single_input = input[b]  # Shape: (N, L+3)
            single_output = self.forward_single(single_input)  # Process a single item
            outputs.append(single_output)
        # Concatenate all single outputs along the batch dimension Each
        # single_output is expected to have shape (N, 1), so the result will
        # have shape (B, N, 1)
        outputs = torch.stack(outputs, dim=0)
        return outputs

    # input: N x (L+3)
    def forward_single(self, input):
        """Forward pass for a single unbatched input of shape (N, L+3)."""
        xyz = input[:, -3:]

        if input.shape[1] > 3 and self.latent_dropout:
            latent_vecs = input[:, :-3]
            latent_vecs = F.dropout(latent_vecs, p=0.2, training=self.training)
            x = torch.cat([latent_vecs, xyz], 1)
        else:
            x = input

        for layer in range(0, self.num_layers - 1):
            lin = getattr(self, "lin" + str(layer))
            if layer in self.latent_in:
                x = torch.cat([x, input], 1)
            elif layer != 0 and self.xyz_in_all:
                x = torch.cat([x, xyz], 1)
            x = lin(x)
            # last layer Tanh
            if layer == self.num_layers - 2 and self.use_tanh:
                x = self.tanh(x)
            if layer < self.num_layers - 2:
                if (
                    self.norm_layers is not None
                    and layer in self.norm_layers
                    and not self.weight_norm
                ):
                    bn = getattr(self, "bn" + str(layer))
                    x = bn(x)
                x = self.relu(x)
                if self.dropout is not None and layer in self.dropout:
                    x = F.dropout(x, p=self.dropout_prob, training=self.training)

        if hasattr(self, "th"):
            x = self.th(x)

        return x

    def forward(self, input):
        """
        Forward pass for the decoder module that supports both single and batched inputs.

        This method automatically handles inputs of different dimensions: - If
        the input is of shape (N, L+3), it's treated as a single input. - If the
        input is of shape (B, N, L+3), it's treated as batched input.

        Args:
            input (torch.Tensor): Input tensor of shape (N, L+3) for single
                input or (B, N, L+3) for batched input.

        Returns:
            torch.Tensor: The output of the decoder. Shape (N, 1) for single
                input, (B, N, 1) for batched input.
        """
        if input.dim() == 2:
            # (N, 1)
            return self.forward_single(input)
        elif input.dim() == 3:
            # (B, N, 1)
            return self.forward_batched(input)
        else:
            raise ValueError("Unsupported input dimensions")
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/plot_log.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import lit.extern.deepsdf.deep_sdf
import lit.extern.deepsdf.deep_sdf.workspace as ws
def running_mean(x, N):
    """Return the moving average of ``x`` over windows of size N.

    Output length is ``len(x) - N + 1``.
    """
    # Prepend a zero so window sums are differences of cumulative sums.
    padded_cumsum = np.cumsum(np.insert(x, 0, 0))
    window_sums = padded_cumsum[N:] - padded_cumsum[:-N]
    return window_sums / float(N)
def load_logs(experiment_directory, type):
    """Load Logs.pth from the experiment directory and plot one chart.

    Args:
        experiment_directory: directory containing ``Logs.pth``.
        type: one of "loss", "learning_rate", "time", "lat_mag",
            "param_mag".  (Shadows the ``type`` builtin — name kept for
            interface compatibility.)

    Raises:
        Exception: on an unrecognized plot type.
    """
    logs = torch.load(os.path.join(experiment_directory, ws.logs_filename))

    logging.info("latest epoch is {}".format(logs["epoch"]))

    num_iters = len(logs["loss"])
    iters_per_epoch = num_iters / logs["epoch"]

    logging.info("{} iters per epoch".format(iters_per_epoch))

    # Two smoothing windows: short (41) and long (1601) running means.
    smoothed_loss_41 = running_mean(logs["loss"], 41)
    smoothed_loss_1601 = running_mean(logs["loss"], 1601)

    fig, ax = plt.subplots()

    if type == "loss":
        # Raw loss plus both smoothed curves; the smoothed curves start at
        # half-window offsets so they stay centered.
        ax.plot(
            np.arange(num_iters) / iters_per_epoch,
            logs["loss"],
            "#82c6eb",
            np.arange(20, num_iters - 20) / iters_per_epoch,
            smoothed_loss_41,
            "#2a9edd",
            np.arange(800, num_iters - 800) / iters_per_epoch,
            smoothed_loss_1601,
            "#16628b",
        )
        ax.set(xlabel="Epoch", ylabel="Loss", title="Training Loss")
    elif type == "learning_rate":
        # One column per parameter group (network weights, latent codes).
        combined_lrs = np.array(logs["learning_rate"])
        ax.plot(
            np.arange(combined_lrs.shape[0]),
            combined_lrs[:, 0],
            np.arange(combined_lrs.shape[0]),
            combined_lrs[:, 1],
        )
        ax.set(xlabel="Epoch", ylabel="Learning Rate", title="Learning Rates")
    elif type == "time":
        ax.plot(logs["timing"], "#833eb7")
        ax.set(xlabel="Epoch", ylabel="Time per Epoch (s)", title="Timing")
    elif type == "lat_mag":
        ax.plot(logs["latent_magnitude"])
        ax.set(xlabel="Epoch", ylabel="Magnitude", title="Latent Vector Magnitude")
    elif type == "param_mag":
        for _name, mags in logs["param_magnitude"].items():
            ax.plot(mags)
        ax.set(xlabel="Epoch", ylabel="Magnitude", title="Parameter Magnitude")
    else:
        raise Exception('unrecognized plot type "{}"'.format(type))

    ax.grid()
    plt.show()
if __name__ == "__main__":
    import argparse

    # CLI wrapper around load_logs().
    arg_parser = argparse.ArgumentParser(description="Plot DeepSDF training logs")
    arg_parser.add_argument(
        "--experiment",
        "-e",
        dest="experiment_directory",
        required=True,
        help="The experiment directory. This directory should include experiment "
        + "specifications in 'specs.json', and logging will be done in this directory "
        + "as well",
    )
    arg_parser.add_argument("--type", "-t", dest="type", default="loss")

    # NOTE(review): the module imports `lit.extern.deepsdf.deep_sdf`, which
    # does not bind the bare name `deep_sdf` used below — confirm this
    # resolves at runtime.
    deep_sdf.add_common_args(arg_parser)

    args = arg_parser.parse_args()

    deep_sdf.configure_logging(args)

    load_logs(args.experiment_directory, args.type)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/preprocess_data.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import argparse
import concurrent.futures
import json
import logging
import os
import subprocess

import lit.extern.deepsdf.deep_sdf
import lit.extern.deepsdf.deep_sdf.workspace as ws
from lit.extern.deepsdf import deep_sdf
def filter_classes_glob(patterns, classes):
    """Return the classes that match at least one of the glob *patterns*.

    The result order is unspecified (set-based de-duplication), matching the
    original behavior.
    """
    import fnmatch

    matched = {
        class_name
        for pattern in patterns
        for class_name in classes
        if fnmatch.fnmatch(class_name, pattern)
    }
    return list(matched)
def filter_classes_regex(patterns, classes):
    """Return the classes whose name matches (anchored at the start, as with
    ``re.match``) at least one regex in *patterns*.

    The result order is unspecified (set-based de-duplication), matching the
    original behavior.
    """
    import re

    matched = set()
    for pattern in patterns:
        compiled = re.compile(pattern)
        matched.update(name for name in classes if compiled.match(name))
    return list(matched)
def filter_classes(patterns, classes):
    """Filter *classes* by *patterns*.

    The first entry of *patterns* may be a matching-mode marker, ``"glob"`` or
    ``"regex"``; the remaining entries are the actual patterns. Without a
    marker, all entries are treated as glob patterns.
    """
    # Bug fix: the mode marker lives in `patterns`, so it must be stripped
    # from `patterns` (patterns[1:]). The old code sliced `classes[1:]`,
    # which passed the literal "glob"/"regex" marker through as a pattern and
    # silently dropped the first class from consideration.
    if patterns[0] == "glob":
        return filter_classes_glob(patterns[1:], classes)
    elif patterns[0] == "regex":
        return filter_classes_regex(patterns[1:], classes)
    else:
        return filter_classes_glob(patterns, classes)
def process_mesh(mesh_filepath, target_filepath, executable, additional_args):
    """Run the native preprocessing *executable* on one mesh.

    Invokes ``executable -m mesh_filepath -o target_filepath`` plus any
    *additional_args*, discarding the subprocess's stdout, and blocks until
    the subprocess finishes.
    """
    logging.info(mesh_filepath + " --> " + target_filepath)
    command = [executable, "-m", mesh_filepath, "-o", target_filepath] + additional_args

    # subprocess.run() is the idiomatic replacement for Popen()+wait(). Like
    # the original, no check=True: a non-zero exit status is tolerated
    # silently (failed conversions are simply skipped by downstream steps).
    subprocess.run(command, stdout=subprocess.DEVNULL)
def append_data_source_map(data_dir, name, source):
    """Record ``name -> abspath(source)`` in the dataset's data-source map.

    The map is a JSON file inside *data_dir*. Re-registering an existing name
    is a no-op when the source matches; a different source for the same name
    raises RuntimeError. The file is only rewritten when a new name is added.
    """
    map_path = ws.get_data_source_map_filename(data_dir)
    print("data sources stored to " + map_path)

    source_map = {}
    if os.path.isfile(map_path):
        with open(map_path, "r") as f:
            source_map = json.load(f)

    abs_source = os.path.abspath(source)
    if name in source_map:
        # Same name may only ever point at the same source directory.
        if source_map[name] != abs_source:
            raise RuntimeError(
                "Cannot add data with the same name and a different source."
            )
    else:
        source_map[name] = abs_source
        with open(map_path, "w") as f:
            json.dump(source_map, f, indent=2)
if __name__ == "__main__":
    # CLI entry point: parse the data-source / dataset-destination options,
    # collect per-mesh work items from the split file, then farm the mesh
    # conversions out to a thread pool running the native sampling binaries.
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description="Pre-processes data from a data source and append the results to "
        + "a dataset.",
    )
    arg_parser.add_argument(
        "--data_dir",
        "-d",
        dest="data_dir",
        required=True,
        help="The directory which holds all preprocessed data.",
    )
    arg_parser.add_argument(
        "--source",
        "-s",
        dest="source_dir",
        required=True,
        help="The directory which holds the data to preprocess and append.",
    )
    arg_parser.add_argument(
        "--name",
        "-n",
        dest="source_name",
        default=None,
        help="The name to use for the data source. If unspecified, it defaults to the "
        + "directory name.",
    )
    arg_parser.add_argument(
        "--split",
        dest="split_filename",
        required=True,
        help="A split filename defining the shapes to be processed.",
    )
    arg_parser.add_argument(
        "--skip",
        dest="skip",
        default=False,
        action="store_true",
        help="If set, previously-processed shapes will be skipped",
    )
    arg_parser.add_argument(
        "--threads",
        dest="num_threads",
        default=8,
        help="The number of threads to use to process the data.",
    )
    arg_parser.add_argument(
        "--test",
        "-t",
        dest="test_sampling",
        default=False,
        action="store_true",
        help="If set, the script will produce SDF samplies for testing",
    )
    arg_parser.add_argument(
        "--surface",
        dest="surface_sampling",
        default=False,
        action="store_true",
        help="If set, the script will produce mesh surface samples for evaluation. "
        + "Otherwise, the script will produce SDF samples for training.",
    )
    deep_sdf.add_common_args(arg_parser)
    args = arg_parser.parse_args()
    deep_sdf.configure_logging(args)
    additional_general_args = []
    # The compiled sampling binaries are expected in bin/ next to this script.
    deepsdf_dir = os.path.dirname(os.path.abspath(__file__))
    if args.surface_sampling:
        # Surface mode: sample visible surface points (.ply) for evaluation.
        executable = os.path.join(deepsdf_dir, "bin/SampleVisibleMeshSurface")
        subdir = ws.surface_samples_subdir
        extension = ".ply"
    else:
        # SDF mode: sample signed-distance values (.npz) for training.
        executable = os.path.join(deepsdf_dir, "bin/PreprocessMesh")
        subdir = ws.sdf_samples_subdir
        extension = ".npz"

        # "-t" switches the binary to its test-sampling parameters.
        if args.test_sampling:
            additional_general_args += ["-t"]
    with open(args.split_filename, "r") as f:
        split = json.load(f)
    if args.source_name is None:
        args.source_name = os.path.basename(os.path.normpath(args.source_dir))
    dest_dir = os.path.join(args.data_dir, subdir, args.source_name)
    logging.info(
        "Preprocessing data from "
        + args.source_dir
        + " and placing the results in "
        + dest_dir
    )
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    if args.surface_sampling:
        # Surface sampling additionally stores per-instance normalization
        # parameters under their own subdirectory.
        normalization_param_dir = os.path.join(
            args.data_dir, ws.normalization_param_subdir, args.source_name
        )
        if not os.path.isdir(normalization_param_dir):
            os.makedirs(normalization_param_dir)
    append_data_source_map(args.data_dir, args.source_name, args.source_dir)
    class_directories = split[args.source_name]
    # Work items: (mesh_path, output_path, per-mesh extra args) tuples,
    # collected first and consumed by the thread pool below.
    meshes_targets_and_specific_args = []
    for class_dir in class_directories:
        class_path = os.path.join(args.source_dir, class_dir)
        instance_dirs = class_directories[class_dir]
        logging.debug(
            "Processing " + str(len(instance_dirs)) + " instances of class " + class_dir
        )
        target_dir = os.path.join(dest_dir, class_dir)
        if not os.path.isdir(target_dir):
            os.mkdir(target_dir)
        for instance_dir in instance_dirs:
            shape_dir = os.path.join(class_path, instance_dir)
            processed_filepath = os.path.join(target_dir, instance_dir + extension)
            if args.skip and os.path.isfile(processed_filepath):
                logging.debug("skipping " + processed_filepath)
                continue
            try:
                mesh_filename = deep_sdf.data.find_mesh_in_directory(shape_dir)
                specific_args = []
                if args.surface_sampling:
                    # Tell the binary where to write this instance's
                    # normalization (offset/scale) parameters.
                    normalization_param_target_dir = os.path.join(
                        normalization_param_dir, class_dir
                    )
                    if not os.path.isdir(normalization_param_target_dir):
                        os.mkdir(normalization_param_target_dir)
                    normalization_param_filename = os.path.join(
                        normalization_param_target_dir, instance_dir + ".npz"
                    )
                    specific_args = ["-n", normalization_param_filename]
                meshes_targets_and_specific_args.append(
                    (
                        os.path.join(shape_dir, mesh_filename),
                        processed_filepath,
                        specific_args,
                    )
                )
            except deep_sdf.data.NoMeshFileError:
                # Instances without exactly one mesh are skipped with a warning.
                logging.warning("No mesh found for instance " + instance_dir)
            except deep_sdf.data.MultipleMeshFileError:
                logging.warning("Multiple meshes found for instance " + instance_dir)
    with concurrent.futures.ThreadPoolExecutor(
        max_workers=int(args.num_threads)
    ) as executor:
        for (
            mesh_filepath,
            target_filepath,
            specific_args,
        ) in meshes_targets_and_specific_args:
            executor.submit(
                process_mesh,
                mesh_filepath,
                target_filepath,
                executable,
                specific_args + additional_general_args,
            )

        # Redundant with the context manager's implicit shutdown, but harmless.
        executor.shutdown()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/reconstruct.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import argparse
import json
import logging
import os
import random
import time

import torch

import lit.extern.deepsdf.deep_sdf
import lit.extern.deepsdf.deep_sdf.workspace as ws
from lit.extern.deepsdf import deep_sdf
def reconstruct(
    decoder,
    num_iterations,
    latent_size,
    test_sdf,
    stat,
    clamp_dist,
    num_samples=30000,
    lr=5e-4,
    l2reg=False,
):
    """Optimize a latent code so that *decoder* reproduces *test_sdf*.

    The decoder weights are frozen; only the latent vector is optimized with
    Adam against a clamped L1 loss on randomly drawn SDF samples.

    Args:
        decoder: Trained DeepSDF decoder taking [latent | xyz] rows.
        num_iterations: Number of optimization steps.
        latent_size: Dimensionality of the latent code.
        test_sdf: SDF samples of the target shape, in the format expected by
            deep_sdf.data.unpack_sdf_samples_from_ram.
        stat: Either a float std (latent initialized from N(0, stat)) or a
            (mean, std) tensor pair to sample the initialization from.
        clamp_dist: Clamp applied symmetrically to GT and predicted SDF.
        num_samples: SDF samples drawn per iteration.
        lr: Initial learning rate; divided by 10 at the halfway point.
        l2reg: If True, add a small L2 penalty on the latent code.

    Returns:
        Tuple of (final loss as a numpy scalar, optimized latent tensor).
    """

    def adjust_learning_rate(
        initial_lr, optimizer, num_iterations, decreased_by, adjust_lr_every
    ):
        # Step decay: divide the lr by `decreased_by` every `adjust_lr_every`
        # iterations (with the values below: once, at the halfway point).
        lr = initial_lr * ((1 / decreased_by) ** (num_iterations // adjust_lr_every))
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr

    decreased_by = 10
    adjust_lr_every = int(num_iterations / 2)

    # `stat` is either a plain float std (random init) or (mean, std) tensors.
    if type(stat) == type(0.1):
        latent = torch.ones(1, latent_size).normal_(mean=0, std=stat).cuda()
    else:
        latent = torch.normal(stat[0].detach(), stat[1].detach()).cuda()

    # Only the latent code is optimized; the decoder stays frozen.
    latent.requires_grad = True

    optimizer = torch.optim.Adam([latent], lr=lr)

    loss_num = 0
    loss_l1 = torch.nn.L1Loss()

    for e in range(num_iterations):
        decoder.eval()
        # Fresh random subset of the target shape's SDF samples each step.
        sdf_data = deep_sdf.data.unpack_sdf_samples_from_ram(
            test_sdf, num_samples
        ).cuda()
        xyz = sdf_data[:, 0:3]
        sdf_gt = sdf_data[:, 3].unsqueeze(1)

        sdf_gt = torch.clamp(sdf_gt, -clamp_dist, clamp_dist)

        adjust_learning_rate(lr, optimizer, e, decreased_by, adjust_lr_every)

        optimizer.zero_grad()

        # Broadcast the single latent code across all sampled points.
        latent_inputs = latent.expand(num_samples, -1)

        inputs = torch.cat([latent_inputs, xyz], 1).cuda()

        pred_sdf = decoder(inputs)

        # TODO: why is this needed?
        if e == 0:
            pred_sdf = decoder(inputs)

        pred_sdf = torch.clamp(pred_sdf, -clamp_dist, clamp_dist)

        loss = loss_l1(pred_sdf, sdf_gt)
        if l2reg:
            # Small L2 prior keeping the latent near the origin.
            loss += 1e-4 * torch.mean(latent.pow(2))
        loss.backward()
        optimizer.step()

        if e % 50 == 0:
            logging.debug(loss.cpu().data.numpy())
            logging.debug(e)
            logging.debug(latent.norm())
        loss_num = loss.cpu().data.numpy()

    return loss_num, latent
if __name__ == "__main__":
    # CLI entry point: load a trained decoder checkpoint, then for every .npz
    # SDF sample file in the split, optimize a latent code (reconstruct) and
    # extract/save the mesh and latent.
    arg_parser = argparse.ArgumentParser(
        description="Use a trained DeepSDF decoder to reconstruct a shape given SDF "
        + "samples."
    )
    arg_parser.add_argument(
        "--experiment",
        "-e",
        dest="experiment_directory",
        required=True,
        help="The experiment directory which includes specifications and saved model "
        + "files to use for reconstruction",
    )
    arg_parser.add_argument(
        "--checkpoint",
        "-c",
        dest="checkpoint",
        default="latest",
        help="The checkpoint weights to use. This can be a number indicated an epoch "
        + "or 'latest' for the latest weights (this is the default)",
    )
    arg_parser.add_argument(
        "--data",
        "-d",
        dest="data_source",
        required=True,
        help="The data source directory.",
    )
    arg_parser.add_argument(
        "--split",
        "-s",
        dest="split_filename",
        required=True,
        help="The split to reconstruct.",
    )
    arg_parser.add_argument(
        "--iters",
        dest="iterations",
        default=800,
        help="The number of iterations of latent code optimization to perform. "
        "As the loss is defined over the SDF field, the number of iterations "
        "does not need to be very large. To verify this, print the loss over the"
        "iterations during optimization.",
    )
    arg_parser.add_argument(
        "--resolution",
        "-r",
        dest="voxel_resolution",
        default=256,
        type=int,
        help="Voxel resolution (vr) in a cube of size 2, voxel_size = 2.0 / (vr - 1)",
    )
    arg_parser.add_argument(
        "--skip",
        dest="skip",
        action="store_true",
        help="Skip meshes which have already been reconstructed.",
    )
    deep_sdf.add_common_args(arg_parser)
    args = arg_parser.parse_args()
    deep_sdf.configure_logging(args)

    def empirical_stat(latent_vecs, indices):
        # Mean/variance over a set of training latents. Currently unused: the
        # reconstruct() call below passes a fixed init std instead (see the
        # "[emp_mean,emp_var]" comment at the call site).
        lat_mat = torch.zeros(0).cuda()
        for ind in indices:
            lat_mat = torch.cat([lat_mat, latent_vecs[ind]], 0)
        mean = torch.mean(lat_mat, 0)
        var = torch.var(lat_mat, 0)
        return mean, var

    specs_filename = os.path.join(args.experiment_directory, "specs.json")
    if not os.path.isfile(specs_filename):
        raise Exception(
            'The experiment directory does not include specifications file "specs.json"'
        )
    specs = json.load(open(specs_filename))
    # Import the decoder architecture module named in the specs.
    arch = __import__(
        "lit.extern.deepsdf.networks." + specs["NetworkArch"], fromlist=["Decoder"]
    )
    latent_size = specs["CodeLength"]
    decoder = arch.Decoder(latent_size, **specs["NetworkSpecs"])
    # Checkpoints were saved from a DataParallel-wrapped model, so wrap before
    # loading the state dict, then unwrap (.module) for single-GPU inference.
    decoder = torch.nn.DataParallel(decoder)
    saved_model_state = torch.load(
        os.path.join(
            args.experiment_directory, ws.model_params_subdir, args.checkpoint + ".pth"
        )
    )
    saved_model_epoch = saved_model_state["epoch"]
    decoder.load_state_dict(saved_model_state["model_state_dict"])
    decoder = decoder.module.cuda()
    with open(args.split_filename, "r") as f:
        split = json.load(f)
    npz_filenames = deep_sdf.data.get_instance_filenames(args.data_source, split)
    random.shuffle(npz_filenames)
    logging.debug(decoder)
    err_sum = 0.0
    repeat = 1
    save_latvec_only = False
    rerun = 0
    # Outputs are grouped under the checkpoint's epoch number.
    reconstruction_dir = os.path.join(
        args.experiment_directory, ws.reconstructions_subdir, str(saved_model_epoch)
    )
    if not os.path.isdir(reconstruction_dir):
        os.makedirs(reconstruction_dir)
    reconstruction_meshes_dir = os.path.join(
        reconstruction_dir, ws.reconstruction_meshes_subdir
    )
    if not os.path.isdir(reconstruction_meshes_dir):
        os.makedirs(reconstruction_meshes_dir)
    reconstruction_codes_dir = os.path.join(
        reconstruction_dir, ws.reconstruction_codes_subdir
    )
    if not os.path.isdir(reconstruction_codes_dir):
        os.makedirs(reconstruction_codes_dir)
    for ii, npz in enumerate(npz_filenames):
        if "npz" not in npz:
            continue
        full_filename = os.path.join(args.data_source, ws.sdf_samples_subdir, npz)
        logging.debug("loading {}".format(npz))
        data_sdf = deep_sdf.data.read_sdf_samples_into_ram(full_filename)
        for k in range(repeat):
            # npz[:-4] strips the ".npz" extension from the instance name.
            if rerun > 1:
                mesh_filename = os.path.join(
                    reconstruction_meshes_dir, npz[:-4] + "-" + str(k + rerun)
                )
                latent_filename = os.path.join(
                    reconstruction_codes_dir, npz[:-4] + "-" + str(k + rerun) + ".pth"
                )
            else:
                mesh_filename = os.path.join(reconstruction_meshes_dir, npz[:-4])
                latent_filename = os.path.join(
                    reconstruction_codes_dir, npz[:-4] + ".pth"
                )
            if (
                args.skip
                and os.path.isfile(mesh_filename + ".ply")
                and os.path.isfile(latent_filename)
            ):
                continue
            logging.info("reconstructing {}".format(npz))
            # Shuffle the positive/negative sample tensors before optimizing.
            data_sdf[0] = data_sdf[0][torch.randperm(data_sdf[0].shape[0])]
            data_sdf[1] = data_sdf[1][torch.randperm(data_sdf[1].shape[0])]
            start = time.time()
            err, latent = reconstruct(
                decoder,
                int(args.iterations),
                latent_size,
                data_sdf,
                0.01,  # [emp_mean,emp_var],
                0.1,
                num_samples=8000,
                lr=5e-3,
                l2reg=True,
            )
            logging.debug("reconstruct time: {}".format(time.time() - start))
            err_sum += err
            logging.debug("current_error avg: {}".format((err_sum / (ii + 1))))
            logging.debug(ii)
            logging.debug("latent: {}".format(latent.detach().cpu().numpy()))
            decoder.eval()
            if not os.path.exists(os.path.dirname(mesh_filename)):
                os.makedirs(os.path.dirname(mesh_filename))
            if not save_latvec_only:
                start = time.time()
                with torch.no_grad():
                    # Evaluate the decoder on a voxel grid and run marching
                    # cubes; writes <mesh_filename>.ply.
                    deep_sdf.mesh.create_mesh(
                        decoder,
                        latent,
                        mesh_filename,
                        N=args.voxel_resolution,
                        max_batch=int(2**18),
                    )
                logging.debug("total time: {}".format(time.time() - start))
            if not os.path.exists(os.path.dirname(latent_filename)):
                os.makedirs(os.path.dirname(latent_filename))
            torch.save(latent.unsqueeze(0), latent_filename)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/show_interior_samples.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import ctypes
import sys

import OpenGL.GL as gl
import pypangolin as pango

import lit.extern.deepsdf.deep_sdf.data
from lit.extern.deepsdf import deep_sdf
if __name__ == "__main__":
    # Usage: show_interior_samples.py <path/to/samples.npz>
    # Visualizes the interior (negative-SDF) sample points with Pangolin.
    npz_filename = sys.argv[1]
    data = deep_sdf.data.read_sdf_samples_into_ram(npz_filename)
    # data[1] holds the negative (inside-surface) samples; columns 0:3 are the
    # xyz coordinates, converted to a contiguous float32 array for OpenGL.
    xyz_neg = data[1][:, 0:3].numpy().astype(ctypes.c_float)
    win = pango.CreateWindowAndBind("Interior Samples | " + npz_filename, 640, 480)
    gl.glEnable(gl.GL_DEPTH_TEST)
    # Camera: pinhole projection looking at the origin from (0, 0.5, -3).
    pm = pango.ProjectionMatrix(640, 480, 420, 420, 320, 240, 0.1, 1000)
    mv = pango.ModelViewLookAt(-0, 0.5, -3, 0, 0, 0, pango.AxisY)
    s_cam = pango.OpenGlRenderState(pm, mv)
    handler = pango.Handler3D(s_cam)
    d_cam = (
        pango.CreateDisplay()
        .SetBounds(
            pango.Attach(0),
            pango.Attach(1),
            pango.Attach(0),
            pango.Attach(1),
            -640.0 / 480.0,
        )
        .SetHandler(handler)
    )
    pango.CreatePanel("ui").SetBounds(
        pango.Attach(0), pango.Attach(1), pango.Attach(0), pango.Attach(0)
    )
    # Render loop: draw the points as a white GL vertex array each frame.
    while not pango.ShouldQuit():
        # NOTE(review): bitwise OR (|) is the conventional way to combine GL
        # clear masks; '+' happens to be equivalent here because the two mask
        # bits are disjoint.
        gl.glClear(gl.GL_COLOR_BUFFER_BIT + gl.GL_DEPTH_BUFFER_BIT)
        d_cam.Activate(s_cam)
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        gl.glColor3ub(255, 255, 255)
        # Point the GL vertex array directly at the numpy buffer (zero-copy).
        gl.glVertexPointer(
            3, gl.GL_FLOAT, 0, xyz_neg.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
        )
        gl.glDrawArrays(gl.GL_POINTS, 0, xyz_neg.shape[0])
        gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
        pango.FinishFrame()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/src/PreprocessMesh.cpp | C++ | // Copyright 2004-present Facebook. All Rights Reserved.
#include <cnpy.h>
#include <pangolin/geometry/geometry.h>
#include <pangolin/geometry/glgeometry.h>
#include <pangolin/gl/gl.h>
#include <pangolin/pangolin.h>
#include <CLI/CLI.hpp>
#include <chrono>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <random>
#include <string>
#include <vector>
#include "Utils.h"
extern pangolin::GlSlProgram GetShaderProgram();
// Draws points from the surface of `geom`, with per-triangle probability
// proportional to triangle area, until `surfpts` holds `num_sample` points.
void SampleFromSurface(pangolin::Geometry& geom,
                       std::vector<Eigen::Vector3f>& surfpts,
                       int num_sample) {
    float total_area = 0.0f;

    // Running (unnormalized) CDF over triangle areas, used below for
    // inverse-CDF triangle selection.
    std::vector<float> cdf_by_area;

    std::vector<Eigen::Vector3i> linearized_faces;

    // Flatten the face index buffers of every sub-object into one list.
    for (const auto& object : geom.objects) {
        auto it_vert_indices = object.second.attributes.find("vertex_indices");
        if (it_vert_indices != object.second.attributes.end()) {
            pangolin::Image<uint32_t> ibo =
                    pangolin::get<pangolin::Image<uint32_t>>(
                            it_vert_indices->second);

            for (int i = 0; i < ibo.h; ++i) {
                linearized_faces.emplace_back(ibo(0, i), ibo(1, i), ibo(2, i));
            }
        }
    }

    pangolin::Image<float> vertices = pangolin::get<pangolin::Image<float>>(
            geom.buffers["geometry"].attributes["vertex"]);

    // Accumulate triangle areas into the CDF.
    for (const Eigen::Vector3i& face : linearized_faces) {
        float area = TriangleArea((Eigen::Vector3f)Eigen::Map<Eigen::Vector3f>(
                                          vertices.RowPtr(face(0))),
                                  (Eigen::Vector3f)Eigen::Map<Eigen::Vector3f>(
                                          vertices.RowPtr(face(1))),
                                  (Eigen::Vector3f)Eigen::Map<Eigen::Vector3f>(
                                          vertices.RowPtr(face(2))));

        // Degenerate triangles can produce NaN; treat them as zero area so
        // they are never selected.
        if (std::isnan(area)) {
            area = 0.f;
        }

        total_area += area;

        if (cdf_by_area.empty()) {
            cdf_by_area.push_back(area);

        } else {
            cdf_by_area.push_back(cdf_by_area.back() + area);
        }
    }

    std::random_device seeder;
    std::mt19937 generator(seeder());
    std::uniform_real_distribution<float> rand_dist(0.0, total_area);

    while ((int)surfpts.size() < num_sample) {
        // Inverse-CDF sampling: pick a triangle proportional to its area...
        float tri_sample = rand_dist(generator);
        std::vector<float>::iterator tri_index_iter =
                lower_bound(cdf_by_area.begin(), cdf_by_area.end(), tri_sample);
        int tri_index = tri_index_iter - cdf_by_area.begin();

        const Eigen::Vector3i& face = linearized_faces[tri_index];

        // ...then a uniformly random point within that triangle.
        surfpts.push_back(SamplePointFromTriangle(
                Eigen::Map<Eigen::Vector3f>(vertices.RowPtr(face(0))),
                Eigen::Map<Eigen::Vector3f>(vertices.RowPtr(face(1))),
                Eigen::Map<Eigen::Vector3f>(vertices.RowPtr(face(2)))));
    }
}
// Generates SDF samples for training: two noise-perturbed copies of every
// surface point (one per noise level) plus `num_rand_samples` points drawn
// uniformly from the bounding cube. Each sample's signed distance is
// estimated from its `num_votes` nearest surface points, with the sign
// decided by a unanimous normal-direction vote; samples with a split vote
// are discarded. On return, `xyz` holds only the kept positions and `sdfs`
// their signed distances.
void SampleSDFNearSurface(KdVertexListTree& kdTree,
                          std::vector<Eigen::Vector3f>& vertices,
                          std::vector<Eigen::Vector3f>& xyz_surf,
                          std::vector<Eigen::Vector3f>& normals,
                          std::vector<Eigen::Vector3f>& xyz,
                          std::vector<float>& sdfs,
                          int num_rand_samples,
                          float variance,
                          float second_variance,
                          float bounding_cube_dim,
                          int num_votes) {
    float stdv = sqrt(variance);

    std::random_device seeder;
    std::mt19937 generator(seeder());
    std::uniform_real_distribution<float> rand_dist(0.0, 1.0);
    std::vector<Eigen::Vector3f> xyz_used;
    std::vector<Eigen::Vector3f> second_samples;

    std::random_device rd;
    std::mt19937 rng(rd());
    std::uniform_int_distribution<int> vert_ind(0, vertices.size() - 1);
    // ("perterb" is a historical misspelling of "perturb", kept as-is.)
    std::normal_distribution<float> perterb_norm(0, stdv);
    std::normal_distribution<float> perterb_second(0, sqrt(second_variance));

    // For every surface point, emit one sample per noise scale.
    for (unsigned int i = 0; i < xyz_surf.size(); i++) {
        Eigen::Vector3f surface_p = xyz_surf[i];
        Eigen::Vector3f samp1 = surface_p;
        Eigen::Vector3f samp2 = surface_p;

        for (int j = 0; j < 3; j++) {
            samp1[j] += perterb_norm(rng);
            samp2[j] += perterb_second(rng);
        }

        xyz.push_back(samp1);
        xyz.push_back(samp2);
    }

    // Additional samples drawn uniformly from the bounding cube centered at
    // the origin with side length `bounding_cube_dim`.
    for (int s = 0; s < (int)(num_rand_samples); s++) {
        xyz.push_back(Eigen::Vector3f(rand_dist(generator) * bounding_cube_dim -
                                              bounding_cube_dim / 2,
                                      rand_dist(generator) * bounding_cube_dim -
                                              bounding_cube_dim / 2,
                                      rand_dist(generator) * bounding_cube_dim -
                                              bounding_cube_dim / 2));
    }

    // now compute sdf for each xyz sample
    for (int s = 0; s < (int)xyz.size(); s++) {
        Eigen::Vector3f samp_vert = xyz[s];
        std::vector<int> cl_indices(num_votes);
        std::vector<float> cl_distances(num_votes);
        kdTree.knnSearch(samp_vert.data(), num_votes, cl_indices.data(),
                         cl_distances.data());

        int num_pos = 0;
        // Magnitude from the nearest neighbor (ind == 0); sign from voting.
        float sdf;

        for (int ind = 0; ind < num_votes; ind++) {
            uint32_t cl_ind = cl_indices[ind];
            Eigen::Vector3f cl_vert = vertices[cl_ind];
            Eigen::Vector3f ray_vec = samp_vert - cl_vert;
            float ray_vec_leng = ray_vec.norm();

            if (ind == 0) {
                // if close to the surface, use point plane distance
                if (ray_vec_leng < stdv)
                    sdf = fabs(normals[cl_ind].dot(ray_vec));
                else
                    sdf = ray_vec_leng;
            }

            // Sign vote: positive when the sample lies on the outward-normal
            // side of this neighbor.
            float d = normals[cl_ind].dot(ray_vec / ray_vec_leng);
            if (d > 0) num_pos++;
        }

        // all or nothing , else ignore the point
        if ((num_pos == 0) || (num_pos == num_votes)) {
            xyz_used.push_back(samp_vert);
            if (num_pos <= (num_votes / 2)) {
                // Unanimously inside: flip to a negative distance.
                sdf = -sdf;
            }
            sdfs.push_back(sdf);
        }
    }

    // Keep only the samples that survived the vote.
    xyz = xyz_used;
}
// Serializes the SDF samples as a flat (N, 4) float .npy array whose rows
// are [x, y, z, sdf].
void writeSDFToNPY(std::vector<Eigen::Vector3f>& xyz,
                   std::vector<float>& sdfs,
                   std::string filename) {
    const unsigned int num_vert = xyz.size();
    std::vector<float> data;
    data.reserve(num_vert * 4);
    for (unsigned int i = 0; i < num_vert; i++) {
        const Eigen::Vector3f& v = xyz[i];
        data.push_back(v[0]);
        data.push_back(v[1]);
        data.push_back(v[2]);
        data.push_back(sdfs[i]);
    }
    cnpy::npy_save(filename, &data[0], {(long unsigned int)num_vert, 4}, "w");
}
// Partitions the SDF samples into outside (sdf > 0) and inside/on-surface
// (sdf <= 0) flat (N, 4) float arrays and saves them as the "pos" and "neg"
// entries of an .npz archive. With print_num, also prints the two counts.
void writeSDFToNPZ(std::vector<Eigen::Vector3f>& xyz,
                   std::vector<float>& sdfs,
                   std::string filename,
                   bool print_num = false) {
    const unsigned int num_vert = xyz.size();
    std::vector<float> pos;
    std::vector<float> neg;

    for (unsigned int i = 0; i < num_vert; i++) {
        const Eigen::Vector3f& v = xyz[i];
        const float s = sdfs[i];
        // Route the row into the positive or negative bucket.
        std::vector<float>& bucket = (s > 0) ? pos : neg;
        bucket.push_back(v[0]);
        bucket.push_back(v[1]);
        bucket.push_back(v[2]);
        bucket.push_back(s);
    }

    // "w" creates/truncates the archive; "a" appends the second entry.
    cnpy::npz_save(filename, "pos", &pos[0],
                   {(long unsigned int)(pos.size() / 4.0), 4}, "w");
    cnpy::npz_save(filename, "neg", &neg[0],
                   {(long unsigned int)(neg.size() / 4.0), 4}, "a");

    if (print_num) {
        std::cout << "pos num: " << pos.size() / 4.0 << std::endl;
        std::cout << "neg num: " << neg.size() / 4.0 << std::endl;
    }
}
// Writes the SDF samples as a colored ASCII PLY point cloud for inspection:
// positive samples in blue, negative in red, intensity scaled by |sdf|.
// `neg_only` keeps only sdf <= 0 points; `pos_only` only sdf >= 0 points.
// NOTE(review): when both flags are false, a sample with sdf == 0 satisfies
// both write conditions below and is emitted twice while being counted only
// once in the header — confirm whether exact-zero samples can occur.
void writeSDFToPLY(std::vector<Eigen::Vector3f>& xyz,
                   std::vector<float>& sdfs,
                   std::string filename,
                   bool neg_only = true,
                   bool pos_only = false) {
    // Count the vertices that will be written so the header matches the body.
    int num_verts;
    if (neg_only) {
        num_verts = 0;
        for (int i = 0; i < (int)sdfs.size(); i++) {
            float s = sdfs[i];
            if (s <= 0) num_verts++;
        }
    } else if (pos_only) {
        num_verts = 0;
        for (int i = 0; i < (int)sdfs.size(); i++) {
            float s = sdfs[i];
            if (s >= 0) num_verts++;
        }
    } else {
        num_verts = xyz.size();
    }

    std::ofstream plyFile;
    plyFile.open(filename);
    plyFile << "ply\n";
    plyFile << "format ascii 1.0\n";
    plyFile << "element vertex " << num_verts << "\n";
    plyFile << "property float x\n";
    plyFile << "property float y\n";
    plyFile << "property float z\n";
    plyFile << "property uchar red\n";
    plyFile << "property uchar green\n";
    plyFile << "property uchar blue\n";
    plyFile << "end_header\n";

    for (int i = 0; i < (int)sdfs.size(); i++) {
        Eigen::Vector3f v = xyz[i];
        float sdf = sdfs[i];
        bool neg = (sdf <= 0);
        bool pos = (sdf >= 0);
        if (neg) sdf = -sdf;
        // Map |sdf| to a 0-255 color channel, saturating at 255.
        int sdf_i = std::min((int)(sdf * 255), 255);
        if (!neg_only && pos)
            plyFile << v[0] << " " << v[1] << " " << v[2] << " " << 0 << " "
                    << 0 << " " << sdf_i << "\n";
        if (!pos_only && neg)
            plyFile << v[0] << " " << v[1] << " " << v[2] << " " << sdf_i << " "
                    << 0 << " " << 0 << "\n";
    }
    plyFile.close();
}
// Entry point of PreprocessMesh: loads a mesh, renders it from 100 sphere
// viewpoints to collect its *visible* surface points and normals, estimates
// signed distances for points sampled near the surface and in the bounding
// cube, and writes the result as .npy/.npz (optionally also a debug .ply).
int main(int argc, char** argv) {
    std::string meshFileName;
    bool vis = false;

    std::string npyFileName;
    std::string plyFileNameOut;
    std::string spatial_samples_npz;
    bool save_ply = true;
    bool test_flag = false;
    float variance = 0.005;
    int num_sample = 500000;
    // Rejection thresholds on the wrongly-oriented observation/triangle
    // ratios computed below.
    float rejection_criteria_obs = 0.02f;
    float rejection_criteria_tri = 0.03f;
    float num_samp_near_surf_ratio = 47.0f / 50.0f;

    CLI::App app{"PreprocessMesh"};
    app.add_option("-m", meshFileName, "Mesh File Name for Reading")
            ->required();
    app.add_flag("-v", vis, "enable visualization");
    app.add_option("-o", npyFileName, "Save npy pc to here")->required();
    app.add_option("--ply", plyFileNameOut, "Save ply pc to here");
    app.add_option("-s", num_sample, "Save ply pc to here");
    app.add_option("--var", variance, "Set Variance");
    app.add_flag("--sply", save_ply, "save ply point cloud for visualization");
    app.add_flag("-t", test_flag, "test_flag");
    app.add_option("-n", spatial_samples_npz, "spatial samples from file");

    CLI11_PARSE(app, argc, argv);

    // Test mode uses wider noise and a smaller sample budget.
    if (test_flag) variance = 0.05;

    float second_variance = variance / 10;
    std::cout << "variance: " << variance << " second: " << second_variance
              << std::endl;
    if (test_flag) {
        second_variance = variance / 100;
        num_samp_near_surf_ratio = 45.0f / 50.0f;
        num_sample = 250000;
    }

    std::cout << spatial_samples_npz << std::endl;

    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
    glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);

    pangolin::Geometry geom = pangolin::LoadGeometry(meshFileName);

    std::cout << geom.objects.size() << " objects" << std::endl;

    // linearize the object indices
    {
        int total_num_faces = 0;

        for (const auto& object : geom.objects) {
            auto it_vert_indices =
                    object.second.attributes.find("vertex_indices");
            if (it_vert_indices != object.second.attributes.end()) {
                pangolin::Image<uint32_t> ibo =
                        pangolin::get<pangolin::Image<uint32_t>>(
                                it_vert_indices->second);

                total_num_faces += ibo.h;
            }
        }

        // const int total_num_indices = total_num_faces * 3;
        pangolin::ManagedImage<uint8_t> new_buffer(3 * sizeof(uint32_t),
                                                   total_num_faces);

        pangolin::Image<uint32_t> new_ibo =
                new_buffer.UnsafeReinterpret<uint32_t>().SubImage(
                        0, 0, 3, total_num_faces);

        int index = 0;

        // Copy every sub-object's faces into the single merged index buffer.
        for (const auto& object : geom.objects) {
            auto it_vert_indices =
                    object.second.attributes.find("vertex_indices");
            if (it_vert_indices != object.second.attributes.end()) {
                pangolin::Image<uint32_t> ibo =
                        pangolin::get<pangolin::Image<uint32_t>>(
                                it_vert_indices->second);

                for (int i = 0; i < ibo.h; ++i) {
                    new_ibo.Row(index).CopyFrom(ibo.Row(i));
                    ++index;
                }
            }
        }

        // Replace the per-object face buffers with one merged "mesh" object.
        geom.objects.clear();
        auto faces = geom.objects.emplace(std::string("mesh"),
                                          pangolin::Geometry::Element());

        faces->second.Reinitialise(3 * sizeof(uint32_t), total_num_faces);

        faces->second.CopyFrom(new_buffer);

        new_ibo = faces->second.UnsafeReinterpret<uint32_t>().SubImage(
                0, 0, 3, total_num_faces);
        faces->second.attributes["vertex_indices"] = new_ibo;
    }

    // remove textures
    geom.textures.clear();

    pangolin::Image<uint32_t> modelFaces =
            pangolin::get<pangolin::Image<uint32_t>>(
                    geom.objects.begin()->second.attributes["vertex_indices"]);

    // Normalize the mesh into its bounding cube; max_dist is the half-extent
    // used for camera placement below.
    float max_dist = BoundingCubeNormalization(geom, true);

    // An OpenGL context is required even without visualization (1x1 window).
    if (vis)
        pangolin::CreateWindowAndBind("Main", 640, 480);
    else
        pangolin::CreateWindowAndBind("Main", 1, 1);
    glEnable(GL_DEPTH_TEST);
    // Disable any smoothing/dithering that would blur the rendered normal
    // and position buffers we read back below.
    glDisable(GL_DITHER);
    glDisable(GL_POINT_SMOOTH);
    glDisable(GL_LINE_SMOOTH);
    glDisable(GL_POLYGON_SMOOTH);
    glHint(GL_POINT_SMOOTH, GL_DONT_CARE);
    glHint(GL_LINE_SMOOTH, GL_DONT_CARE);
    glHint(GL_POLYGON_SMOOTH_HINT, GL_DONT_CARE);
    glDisable(GL_MULTISAMPLE_ARB);
    glShadeModel(GL_FLAT);

    // Define Projection and initial ModelView matrix
    pangolin::OpenGlRenderState s_cam(
            // pangolin::ProjectionMatrix(640,480,420,420,320,240,0.05,100),
            pangolin::ProjectionMatrixOrthographic(-max_dist, max_dist,
                                                   -max_dist, max_dist, 0, 2.5),
            pangolin::ModelViewLookAt(0, 0, -1, 0, 0, 0, pangolin::AxisY));
    pangolin::OpenGlRenderState s_cam2(
            pangolin::ProjectionMatrixOrthographic(-max_dist, max_dist,
                                                   max_dist, -max_dist, 0, 2.5),
            pangolin::ModelViewLookAt(0, 0, -1, 0, 0, 0, pangolin::AxisY));

    // Create Interactive View in window
    pangolin::Handler3D handler(s_cam);

    pangolin::GlGeometry gl_geom = pangolin::ToGlGeometry(geom);

    pangolin::GlSlProgram prog = GetShaderProgram();

    // Optional interactive preview of the loaded mesh (blocks until quit).
    if (vis) {
        pangolin::View& d_cam =
                pangolin::CreateDisplay()
                        .SetBounds(0.0, 1.0, 0.0, 1.0, -640.0f / 480.0f)
                        .SetHandler(&handler);

        while (!pangolin::ShouldQuit()) {
            // Clear screen and activate view to render into
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

            // glEnable(GL_CULL_FACE);
            // glCullFace(GL_FRONT);

            d_cam.Activate(s_cam);

            prog.Bind();
            prog.SetUniform("MVP", s_cam.GetProjectionModelViewMatrix());
            prog.SetUniform("V", s_cam.GetModelViewMatrix());

            pangolin::GlDraw(prog, gl_geom, nullptr);
            prog.Unbind();

            // Swap frames and Process Events
            pangolin::FinishFrame();
        }
    }

    // Create Framebuffer with attached textures
    size_t w = 400;
    size_t h = 400;
    pangolin::GlRenderBuffer zbuffer(w, h, GL_DEPTH_COMPONENT32);
    pangolin::GlTexture normals(w, h, GL_RGBA32F);
    pangolin::GlTexture vertices(w, h, GL_RGBA32F);
    pangolin::GlFramebuffer framebuffer(vertices, normals, zbuffer);

    // View points around a sphere.
    std::vector<Eigen::Vector3f> views =
            EquiDistPointsOnSphere(100, max_dist * 1.1);

    // Visible surface points and their normals, accumulated over all views.
    std::vector<Eigen::Vector4f> point_normals;
    std::vector<Eigen::Vector4f> point_verts;

    size_t num_tri = modelFaces.h;
    std::vector<Eigen::Vector4f> tri_id_normal_test(num_tri);
    for (size_t j = 0; j < num_tri; j++)
        tri_id_normal_test[j] = Eigen::Vector4f(0.0f, 0.0f, 0.0f, 0.0f);

    // Counters for detecting badly-oriented faces (used for rejection below).
    int total_obs = 0;
    int wrong_obs = 0;

    for (unsigned int v = 0; v < views.size(); v++) {
        // change camera location
        s_cam2.SetModelViewMatrix(
                pangolin::ModelViewLookAt(views[v][0], views[v][1], views[v][2],
                                          0, 0, 0, pangolin::AxisY));
        // Draw the scene to the framebuffer
        framebuffer.Bind();
        glViewport(0, 0, w, h);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        prog.Bind();
        prog.SetUniform("MVP", s_cam2.GetProjectionModelViewMatrix());
        prog.SetUniform("V", s_cam2.GetModelViewMatrix());
        prog.SetUniform("ToWorld", s_cam2.GetModelViewMatrix().Inverse());
        prog.SetUniform("slant_thr", -1.0f, 1.0f);
        prog.SetUniform("ttt", 1.0, 0, 0, 1);
        pangolin::GlDraw(prog, gl_geom, nullptr);
        prog.Unbind();

        framebuffer.Unbind();

        // Read back the rendered normals and keep the valid pixels.
        pangolin::TypedImage img_normals;
        normals.Download(img_normals);
        std::vector<Eigen::Vector4f> im_norms = ValidPointsAndTrisFromIm(
                img_normals.UnsafeReinterpret<Eigen::Vector4f>(),
                tri_id_normal_test, total_obs, wrong_obs);
        point_normals.insert(point_normals.end(), im_norms.begin(),
                             im_norms.end());

        // Read back the rendered world-space positions.
        pangolin::TypedImage img_verts;
        vertices.Download(img_verts);
        std::vector<Eigen::Vector4f> im_verts = ValidPointsFromIm(
                img_verts.UnsafeReinterpret<Eigen::Vector4f>());
        point_verts.insert(point_verts.end(), im_verts.begin(), im_verts.end());
    }

    // Count triangles flagged as badly oriented during the rendering passes.
    int bad_tri = 0;
    for (unsigned int t = 0; t < tri_id_normal_test.size(); t++) {
        if (tri_id_normal_test[t][3] < 0.0f) bad_tri++;
    }

    std::cout << meshFileName << std::endl;
    std::cout << (float)(wrong_obs) / float(total_obs) << std::endl;
    std::cout << (float)(bad_tri) / float(num_tri) << std::endl;
    float wrong_ratio = (float)(wrong_obs) / float(total_obs);
    float bad_tri_ratio = (float)(bad_tri) / float(num_tri);

    // Rejection is only reported, not enforced (the early return is
    // commented out).
    if (wrong_ratio > rejection_criteria_obs ||
        bad_tri_ratio > rejection_criteria_tri) {
        std::cout << "mesh rejected" << std::endl;
        // return 0;
    }

    // Strip the homogeneous coordinate from the collected points/normals.
    std::vector<Eigen::Vector3f> vertices2;
    // std::vector<Eigen::Vector3f> vertices_all;
    std::vector<Eigen::Vector3f> normals2;

    for (unsigned int v = 0; v < point_verts.size(); v++) {
        vertices2.push_back(point_verts[v].head<3>());
        normals2.push_back(point_normals[v].head<3>());
    }

    // kd-tree over the visible surface points for nearest-neighbor queries.
    KdVertexList kdVerts(vertices2);
    KdVertexListTree kdTree_surf(3, kdVerts);
    kdTree_surf.buildIndex();

    std::vector<Eigen::Vector3f> xyz;
    std::vector<Eigen::Vector3f> xyz_surf;
    std::vector<float> sdf;
    // NOTE(review): hard-codes the 47/50 ratio instead of reading
    // num_samp_near_surf_ratio, so the "-t" adjustment of that variable
    // above has no effect here — confirm intent.
    int num_samp_near_surf = (int)(47 * num_sample / 50);
    std::cout << "num_samp_near_surf: " << num_samp_near_surf << std::endl;
    // Each surface point later yields two perturbed samples, hence / 2.
    SampleFromSurface(geom, xyz_surf, num_samp_near_surf / 2);

    auto start = std::chrono::high_resolution_clock::now();
    SampleSDFNearSurface(kdTree_surf, vertices2, xyz_surf, normals2, xyz, sdf,
                         num_sample - num_samp_near_surf, variance,
                         second_variance, 2, 11);
    auto finish = std::chrono::high_resolution_clock::now();
    auto elapsed =
            std::chrono::duration_cast<std::chrono::seconds>(finish - start)
                    .count();
    std::cout << elapsed << std::endl;

    if (save_ply) {
        writeSDFToPLY(xyz, sdf, plyFileNameOut, false, true);
    }

    std::cout << "num points sampled: " << xyz.size() << std::endl;
    // Output format is inferred from the output filename.
    std::size_t save_npz = npyFileName.find("npz");
    if (save_npz == std::string::npos)
        writeSDFToNPY(xyz, sdf, npyFileName);
    else {
        writeSDFToNPZ(xyz, sdf, npyFileName, true);
    }

    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/src/SampleVisibleMeshSurface.cpp | C++ | // Copyright 2004-present Facebook. All Rights Reserved.
#include <cnpy.h>
#include <pangolin/geometry/geometry.h>
#include <pangolin/geometry/glgeometry.h>
#include <pangolin/gl/gl.h>
#include <pangolin/pangolin.h>
#include <CLI/CLI.hpp>
#include <chrono>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <random>
#include <string>
#include <vector>
#include "Utils.h"
extern pangolin::GlSlProgram GetShaderProgram();
// Writes `verts` to an ASCII PLY file at `outputfile`. The vertices are
// interpreted as a triangle soup: each consecutive triple forms one face in
// the emitted face element.
// NOTE(review): assumes verts.size() is a multiple of 3 — otherwise the last
// face row references out-of-range vertex indices; confirm against callers.
void SavePointsToPLY(const std::vector<Eigen::Vector3f>& verts,
                     const std::string outputfile) {
    const std::size_t num_verts = verts.size();

    Eigen::Vector3f v;

    std::ofstream plyFile;
    plyFile.open(outputfile);
    plyFile << "ply\n";
    plyFile << "format ascii 1.0\n";
    plyFile << "element vertex " << num_verts << "\n";
    plyFile << "property float x\n";
    plyFile << "property float y\n";
    plyFile << "property float z\n";
    plyFile << "element face " << (num_verts / 3) << "\n";
    plyFile << "property list uchar int vertex_index\n";
    plyFile << "end_header\n";

    // Vertex positions.
    // Portability fix: `uint` is a POSIX typedef, not standard C++; use
    // std::size_t, which also matches num_verts without conversion.
    for (std::size_t i = 0; i < num_verts; i++) {
        v = verts[i];
        plyFile << v[0] << " " << v[1] << " " << v[2] << "\n";
    }
    // One triangular face per consecutive vertex triple.
    for (std::size_t i = 0; i < num_verts; i += 3) {
        plyFile << "3 " << i << " " << (i + 1) << " " << (i + 2) << "\n";
    }

    plyFile.close();
}
// Save the {offset, scale} normalization pair (as produced by
// ComputeNormalizationParameters) into an .npz archive with two
// entries: "offset" (3 floats) and "scale" (1 float).
void SaveNormalizationParamsToNPZ(const Eigen::Vector3f offset,
                                  const float scale,
                                  const std::string filename) {
    // "w" creates/truncates the archive; "a" appends the second array.
    cnpy::npz_save(filename, "offset", offset.data(), {3ul}, "w");
    cnpy::npz_save(filename, "scale", &scale, {1ul}, "a");
}
// Rejection-sample `num_sample` points that lie on the *visible* shell
// of the mesh.
//
// Candidates are drawn area-uniformly from the mesh triangles; a
// candidate is kept only if it lies within `delta` of the tangent
// plane of its nearest visible surface sample (looked up via `kdTree`
// over `surface_vertices` / `surface_normals`).
//
// NOTE(review): if the mesh has zero usable area, or visible samples
// never come close enough, the while loop below never terminates —
// assumed not to happen for valid input meshes; confirm upstream.
void SampleFromSurfaceInside(pangolin::Geometry& geom,
                             std::vector<Eigen::Vector3f>& surfpts,
                             int num_sample,
                             KdVertexListTree& kdTree,
                             std::vector<Eigen::Vector3f>& surface_vertices,
                             std::vector<Eigen::Vector3f>& surface_normals,
                             float delta) {
    float total_area = 0.0f;

    // cdf_by_area[i] = cumulative area of faces [0..i]; used to draw a
    // face index with probability proportional to its area.
    std::vector<float> cdf_by_area;

    // Flatten every object's index buffer into one face list.
    std::vector<Eigen::Vector3i> linearized_faces;

    for (const auto& object : geom.objects) {
        auto it_vert_indices = object.second.attributes.find("vertex_indices");
        if (it_vert_indices != object.second.attributes.end()) {
            pangolin::Image<uint32_t> ibo =
                    pangolin::get<pangolin::Image<uint32_t>>(
                            it_vert_indices->second);

            for (uint i = 0; i < ibo.h; ++i) {
                linearized_faces.emplace_back(ibo(0, i), ibo(1, i), ibo(2, i));
            }
        }
    }

    pangolin::Image<float> vertices = pangolin::get<pangolin::Image<float>>(
            geom.buffers["geometry"].attributes["vertex"]);

    // Build the cumulative-area table; degenerate faces count as 0.
    for (const Eigen::Vector3i& face : linearized_faces) {
        float area = TriangleArea((Eigen::Vector3f)Eigen::Map<Eigen::Vector3f>(
                                          vertices.RowPtr(face(0))),
                                  (Eigen::Vector3f)Eigen::Map<Eigen::Vector3f>(
                                          vertices.RowPtr(face(1))),
                                  (Eigen::Vector3f)Eigen::Map<Eigen::Vector3f>(
                                          vertices.RowPtr(face(2))));

        if (std::isnan(area)) {
            area = 0.f;
        }

        total_area += area;

        if (cdf_by_area.empty()) {
            cdf_by_area.push_back(area);
        } else {
            cdf_by_area.push_back(cdf_by_area.back() + area);
        }
    }

    std::random_device seeder;
    std::mt19937 generator(seeder());
    std::uniform_real_distribution<float> rand_dist(0.0, total_area);

    while ((int)surfpts.size() < num_sample) {
        // Area-weighted face selection via inverse-CDF sampling, then
        // a uniform point inside the chosen triangle.
        float tri_sample = rand_dist(generator);
        std::vector<float>::iterator tri_index_iter =
                lower_bound(cdf_by_area.begin(), cdf_by_area.end(), tri_sample);
        int tri_index = tri_index_iter - cdf_by_area.begin();

        const Eigen::Vector3i& face = linearized_faces[tri_index];

        Eigen::Vector3f point = SamplePointFromTriangle(
                Eigen::Map<Eigen::Vector3f>(vertices.RowPtr(face(0))),
                Eigen::Map<Eigen::Vector3f>(vertices.RowPtr(face(1))),
                Eigen::Map<Eigen::Vector3f>(vertices.RowPtr(face(2))));

        // Now test if this point is on the shell
        int cl_index;
        float cl_distance;

        kdTree.knnSearch(point.data(), 1, &cl_index, &cl_distance);

        Eigen::Vector3f cl_vert = surface_vertices[cl_index];
        Eigen::Vector3f cl_normal = surface_normals[cl_index];

        // |normal · (closest - point)| = distance from the candidate
        // to the tangent plane of its closest visible sample.
        Eigen::Vector3f ray_vec = cl_vert - point;
        float point_plane = fabs(cl_normal.dot(ray_vec));

        if (point_plane > delta) continue;

        surfpts.push_back(point);
    }
}
// Render the mesh from 100 viewpoints on a sphere, collect the
// visible surface points/normals from the framebuffer, sample
// `num_sample` points constrained to that visible shell, and save the
// result as a PLY (plus optional normalization parameters as .npz).
int main(int argc, char** argv) {
    std::string meshFileName;
    std::string plyOutFile;
    std::string normalizationOutputFile;
    int num_sample = 30000;

    // Command-line options.
    CLI::App app{"SampleVisibleMeshSurface"};
    app.add_option("-m", meshFileName, "Mesh File Name for Reading")
            ->required();
    app.add_option("-o", plyOutFile, "Save npy pc to here")->required();
    app.add_option("-n", normalizationOutputFile, "Save normalization");
    // NOTE(review): help text looks copy-pasted; -s is the number of
    // surface samples, not an output path.
    app.add_option("-s", num_sample, "Save ply pc to here");

    CLI11_PARSE(app, argc, argv);

    // Tightly packed pixel transfers for the downloads below.
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
    glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);

    pangolin::Geometry geom = pangolin::LoadGeometry(meshFileName);

    std::cout << geom.objects.size() << " objects" << std::endl;

    // linearize the object indices
    {
        int total_num_faces = 0;

        for (const auto& object : geom.objects) {
            auto it_vert_indices =
                    object.second.attributes.find("vertex_indices");
            if (it_vert_indices != object.second.attributes.end()) {
                pangolin::Image<uint32_t> ibo =
                        pangolin::get<pangolin::Image<uint32_t>>(
                                it_vert_indices->second);

                total_num_faces += ibo.h;
            }
        }

        // const int total_num_indices = total_num_faces * 3;
        // One flat index buffer holding every face of every object.
        pangolin::ManagedImage<uint8_t> new_buffer(3 * sizeof(uint32_t),
                                                   total_num_faces);

        pangolin::Image<uint32_t> new_ibo =
                new_buffer.UnsafeReinterpret<uint32_t>().SubImage(
                        0, 0, 3, total_num_faces);

        int index = 0;

        for (const auto& object : geom.objects) {
            auto it_vert_indices =
                    object.second.attributes.find("vertex_indices");
            if (it_vert_indices != object.second.attributes.end()) {
                pangolin::Image<uint32_t> ibo =
                        pangolin::get<pangolin::Image<uint32_t>>(
                                it_vert_indices->second);

                for (int i = 0; i < ibo.h; ++i) {
                    new_ibo.Row(index).CopyFrom(ibo.Row(i));
                    ++index;
                }
            }
        }

        // Replace all objects with a single "mesh" object that owns
        // the merged index buffer.
        geom.objects.clear();
        auto faces = geom.objects.emplace(std::string("mesh"),
                                          pangolin::Geometry::Element());

        faces->second.Reinitialise(3 * sizeof(uint32_t), total_num_faces);

        faces->second.CopyFrom(new_buffer);

        new_ibo = faces->second.UnsafeReinterpret<uint32_t>().SubImage(
                0, 0, 3, total_num_faces);
        faces->second.attributes["vertex_indices"] = new_ibo;
    }

    // remove textures
    geom.textures.clear();

    pangolin::Image<uint32_t> modelFaces =
            pangolin::get<pangolin::Image<uint32_t>>(
                    geom.objects.begin()->second.attributes["vertex_indices"]);

    // float max_dist = BoundingCubeNormalization(geom, true);

    // 1x1 window: rendering happens offscreen in the framebuffer below.
    pangolin::CreateWindowAndBind("Main", 1, 1);
    glEnable(GL_DEPTH_TEST);

    // Disable smoothing/dithering so positions, normals, and primitive
    // ids are not blended across primitives.
    glDisable(GL_DITHER);
    glDisable(GL_POINT_SMOOTH);
    glDisable(GL_LINE_SMOOTH);
    glDisable(GL_POLYGON_SMOOTH);
    glHint(GL_POINT_SMOOTH, GL_DONT_CARE);
    glHint(GL_LINE_SMOOTH, GL_DONT_CARE);
    glHint(GL_POLYGON_SMOOTH_HINT, GL_DONT_CARE);
    glDisable(GL_MULTISAMPLE_ARB);
    glShadeModel(GL_FLAT);

    // Define Projection and initial ModelView matrix
    pangolin::OpenGlRenderState s_cam2(
            pangolin::ProjectionMatrixOrthographic(-1, 1, 1, -1, 0, 2.5),
            pangolin::ModelViewLookAt(0, 0, -1, 0, 0, 0, pangolin::AxisY));

    // Load geometry
    pangolin::GlGeometry gl_geom = pangolin::ToGlGeometry(geom);

    pangolin::GlSlProgram prog = GetShaderProgram();

    // Create Framebuffer with attached textures
    size_t w = 400;
    size_t h = 400;
    pangolin::GlRenderBuffer zbuffer(w, h, GL_DEPTH_COMPONENT32);
    pangolin::GlTexture normals(w, h, GL_RGBA32F);
    pangolin::GlTexture vertices(w, h, GL_RGBA32F);
    pangolin::GlFramebuffer framebuffer(vertices, normals, zbuffer);

    // View points around a sphere.
    std::vector<Eigen::Vector3f> views = EquiDistPointsOnSphere(100, 1.1);

    // Visible surface samples accumulated over all views.
    std::vector<Eigen::Vector4f> point_normals;
    std::vector<Eigen::Vector4f> point_verts;

    size_t num_tri = modelFaces.h;
    // Per-triangle normal "vote" used to flag inconsistent windings.
    std::vector<Eigen::Vector4f> tri_id_normal_test(num_tri);
    for (size_t j = 0; j < num_tri; j++)
        tri_id_normal_test[j] = Eigen::Vector4f(0.0f, 0.0f, 0.0f, 0.0f);

    int total_obs = 0;
    int wrong_obs = 0;

    for (unsigned int v = 0; v < views.size(); v++) {
        // change camera location
        s_cam2.SetModelViewMatrix(
                pangolin::ModelViewLookAt(views[v][0], views[v][1], views[v][2],
                                          0, 0, 0, pangolin::AxisY));
        // Draw the scene to the framebuffer
        framebuffer.Bind();
        glViewport(0, 0, w, h);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

        prog.Bind();
        prog.SetUniform("MVP", s_cam2.GetProjectionModelViewMatrix());
        prog.SetUniform("V", s_cam2.GetModelViewMatrix());
        prog.SetUniform("ToWorld", s_cam2.GetModelViewMatrix().Inverse());
        prog.SetUniform("slant_thr", -1.0f, 1.0f);
        prog.SetUniform("ttt", 1.0, 0, 0, 1);

        pangolin::GlDraw(prog, gl_geom, nullptr);
        prog.Unbind();

        framebuffer.Unbind();

        // Read back the rendered normals/positions, keeping only the
        // pixels that hit the mesh (w component non-zero).
        pangolin::TypedImage img_normals;
        normals.Download(img_normals);
        std::vector<Eigen::Vector4f> im_norms = ValidPointsAndTrisFromIm(
                img_normals.UnsafeReinterpret<Eigen::Vector4f>(),
                tri_id_normal_test, total_obs, wrong_obs);
        point_normals.insert(point_normals.end(), im_norms.begin(),
                             im_norms.end());

        pangolin::TypedImage img_verts;
        vertices.Download(img_verts);
        std::vector<Eigen::Vector4f> im_verts = ValidPointsFromIm(
                img_verts.UnsafeReinterpret<Eigen::Vector4f>());
        point_verts.insert(point_verts.end(), im_verts.begin(), im_verts.end());
    }

    // KD-tree over the xyz of the visible surface points.
    std::vector<Eigen::Vector3f> vertices2;
    // std::vector<Eigen::Vector3f> vertices_all;
    std::vector<Eigen::Vector3f> normals2;

    for (unsigned int v = 0; v < point_verts.size(); v++) {
        vertices2.push_back(point_verts[v].head<3>());
        normals2.push_back(point_normals[v].head<3>());
    }

    KdVertexList kdVerts(vertices2);
    KdVertexListTree kdTree_surf(3, kdVerts);
    kdTree_surf.buildIndex();

    // Sample points constrained to the visible shell and save them.
    std::vector<Eigen::Vector3f> surf_pts;

    SampleFromSurfaceInside(geom, surf_pts, num_sample, kdTree_surf, vertices2,
                            normals2, 0.00001);

    SavePointsToPLY(surf_pts, plyOutFile);

    if (!normalizationOutputFile.empty()) {
        const std::pair<Eigen::Vector3f, float> normalizationParams =
                ComputeNormalizationParameters(geom);
        SaveNormalizationParamsToNPZ(normalizationParams.first,
                                     normalizationParams.second,
                                     normalizationOutputFile);
    }

    std::cout << "ended correctly" << std::endl;
    return 0;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/src/ShaderProgram.cpp | C++ | // Copyright 2004-present Facebook. All Rights Reserved.
#include <pangolin/gl/glsl.h>
// GLSL program used by the surface samplers to render world-space
// positions and normals (plus primitive ids) into offscreen targets.
// The @start markers split the single source into vertex / geometry /
// fragment stages for pangolin's annotated-shader loader. The string
// content is consumed verbatim at runtime and must not be edited
// casually.
constexpr const char* shaderText = R"Shader(
@start vertex
#version 330 core

layout(location = 0) in vec3 vertex;
//layout(location = 2) in vec3 vertexNormal_model;

out vec4 position_world;
out vec4 position_camera;
out vec3 viewDirection_camera;
//out vec3 normal;

uniform mat4 MVP;
uniform mat4 V;

void main(){

    // Projected image coordinate
    gl_Position =  MVP * vec4(vertex,1);

    // world coordinate location of the vertex
    position_world = vec4(vertex,1);
    position_camera = V * vec4(vertex, 1);

    viewDirection_camera = normalize(vec3(0,0,0) - position_camera.xyz);

}

@start geometry
#version 330

layout ( triangles ) in;
layout ( triangle_strip, max_vertices = 3 ) out;

in vec4 position_world[];
in vec3 viewDirection_camera[];

out vec3 normal_camera;
out vec3 normal_world;
out vec4 xyz_world;
out vec3 viewDirection_cam;
out vec4 xyz_camera;

uniform mat4 V;

void main()
{
    vec3 A = position_world[1].xyz - position_world[0].xyz;
    vec3 B = position_world[2].xyz - position_world[0].xyz;
    vec3 normal = normalize(cross(A,B));
    vec3 normal_cam = (V * vec4(normal,0)).xyz;

    gl_Position = gl_in[0].gl_Position;
    normal_camera = normal_cam;
    normal_world = normal;
    xyz_world = position_world[0];
    xyz_camera = V * xyz_world;
    viewDirection_cam = viewDirection_camera[0];
    gl_PrimitiveID = gl_PrimitiveIDIn;
    EmitVertex();

    gl_Position = gl_in[1].gl_Position;
    normal_camera = normal_cam;
    normal_world = normal;
    xyz_world = position_world[1];
    xyz_camera = V * xyz_world;
    viewDirection_cam = viewDirection_camera[1];
    gl_PrimitiveID = gl_PrimitiveIDIn;
    EmitVertex();

    gl_Position = gl_in[2].gl_Position;
    normal_camera = normal_cam;
    normal_world = normal;
    xyz_world = position_world[2];
    xyz_camera = V * xyz_world;
    viewDirection_cam = viewDirection_camera[2];
    gl_PrimitiveID = gl_PrimitiveIDIn;
    EmitVertex();
    EndPrimitive();
}

@start fragment
#version 330 core

in vec3 viewDirection_cam;
in vec3 normal_world;
in vec3 normal_camera;
in vec4 xyz_world;
in vec4 xyz_camera;
// in int gl_PrimitiveID ;

uniform vec2 slant_thr;
varying vec4 ttt;
uniform mat4 V;
uniform mat4 ToWorld;

layout(location = 0) out vec4 FragColor;
layout(location = 1) out vec4 FragColor2;
layout(location = 2) out vec4 FragColor3;
layout(location = 3) out vec4 FragColor4;

void main(){
    //vec3 view_vector = normalize(vec3(0,0,1) - xyz_camera.xyz);
    vec3 view_vector = vec3(0,0,1);

    vec4 test = vec4(0,0,0,1);
    // Check if we need to flip the normal.
    vec3 normal_world_cor;// = normal_world;
    float d = dot(normalize(normal_camera), normalize(view_vector));
    if (abs(d) < 0.001) {
        FragColor = vec4(0,0,0,0);
        FragColor2 = vec4(0,0,0,0);
        FragColor3 = vec4(0,0,0,0);
        return;
    }
    else{
        if (d < 0) {
            test = vec4(0,1,0,1);
            normal_world_cor = -normal_world;
        } else {
            normal_world_cor= normal_world;
        }

        FragColor = xyz_world;
        FragColor.w = gl_PrimitiveID + 1.0f;

        FragColor2 = vec4(normalize(normal_world_cor),1);
        FragColor2.w = gl_PrimitiveID + 1.0f;
    }
}
)Shader";

// Compile and link the annotated shader above into a ready-to-use
// pangolin program object (returned by value).
pangolin::GlSlProgram GetShaderProgram() {
    pangolin::GlSlProgram program;

    program.AddShader(pangolin::GlSlAnnotatedShader, shaderText);
    program.Link();

    return program;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/src/Utils.cpp | C++ | // Copyright 2004-present Facebook. All Rights Reserved.
#include "Utils.h"
#include <random>
// Distribute `numSamples` points roughly uniformly on a sphere of the
// given radius using the Fibonacci (golden-angle) spiral.
std::vector<Eigen::Vector3f> EquiDistPointsOnSphere(const uint numSamples,
                                                    const float radius) {
    std::vector<Eigen::Vector3f> result(numSamples);
    const float step = 2.f / numSamples;
    const float goldenAngle =
            static_cast<float>(M_PI) * (3.f - std::sqrt(5.f));
    for (uint k = 0; k < numSamples; k++) {
        // Even spacing along y, centered within each latitude band.
        const float y = ((k * step) - 1) + (step / 2);
        const float ringRadius = std::sqrt(1 - std::pow(y, 2.f));
        // Successive points advance by the golden angle in azimuth.
        const float phi = (k + 1.f) * goldenAngle;
        result[k] = radius * Eigen::Vector3f(cos(phi) * ringRadius, y,
                                             sin(phi) * ringRadius);
    }
    return result;
}
// Collect every valid pixel from a rendered vertex image.
//
// A pixel is valid when its w component is non-zero (the shader
// writes w == 0 for background). Pixels are visited column-major
// (w outer, h inner), matching the original ordering.
std::vector<Eigen::Vector4f> ValidPointsFromIm(
        const pangolin::Image<Eigen::Vector4f>& verts) {
    std::vector<Eigen::Vector4f> points;
    for (unsigned int col = 0; col < verts.w; col++) {
        for (unsigned int row = 0; row < verts.h; row++) {
            const Eigen::Vector4f candidate = verts(col, row);
            if (candidate[3] != 0.0f) {
                points.push_back(candidate);
            }
        }
    }
    return points;
}
// Collect valid normal pixels from a rendered normal image while
// tracking per-triangle normal consistency.
//
// Each pixel encodes a world-space normal in xyz and (triangle id + 1)
// in w; w == 0 marks background. For each triangle, `tris` stores the
// first observed normal; if a later observation points the opposite
// way (negative dot), the triangle is flagged with w = -1 and every
// conflicting observation increments `wrongObs`. `totalObs` counts all
// valid pixels.
std::vector<Eigen::Vector4f> ValidPointsAndTrisFromIm(
        const pangolin::Image<Eigen::Vector4f>& pixNorms,
        std::vector<Eigen::Vector4f>& tris,
        int& totalObs,
        int& wrongObs) {
    std::vector<Eigen::Vector4f> points;
    Eigen::Vector4f n;
    for (unsigned int w = 0; w < pixNorms.w; w++) {
        for (unsigned int h = 0; h < pixNorms.h; h++) {
            n = pixNorms(w, h);
            if (n[3] == 0.0f) continue;
            totalObs++;
            // w holds (triangle id + 1); the +0.01f guards against
            // float rounding before truncation.
            const std::size_t triInd =
                    static_cast<std::size_t>(n[3] + 0.01f) - 1;
            Eigen::Vector4f triTrack = tris[triInd];
            if (triTrack[3] == 0.0f)
                tris[triInd] = n;  // first observation of this triangle
            else if (triTrack[3] > 0.0f) {
                const float dot = triTrack.head<3>().dot(n.head<3>());
                if (dot < 0.0f) {
                    // Conflicting orientation: mark triangle as bad.
                    tris[triInd][3] = -1.0f;
                    wrongObs++;
                }
            } else if (triTrack[3] < 0.0f) {
                wrongObs++;  // triangle already known inconsistent
            }
            points.push_back(n);
        }
    }
    return points;
}
// Area of triangle (a, b, c), computed from the angle at vertex a.
//
// The cosine is clamped just inside (-1, 1) before the sqrt so that
// degenerate or numerically noisy inputs do not yield NaN; callers
// still check isnan for fully degenerate edges (zero-length norms).
float TriangleArea(const Eigen::Vector3f& a,
                   const Eigen::Vector3f& b,
                   const Eigen::Vector3f& c) {
    const Eigen::Vector3f ab = b - a;
    const Eigen::Vector3f ac = c - a;

    float cosA = ab.dot(ac) / (ab.norm() * ac.norm());
    if (cosA < -1) {
        // Angle numerically at/past pi: clamp to 179.5 degrees.
        cosA = std::cos(static_cast<float>(M_PI) * 359.f / 360);
    } else if (cosA > 1) {
        // Angle numerically at/below zero: clamp to 0.5 degrees.
        cosA = std::cos(static_cast<float>(M_PI) * 1.f / 360);
    }

    const float sinA = std::sqrt(1 - cosA * cosA);
    return 0.5f * ab.norm() * ac.norm() * sinA;
}
// Draw a point uniformly at random from triangle (a, b, c) using the
// sqrt-based barycentric warp:
//   P = (1 - sqrt(r1)) a + sqrt(r1) (1 - r2) b + sqrt(r1) r2 c,
// with r1, r2 ~ U[0, 1).
Eigen::Vector3f SamplePointFromTriangle(const Eigen::Vector3f& a,
                                        const Eigen::Vector3f& b,
                                        const Eigen::Vector3f& c) {
    // Seed once per thread instead of constructing a fresh
    // random_device + mt19937 on every call: the original re-seeded
    // the engine for every sample, which is both slow (this runs in
    // the inner sampling loop) and wastes entropy.
    static thread_local std::mt19937 generator{std::random_device{}()};
    std::uniform_real_distribution<float> rand_dist(0.0, 1.0);

    const float r1 = rand_dist(generator);
    const float r2 = rand_dist(generator);
    // Hoist the repeated sqrt(r1).
    const float sqrt_r1 = std::sqrt(r1);

    return Eigen::Vector3f((1 - sqrt_r1) * a + sqrt_r1 * (1 - r2) * b +
                           r2 * sqrt_r1 * c);
}
// TODO: duplicated w/ below
// Compute the translation and scale that would center the mesh at its
// bounding-box center and fit it inside the unit sphere (shrunk by
// 1/buffer), WITHOUT modifying the geometry. Returns {offset, scale}
// such that (v + offset) * scale normalizes a vertex v. Only vertices
// referenced by some face are considered.
std::pair<Eigen::Vector3f, float> ComputeNormalizationParameters(
        pangolin::Geometry& geom, const float buffer) {
    float xMin = 1000000, xMax = -1000000, yMin = 1000000, yMax = -1000000,
          zMin = 1000000, zMax = -1000000;

    pangolin::Image<float> vertices = pangolin::get<pangolin::Image<float>>(
            geom.buffers["geometry"].attributes["vertex"]);

    const std::size_t numVertices = vertices.h;

    ///////// Only consider vertices that were used in some face
    std::vector<unsigned char> verticesUsed(numVertices, 0);
    // turn to true if the vertex is used
    for (const auto& object : geom.objects) {
        auto itVertIndices = object.second.attributes.find("vertex_indices");
        if (itVertIndices != object.second.attributes.end()) {
            pangolin::Image<uint32_t> ibo =
                    pangolin::get<pangolin::Image<uint32_t>>(
                            itVertIndices->second);

            for (uint i = 0; i < ibo.h; ++i) {
                for (uint j = 0; j < 3; ++j) {
                    verticesUsed[ibo(j, i)] = 1;
                }
            }
        }
    }
    /////////

    // compute min max in each dimension
    for (size_t i = 0; i < numVertices; i++) {
        // pass when it's not used.
        if (verticesUsed[i] == 0) continue;
        xMin = fmin(xMin, vertices(0, i));
        yMin = fmin(yMin, vertices(1, i));
        zMin = fmin(zMin, vertices(2, i));
        xMax = fmax(xMax, vertices(0, i));
        yMax = fmax(yMax, vertices(1, i));
        zMax = fmax(zMax, vertices(2, i));
    }

    const Eigen::Vector3f center((xMax + xMin) / 2.0f, (yMax + yMin) / 2.0f,
                                 (zMax + zMin) / 2.0f);

    // make the mean zero
    float maxDistance = -1.0f;
    // Normalization radius = farthest used vertex from the center.
    for (size_t i = 0; i < numVertices; i++) {
        // pass when it's not used.
        if (verticesUsed[i] == false) continue;
        const float dist =
                (Eigen::Map<Eigen::Vector3f>(vertices.RowPtr(i)) - center)
                        .norm();
        maxDistance = std::max(maxDistance, dist);
    }

    // add some buffer
    maxDistance *= buffer;

    return {-1 * center, (1.f / maxDistance)};
}
// Center the mesh vertices at their bounding-box center IN PLACE and,
// when `fitToUnitSphere` is set, also scale every vertex into the unit
// sphere. Returns the (buffered) maximum distance from the center, or
// 1 after scaling. The bounding box and radius consider only vertices
// referenced by some face, but the final scaling pass divides ALL
// vertices.
float BoundingCubeNormalization(pangolin::Geometry& geom,
                                bool fitToUnitSphere,
                                const float buffer) {
    float xMin = 1000000, xMax = -1000000, yMin = 1000000, yMax = -1000000,
          zMin = 1000000, zMax = -1000000;

    pangolin::Image<float> vertices = pangolin::get<pangolin::Image<float>>(
            geom.buffers["geometry"].attributes["vertex"]);

    const std::size_t numVertices = vertices.h;

    ///////// Only consider vertices that were used in some face
    std::vector<unsigned char> verticesUsed(numVertices, 0);
    // turn to true if the vertex is used
    for (const auto& object : geom.objects) {
        auto itVertIndices = object.second.attributes.find("vertex_indices");
        if (itVertIndices != object.second.attributes.end()) {
            pangolin::Image<uint32_t> ibo =
                    pangolin::get<pangolin::Image<uint32_t>>(
                            itVertIndices->second);

            for (uint i = 0; i < ibo.h; ++i) {
                for (uint j = 0; j < 3; ++j) {
                    verticesUsed[ibo(j, i)] = 1;
                }
            }
        }
    }
    /////////

    // compute min max in each dimension
    for (size_t i = 0; i < numVertices; i++) {
        // pass when it's not used.
        if (verticesUsed[i] == 0) continue;
        xMin = fmin(xMin, vertices(0, i));
        yMin = fmin(yMin, vertices(1, i));
        zMin = fmin(zMin, vertices(2, i));
        xMax = fmax(xMax, vertices(0, i));
        yMax = fmax(yMax, vertices(1, i));
        zMax = fmax(zMax, vertices(2, i));
    }

    const float xCenter = (xMax + xMin) / 2.0f;
    const float yCenter = (yMax + yMin) / 2.0f;
    const float zCenter = (zMax + zMin) / 2.0f;

    // make the mean zero
    float maxDistance = -1.0f;
    // Shift used vertices to the origin and track the farthest one.
    for (size_t i = 0; i < numVertices; i++) {
        // pass when it's not used.
        if (verticesUsed[i] == false) continue;
        vertices(0, i) -= xCenter;
        vertices(1, i) -= yCenter;
        vertices(2, i) -= zCenter;
        const float dist =
                Eigen::Map<Eigen::Vector3f>(vertices.RowPtr(i)).norm();
        maxDistance = std::max(maxDistance, dist);
    }

    // add some buffer
    maxDistance *= buffer;

    if (fitToUnitSphere) {
        // Note: this pass scales every vertex, used or not.
        for (size_t i = 0; i < numVertices; i++) {
            vertices(0, i) /= maxDistance;
            vertices(1, i) /= maxDistance;
            vertices(2, i) /= maxDistance;
        }
        maxDistance = 1;
    }

    return maxDistance;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/src/Utils.h | C/C++ Header | // Copyright 2004-present Facebook. All Rights Reserved.
#include <vector>
// NB: This differs from the GitHub version due to the different
// location of the nanoflann header when installing from source
#include <pangolin/geometry/geometry.h>
#include <pangolin/pangolin.h>
#include <Eigen/Core>
#include <nanoflann.hpp>
// Adaptor exposing a vector of 3D points to nanoflann's KD-tree
// interface (see KdVertexListTree below).
struct KdVertexList {
   public:
    // Points are copied; the adaptor owns its own snapshot.
    KdVertexList(const std::vector<Eigen::Vector3f>& points)
        : points_(points) {}

    // Number of points in the index.
    inline size_t kdtree_get_point_count() const { return points_.size(); }

    // Squared Euclidean distance between query point `p1` (3 floats)
    // and the stored point at `idx_p2`.
    inline float kdtree_distance(const float* p1,
                                 const size_t idx_p2,
                                 size_t /*size*/) const {
        Eigen::Map<const Eigen::Vector3f> p(p1);
        return (p - points_[idx_p2]).squaredNorm();
    }

    // Coordinate `dim` (0..2) of point `idx`.
    inline float kdtree_get_pt(const size_t idx, int dim) const {
        return points_[idx](dim);
    }

    // No precomputed bounding box; nanoflann computes one itself.
    template <class BBOX>
    bool kdtree_get_bbox(BBOX& /*bb*/) const {
        return false;
    }

   private:
    std::vector<Eigen::Vector3f> points_;
};
// 3-D, single-precision-L2, int-indexed nanoflann KD-tree over a
// KdVertexList.
using KdVertexListTree = nanoflann::KDTreeSingleIndexAdaptor<
        nanoflann::L2_Simple_Adaptor<float, KdVertexList>,
        KdVertexList,
        3,
        int>;

// Roughly uniform points on a sphere (Fibonacci spiral).
std::vector<Eigen::Vector3f> EquiDistPointsOnSphere(const uint numSamples,
                                                    const float radius);

// Pixels of a rendered vertex image whose w component is non-zero.
std::vector<Eigen::Vector4f> ValidPointsFromIm(
        const pangolin::Image<Eigen::Vector4f>& verts);

// Same for normal images, additionally tracking per-triangle normal
// consistency in `tris` / `totalObs` / `wrongObs`.
std::vector<Eigen::Vector4f> ValidPointsAndTrisFromIm(
        const pangolin::Image<Eigen::Vector4f>& pixNorms,
        std::vector<Eigen::Vector4f>& tris,
        int& totalObs,
        int& wrongObs);

// Triangle area with clamped-cosine NaN protection.
float TriangleArea(const Eigen::Vector3f& a,
                   const Eigen::Vector3f& b,
                   const Eigen::Vector3f& c);

// Uniform random point inside triangle (a, b, c).
Eigen::Vector3f SamplePointFromTriangle(const Eigen::Vector3f& a,
                                        const Eigen::Vector3f& b,
                                        const Eigen::Vector3f& c);

// {offset, scale} that would center/unit-sphere-normalize the mesh
// (geometry left unchanged).
std::pair<Eigen::Vector3f, float> ComputeNormalizationParameters(
        pangolin::Geometry& geom, const float buffer = 1.03);

// In-place centering (and optional unit-sphere scaling); returns the
// normalization radius.
float BoundingCubeNormalization(pangolin::Geometry& geom,
                                const bool fitToUnitSphere,
                                const float buffer = 1.03);
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/src/cnpy.cpp | C++ | // Copyright (C) 2011 Carl Rogers
// Released under MIT License
// license available in LICENSE file, or at
// http://www.opensource.org/licenses/mit-license.php
#include "cnpy.h"
#include <stdint.h>
#include <algorithm>
#include <complex>
#include <cstdlib>
#include <cstring>
#include <iomanip>
#include <regex>
#include <stdexcept>
// Probe the host byte order at runtime and return numpy's byte-order
// character: '<' for little-endian, '>' for big-endian.
char cnpy::BigEndianTest() {
    const int probe = 1;
    const bool little_endian = ((char*)&probe)[0] != 0;
    return little_endian ? '<' : '>';
}
// Map a C++ type to its numpy dtype kind character:
// 'f' floating point, 'i' signed integer, 'u' unsigned integer,
// 'b' bool, 'c' complex, '?' anything else.
char cnpy::map_type(const std::type_info& t) {
    // Floating point.
    if (t == typeid(float) || t == typeid(double) ||
        t == typeid(long double))
        return 'f';

    // Signed integers (plain char is grouped here, as in the
    // original implementation).
    if (t == typeid(int) || t == typeid(char) || t == typeid(short) ||
        t == typeid(long) || t == typeid(long long))
        return 'i';

    // Unsigned integers.
    if (t == typeid(unsigned char) || t == typeid(unsigned short) ||
        t == typeid(unsigned long) || t == typeid(unsigned long long) ||
        t == typeid(unsigned int))
        return 'u';

    if (t == typeid(bool)) return 'b';

    // Complex.
    if (t == typeid(std::complex<float>) ||
        t == typeid(std::complex<double>) ||
        t == typeid(std::complex<long double>))
        return 'c';

    return '?';
}
// Append the bytes of a std::string to a byte buffer (used while
// assembling npy/zip headers).
template <>
std::vector<char>& cnpy::operator+=(std::vector<char>& lhs,
                                    const std::string rhs) {
    lhs.insert(lhs.end(), rhs.begin(), rhs.end());
    return lhs;
}
// Append a NUL-terminated C string to a byte buffer, excluding the
// terminating NUL.
template <>
std::vector<char>& cnpy::operator+=(std::vector<char>& lhs, const char* rhs) {
    const size_t len = strlen(rhs);
    // The original called lhs.reserve(len), which requests a *total*
    // capacity of len — a no-op once the buffer has grown past the
    // string length — and then pushed bytes one at a time. insert()
    // sizes the growth correctly and copies in a single step.
    lhs.insert(lhs.end(), rhs, rhs + len);
    return lhs;
}
// Parse a numpy .npy header already resident in memory.
//
// `buffer` points at the start of the file (6-byte magic, 1-byte
// major version, 1-byte minor version, 2-byte header length, then the
// python-dict header text). Outputs the element word size, the array
// shape, and the fortran_order flag. Little-endian (or '|' = N/A)
// byte order is assumed and asserted in debug builds.
void cnpy::parse_npy_header(unsigned char* buffer,
                            size_t& word_size,
                            std::vector<size_t>& shape,
                            bool& fortran_order) {
    // std::string magic_string(buffer,6);
    // The version bytes at offsets 6/7 are not needed here; the
    // original read them into unused locals.
    uint16_t header_len = *reinterpret_cast<uint16_t*>(buffer + 8);
    std::string header(reinterpret_cast<char*>(buffer + 9), header_len);

    size_t loc1, loc2;

    // fortran_order: the value text starts 16 chars past the keyword.
    loc1 = header.find("fortran_order") + 16;
    fortran_order = (header.substr(loc1, 4) == "True" ? true : false);

    // shape: the parenthesized tuple of dimension sizes.
    loc1 = header.find("(");
    loc2 = header.find(")");

    std::regex num_regex("[0-9][0-9]*");
    std::smatch sm;
    shape.clear();

    std::string str_shape = header.substr(loc1 + 1, loc2 - loc1 - 1);
    while (std::regex_search(str_shape, sm, num_regex)) {
        shape.push_back(std::stoi(sm[0].str()));
        str_shape = sm.suffix().str();
    }

    // descr: byte order char ('<' little, '|' not applicable), then
    // the type kind char, then the word size in bytes.
    loc1 = header.find("descr") + 9;
    bool littleEndian =
            (header[loc1] == '<' || header[loc1] == '|' ? true : false);
    assert(littleEndian);
    (void)littleEndian;  // silence unused-variable warning with NDEBUG

    // char type = header[loc1+1];
    // assert(type == map_type(T));

    std::string str_ws = header.substr(loc1 + 2);
    loc2 = str_ws.find("'");
    word_size = atoi(str_ws.substr(0, loc2).c_str());
}
// Parse a numpy .npy header by reading from `fp` (positioned at the
// start of the file). Consumes the magic, version, and header text,
// leaving `fp` at the first data byte. Outputs element word size,
// shape, and fortran_order. Throws std::runtime_error on malformed or
// truncated headers.
void cnpy::parse_npy_header(FILE* fp,
                            size_t& word_size,
                            std::vector<size_t>& shape,
                            bool& fortran_order) {
    char buffer[256];
    // 11 bytes: 6-byte magic + 2 version bytes + 2-byte header length
    // + the first header byte; fgets() then reads the rest of the
    // header line.
    size_t res = fread(buffer, sizeof(char), 11, fp);
    if (res != 11) throw std::runtime_error("parse_npy_header: failed fread");
    // The original passed fgets' result straight into std::string,
    // which is undefined behavior when fgets returns NULL on a
    // truncated file; check it explicitly.
    char* line = fgets(buffer, 256, fp);
    if (line == nullptr)
        throw std::runtime_error("parse_npy_header: failed fgets");
    std::string header = line;
    assert(header[header.size() - 1] == '\n');

    size_t loc1, loc2;

    // fortran order
    loc1 = header.find("fortran_order");
    if (loc1 == std::string::npos)
        throw std::runtime_error(
                "parse_npy_header: failed to find header keyword: "
                "'fortran_order'");
    loc1 += 16;
    fortran_order = (header.substr(loc1, 4) == "True" ? true : false);

    // shape
    loc1 = header.find("(");
    loc2 = header.find(")");
    if (loc1 == std::string::npos || loc2 == std::string::npos)
        throw std::runtime_error(
                "parse_npy_header: failed to find header keyword: '(' or ')'");

    std::regex num_regex("[0-9][0-9]*");
    std::smatch sm;
    shape.clear();

    std::string str_shape = header.substr(loc1 + 1, loc2 - loc1 - 1);
    while (std::regex_search(str_shape, sm, num_regex)) {
        shape.push_back(std::stoi(sm[0].str()));
        str_shape = sm.suffix().str();
    }

    // endian, word size, data type
    // byte order code | stands for not applicable.
    // not sure when this applies except for byte array
    loc1 = header.find("descr");
    if (loc1 == std::string::npos)
        throw std::runtime_error(
                "parse_npy_header: failed to find header keyword: 'descr'");
    loc1 += 9;
    bool littleEndian =
            (header[loc1] == '<' || header[loc1] == '|' ? true : false);
    assert(littleEndian);
    (void)littleEndian;  // silence unused-variable warning with NDEBUG

    // char type = header[loc1+1];
    // assert(type == map_type(T));

    std::string str_ws = header.substr(loc1 + 2);
    loc2 = str_ws.find("'");
    word_size = atoi(str_ws.substr(0, loc2).c_str());
}
// Parse the ZIP "end of central directory" record — the final 22
// bytes of the archive — to recover the record count and the size and
// offset of the global header. Only single-disk, comment-free
// archives are supported (asserted in debug builds).
void cnpy::parse_zip_footer(FILE* fp,
                            uint16_t& nrecs,
                            size_t& global_header_size,
                            size_t& global_header_offset) {
    std::vector<char> footer(22);
    fseek(fp, -22, SEEK_END);
    size_t res = fread(&footer[0], sizeof(char), 22, fp);
    if (res != 22) throw std::runtime_error("parse_zip_footer: failed fread");

    // Read the little-endian fields with memcpy instead of casting the
    // byte pointer (as the original did): dereferencing a misaligned
    // uint32_t* is undefined behavior on strict-alignment targets.
    auto read_u16 = [&footer](size_t off) -> uint16_t {
        uint16_t v;
        memcpy(&v, &footer[off], sizeof(v));
        return v;
    };
    auto read_u32 = [&footer](size_t off) -> uint32_t {
        uint32_t v;
        memcpy(&v, &footer[off], sizeof(v));
        return v;
    };

    uint16_t disk_no = read_u16(4);
    uint16_t disk_start = read_u16(6);
    uint16_t nrecs_on_disk = read_u16(8);
    nrecs = read_u16(10);
    global_header_size = read_u32(12);
    global_header_offset = read_u32(16);
    uint16_t comment_len = read_u16(20);

    // Single-part archive, no trailing comment.
    assert(disk_no == 0);
    assert(disk_start == 0);
    assert(nrecs_on_disk == nrecs);
    assert(comment_len == 0);
    (void)disk_no;
    (void)disk_start;
    (void)nrecs_on_disk;
    (void)comment_len;
}
// Read one .npy array (header followed by raw data) from the current
// position of `fp`. Throws std::runtime_error on a short read.
cnpy::NpyArray load_the_npy_file(FILE* fp) {
    std::vector<size_t> shape;
    size_t word_size;
    bool fortran_order;
    cnpy::parse_npy_header(fp, word_size, shape, fortran_order);

    // parse_npy_header left fp at the first data byte.
    cnpy::NpyArray arr(shape, word_size, fortran_order);
    size_t nread = fread(arr.data<char>(), 1, arr.num_bytes(), fp);
    if (nread != arr.num_bytes())
        throw std::runtime_error("load_the_npy_file: failed fread");
    return arr;
}
// Inflate one deflate-compressed npz member and parse it as an .npy
// array. `compr_bytes` / `uncompr_bytes` come from the member's zip
// local header. Throws std::runtime_error on read or inflate errors.
cnpy::NpyArray load_the_npz_array(FILE* fp,
                                  uint32_t compr_bytes,
                                  uint32_t uncompr_bytes) {
    std::vector<unsigned char> buffer_compr(compr_bytes);
    std::vector<unsigned char> buffer_uncompr(uncompr_bytes);
    size_t nread = fread(&buffer_compr[0], 1, compr_bytes, fp);
    // The original error message named load_the_npy_file here.
    if (nread != compr_bytes)
        throw std::runtime_error("load_the_npz_array: failed fread");

    int err;
    z_stream d_stream;

    d_stream.zalloc = Z_NULL;
    d_stream.zfree = Z_NULL;
    d_stream.opaque = Z_NULL;
    d_stream.avail_in = 0;
    d_stream.next_in = Z_NULL;
    // -MAX_WBITS selects a raw deflate stream: zip members carry no
    // zlib header.
    err = inflateInit2(&d_stream, -MAX_WBITS);
    if (err != Z_OK)
        throw std::runtime_error("load_the_npz_array: inflateInit2 failed");

    d_stream.avail_in = compr_bytes;
    d_stream.next_in = &buffer_compr[0];
    d_stream.avail_out = uncompr_bytes;
    d_stream.next_out = &buffer_uncompr[0];

    // The original discarded these return codes and would silently
    // hand back garbage on a corrupt archive; fail loudly instead.
    err = inflate(&d_stream, Z_FINISH);
    if (err != Z_STREAM_END) {
        inflateEnd(&d_stream);
        throw std::runtime_error("load_the_npz_array: inflate failed");
    }
    err = inflateEnd(&d_stream);
    if (err != Z_OK)
        throw std::runtime_error("load_the_npz_array: inflateEnd failed");

    std::vector<size_t> shape;
    size_t word_size;
    bool fortran_order;
    cnpy::parse_npy_header(&buffer_uncompr[0], word_size, shape, fortran_order);

    cnpy::NpyArray array(shape, word_size, fortran_order);

    // The npy header sits at the front of the inflated buffer; the raw
    // array data is the tail.
    size_t offset = uncompr_bytes - array.num_bytes();
    memcpy(array.data<unsigned char>(), &buffer_uncompr[0] + offset,
           array.num_bytes());

    return array;
}
// Load every array in an .npz archive into a map keyed by variable
// name (the trailing ".npy" of each zip member name is stripped).
// Walks the zip local headers sequentially; each member is one
// embedded .npy file, stored (method 0) or deflate-compressed.
cnpy::npz_t cnpy::npz_load(std::string fname) {
    FILE* fp = fopen(fname.c_str(), "rb");

    if (!fp) {
        throw std::runtime_error("npz_load: Error! Unable to open file " +
                                 fname + "!");
    }

    cnpy::npz_t arrays;

    while (1) {
        std::vector<char> local_header(30);
        size_t headerres = fread(&local_header[0], sizeof(char), 30, fp);
        if (headerres != 30) throw std::runtime_error("npz_load: failed fread");

        // if we've reached the global header, stop reading
        if (local_header[2] != 0x03 || local_header[3] != 0x04) break;

        // read in the variable name
        uint16_t name_len = *(uint16_t*)&local_header[26];
        std::string varname(name_len, ' ');
        size_t vname_res = fread(&varname[0], sizeof(char), name_len, fp);
        if (vname_res != name_len)
            throw std::runtime_error("npz_load: failed fread");

        // erase the lagging .npy
        varname.erase(varname.end() - 4, varname.end());

        // read in the extra field
        uint16_t extra_field_len = *(uint16_t*)&local_header[28];
        if (extra_field_len > 0) {
            std::vector<char> buff(extra_field_len);
            size_t efield_res =
                    fread(&buff[0], sizeof(char), extra_field_len, fp);
            if (efield_res != extra_field_len)
                throw std::runtime_error("npz_load: failed fread");
        }

        // Local-header fields: compression method at offset 8,
        // compressed size at 18, uncompressed size at 22.
        uint16_t compr_method =
                *reinterpret_cast<uint16_t*>(&local_header[0] + 8);
        uint32_t compr_bytes =
                *reinterpret_cast<uint32_t*>(&local_header[0] + 18);
        uint32_t uncompr_bytes =
                *reinterpret_cast<uint32_t*>(&local_header[0] + 22);

        if (compr_method == 0) {
            arrays[varname] = load_the_npy_file(fp);
        } else {
            arrays[varname] =
                    load_the_npz_array(fp, compr_bytes, uncompr_bytes);
        }
    }

    fclose(fp);
    return arrays;
}
// Load a single named array from an .npz archive by scanning the zip
// local headers sequentially until `varname` is found. Throws if the
// file cannot be opened or the variable is absent.
cnpy::NpyArray cnpy::npz_load(std::string fname, std::string varname) {
    FILE* fp = fopen(fname.c_str(), "rb");

    if (!fp) throw std::runtime_error("npz_load: Unable to open file " + fname);

    while (1) {
        std::vector<char> local_header(30);
        size_t header_res = fread(&local_header[0], sizeof(char), 30, fp);
        if (header_res != 30)
            throw std::runtime_error("npz_load: failed fread");

        // if we've reached the global header, stop reading
        if (local_header[2] != 0x03 || local_header[3] != 0x04) break;

        // read in the variable name
        uint16_t name_len = *(uint16_t*)&local_header[26];
        std::string vname(name_len, ' ');
        size_t vname_res = fread(&vname[0], sizeof(char), name_len, fp);
        if (vname_res != name_len)
            throw std::runtime_error("npz_load: failed fread");
        vname.erase(vname.end() - 4, vname.end());  // erase the lagging .npy

        // read in the extra field
        uint16_t extra_field_len = *(uint16_t*)&local_header[28];
        fseek(fp, extra_field_len, SEEK_CUR);  // skip past the extra field

        uint16_t compr_method =
                *reinterpret_cast<uint16_t*>(&local_header[0] + 8);
        uint32_t compr_bytes =
                *reinterpret_cast<uint32_t*>(&local_header[0] + 18);
        uint32_t uncompr_bytes =
                *reinterpret_cast<uint32_t*>(&local_header[0] + 22);

        if (vname == varname) {
            NpyArray array = (compr_method == 0)
                                     ? load_the_npy_file(fp)
                                     : load_the_npz_array(fp, compr_bytes,
                                                          uncompr_bytes);
            fclose(fp);
            return array;
        } else {
            // Skip past this member's data. The bytes stored on disk
            // are the *compressed* size (local-header offset 18); the
            // original skipped by the uncompressed size at offset 22,
            // which lands mid-stream for deflate-compressed members
            // and derails the scan. For stored members the two sizes
            // are equal, so this is also correct when compr_method==0.
            fseek(fp, compr_bytes, SEEK_CUR);
        }
    }

    fclose(fp);

    // if we get here, we haven't found the variable in the file
    throw std::runtime_error("npz_load: Variable name " + varname +
                             " not found in " + fname);
}
// Load a standalone .npy file into an NpyArray. Throws if the file
// cannot be opened or is truncated.
cnpy::NpyArray cnpy::npy_load(std::string fname) {
    FILE* fp = fopen(fname.c_str(), "rb");

    if (!fp) throw std::runtime_error("npy_load: Unable to open file " + fname);

    NpyArray arr = load_the_npy_file(fp);

    fclose(fp);
    return arr;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/src/cnpy.h | C/C++ Header | // Copyright (C) 2011 Carl Rogers
// Released under MIT License
// license available in LICENSE file, or at
// http://www.opensource.org/licenses/mit-license.php
#ifndef LIBCNPY_H_
#define LIBCNPY_H_
#include <stdint.h>
#include <zlib.h>
#include <cassert>
#include <cstdio>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <sstream>
#include <stdexcept>
#include <string>
#include <typeinfo>
#include <vector>
namespace cnpy {
// In-memory representation of a NumPy array loaded from a .npy/.npz file:
// a raw byte buffer plus shape, element size, and memory-order metadata.
struct NpyArray {
    // Allocate a buffer large enough for an array of the given shape and
    // per-element byte size. The buffer contents are uninitialized by the
    // constructor; loaders fill it afterwards.
    NpyArray(const std::vector<size_t>& _shape,
             size_t _word_size,
             bool _fortran_order)
        : shape(_shape), word_size(_word_size), fortran_order(_fortran_order) {
        num_vals = 1;
        for (size_t i = 0; i < shape.size(); i++) num_vals *= shape[i];
        data_holder = std::shared_ptr<std::vector<char>>(
                new std::vector<char>(num_vals * word_size));
    }
    // Default state: empty shape and a null data buffer.
    // NOTE(review): calling data()/num_bytes() on a default-constructed
    // NpyArray dereferences the null data_holder — callers must populate it
    // first.
    NpyArray() : shape(0), word_size(0), fortran_order(0), num_vals(0) {}
    // Reinterpret the raw byte buffer as an array of T. The caller is
    // responsible for T matching word_size and the on-disk dtype.
    template <typename T>
    T* data() {
        return reinterpret_cast<T*>(&(*data_holder)[0]);
    }
    template <typename T>
    const T* data() const {
        return reinterpret_cast<T*>(&(*data_holder)[0]);
    }
    // Copy the buffer out as a std::vector<T> of num_vals elements.
    template <typename T>
    std::vector<T> as_vec() const {
        const T* p = data<T>();
        return std::vector<T>(p, p + num_vals);
    }
    // Total size of the underlying buffer in bytes (num_vals * word_size).
    size_t num_bytes() const { return data_holder->size(); }
    std::shared_ptr<std::vector<char>> data_holder;  // shared raw bytes
    std::vector<size_t> shape;                       // array dimensions
    size_t word_size;                                // bytes per element
    bool fortran_order;  // true if column-major on disk
    size_t num_vals;     // product of shape entries
};
// Map from array name (without the ".npy" suffix) to its loaded contents.
using npz_t = std::map<std::string, NpyArray>;
// Return '<' on little-endian hosts, '>' on big-endian (NumPy descr prefix).
char BigEndianTest();
// Map a C++ type (via typeid) to the NumPy dtype character code.
char map_type(const std::type_info& t);
// Build the .npy format header bytes for an array of type T and `shape`.
template <typename T>
std::vector<char> create_npy_header(const std::vector<size_t>& shape);
// Parse a .npy header from an open file / from a memory buffer, filling in
// the element size, shape, and memory order.
void parse_npy_header(FILE* fp,
                      size_t& word_size,
                      std::vector<size_t>& shape,
                      bool& fortran_order);
void parse_npy_header(unsigned char* buffer,
                      size_t& word_size,
                      std::vector<size_t>& shape,
                      bool& fortran_order);
// Parse the ZIP end-of-central-directory record of an .npz file.
void parse_zip_footer(FILE* fp,
                      uint16_t& nrecs,
                      size_t& global_header_size,
                      size_t& global_header_offset);
// Load all arrays / a single named array / a bare .npy file.
npz_t npz_load(std::string fname);
NpyArray npz_load(std::string fname, std::string varname);
NpyArray npy_load(std::string fname);
// Append the raw bytes of `rhs` to the byte buffer `lhs`, in host memory
// order (little endian on the platforms cnpy targets). Used to assemble
// .npy headers and ZIP records byte by byte.
template <typename T>
std::vector<char>& operator+=(std::vector<char>& lhs, const T rhs) {
    // write in little endian
    const char* first = reinterpret_cast<const char*>(&rhs);
    lhs.insert(lhs.end(), first, first + sizeof(T));
    return lhs;
}
// Specializations (defined in the .cpp) append string contents verbatim.
template <>
std::vector<char>& operator+=(std::vector<char>& lhs, const std::string rhs);
template <>
std::vector<char>& operator+=(std::vector<char>& lhs, const char* rhs);
// Save `data` (row-major, dimensions `shape`) to a .npy file.
//
// Parameters:
//   fname - output path.
//   data  - pointer to nels contiguous elements of T.
//   shape - array dimensions; product gives the element count.
//   mode  - "w" overwrites; "a" appends along the first axis, in which case
//           the element size and all trailing dimensions must match the
//           existing file (checked with asserts, matching the rest of cnpy).
template <typename T>
void npy_save(std::string fname,
              const T* data,
              const std::vector<size_t> shape,
              std::string mode = "w") {
    FILE* fp = NULL;
    std::vector<size_t>
            true_data_shape;  // if appending, the shape of existing + new data
    if (mode == "a") fp = fopen(fname.c_str(), "r+b");
    if (fp) {
        // File exists: read its header so the leading dimension can be
        // extended and the new data validated against it.
        size_t word_size;
        bool fortran_order;
        parse_npy_header(fp, word_size, true_data_shape, fortran_order);
        assert(!fortran_order);
        if (word_size != sizeof(T)) {
            std::cout << "libnpy error: " << fname << " has word size "
                      << word_size << " but npy_save appending data sized "
                      << sizeof(T) << "\n";
            assert(word_size == sizeof(T));
        }
        if (true_data_shape.size() != shape.size()) {
            std::cout << "libnpy error: npy_save attempting to append "
                         "misdimensioned data to "
                      << fname << "\n";
            // BUG FIX: the original asserted `!=`, which is true inside this
            // branch, so a dimension-count mismatch passed the assertion and
            // silently corrupted the file. Assert the required condition.
            assert(true_data_shape.size() == shape.size());
        }
        for (size_t i = 1; i < shape.size(); i++) {
            if (shape[i] != true_data_shape[i]) {
                std::cout << "libnpy error: npy_save attempting to append "
                             "misshaped data to "
                          << fname << "\n";
                assert(shape[i] == true_data_shape[i]);
            }
        }
        // Grow the leading axis by the number of appended rows.
        true_data_shape[0] += shape[0];
    } else {
        fp = fopen(fname.c_str(), "wb");
        // BUG FIX: the original wrote to fp without checking the open
        // succeeded, which is undefined behavior on failure.
        if (!fp)
            throw std::runtime_error("npy_save: Unable to open file " + fname);
        true_data_shape = shape;
    }
    std::vector<char> header = create_npy_header<T>(true_data_shape);
    // size_t init avoids truncating the product through an int accumulator.
    size_t nels = std::accumulate(shape.begin(), shape.end(), (size_t)1,
                                  std::multiplies<size_t>());
    // Rewrite the header in place (its byte length is unchanged when only
    // the leading dimension's digits fit the padded dict), then append the
    // payload at the end of the file.
    fseek(fp, 0, SEEK_SET);
    fwrite(&header[0], sizeof(char), header.size(), fp);
    fseek(fp, 0, SEEK_END);
    fwrite(data, sizeof(T), nels, fp);
    fclose(fp);
}
// Save one array into an .npz archive (a ZIP of .npy files, stored
// uncompressed). mode "w" creates a fresh archive; mode "a" appends a new
// member, rewriting the central directory and end-of-central-directory
// record after the new data.
// NOTE(review): fp is not checked after fopen, and all sizes/offsets are
// written as uint32 — archives past 4 GB would silently wrap; confirm
// whether callers can hit either case.
template <typename T>
void npz_save(std::string zipname,
              std::string fname,
              const T* data,
              const std::vector<size_t>& shape,
              std::string mode = "w") {
    // first, append a .npy to the fname
    fname += ".npy";
    // now, on with the show
    FILE* fp = NULL;
    uint16_t nrecs = 0;
    size_t global_header_offset = 0;
    std::vector<char> global_header;
    if (mode == "a") fp = fopen(zipname.c_str(), "r+b");
    if (fp) {
        // zip file exists. we need to add a new npy file to it.
        // first read the footer. this gives us the offset and size of the
        // global header then read and store the global header. below, we will
        // write the the new data at the start of the global header then append
        // the global header and footer below it
        size_t global_header_size;
        parse_zip_footer(fp, nrecs, global_header_size, global_header_offset);
        fseek(fp, global_header_offset, SEEK_SET);
        global_header.resize(global_header_size);
        size_t res =
                fread(&global_header[0], sizeof(char), global_header_size, fp);
        if (res != global_header_size) {
            throw std::runtime_error(
                    "npz_save: header read error while adding to existing zip");
        }
        // Position back so the new local header overwrites the old central
        // directory; the saved directory is re-emitted after the new data.
        fseek(fp, global_header_offset, SEEK_SET);
    } else {
        fp = fopen(zipname.c_str(), "wb");
    }
    std::vector<char> npy_header = create_npy_header<T>(shape);
    size_t nels = std::accumulate(shape.begin(), shape.end(), 1,
                                  std::multiplies<size_t>());
    // Member size = npy header + payload (stored uncompressed, so the
    // compressed and uncompressed sizes below are equal).
    size_t nbytes = nels * sizeof(T) + npy_header.size();
    // get the CRC of the data to be added
    uint32_t crc = crc32(0L, (uint8_t*)&npy_header[0], npy_header.size());
    crc = crc32(crc, (uint8_t*)data, nels * sizeof(T));
    // build the local header (ZIP local file header, fixed 30-byte layout)
    std::vector<char> local_header;
    local_header += "PK";             // first part of sig
    local_header += (uint16_t)0x0403; // second part of sig
    local_header += (uint16_t)20;     // min version to extract
    local_header += (uint16_t)0;      // general purpose bit flag
    local_header += (uint16_t)0;      // compression method
    local_header += (uint16_t)0;      // file last mod time
    local_header += (uint16_t)0;      // file last mod date
    local_header += (uint32_t)crc;    // crc
    local_header += (uint32_t)nbytes; // compressed size
    local_header += (uint32_t)nbytes; // uncompressed size
    local_header += (uint16_t)fname.size(); // fname length
    local_header += (uint16_t)0;            // extra field length
    local_header += fname;
    // build global header (ZIP central directory entry; bytes 4..29 of the
    // local header are reused since the fields coincide)
    global_header += "PK";             // first part of sig
    global_header += (uint16_t)0x0201; // second part of sig
    global_header += (uint16_t)20;     // version made by
    global_header.insert(global_header.end(), local_header.begin() + 4,
                         local_header.begin() + 30);
    global_header += (uint16_t)0; // file comment length
    global_header += (uint16_t)0; // disk number where file starts
    global_header += (uint16_t)0; // internal file attributes
    global_header += (uint32_t)0; // external file attributes
    global_header +=
            (uint32_t)global_header_offset; // relative offset of local file
                                            // header, since it begins where
                                            // the global header used to begin
    global_header += fname;
    // build footer (ZIP end-of-central-directory record)
    std::vector<char> footer;
    footer += "PK";                  // first part of sig
    footer += (uint16_t)0x0605;      // second part of sig
    footer += (uint16_t)0;           // number of this disk
    footer += (uint16_t)0;           // disk where footer starts
    footer += (uint16_t)(nrecs + 1); // number of records on this disk
    footer += (uint16_t)(nrecs + 1); // total number of records
    footer += (uint32_t)global_header.size(); // nbytes of global headers
    footer += (uint32_t)(global_header_offset + nbytes +
                         local_header
                                 .size()); // offset of start of global
                                           // headers, since global header now
                                           // starts after newly written array
    footer += (uint16_t)0; // zip file comment length
    // write everything
    fwrite(&local_header[0], sizeof(char), local_header.size(), fp);
    fwrite(&npy_header[0], sizeof(char), npy_header.size(), fp);
    fwrite(data, sizeof(T), nels, fp);
    fwrite(&global_header[0], sizeof(char), global_header.size(), fp);
    fwrite(&footer[0], sizeof(char), footer.size(), fp);
    fclose(fp);
}
// Convenience overload: save a std::vector as a 1-D .npy array.
template <typename T>
void npy_save(std::string fname,
              const std::vector<T> data,
              std::string mode = "w") {
    const std::vector<size_t> shape{data.size()};
    npy_save(fname, &data[0], shape, mode);
}
// Convenience overload: save a std::vector as a 1-D array inside an .npz.
template <typename T>
void npz_save(std::string zipname,
              std::string fname,
              const std::vector<T> data,
              std::string mode = "w") {
    const std::vector<size_t> shape{data.size()};
    npz_save(zipname, fname, &data[0], shape, mode);
}
// Build the .npy (format version 1.0) header for an array of element type T
// and the given shape: magic string, version, little-endian dict length,
// then the Python-literal dict padded with spaces to a 16-byte boundary and
// terminated with '\n'.
// NOTE(review): assumes shape is non-empty (shape[0] is read unguarded) and
// that the dict fits in a uint16 length field — confirm for very high-rank
// arrays.
template <typename T>
std::vector<char> create_npy_header(const std::vector<size_t>& shape) {
    std::vector<char> dict;
    dict += "{'descr': '";
    dict += BigEndianTest();           // byte-order character: '<' or '>'
    dict += map_type(typeid(T));       // dtype kind, e.g. 'f', 'i', 'u'
    dict += std::to_string(sizeof(T)); // element size in bytes
    dict += "', 'fortran_order': False, 'shape': (";
    dict += std::to_string(shape[0]);
    for (size_t i = 1; i < shape.size(); i++) {
        dict += ", ";
        dict += std::to_string(shape[i]);
    }
    // 1-D shapes need a trailing comma to stay a Python tuple: "(n,)".
    if (shape.size() == 1) dict += ",";
    dict += "), }";
    // pad with spaces so that preamble+dict is modulo 16 bytes. preamble is 10
    // bytes. dict needs to end with \n
    int remainder = 16 - (10 + dict.size()) % 16;
    dict.insert(dict.end(), remainder, ' ');
    dict.back() = '\n';
    std::vector<char> header;
    header += (char)0x93;  // magic: \x93NUMPY
    header += "NUMPY";
    header += (char)0x01; // major version of numpy format
    header += (char)0x00; // minor version of numpy format
    header += (uint16_t)dict.size(); // little-endian dict length
    header.insert(header.end(), dict.begin(), dict.end());
    return header;
}
} // namespace cnpy
#endif
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/deepsdf/train_deep_sdf.py | Python | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import json
import logging
import math
import os
import signal
import sys
import time
import torch
import torch.utils.data as data_utils
import lit.extern.deepsdf.deep_sdf
import lit.extern.deepsdf.deep_sdf.workspace as ws
class LearningRateSchedule:
    """Base class for per-epoch learning-rate schedules.

    Subclasses override :meth:`get_learning_rate`; this base implementation
    is a no-op placeholder (returns ``None``).
    """

    def get_learning_rate(self, epoch):
        """Return the learning rate to use at the given epoch."""
        pass
class ConstantLearningRateSchedule(LearningRateSchedule):
    """Schedule that yields the same learning rate at every epoch."""

    def __init__(self, value):
        # Fixed learning rate returned for all epochs.
        self.value = value

    def get_learning_rate(self, epoch):
        """Return the constant rate; `epoch` is ignored."""
        return self.value
class StepLearningRateSchedule(LearningRateSchedule):
    """Step decay: multiply the rate by `factor` every `interval` epochs."""

    def __init__(self, initial, interval, factor):
        # initial: rate at epoch 0; interval: epochs between decays;
        # factor: multiplicative decay applied at each step.
        self.initial = initial
        self.interval = interval
        self.factor = factor

    def get_learning_rate(self, epoch):
        """Return initial * factor ** (number of completed intervals)."""
        num_decays = epoch // self.interval
        return self.initial * (self.factor**num_decays)
class WarmupLearningRateSchedule(LearningRateSchedule):
    """Linear warmup from `initial` to `warmed_up` over `length` epochs."""

    def __init__(self, initial, warmed_up, length):
        # initial: rate at epoch 0; warmed_up: rate after warmup;
        # length: number of epochs the linear ramp lasts.
        self.initial = initial
        self.warmed_up = warmed_up
        self.length = length

    def get_learning_rate(self, epoch):
        """Interpolate linearly during warmup, then hold `warmed_up`."""
        if epoch <= self.length:
            return self.initial + (self.warmed_up - self.initial) * epoch / self.length
        return self.warmed_up
def get_learning_rate_schedules(specs):
    """Build learning-rate schedules from the experiment specs.

    Args:
        specs: Experiment specification dict. Reads the
            "LearningRateSchedule" key: a list of per-parameter-group dicts
            whose "Type" is one of "Step", "Warmup", or "Constant".

    Returns:
        List of ``LearningRateSchedule`` instances, one per spec entry
        (order matters — it must match the optimizer's parameter groups).

    Raises:
        Exception: If an entry has an unrecognized "Type".
    """
    # Fix: the original shadowed the list with its own loop variable
    # (`for schedule_specs in schedule_specs`); use distinct names.
    all_schedule_specs = specs["LearningRateSchedule"]
    schedules = []
    for schedule_specs in all_schedule_specs:
        schedule_type = schedule_specs["Type"]
        if schedule_type == "Step":
            schedules.append(
                StepLearningRateSchedule(
                    schedule_specs["Initial"],
                    schedule_specs["Interval"],
                    schedule_specs["Factor"],
                )
            )
        elif schedule_type == "Warmup":
            schedules.append(
                WarmupLearningRateSchedule(
                    schedule_specs["Initial"],
                    schedule_specs["Final"],
                    schedule_specs["Length"],
                )
            )
        elif schedule_type == "Constant":
            schedules.append(ConstantLearningRateSchedule(schedule_specs["Value"]))
        else:
            raise Exception(
                'no known learning rate schedule of type "{}"'.format(schedule_type)
            )
    return schedules
def save_model(experiment_directory, filename, decoder, epoch):
    """Save the decoder's state dict (plus epoch) under the experiment's
    model-parameters directory (created if missing)."""
    model_params_dir = ws.get_model_params_dir(experiment_directory, True)
    torch.save(
        {"epoch": epoch, "model_state_dict": decoder.state_dict()},
        os.path.join(model_params_dir, filename),
    )
def save_optimizer(experiment_directory, filename, optimizer, epoch):
    """Save the optimizer's state dict (plus epoch) under the experiment's
    optimizer-parameters directory (created if missing)."""
    optimizer_params_dir = ws.get_optimizer_params_dir(experiment_directory, True)
    torch.save(
        {"epoch": epoch, "optimizer_state_dict": optimizer.state_dict()},
        os.path.join(optimizer_params_dir, filename),
    )
def load_optimizer(experiment_directory, filename, optimizer):
    """Restore an optimizer's state from a checkpoint file.

    Loads the file saved by :func:`save_optimizer`, applies the stored state
    dict to `optimizer` in place, and returns the epoch it was saved at.
    Raises Exception if the checkpoint file does not exist.
    """
    full_filename = os.path.join(
        ws.get_optimizer_params_dir(experiment_directory), filename
    )
    if not os.path.isfile(full_filename):
        raise Exception(
            'optimizer state dict "{}" does not exist'.format(full_filename)
        )
    data = torch.load(full_filename)
    optimizer.load_state_dict(data["optimizer_state_dict"])
    return data["epoch"]
def save_latent_vectors(experiment_directory, filename, latent_vec, epoch):
    """Save the latent-code embedding's state dict (plus epoch) under the
    experiment's latent-codes directory (created if missing)."""
    latent_codes_dir = ws.get_latent_codes_dir(experiment_directory, True)
    all_latents = latent_vec.state_dict()
    torch.save(
        {"epoch": epoch, "latent_codes": all_latents},
        os.path.join(latent_codes_dir, filename),
    )
# TODO: duplicated in workspace
def load_latent_vectors(experiment_directory, filename, lat_vecs):
    """Restore latent codes into the embedding `lat_vecs` in place.

    Supports two checkpoint layouts: the current one where "latent_codes"
    is an nn.Embedding state dict, and a legacy one where it is a raw
    tensor of per-shape codes. Returns the epoch the codes were saved at.
    Raises Exception if the file is missing or the code count/dim mismatch.
    """
    full_filename = os.path.join(
        ws.get_latent_codes_dir(experiment_directory), filename
    )
    if not os.path.isfile(full_filename):
        raise Exception('latent state file "{}" does not exist'.format(full_filename))
    data = torch.load(full_filename)
    if isinstance(data["latent_codes"], torch.Tensor):
        # for backwards compatibility
        if not lat_vecs.num_embeddings == data["latent_codes"].size()[0]:
            raise Exception(
                "num latent codes mismatched: {} vs {}".format(
                    lat_vecs.num_embeddings, data["latent_codes"].size()[0]
                )
            )
        # NOTE(review): size()[2] implies the legacy tensor is 3-D with a
        # singleton middle dim — confirm against old checkpoints.
        if not lat_vecs.embedding_dim == data["latent_codes"].size()[2]:
            raise Exception("latent code dimensionality mismatch")
        for i, lat_vec in enumerate(data["latent_codes"]):
            lat_vecs.weight.data[i, :] = lat_vec
    else:
        lat_vecs.load_state_dict(data["latent_codes"])
    return data["epoch"]
def save_logs(
    experiment_directory,
    loss_log,
    lr_log,
    timing_log,
    lat_mag_log,
    param_mag_log,
    epoch,
):
    """Persist all training histories (loss, lr, timing, latent and
    parameter magnitudes) plus the current epoch to the experiment's
    logs file, overwriting any previous version."""
    torch.save(
        {
            "epoch": epoch,
            "loss": loss_log,
            "learning_rate": lr_log,
            "timing": timing_log,
            "latent_magnitude": lat_mag_log,
            "param_magnitude": param_mag_log,
        },
        os.path.join(experiment_directory, ws.logs_filename),
    )
def load_logs(experiment_directory):
    """Load the training histories written by :func:`save_logs`.

    Returns a 6-tuple: (loss_log, lr_log, timing_log, lat_mag_log,
    param_mag_log, epoch). Raises Exception if the logs file is missing.
    """
    full_filename = os.path.join(experiment_directory, ws.logs_filename)
    if not os.path.isfile(full_filename):
        raise Exception('log file "{}" does not exist'.format(full_filename))
    data = torch.load(full_filename)
    return (
        data["loss"],
        data["learning_rate"],
        data["timing"],
        data["latent_magnitude"],
        data["param_magnitude"],
        data["epoch"],
    )
def clip_logs(loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, epoch):
    """Truncate all training histories to the first `epoch` epochs.

    The loss log holds one entry per iteration, so its cut point is derived
    from the iterations-per-epoch ratio; the other logs hold one entry per
    epoch. `param_mag_log` is truncated in place; the rest are sliced.
    Returns the truncated (loss, lr, timing, lat_mag, param_mag) tuple.
    """
    iters_per_epoch = len(loss_log) // len(lr_log)
    clipped_loss = loss_log[: iters_per_epoch * epoch]
    clipped_lr = lr_log[:epoch]
    clipped_timing = timing_log[:epoch]
    clipped_lat_mag = lat_mag_log[:epoch]
    for key in param_mag_log:
        param_mag_log[key] = param_mag_log[key][:epoch]
    return (clipped_loss, clipped_lr, clipped_timing, clipped_lat_mag, param_mag_log)
def get_spec_with_default(specs, key, default):
    """Return ``specs[key]``, falling back to `default` if the key is absent."""
    if key in specs:
        return specs[key]
    return default
def get_mean_latent_vector_magnitude(latent_vectors):
    """Return the mean L2 norm of all codes in the latent embedding table."""
    codes = latent_vectors.weight.data.detach()
    return torch.norm(codes, dim=1).mean()
def append_parameter_magnitudes(param_mag_log, model):
    """Append the current L2 norm of each named parameter to its history.

    Strips a leading "module." (added by DataParallel) from parameter names
    so the log keys stay stable across wrapped and unwrapped models.
    Mutates `param_mag_log` in place.
    """
    for name, param in model.named_parameters():
        key = name[7:] if len(name) > 7 and name[:7] == "module." else name
        param_mag_log.setdefault(key, []).append(param.data.norm().item())
def main_function(experiment_directory, continue_from, batch_split):
    """Train a DeepSDF auto-decoder.

    Reads the experiment's specs.json, builds the decoder network and a
    per-shape latent embedding, then jointly optimizes both with an L1 loss
    on clamped SDF values. Periodically checkpoints model, optimizer,
    latent codes, and logs into `experiment_directory`.

    Args:
        experiment_directory: Directory holding specs.json; all outputs go here.
        continue_from: Checkpoint tag (e.g. "latest" or an epoch number as a
            string) to resume from, or None to start fresh.
        batch_split: Number of sub-batches to split each batch into, with
            gradients accumulated across them (for memory-limited training).
    """
    logging.debug("running " + experiment_directory)
    specs = ws.load_experiment_specifications(experiment_directory)
    logging.info("Experiment description: \n" + "".join(specs["Description"]))
    data_source = specs["DataSource"]
    train_split_file = specs["TrainSplit"]
    # Dynamically import the decoder architecture module named in the specs.
    arch = __import__(
        "lit.extern.deepsdf.networks." + specs["NetworkArch"], fromlist=["Decoder"]
    )
    logging.debug(specs["NetworkSpecs"])
    latent_size = specs["CodeLength"]
    # Epochs at which to write numbered snapshots (regular interval plus any
    # extras listed in the specs).
    checkpoints = list(
        range(
            specs["SnapshotFrequency"],
            specs["NumEpochs"] + 1,
            specs["SnapshotFrequency"],
        )
    )
    for checkpoint in specs["AdditionalSnapshots"]:
        checkpoints.append(checkpoint)
    checkpoints.sort()
    lr_schedules = get_learning_rate_schedules(specs)
    grad_clip = get_spec_with_default(specs, "GradientClipNorm", None)
    if grad_clip is not None:
        logging.debug("clipping gradients to max norm {}".format(grad_clip))

    # Save model/optimizer/latents under the rolling "latest" tag.
    def save_latest(epoch):
        save_model(experiment_directory, "latest.pth", decoder, epoch)
        save_optimizer(experiment_directory, "latest.pth", optimizer_all, epoch)
        save_latent_vectors(experiment_directory, "latest.pth", lat_vecs, epoch)

    # Save model/optimizer/latents under a numbered epoch tag.
    def save_checkpoints(epoch):
        save_model(experiment_directory, str(epoch) + ".pth", decoder, epoch)
        save_optimizer(experiment_directory, str(epoch) + ".pth", optimizer_all, epoch)
        save_latent_vectors(experiment_directory, str(epoch) + ".pth", lat_vecs, epoch)

    # Ctrl-C handler: exit cleanly instead of leaving a corrupt checkpoint.
    def signal_handler(sig, frame):
        logging.info("Stopping early...")
        sys.exit(0)

    # Apply each schedule to its matching optimizer parameter group.
    def adjust_learning_rate(lr_schedules, optimizer, epoch):
        for i, param_group in enumerate(optimizer.param_groups):
            param_group["lr"] = lr_schedules[i].get_learning_rate(epoch)

    # Mean/variance of a subset of latent vectors (diagnostic helper).
    def empirical_stat(latent_vecs, indices):
        lat_mat = torch.zeros(0).cuda()
        for ind in indices:
            lat_mat = torch.cat([lat_mat, latent_vecs[ind]], 0)
        mean = torch.mean(lat_mat, 0)
        var = torch.var(lat_mat, 0)
        return mean, var

    signal.signal(signal.SIGINT, signal_handler)
    num_samp_per_scene = specs["SamplesPerScene"]
    scene_per_batch = specs["ScenesPerBatch"]
    # SDF values are clamped to [-clamp_dist, clamp_dist] per the DeepSDF paper.
    clamp_dist = specs["ClampingDistance"]
    minT = -clamp_dist
    maxT = clamp_dist
    enforce_minmax = True
    do_code_regularization = get_spec_with_default(specs, "CodeRegularization", True)
    code_reg_lambda = get_spec_with_default(specs, "CodeRegularizationLambda", 1e-4)
    code_bound = get_spec_with_default(specs, "CodeBound", None)
    decoder = arch.Decoder(latent_size, **specs["NetworkSpecs"]).cuda()
    logging.info("training with {} GPU(s)".format(torch.cuda.device_count()))
    # if torch.cuda.device_count() > 1:
    decoder = torch.nn.DataParallel(decoder)
    num_epochs = specs["NumEpochs"]
    log_frequency = get_spec_with_default(specs, "LogFrequency", 10)
    with open(train_split_file, "r") as f:
        train_split = json.load(f)
    # NOTE(review): `deep_sdf` is not bound by this module's imports (only
    # `lit.extern.deepsdf.deep_sdf` is imported) — verify how this name
    # resolves at runtime.
    sdf_dataset = deep_sdf.data.SDFSamples(
        data_source, train_split, num_samp_per_scene, load_ram=False
    )
    num_data_loader_threads = get_spec_with_default(specs, "DataLoaderThreads", 1)
    logging.debug("loading data with {} threads".format(num_data_loader_threads))
    sdf_loader = data_utils.DataLoader(
        sdf_dataset,
        batch_size=scene_per_batch,
        shuffle=True,
        num_workers=num_data_loader_threads,
        drop_last=True,
    )
    logging.debug("torch num_threads: {}".format(torch.get_num_threads()))
    num_scenes = len(sdf_dataset)
    logging.info("There are {} scenes".format(num_scenes))
    logging.debug(decoder)
    # One learnable latent code per training scene; max_norm optionally
    # bounds code magnitude.
    lat_vecs = torch.nn.Embedding(num_scenes, latent_size, max_norm=code_bound)
    torch.nn.init.normal_(
        lat_vecs.weight.data,
        0.0,
        get_spec_with_default(specs, "CodeInitStdDev", 1.0) / math.sqrt(latent_size),
    )
    logging.debug(
        "initialized with mean magnitude {}".format(
            get_mean_latent_vector_magnitude(lat_vecs)
        )
    )
    loss_l1 = torch.nn.L1Loss(reduction="sum")
    # Two parameter groups (decoder weights, latent codes) — one lr schedule
    # each, matching the order from get_learning_rate_schedules.
    optimizer_all = torch.optim.Adam(
        [
            {
                "params": decoder.parameters(),
                "lr": lr_schedules[0].get_learning_rate(0),
            },
            {
                "params": lat_vecs.parameters(),
                "lr": lr_schedules[1].get_learning_rate(0),
            },
        ]
    )
    loss_log = []
    lr_log = []
    lat_mag_log = []
    timing_log = []
    param_mag_log = {}
    start_epoch = 1
    if continue_from is not None:
        # Resume: restore latents, model, optimizer, and logs, and verify
        # their epochs agree before continuing.
        logging.info('continuing from "{}"'.format(continue_from))
        lat_epoch = load_latent_vectors(
            experiment_directory, continue_from + ".pth", lat_vecs
        )
        model_epoch = ws.load_model_parameters(
            experiment_directory, continue_from, decoder
        )
        optimizer_epoch = load_optimizer(
            experiment_directory, continue_from + ".pth", optimizer_all
        )
        loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, log_epoch = load_logs(
            experiment_directory
        )
        if not log_epoch == model_epoch:
            loss_log, lr_log, timing_log, lat_mag_log, param_mag_log = clip_logs(
                loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, model_epoch
            )
        if not (model_epoch == optimizer_epoch and model_epoch == lat_epoch):
            raise RuntimeError(
                "epoch mismatch: {} vs {} vs {} vs {}".format(
                    model_epoch, optimizer_epoch, lat_epoch, log_epoch
                )
            )
        start_epoch = model_epoch + 1
        logging.debug("loaded")
    logging.info("starting from epoch {}".format(start_epoch))
    logging.info(
        "Number of decoder parameters: {}".format(
            sum(p.data.nelement() for p in decoder.parameters())
        )
    )
    logging.info(
        "Number of shape code parameters: {} (# codes {}, code dim {})".format(
            lat_vecs.num_embeddings * lat_vecs.embedding_dim,
            lat_vecs.num_embeddings,
            lat_vecs.embedding_dim,
        )
    )
    for epoch in range(start_epoch, num_epochs + 1):
        start = time.time()
        logging.info("epoch {}...".format(epoch))
        decoder.train()
        adjust_learning_rate(lr_schedules, optimizer_all, epoch)
        for sdf_data, indices in sdf_loader:
            # Process the input data: flatten to (N, 4) rows of (x, y, z, sdf).
            sdf_data = sdf_data.reshape(-1, 4)
            num_sdf_samples = sdf_data.shape[0]
            sdf_data.requires_grad = False
            xyz = sdf_data[:, 0:3]
            sdf_gt = sdf_data[:, 3].unsqueeze(1)
            if enforce_minmax:
                sdf_gt = torch.clamp(sdf_gt, minT, maxT)
            # Split into sub-batches; gradients accumulate across the splits.
            xyz = torch.chunk(xyz, batch_split)
            # Expand scene indices so each sample maps to its scene's code.
            indices = torch.chunk(
                indices.unsqueeze(-1).repeat(1, num_samp_per_scene).view(-1),
                batch_split,
            )
            sdf_gt = torch.chunk(sdf_gt, batch_split)
            batch_loss = 0.0
            optimizer_all.zero_grad()
            for i in range(batch_split):
                batch_vecs = lat_vecs(indices[i])
                input = torch.cat([batch_vecs, xyz[i]], dim=1)
                # NN optimization
                pred_sdf = decoder(input)
                if enforce_minmax:
                    pred_sdf = torch.clamp(pred_sdf, minT, maxT)
                # Normalize by the full batch size so accumulation over
                # splits equals a single large-batch step.
                chunk_loss = loss_l1(pred_sdf, sdf_gt[i].cuda()) / num_sdf_samples
                if do_code_regularization:
                    # L2 penalty on code norms, ramped in over 100 epochs.
                    l2_size_loss = torch.sum(torch.norm(batch_vecs, dim=1))
                    reg_loss = (
                        code_reg_lambda * min(1, epoch / 100) * l2_size_loss
                    ) / num_sdf_samples
                    chunk_loss = chunk_loss + reg_loss.cuda()
                chunk_loss.backward()
                batch_loss += chunk_loss.item()
            logging.debug("loss = {}".format(batch_loss))
            loss_log.append(batch_loss)
            if grad_clip is not None:
                torch.nn.utils.clip_grad_norm_(decoder.parameters(), grad_clip)
            optimizer_all.step()
        end = time.time()
        seconds_elapsed = end - start
        timing_log.append(seconds_elapsed)
        lr_log.append([schedule.get_learning_rate(epoch) for schedule in lr_schedules])
        lat_mag_log.append(get_mean_latent_vector_magnitude(lat_vecs))
        append_parameter_magnitudes(param_mag_log, decoder)
        if epoch in checkpoints:
            save_checkpoints(epoch)
        if epoch % log_frequency == 0:
            save_latest(epoch)
            save_logs(
                experiment_directory,
                loss_log,
                lr_log,
                timing_log,
                lat_mag_log,
                param_mag_log,
                epoch,
            )
if __name__ == "__main__":
import argparse
arg_parser = argparse.ArgumentParser(description="Train a DeepSDF autodecoder")
arg_parser.add_argument(
"--experiment",
"-e",
dest="experiment_directory",
required=True,
help="The experiment directory. This directory should include "
+ "experiment specifications in 'specs.json', and logging will be "
+ "done in this directory as well.",
)
arg_parser.add_argument(
"--continue",
"-c",
dest="continue_from",
help="A snapshot to continue from. This can be 'latest' to continue"
+ "from the latest running snapshot, or an integer corresponding to "
+ "an epochal snapshot.",
)
arg_parser.add_argument(
"--batch_split",
dest="batch_split",
default=1,
help="This splits the batch into separate subbatches which are "
+ "processed separately, with gradients accumulated across all "
+ "subbatches. This allows for training with large effective batch "
+ "sizes in memory constrained environments.",
)
deep_sdf.add_common_args(arg_parser)
args = arg_parser.parse_args()
deep_sdf.configure_logging(args)
main_function(args.experiment_directory, args.continue_from, int(args.batch_split))
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/examples/helpers.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include <vector_types.h>
#include "vec_math.h"
// Convert a linear-light RGB color to sRGB, applying the piecewise sRGB
// transfer function per channel (linear segment below 0.0031308, gamma
// 1/2.4 segment above). Input is expected in [0, 1] per channel.
__forceinline__ __device__ float3 toSRGB(const float3& c) {
    float invGamma = 1.0f / 2.4f;
    // Gamma-compressed values; used only on the above-threshold branch.
    float3 powed = make_float3(powf(c.x, invGamma), powf(c.y, invGamma),
                               powf(c.z, invGamma));
    return make_float3(
            c.x < 0.0031308f ? 12.92f * c.x : 1.055f * powed.x - 0.055f,
            c.y < 0.0031308f ? 12.92f * c.y : 1.055f * powed.y - 0.055f,
            c.z < 0.0031308f ? 12.92f * c.z : 1.055f * powed.z - 0.055f);
}
// Quantize a float in [0, 1] to an 8-bit value in [0, 255]. Scaling by 256
// and clamping to 255 distributes the buckets evenly so that only exactly
// 1.0 maps to 255's upper edge.
__forceinline__ __device__ unsigned char quantizeUnsigned8Bits(float x) {
    x = clamp(x, 0.0f, 1.0f);
    enum { N = (1 << 8) - 1, Np1 = (1 << 8) };
    return (unsigned char)min((unsigned int)(x * (float)Np1), (unsigned int)N);
}
// Convert a linear RGB float3 to an 8-bit RGBA pixel: clamp to [0,1],
// apply sRGB gamma, quantize each channel, and set alpha to opaque (255).
__forceinline__ __device__ uchar4 make_color(const float3& c) {
    // first apply gamma, then convert to unsigned char
    float3 srgb = toSRGB(clamp(c, 0.0f, 1.0f));
    return make_uchar4(quantizeUnsigned8Bits(srgb.x),
                       quantizeUnsigned8Bits(srgb.y),
                       quantizeUnsigned8Bits(srgb.z), 255u);
}
// float4 overload: the alpha component of `c` is dropped; output alpha is 255.
__forceinline__ __device__ uchar4 make_color(const float4& c) {
    return make_color(make_float3(c.x, c.y, c.z));
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/examples/triangle.cu | CUDA | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <math_constants.h>
#include <optix.h>
#include "helpers.h"
#include "triangle.h"
#include "vec_math.h"
extern "C" {
__constant__ Params params;
}
// Write a float3 into the three 32-bit ray payload registers so the raygen
// program can read it back after optixTrace returns.
static __forceinline__ __device__ void setPayload(float3 p) {
    // Payload registers takes 32-bit integer values, float_as_int casts a float
    // to a 32-bit integer with bits preserved.
    optixSetPayload_0(float_as_int(p.x));
    optixSetPayload_1(float_as_int(p.y));
    optixSetPayload_2(float_as_int(p.z));
}
// Fetch the precomputed ray origin/direction for this launch index from the
// rays_o / rays_d buffers supplied by the host (row-major, width-strided).
// The commented-out block is the original pinhole-camera ray generation,
// kept for reference.
static __forceinline__ __device__ void computeRay(uint3 idx,
                                                  uint3 dim,
                                                  float3& origin,
                                                  float3& direction) {
    // const float3 U = params.cam_u;
    // const float3 V = params.cam_v;
    // const float3 W = params.cam_w;
    // const float2 d = 2.0f * make_float2(static_cast<float>(idx.x) /
    // static_cast<float>(dim.x),
    //                                     static_cast<float>(idx.y) /
    //                                     static_cast<float>(dim.y)) -
    //                  1.0f;
    // origin = params.cam_eye;
    // direction = normalize(d.x * U + d.y * V + W);
    origin = params.rays_o[idx.y * params.width + idx.x];
    direction = params.rays_d[idx.y * params.width + idx.x];
}
extern "C" __global__ void __raygen__rg() {
// Lookup our location within the launch grid
const uint3 idx = optixGetLaunchIndex();
const uint3 dim = optixGetLaunchDimensions();
// Map our launch idx to a screen location and create a ray from the camera
// location through the screen
float3 ray_origin, ray_direction;
computeRay(make_uint3(idx.x, idx.y, 0), dim, ray_origin, ray_direction);
// Trace the ray against our scene hierarchy
unsigned int p0, p1, p2;
optixTrace(params.handle, // See Params class in triangle.h
ray_origin, // float3
ray_direction, // float3
0.0f, // Min intersection distance
1e16f, // Max intersection distance
0.0f, // rayTime -- used for motion blur
OptixVisibilityMask(255), // Specify always visible
OPTIX_RAY_FLAG_NONE,
0, // SBT offset -- See SBT discussion
1, // SBT stride -- See SBT discussion
0, // missSBTIndex -- See SBT discussion
p0, // optixSetPayload_0, returned from hit or miss kernel
p1, // optixSetPayload_1, returned from hit or miss kernel
p2 // optixSetPayload_2, returned from hit or miss kernel
);
// Convert the ray cast result values back to floats.
float3 result = make_float3(0);
result.x = int_as_float(p0);
result.y = int_as_float(p1);
result.z = int_as_float(p2);
// Record results in our output raster
params.image[idx.y * params.width + idx.x] = result;
}
extern "C" __global__ void __miss__ms() {
MissData* miss_data = reinterpret_cast<MissData*>(optixGetSbtDataPointer());
// https://stackoverflow.com/a/15514595/1255535
setPayload(make_float3(CUDART_INF_F, CUDART_INF_F, CUDART_INF_F));
}
extern "C" __global__ void __closesthit__ch() {
// When built-in triangle intersection is used, a number of fundamental
// attributes are provided by the OptiX API, indlucing barycentric
// coordinates.
const float2 barycentrics = optixGetTriangleBarycentrics();
setPayload(make_float3(barycentrics, 1.0f));
// Compute intersection point coordinates.
const float3 ray_origin = optixGetWorldRayOrigin();
const float3 ray_direction = optixGetWorldRayDirection();
// Get the hit distance.
const float t = optixGetRayTmax();
// Compute the intersection point.
const float3 p = ray_origin + t * ray_direction;
setPayload(p);
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/examples/triangle.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include <vector_types.h>
// Launch parameters shared between host (triangle.py packs these
// positionally into a numpy struct) and device (triangle.cu reads them).
// Field ORDER and sizes must match the host-side packing exactly.
struct Params {
    unsigned int width;   // Launch width; total rays = width * height.
    unsigned int height;  // Launch height.
    float3* image; // Each element contains R, G, B float32 value of a pixel
    float3* rays_o;  // Device pointer: one ray origin per launch index.
    float3* rays_d;  // Device pointer: one ray direction per launch index.
    OptixTraversableHandle handle;  // Acceleration structure to trace against.
};
// Per-program SBT record payloads (packed after the record header on the
// host side in triangle.py's create_sbt).
struct RayGenData {
    // No data needed
};
struct MissData {
    // Background color packed into the miss SBT record.
    // NOTE(review): the __miss__ms program in triangle.cu currently writes
    // +inf to the payload and ignores this value.
    float3 bg_color;
};
struct HitGroupData {
    // No data needed
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/examples/triangle.py | Python | import ctypes # C interop helpers
import os
import pickle
import time
from pathlib import Path
import camtools as ct
import cupy as cp # CUDA bindings
import numpy as np # Packing of structures in C-compatible format
import open3d as o3d
import optix
import path_util
from PIL import Image, ImageOps # Image IO
from pynvrtc.compiler import Program
class Logger:
    """Callable log sink handed to the OptiX device context.

    Prints each log record and keeps a running count of messages seen.
    """

    def __init__(self):
        # Number of log records received so far.
        self.num_mssgs = 0

    def __call__(self, level, tag, mssg):
        """Print one OptiX log record (level, tag, message) and count it."""
        print(f"[{level:>2}][{tag:>12}]: {mssg}")
        self.num_mssgs += 1
class OptixEngine:
    """
    Wrapper to store "global" variables and provide a better API.

    Typical lifecycle:
        oe = OptixEngine()        # creates context, compiles triangle.cu
        oe.set_sensor(w, h)       # launch dimensions (w * h rays per launch)
        oe.set_geometry(v, t)     # builds GAS, pipeline, and SBT
        hits = oe.launch(o, d)    # casts rays, returns per-ray payloads
    """

    def __init__(self) -> None:
        self.logger = Logger()
        self.ctx = self.create_ctx()
        self.pipeline_options = OptixEngine.set_pipeline_options()
        # Compile static code (the device programs in triangle.cu).
        triangle_cu = os.path.join(os.path.dirname(__file__), "triangle.cu")
        triangle_ptx = OptixEngine.compile_cuda(triangle_cu)
        self.module = self.create_module(self.ctx, self.pipeline_options, triangle_ptx)
        # State for geometry.
        self.is_geometry_set = False
        # Sensor info (set via set_sensor before launch).
        self.width = None
        self.height = None

    def set_sensor(self, width, height):
        """Set launch dimensions; launch() casts width * height rays."""
        self.width = width
        self.height = height

    def set_geometry(self, vertices: np.ndarray, triangles: np.ndarray) -> None:
        """
        Build everything needed to trace rays against a triangle mesh.

        Args:
            vertices: (N, 3) float array of vertex positions.
            triangles: (M, 3) int array of triangle vertex indices.
        """
        self.gas_handle, self.d_gas_output_buffer = self.create_accel(
            self.ctx,
            np_vertices=vertices,
            np_triangles=triangles,
        )
        self.prog_groups = self.create_program_groups(self.ctx, self.module)
        self.pipeline = self.create_pipeline(
            self.ctx,
            self.prog_groups,
            self.pipeline_options,
        )
        # Keep the device-side SBT records alive for the pipeline's lifetime.
        (
            self.d_raygen_sbt,
            self.d_miss_sbt,
            self.d_hitgroup_sbt,
            self.sbt,
        ) = self.create_sbt(self.prog_groups)
        self.is_geometry_set = True

    def launch(self, rays_o, rays_d):
        """
        Cast one ray per launch index and return the written payloads.

        Args:
            rays_o: (N, 3) array of ray origins.
            rays_d: (N, 3) array of ray directions.

        Returns:
            (width, height, 3) float32 array of payload values (hit points;
            misses are +inf per triangle.cu's miss program).
            NOTE(review): the (width, height) ordering looks transposed vs
            the usual (height, width) image convention; the total size
            matches and callers reshape to a flat (N, 3) -- confirm before
            using as a 2D image.

        Raises:
            RuntimeError: if set_geometry() has not been called.
        """
        if not self.is_geometry_set:
            raise RuntimeError("Mesh is not set. Call set_geometry() first.")
        print("Launching ... ")
        h_im = np.zeros((self.width, self.height, 3), np.float32)
        d_im = cp.array(h_im)
        d_rays_o = cp.array(rays_o, dtype="f4")
        d_rays_d = cp.array(rays_d, dtype="f4")
        # Pack the launch params as a numpy struct. Packing is POSITIONAL:
        # the field order must match struct Params in triangle.h; the names
        # here are only labels for the numpy dtype.
        params = [
            ("u4", "width", self.width),  # uint32_t int
            ("u4", "height", self.height),  # uint32_t int
            ("u8", "image", d_im.data.ptr),  # uint64_t pointer
            ("u8", "rays_o", d_rays_o.data.ptr),  # uint64_t pointer
            ("u8", "rays_d", d_rays_d.data.ptr),  # uint64_t pointer
            ("u8", "trav_handle", self.gas_handle),  # uint64_t pointer
        ]
        formats = [x[0] for x in params]
        names = [x[1] for x in params]
        values = [x[2] for x in params]
        itemsize = OptixEngine.get_aligned_itemsize(formats, 8)
        params_dtype = np.dtype(
            {"names": names, "formats": formats, "itemsize": itemsize, "align": True}
        )
        h_params = np.array([tuple(values)], dtype=params_dtype)
        d_params = OptixEngine.array_to_device_memory(h_params)
        stream = cp.cuda.Stream()
        optix.launch(
            self.pipeline,
            stream.ptr,
            d_params.ptr,
            h_params.dtype.itemsize,
            self.sbt,
            self.width,
            self.height,
            1,  # depth
        )
        stream.synchronize()
        h_im = cp.asnumpy(d_im)
        return h_im

    def create_ctx(self):
        """Create the OptiX device context, logging through self.logger."""
        print("Creating optix device context ...")
        # OptiX param struct fields can be set with optional
        # keyword constructor arguments.
        ctx_options = optix.DeviceContextOptions(
            logCallbackFunction=self.logger, logCallbackLevel=4
        )
        # They can also be set and queried as properties on the struct.
        # Validation mode is only available from OptiX 7.2 onwards.
        if optix.version()[1] >= 2:
            ctx_options.validationMode = optix.DEVICE_CONTEXT_VALIDATION_MODE_ALL
        cu_ctx = 0  # 0 means "use the current CUDA context".
        return optix.deviceContextCreate(cu_ctx, ctx_options)

    def create_accel(self, ctx, np_vertices, np_triangles):
        """
        Build a geometry acceleration structure (GAS) for a triangle mesh.

        Args:
            ctx: Optix context.
            np_vertices: (N, 3) array of vertices.
            np_triangles: (M, 3) array of triangle indices.

        Returns:
            (gas_handle, d_gas_output_buffer). The buffer must stay alive
            for as long as the handle is traced against.
        """
        ct.sanity.assert_shape_nx3(np_vertices, name="np_vertices")
        ct.sanity.assert_shape_nx3(np_triangles, name="np_triangles")
        accel_options = optix.AccelBuildOptions(
            buildFlags=int(optix.BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS),
            operation=optix.BUILD_OPERATION_BUILD,
        )
        # Flatten to the raw layouts OptiX expects (xyzxyz..., iii...).
        np_vertices = np.ascontiguousarray(np_vertices.flatten())
        np_triangles = np.ascontiguousarray(np_triangles.flatten())
        vertices = cp.array(np_vertices, dtype="f4")
        indices = cp.array(np_triangles, dtype="u4")
        triangle_input_flags = [optix.GEOMETRY_FLAG_NONE]  # One flag is sufficient
        triangle_input = optix.BuildInputTriangleArray()
        triangle_input.vertexFormat = optix.VERTEX_FORMAT_FLOAT3
        triangle_input.numVertices = len(vertices) // 3
        triangle_input.vertexBuffers = [vertices.data.ptr]
        triangle_input.indexFormat = optix.INDICES_FORMAT_UNSIGNED_INT3
        triangle_input.numIndexTriplets = len(indices) // 3
        triangle_input.indexBuffer = indices.data.ptr
        triangle_input.flags = triangle_input_flags
        triangle_input.numSbtRecords = 1
        gas_buffer_sizes = ctx.accelComputeMemoryUsage(
            [accel_options], [triangle_input]
        )
        d_temp_buffer_gas = cp.cuda.alloc(gas_buffer_sizes.tempSizeInBytes)
        d_gas_output_buffer = cp.cuda.alloc(gas_buffer_sizes.outputSizeInBytes)
        gas_handle = ctx.accelBuild(
            0,  # CUDA stream
            [accel_options],
            [triangle_input],
            d_temp_buffer_gas.ptr,
            gas_buffer_sizes.tempSizeInBytes,
            d_gas_output_buffer.ptr,
            gas_buffer_sizes.outputSizeInBytes,
            [],  # emitted properties
        )
        return (gas_handle, d_gas_output_buffer)

    def create_module(self, ctx, pipeline_options, triangle_ptx):
        """Create an OptiX module from the compiled PTX of triangle.cu."""
        print("Creating optix module ...")
        module_options = optix.ModuleCompileOptions(
            maxRegisterCount=optix.COMPILE_DEFAULT_MAX_REGISTER_COUNT,
            optLevel=optix.COMPILE_OPTIMIZATION_DEFAULT,
            debugLevel=optix.COMPILE_DEBUG_LEVEL_DEFAULT,
        )
        module, log = ctx.moduleCreateFromPTX(
            module_options, pipeline_options, triangle_ptx
        )
        print("\tModule create log: <<<{}>>>".format(log))
        return module

    def create_program_groups(self, ctx, module):
        """
        Create the raygen / miss / closest-hit program groups.

        Entry point names must match the __raygen__/__miss__/__closesthit__
        functions defined in triangle.cu.
        """
        print("Creating program groups ... ")
        raygen_prog_group_desc = optix.ProgramGroupDesc()
        raygen_prog_group_desc.raygenModule = module
        raygen_prog_group_desc.raygenEntryFunctionName = "__raygen__rg"
        raygen_prog_group, log = ctx.programGroupCreate([raygen_prog_group_desc])
        print("\tProgramGroup raygen create log: <<<{}>>>".format(log))
        miss_prog_group_desc = optix.ProgramGroupDesc()
        miss_prog_group_desc.missModule = module
        miss_prog_group_desc.missEntryFunctionName = "__miss__ms"
        miss_prog_group, log = ctx.programGroupCreate([miss_prog_group_desc])
        print("\tProgramGroup miss create log: <<<{}>>>".format(log))
        hitgroup_prog_group_desc = optix.ProgramGroupDesc()
        hitgroup_prog_group_desc.hitgroupModuleCH = module
        hitgroup_prog_group_desc.hitgroupEntryFunctionNameCH = "__closesthit__ch"
        hitgroup_prog_group, log = ctx.programGroupCreate([hitgroup_prog_group_desc])
        print("\tProgramGroup hitgroup create log: <<<{}>>>".format(log))
        return [raygen_prog_group[0], miss_prog_group[0], hitgroup_prog_group[0]]

    def create_pipeline(self, ctx, program_groups, pipeline_compile_options):
        """Link the program groups into a pipeline and size its stacks."""
        print("Creating pipeline ... ")
        max_trace_depth = 1  # Primary rays only; no recursion.
        pipeline_link_options = optix.PipelineLinkOptions()
        pipeline_link_options.maxTraceDepth = max_trace_depth
        pipeline_link_options.debugLevel = optix.COMPILE_DEBUG_LEVEL_FULL
        log = ""
        pipeline = ctx.pipelineCreate(
            pipeline_compile_options, pipeline_link_options, program_groups, log
        )
        stack_sizes = optix.StackSizes()
        for prog_group in program_groups:
            optix.util.accumulateStackSizes(prog_group, stack_sizes)
        (
            dc_stack_size_from_trav,
            dc_stack_size_from_state,
            cc_stack_size,
        ) = optix.util.computeStackSizes(
            stack_sizes, max_trace_depth, 0, 0  # maxCCDepth # maxDCDepth
        )
        pipeline.setStackSize(
            dc_stack_size_from_trav,
            dc_stack_size_from_state,
            cc_stack_size,
            1,  # maxTraversableDepth
        )
        return pipeline

    def create_sbt(self, prog_groups):
        """
        Build the shader binding table with one record per program group.

        Returns:
            (d_raygen_sbt, d_miss_sbt, d_hitgroup_sbt, sbt). The three
            device buffers must be kept alive by the caller because the SBT
            only stores raw pointers to them.
        """
        print("Creating sbt ... ")
        (raygen_prog_group, miss_prog_group, hitgroup_prog_group) = prog_groups
        header_format = "{}B".format(optix.SBT_RECORD_HEADER_SIZE)
        # Raygen record: header only (RayGenData carries no payload).
        formats = [header_format]
        itemsize = OptixEngine.get_aligned_itemsize(formats, optix.SBT_RECORD_ALIGNMENT)
        dtype = np.dtype(
            {
                "names": ["header"],
                "formats": formats,
                "itemsize": itemsize,
                "align": True,
            }
        )
        h_raygen_sbt = np.array([0], dtype=dtype)
        optix.sbtRecordPackHeader(raygen_prog_group, h_raygen_sbt)
        d_raygen_sbt = OptixEngine.array_to_device_memory(h_raygen_sbt)
        # Miss record: header + MissData (an r, g, b background color).
        formats = [header_format, "f4", "f4", "f4"]
        itemsize = OptixEngine.get_aligned_itemsize(formats, optix.SBT_RECORD_ALIGNMENT)
        dtype = np.dtype(
            {
                "names": ["header", "r", "g", "b"],
                "formats": formats,
                "itemsize": itemsize,
                "align": True,
            }
        )
        h_miss_sbt = np.array([(0, 0.3, 0.1, 0.2)], dtype=dtype)  # MissData
        optix.sbtRecordPackHeader(miss_prog_group, h_miss_sbt)
        d_miss_sbt = OptixEngine.array_to_device_memory(h_miss_sbt)
        # Hitgroup record: header only (HitGroupData carries no payload).
        formats = [header_format]
        itemsize = OptixEngine.get_aligned_itemsize(formats, optix.SBT_RECORD_ALIGNMENT)
        dtype = np.dtype(
            {
                "names": ["header"],
                "formats": formats,
                "itemsize": itemsize,
                "align": True,
            }
        )
        h_hitgroup_sbt = np.array([(0)], dtype=dtype)
        optix.sbtRecordPackHeader(hitgroup_prog_group, h_hitgroup_sbt)
        d_hitgroup_sbt = OptixEngine.array_to_device_memory(h_hitgroup_sbt)
        sbt = optix.ShaderBindingTable(
            raygenRecord=d_raygen_sbt.ptr,
            missRecordBase=d_miss_sbt.ptr,
            missRecordStrideInBytes=h_miss_sbt.dtype.itemsize,
            missRecordCount=1,
            hitgroupRecordBase=d_hitgroup_sbt.ptr,
            hitgroupRecordStrideInBytes=h_hitgroup_sbt.dtype.itemsize,
            hitgroupRecordCount=1,
        )
        return d_raygen_sbt, d_miss_sbt, d_hitgroup_sbt, sbt

    @staticmethod
    def set_pipeline_options():
        """Return PipelineCompileOptions matching the installed OptiX.

        usesPrimitiveTypeFlags only exists from OptiX 7.2 onwards.
        """
        if optix.version()[1] >= 2:
            return optix.PipelineCompileOptions(
                usesMotionBlur=False,
                traversableGraphFlags=int(
                    optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS
                ),
                numPayloadValues=3,
                numAttributeValues=3,
                exceptionFlags=int(optix.EXCEPTION_FLAG_NONE),
                pipelineLaunchParamsVariableName="params",
                usesPrimitiveTypeFlags=optix.PRIMITIVE_TYPE_FLAGS_TRIANGLE,
            )
        else:
            return optix.PipelineCompileOptions(
                usesMotionBlur=False,
                traversableGraphFlags=int(
                    optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS
                ),
                numPayloadValues=3,
                numAttributeValues=3,
                exceptionFlags=int(optix.EXCEPTION_FLAG_NONE),
                pipelineLaunchParamsVariableName="params",
            )

    @staticmethod
    def get_aligned_itemsize(formats, alignment):
        """
        Return the aligned C-struct size for a list of numpy format codes.

        Builds a temporary aligned struct dtype from `formats` and rounds
        its itemsize up to the next multiple of `alignment`.
        """

        def round_up(val, mult_of):
            if val % mult_of == 0:
                return val
            else:
                return val + mult_of - val % mult_of

        names = []
        for i in range(len(formats)):
            names.append("x" + str(i))
        temp_dtype = np.dtype({"names": names, "formats": formats, "align": True})
        return round_up(temp_dtype.itemsize, alignment)

    @staticmethod
    def optix_version_gte(version):
        """Return True if the installed OptiX version is >= (major, minor)."""
        if optix.version()[0] > version[0]:
            return True
        if optix.version()[0] == version[0] and optix.version()[1] >= version[1]:
            return True
        return False

    @staticmethod
    def array_to_device_memory(numpy_array, stream=None):
        """
        Copy a host numpy array into freshly-allocated CUDA device memory.

        Args:
            numpy_array: Host array; its buffer is copied as raw bytes.
            stream: Optional cupy CUDA stream. If given, the copy is
                enqueued asynchronously on it and the CALLER must
                synchronize before the host array is freed or mutated.
                If None, a temporary stream is created and synchronized
                before returning, so the copy is complete on return.

        Returns:
            The cupy device memory pointer holding the copied bytes.
        """
        # NOTE: the original signature used `stream=cp.cuda.Stream()`, the
        # mutable-default-argument pitfall: one stream was created at class
        # definition time (requiring CUDA at import) and silently shared by
        # every call, and the async copy was never synchronized. Creating a
        # fresh stream per call and synchronizing it fixes both.
        own_stream = stream is None
        if own_stream:
            stream = cp.cuda.Stream()
        byte_size = numpy_array.size * numpy_array.dtype.itemsize
        h_ptr = ctypes.c_void_p(numpy_array.ctypes.data)
        d_mem = cp.cuda.memory.alloc(byte_size)
        d_mem.copy_from_async(h_ptr, byte_size, stream)
        if own_stream:
            # Ensure the copy finished before the temporary stream (and
            # potentially the host buffer) goes out of scope.
            stream.synchronize()
        return d_mem

    @staticmethod
    def compile_cuda(cuda_file):
        """
        Compile a .cu file to PTX with NVRTC and return the PTX string.

        Include paths come from path_util; NVRTC_DLL may be set in the
        environment to point at a specific NVRTC library.
        """
        with open(cuda_file, "rb") as f:
            src = f.read()
        nvrtc_dll = os.environ.get("NVRTC_DLL")
        if nvrtc_dll is None:
            nvrtc_dll = ""
        print("NVRTC_DLL = {}".format(nvrtc_dll))
        prog = Program(src.decode(), cuda_file, lib_name=nvrtc_dll)
        compile_options = [
            "-use_fast_math",
            "-lineinfo",
            "-default-device",
            "-std=c++11",
            "-rdc",
            "true",
            #'-IC:\\Program Files\\NVIDIA GPU Computing Toolkit\CUDA\\v11.1\include'
            f"-I{path_util.cuda_tk_path}",
            f"-I{path_util.include_path}",
        ]
        # Optix 7.0 compiles need path to system stddef.h
        # the value of optix.stddef_path is compiled in constant. When building
        # the module, the value can be specified via an environment variable, e.g.
        # export PYOPTIX_STDDEF_DIR="/usr/include/linux"
        if optix.version()[1] == 0:
            compile_options.append(f"-I{path_util.stddef_path}")
        ptx = prog.compile(compile_options)
        return ptx
def main():
    """Ray-cast a LiDAR scan against a mesh with OptiX and show hit points.

    Loads rays and a mesh from ../data, builds an OptixEngine, launches one
    ray per (row, col) of the LiDAR sensor, and converts the finite hit
    points into an open3d point cloud (visualization calls are commented
    out).
    """
    # NOTE(review): `vertices`/`triangles` are assigned three times below
    # (hard-coded triangles, then a cube+sphere mesh, then the real-world
    # mesh); only the last assignment is used. Dead code kept from the
    # original example.
    vertices = np.array(
        [
            [-0.5, -0.5, 0.0],
            [0.5, -0.5, 0.0],
            [0.0, 0.5, 0.0],
            [0.5, 0.5, 0.0],
            [-0.5, 0.5, 0.0],
            [0.0, -0.5, 0.0],
        ],
        dtype=np.float32,
    )
    triangles = np.array(
        [
            [0, 1, 2],
            [3, 4, 5],
        ],
        dtype=np.int32,
    )
    cube = o3d.geometry.TriangleMesh.create_box(width=0.25, height=0.25, depth=0.25)
    sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.25)
    mesh = cube + sphere
    vertices = np.array(mesh.vertices, dtype=np.float32)
    triangles = np.array(mesh.triangles, dtype=np.int32)
    # Read real-world mesh. ################################################
    script_dir = Path(__file__).parent.absolute().resolve()
    pyoptix_root = script_dir.parent
    data_dir = pyoptix_root / "data"
    raycast_data_path = data_dir / "raycast_data.pkl"
    raycast_mesh_path = data_dir / "raycast_mesh.ply"
    with open(raycast_data_path, "rb") as f:
        raycast_data = pickle.load(f)
    raycast_mesh = o3d.io.read_triangle_mesh(str(raycast_mesh_path))
    raycast_mesh.compute_vertex_normals()
    # o3d.visualization.draw_geometries([raycast_mesh])
    # Rays are stored as (N, 6): origins in columns 0:3, directions in 3:6.
    rays_o = raycast_data["rays"][:, :3]
    rays_d = raycast_data["rays"][:, 3:]
    width = raycast_data["lidar_intrinsics"].horizontal_res
    height = raycast_data["lidar_intrinsics"].vertical_res
    assert len(rays_o) == len(rays_d) == width * height
    # use_real_mesh = True
    # if use_real_mesh:
    triangles = np.array(raycast_mesh.triangles, dtype=np.int32)
    vertices = np.array(raycast_mesh.vertices, dtype=np.float32)
    ########################################################################
    # Create engine and set mesh.
    oe = OptixEngine()
    start_time = time.time()
    oe.set_sensor(width=width, height=height)
    oe.set_geometry(vertices, triangles)
    im_render = oe.launch(rays_o=rays_o, rays_d=rays_d)
    print(f"Ray casting took {time.time() - start_time:.5f} seconds.")
    # Plot im_render as a point cloud. Invalid values are +inf.
    im_render = im_render.reshape((oe.height * oe.width, 3))
    # Keep only rays that actually hit geometry (miss program writes +inf).
    im_render_mask = np.isfinite(im_render[:, 0])
    points = im_render[im_render_mask]
    # Drop hit points beyond the sensor's usable range (meters).
    max_range = 70
    lidar_center = rays_o[0]
    point_dists = np.linalg.norm(points - lidar_center, axis=1)
    points = points[point_dists < max_range]
    # Draw rays as lineset.
    # Points: all of rays_o and rays_d
    # Lines: all of rays_o -> rays_d
    ls = o3d.geometry.LineSet()
    ls_points = np.vstack((rays_o, rays_o + rays_d))
    num_lines = len(rays_o)
    ls_lines = np.vstack((np.arange(num_lines), np.arange(num_lines) + num_lines)).T
    ls.points = o3d.utility.Vector3dVector(ls_points)
    ls.lines = o3d.utility.Vector2iVector(ls_lines)
    # Remove points with coordinates smaller than 5.
    # points = points[np.abs(points[:, 0]) > 5]
    axes = o3d.geometry.TriangleMesh.create_coordinate_frame()
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)
    # o3d.visualization.draw_geometries([pcd, raycast_mesh, ls])
    # # PIL expects [ y, x ] resolution
    # im_render = im_render.reshape((height, width, 3))
    # im_render = (im_render * 255).astype(np.uint8)
    # # PIL expects y = 0 at bottom
    # img = ImageOps.flip(Image.fromarray(im_render, "RGB"))
    # img.show()
    # img.save("triangle.png")
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/examples/vec_math.h | C/C++ Header | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#if defined(__CUDACC__) || defined(__CUDABE__)
#define SUTIL_HOSTDEVICE __host__ __device__
#define SUTIL_INLINE __forceinline__
#define CONST_STATIC_INIT(...)
#else
#define SUTIL_HOSTDEVICE
#define SUTIL_INLINE inline
#define CONST_STATIC_INIT(...) = __VA_ARGS__
#endif
#include <vector_functions.h>
#include <vector_types.h>
#if !defined(__CUDACC_RTC__)
#include <cmath>
#include <cstdlib>
#endif
/* scalar functions used in vector functions */
#ifndef M_PIf
#define M_PIf 3.14159265358979323846f
#endif
#ifndef M_PI_2f
#define M_PI_2f 1.57079632679489661923f
#endif
#ifndef M_1_PIf
#define M_1_PIf 0.318309886183790671538f
#endif
#if !defined(__CUDACC__)
SUTIL_INLINE SUTIL_HOSTDEVICE int max(int a, int b) { return a > b ? a : b; }
SUTIL_INLINE SUTIL_HOSTDEVICE int min(int a, int b) { return a < b ? a : b; }
SUTIL_INLINE SUTIL_HOSTDEVICE long long max(long long a, long long b) {
return a > b ? a : b;
}
SUTIL_INLINE SUTIL_HOSTDEVICE long long min(long long a, long long b) {
return a < b ? a : b;
}
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int max(unsigned int a, unsigned int b) {
return a > b ? a : b;
}
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int min(unsigned int a, unsigned int b) {
return a < b ? a : b;
}
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long max(unsigned long long a,
unsigned long long b) {
return a > b ? a : b;
}
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long min(unsigned long long a,
unsigned long long b) {
return a < b ? a : b;
}
/** lerp */
SUTIL_INLINE SUTIL_HOSTDEVICE float lerp(const float a,
const float b,
const float t) {
return a + t * (b - a);
}
/** bilerp */
SUTIL_INLINE SUTIL_HOSTDEVICE float bilerp(const float x00,
const float x10,
const float x01,
const float x11,
const float u,
const float v) {
return lerp(lerp(x00, x10, u), lerp(x01, x11, u), v);
}
template <typename IntegerType>
SUTIL_INLINE SUTIL_HOSTDEVICE IntegerType roundUp(IntegerType x,
IntegerType y) {
return ((x + y - 1) / y) * y;
}
#endif
/** clamp */
SUTIL_INLINE SUTIL_HOSTDEVICE float clamp(const float f,
const float a,
const float b) {
return fmaxf(a, fminf(f, b));
}
/* float2 functions */
/******************************************************************************/
/** additional constructors
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float2 make_float2(const float s) {
return make_float2(s, s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 make_float2(const int2& a) {
return make_float2(float(a.x), float(a.y));
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 make_float2(const uint2& a) {
return make_float2(float(a.x), float(a.y));
}
/** @} */
/** negate */
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator-(const float2& a) {
return make_float2(-a.x, -a.y);
}
/** min
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float2 fminf(const float2& a, const float2& b) {
return make_float2(fminf(a.x, b.x), fminf(a.y, b.y));
}
SUTIL_INLINE SUTIL_HOSTDEVICE float fminf(const float2& a) {
return fminf(a.x, a.y);
}
/** @} */
/** max
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float2 fmaxf(const float2& a, const float2& b) {
return make_float2(fmaxf(a.x, b.x), fmaxf(a.y, b.y));
}
SUTIL_INLINE SUTIL_HOSTDEVICE float fmaxf(const float2& a) {
return fmaxf(a.x, a.y);
}
/** @} */
/** add
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator+(const float2& a,
const float2& b) {
return make_float2(a.x + b.x, a.y + b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator+(const float2& a, const float b) {
return make_float2(a.x + b, a.y + b);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator+(const float a, const float2& b) {
return make_float2(a + b.x, a + b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(float2& a, const float2& b) {
a.x += b.x;
a.y += b.y;
}
/** @} */
/** subtract
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator-(const float2& a,
const float2& b) {
return make_float2(a.x - b.x, a.y - b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator-(const float2& a, const float b) {
return make_float2(a.x - b, a.y - b);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator-(const float a, const float2& b) {
return make_float2(a - b.x, a - b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(float2& a, const float2& b) {
a.x -= b.x;
a.y -= b.y;
}
/** @} */
/** multiply
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator*(const float2& a,
const float2& b) {
return make_float2(a.x * b.x, a.y * b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator*(const float2& a, const float s) {
return make_float2(a.x * s, a.y * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator*(const float s, const float2& a) {
return make_float2(a.x * s, a.y * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float2& a, const float2& s) {
a.x *= s.x;
a.y *= s.y;
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float2& a, const float s) {
a.x *= s;
a.y *= s;
}
/** @} */
/** divide
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator/(const float2& a,
const float2& b) {
return make_float2(a.x / b.x, a.y / b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator/(const float2& a, const float s) {
float inv = 1.0f / s;
return a * inv;
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 operator/(const float s, const float2& a) {
return make_float2(s / a.x, s / a.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(float2& a, const float s) {
float inv = 1.0f / s;
a *= inv;
}
/** @} */
/** lerp */
SUTIL_INLINE SUTIL_HOSTDEVICE float2 lerp(const float2& a,
const float2& b,
const float t) {
return a + t * (b - a);
}
/** bilerp */
SUTIL_INLINE SUTIL_HOSTDEVICE float2 bilerp(const float2& x00,
const float2& x10,
const float2& x01,
const float2& x11,
const float u,
const float v) {
return lerp(lerp(x00, x10, u), lerp(x01, x11, u), v);
}
/** clamp
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float2 clamp(const float2& v,
const float a,
const float b) {
return make_float2(clamp(v.x, a, b), clamp(v.y, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 clamp(const float2& v,
const float2& a,
const float2& b) {
return make_float2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y));
}
/** @} */
/** dot product */
SUTIL_INLINE SUTIL_HOSTDEVICE float dot(const float2& a, const float2& b) {
return a.x * b.x + a.y * b.y;
}
/** length */
SUTIL_INLINE SUTIL_HOSTDEVICE float length(const float2& v) {
return sqrtf(dot(v, v));
}
/** normalize */
SUTIL_INLINE SUTIL_HOSTDEVICE float2 normalize(const float2& v) {
float invLen = 1.0f / sqrtf(dot(v, v));
return v * invLen;
}
/** floor */
SUTIL_INLINE SUTIL_HOSTDEVICE float2 floor(const float2& v) {
return make_float2(::floorf(v.x), ::floorf(v.y));
}
/** reflect */
SUTIL_INLINE SUTIL_HOSTDEVICE float2 reflect(const float2& i, const float2& n) {
return i - 2.0f * n * dot(n, i);
}
/** Faceforward
* Returns N if dot(i, nref) > 0; else -N;
* Typical usage is N = faceforward(N, -ray.dir, N);
* Note that this is opposite of what faceforward does in Cg and GLSL */
SUTIL_INLINE SUTIL_HOSTDEVICE float2 faceforward(const float2& n,
const float2& i,
const float2& nref) {
return n * copysignf(1.0f, dot(i, nref));
}
/** exp */
SUTIL_INLINE SUTIL_HOSTDEVICE float2 expf(const float2& v) {
return make_float2(::expf(v.x), ::expf(v.y));
}
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE float getByIndex(const float2& v, int i) {
    // Read component i (0 or 1) by reinterpreting the vector as a float
    // array. NOTE(review): cast-based access is the established CUDA/sutil
    // idiom; strict-aliasing concerns apply in standard C++ -- kept as-is.
    return ((float*)(&v))[i];
}
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(float2& v, int i, float x) {
    // Write counterpart of getByIndex; i must be 0 or 1.
    ((float*)(&v))[i] = x;
}
/* float3 functions */
/******************************************************************************/
/** additional constructors
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const float s) {
return make_float3(s, s, s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const float2& a) {
return make_float3(a.x, a.y, 0.0f);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const int3& a) {
return make_float3(float(a.x), float(a.y), float(a.z));
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const uint3& a) {
return make_float3(float(a.x), float(a.y), float(a.z));
}
/** @} */
/** negate */
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator-(const float3& a) {
return make_float3(-a.x, -a.y, -a.z);
}
/** min
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float3 fminf(const float3& a, const float3& b) {
return make_float3(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z));
}
SUTIL_INLINE SUTIL_HOSTDEVICE float fminf(const float3& a) {
return fminf(fminf(a.x, a.y), a.z);
}
/** @} */
/** max
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float3 fmaxf(const float3& a, const float3& b) {
return make_float3(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z));
}
SUTIL_INLINE SUTIL_HOSTDEVICE float fmaxf(const float3& a) {
return fmaxf(fmaxf(a.x, a.y), a.z);
}
/** @} */
/** add
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator+(const float3& a,
const float3& b) {
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator+(const float3& a, const float b) {
return make_float3(a.x + b, a.y + b, a.z + b);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator+(const float a, const float3& b) {
return make_float3(a + b.x, a + b.y, a + b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(float3& a, const float3& b) {
a.x += b.x;
a.y += b.y;
a.z += b.z;
}
/** @} */
/** subtract
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator-(const float3& a,
const float3& b) {
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator-(const float3& a, const float b) {
return make_float3(a.x - b, a.y - b, a.z - b);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator-(const float a, const float3& b) {
return make_float3(a - b.x, a - b.y, a - b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(float3& a, const float3& b) {
a.x -= b.x;
a.y -= b.y;
a.z -= b.z;
}
/** @} */
/** multiply
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator*(const float3& a,
const float3& b) {
return make_float3(a.x * b.x, a.y * b.y, a.z * b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator*(const float3& a, const float s) {
return make_float3(a.x * s, a.y * s, a.z * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator*(const float s, const float3& a) {
return make_float3(a.x * s, a.y * s, a.z * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float3& a, const float3& s) {
a.x *= s.x;
a.y *= s.y;
a.z *= s.z;
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float3& a, const float s) {
a.x *= s;
a.y *= s;
a.z *= s;
}
/** @} */
/** divide
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator/(const float3& a,
const float3& b) {
return make_float3(a.x / b.x, a.y / b.y, a.z / b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator/(const float3& a, const float s) {
float inv = 1.0f / s;
return a * inv;
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 operator/(const float s, const float3& a) {
return make_float3(s / a.x, s / a.y, s / a.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(float3& a, const float s) {
float inv = 1.0f / s;
a *= inv;
}
/** @} */
/** lerp */
SUTIL_INLINE SUTIL_HOSTDEVICE float3 lerp(const float3& a,
const float3& b,
const float t) {
return a + t * (b - a);
}
/** bilerp */
SUTIL_INLINE SUTIL_HOSTDEVICE float3 bilerp(const float3& x00,
const float3& x10,
const float3& x01,
const float3& x11,
const float u,
const float v) {
return lerp(lerp(x00, x10, u), lerp(x01, x11, u), v);
}
/** clamp
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float3 clamp(const float3& v,
const float a,
const float b) {
return make_float3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 clamp(const float3& v,
const float3& a,
const float3& b) {
return make_float3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y),
clamp(v.z, a.z, b.z));
}
/** @} */
/** dot product */
SUTIL_INLINE SUTIL_HOSTDEVICE float dot(const float3& a, const float3& b) {
return a.x * b.x + a.y * b.y + a.z * b.z;
}
/** cross product */
SUTIL_INLINE SUTIL_HOSTDEVICE float3 cross(const float3& a, const float3& b) {
return make_float3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z,
a.x * b.y - a.y * b.x);
}
/** length */
SUTIL_INLINE SUTIL_HOSTDEVICE float length(const float3& v) {
return sqrtf(dot(v, v));
}
/** normalize */
SUTIL_INLINE SUTIL_HOSTDEVICE float3 normalize(const float3& v) {
float invLen = 1.0f / sqrtf(dot(v, v));
return v * invLen;
}
/** floor */
SUTIL_INLINE SUTIL_HOSTDEVICE float3 floor(const float3& v) {
return make_float3(::floorf(v.x), ::floorf(v.y), ::floorf(v.z));
}
/** reflect */
SUTIL_INLINE SUTIL_HOSTDEVICE float3 reflect(const float3& i, const float3& n) {
return i - 2.0f * n * dot(n, i);
}
/** Faceforward
* Returns N if dot(i, nref) > 0; else -N;
* Typical usage is N = faceforward(N, -ray.dir, N);
* Note that this is opposite of what faceforward does in Cg and GLSL */
SUTIL_INLINE SUTIL_HOSTDEVICE float3 faceforward(const float3& n,
const float3& i,
const float3& nref) {
return n * copysignf(1.0f, dot(i, nref));
}
/** exp */
SUTIL_INLINE SUTIL_HOSTDEVICE float3 expf(const float3& v) {
return make_float3(::expf(v.x), ::expf(v.y), ::expf(v.z));
}
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE float getByIndex(const float3& v, int i) {
    // Read component i (0..2) by reinterpreting the vector as a float
    // array. NOTE(review): cast-based access is the established CUDA/sutil
    // idiom; strict-aliasing concerns apply in standard C++ -- kept as-is.
    return ((float*)(&v))[i];
}
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(float3& v, int i, float x) {
    // Write counterpart of getByIndex; i must be in 0..2.
    ((float*)(&v))[i] = x;
}
/* float4 functions */
/******************************************************************************/
/** additional constructors
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float s) {
return make_float4(s, s, s, s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float3& a) {
return make_float4(a.x, a.y, a.z, 0.0f);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const int4& a) {
return make_float4(float(a.x), float(a.y), float(a.z), float(a.w));
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const uint4& a) {
return make_float4(float(a.x), float(a.y), float(a.z), float(a.w));
}
/** @} */
/** negate */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator-(const float4& a) {
return make_float4(-a.x, -a.y, -a.z, -a.w);
}
/** min
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float4 fminf(const float4& a, const float4& b) {
return make_float4(fminf(a.x, b.x), fminf(a.y, b.y), fminf(a.z, b.z),
fminf(a.w, b.w));
}
SUTIL_INLINE SUTIL_HOSTDEVICE float fminf(const float4& a) {
return fminf(fminf(a.x, a.y), fminf(a.z, a.w));
}
/** @} */
/** max
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE float4 fmaxf(const float4& a, const float4& b) {
return make_float4(fmaxf(a.x, b.x), fmaxf(a.y, b.y), fmaxf(a.z, b.z),
fmaxf(a.w, b.w));
}
SUTIL_INLINE SUTIL_HOSTDEVICE float fmaxf(const float4& a) {
return fmaxf(fmaxf(a.x, a.y), fmaxf(a.z, a.w));
}
/** @} */
/** add
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator+(const float4& a,
                                               const float4& b) {
    float4 sum;
    sum.x = a.x + b.x;
    sum.y = a.y + b.y;
    sum.z = a.z + b.z;
    sum.w = a.w + b.w;
    return sum;
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator+(const float4& a, const float b) {
    // Broadcast the scalar and reuse the vector-vector overload.
    return a + make_float4(b);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator+(const float a, const float4& b) {
    return make_float4(a) + b;
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(float4& a, const float4& b) {
    a = a + b;
}
/** @} */
/** subtract
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator-(const float4& a,
                                               const float4& b) {
    float4 diff;
    diff.x = a.x - b.x;
    diff.y = a.y - b.y;
    diff.z = a.z - b.z;
    diff.w = a.w - b.w;
    return diff;
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator-(const float4& a, const float b) {
    // Broadcast the scalar and reuse the vector-vector overload.
    return a - make_float4(b);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator-(const float a, const float4& b) {
    return make_float4(a) - b;
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(float4& a, const float4& b) {
    a = a - b;
}
/** @} */
/** multiply
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator*(const float4& a,
                                               const float4& s) {
    float4 prod;
    prod.x = a.x * s.x;
    prod.y = a.y * s.y;
    prod.z = a.z * s.z;
    prod.w = a.w * s.w;
    return prod;
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator*(const float4& a, const float s) {
    // Broadcast the scalar and reuse the vector-vector overload.
    return a * make_float4(s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator*(const float s, const float4& a) {
    return a * s;
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float4& a, const float4& s) {
    a = a * s;
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(float4& a, const float s) {
    a = a * s;
}
/** @} */
/** divide
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator/(const float4& a,
                                               const float4& b) {
    float4 quot;
    quot.x = a.x / b.x;
    quot.y = a.y / b.y;
    quot.z = a.z / b.z;
    quot.w = a.w / b.w;
    return quot;
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator/(const float4& a, const float s) {
    // One scalar division plus four multiplies instead of four divisions.
    const float rcp = 1.0f / s;
    return a * rcp;
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 operator/(const float s, const float4& a) {
    return make_float4(s) / a;
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(float4& a, const float s) {
    // Same reciprocal-multiply scheme as operator/(float4, float).
    a = a * (1.0f / s);
}
/** @} */
/** Linear interpolation between a (t = 0) and b (t = 1). */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 lerp(const float4& a,
                                          const float4& b,
                                          const float t) {
    const float4 delta = b - a;
    return a + t * delta;
}
/** Bilinear interpolation: u blends x00->x10 and x01->x11, v blends the results. */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 bilerp(const float4& x00,
                                            const float4& x10,
                                            const float4& x01,
                                            const float4& x11,
                                            const float u,
                                            const float v) {
    const float4 bottom = lerp(x00, x10, u);
    const float4 top = lerp(x01, x11, u);
    return lerp(bottom, top, v);
}
/** clamp each component to [a, b]
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 clamp(const float4& v,
                                           const float a,
                                           const float b) {
    return make_float4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b),
                       clamp(v.w, a, b));
}
// Per-component bounds variant.
SUTIL_INLINE SUTIL_HOSTDEVICE float4 clamp(const float4& v,
                                           const float4& a,
                                           const float4& b) {
    return make_float4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y),
                       clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w));
}
/** @} */
/** dot product (includes the w component) */
SUTIL_INLINE SUTIL_HOSTDEVICE float dot(const float4& a, const float4& b) {
    return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
}
/** Euclidean length. */
SUTIL_INLINE SUTIL_HOSTDEVICE float length(const float4& r) {
    return sqrtf(dot(r, r));
}
/** normalize — no guard against zero-length input (divides by the length). */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 normalize(const float4& v) {
    float invLen = 1.0f / sqrtf(dot(v, v));
    return v * invLen;
}
/** component-wise floor */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 floor(const float4& v) {
    return make_float4(::floorf(v.x), ::floorf(v.y), ::floorf(v.z),
                       ::floorf(v.w));
}
/** reflect i about n — standard formula; n is expected to be unit length. */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 reflect(const float4& i, const float4& n) {
    return i - 2.0f * n * dot(n, i);
}
/**
 * Faceforward
 * Returns N if dot(i, nref) > 0; else -N;
 * Typical usage is N = faceforward(N, -ray.dir, N);
 * Note that this is opposite of what faceforward does in Cg and GLSL
 */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 faceforward(const float4& n,
                                                 const float4& i,
                                                 const float4& nref) {
    return n * copysignf(1.0f, dot(i, nref));
}
/** Component-wise exponential. */
SUTIL_INLINE SUTIL_HOSTDEVICE float4 expf(const float4& v) {
    return make_float4(::expf(v.x), ::expf(v.y), ::expf(v.z), ::expf(v.w));
}
/** Read component i (0..3). If used on the device, this could place 'v' in local memory. */
SUTIL_INLINE SUTIL_HOSTDEVICE float getByIndex(const float4& v, int i) {
    return ((float*)(&v))[i];
}
/** Write component i (0..3). If used on the device, this could place 'v' in local memory. */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(float4& v, int i, float x) {
    ((float*)(&v))[i] = x;
}
/* int functions */
/******************************************************************************/
/** clamp f to [a, b] */
SUTIL_INLINE SUTIL_HOSTDEVICE int clamp(const int f, const int a, const int b) {
    return max(a, min(f, b));
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE int getByIndex(const int1& v, int i) {
    return ((int*)(&v))[i];
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(int1& v, int i, int x) {
    ((int*)(&v))[i] = x;
}
/* int2 functions */
/******************************************************************************/
/** additional constructors
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int2 make_int2(const int s) {
    return make_int2(s, s);
}
// float -> int conversion truncates toward zero.
SUTIL_INLINE SUTIL_HOSTDEVICE int2 make_int2(const float2& a) {
    return make_int2(int(a.x), int(a.y));
}
/** @} */
/** negate */
SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator-(const int2& a) {
    return make_int2(-a.x, -a.y);
}
/** component-wise min */
SUTIL_INLINE SUTIL_HOSTDEVICE int2 min(const int2& a, const int2& b) {
    return make_int2(min(a.x, b.x), min(a.y, b.y));
}
/** component-wise max */
SUTIL_INLINE SUTIL_HOSTDEVICE int2 max(const int2& a, const int2& b) {
    return make_int2(max(a.x, b.x), max(a.y, b.y));
}
/** add
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator+(const int2& a, const int2& b) {
    return make_int2(a.x + b.x, a.y + b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(int2& a, const int2& b) {
    a.x += b.x;
    a.y += b.y;
}
/** @} */
/** subtract
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator-(const int2& a, const int2& b) {
    return make_int2(a.x - b.x, a.y - b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator-(const int2& a, const int b) {
    return make_int2(a.x - b, a.y - b);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(int2& a, const int2& b) {
    a.x -= b.x;
    a.y -= b.y;
}
/** @} */
/** multiply
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator*(const int2& a, const int2& b) {
    return make_int2(a.x * b.x, a.y * b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator*(const int2& a, const int s) {
    return make_int2(a.x * s, a.y * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int2 operator*(const int s, const int2& a) {
    return make_int2(a.x * s, a.y * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(int2& a, const int s) {
    a.x *= s;
    a.y *= s;
}
/** @} */
/** clamp each component to [a, b]
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int2 clamp(const int2& v,
                                         const int a,
                                         const int b) {
    return make_int2(clamp(v.x, a, b), clamp(v.y, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE int2 clamp(const int2& v,
                                         const int2& a,
                                         const int2& b) {
    return make_int2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y));
}
/** @} */
/** equality
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const int2& a, const int2& b) {
    return a.x == b.x && a.y == b.y;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const int2& a, const int2& b) {
    return a.x != b.x || a.y != b.y;
}
/** @} */
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE int getByIndex(const int2& v, int i) {
    return ((int*)(&v))[i];
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(int2& v, int i, int x) {
    ((int*)(&v))[i] = x;
}
/* int3 functions */
/******************************************************************************/
/** additional constructors
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int3 make_int3(const int s) {
    return make_int3(s, s, s);
}
// float -> int conversion truncates toward zero.
SUTIL_INLINE SUTIL_HOSTDEVICE int3 make_int3(const float3& a) {
    return make_int3(int(a.x), int(a.y), int(a.z));
}
/** @} */
/** negate */
SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator-(const int3& a) {
    return make_int3(-a.x, -a.y, -a.z);
}
/** component-wise min */
SUTIL_INLINE SUTIL_HOSTDEVICE int3 min(const int3& a, const int3& b) {
    return make_int3(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z));
}
/** component-wise max */
SUTIL_INLINE SUTIL_HOSTDEVICE int3 max(const int3& a, const int3& b) {
    return make_int3(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z));
}
/** add
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator+(const int3& a, const int3& b) {
    return make_int3(a.x + b.x, a.y + b.y, a.z + b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(int3& a, const int3& b) {
    a.x += b.x;
    a.y += b.y;
    a.z += b.z;
}
/** @} */
/** subtract
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator-(const int3& a, const int3& b) {
    return make_int3(a.x - b.x, a.y - b.y, a.z - b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(int3& a, const int3& b) {
    a.x -= b.x;
    a.y -= b.y;
    a.z -= b.z;
}
/** @} */
/** multiply
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator*(const int3& a, const int3& b) {
    return make_int3(a.x * b.x, a.y * b.y, a.z * b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator*(const int3& a, const int s) {
    return make_int3(a.x * s, a.y * s, a.z * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator*(const int s, const int3& a) {
    return make_int3(a.x * s, a.y * s, a.z * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(int3& a, const int s) {
    a.x *= s;
    a.y *= s;
    a.z *= s;
}
/** @} */
/** divide — integer division truncates toward zero; dividing by zero is undefined
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator/(const int3& a, const int3& b) {
    return make_int3(a.x / b.x, a.y / b.y, a.z / b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator/(const int3& a, const int s) {
    return make_int3(a.x / s, a.y / s, a.z / s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int3 operator/(const int s, const int3& a) {
    return make_int3(s / a.x, s / a.y, s / a.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(int3& a, const int s) {
    a.x /= s;
    a.y /= s;
    a.z /= s;
}
/** @} */
/** clamp each component to [a, b]
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int3 clamp(const int3& v,
                                         const int a,
                                         const int b) {
    return make_int3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE int3 clamp(const int3& v,
                                         const int3& a,
                                         const int3& b) {
    return make_int3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y),
                     clamp(v.z, a.z, b.z));
}
/** @} */
/** equality
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const int3& a, const int3& b) {
    return a.x == b.x && a.y == b.y && a.z == b.z;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const int3& a, const int3& b) {
    return a.x != b.x || a.y != b.y || a.z != b.z;
}
/** @} */
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE int getByIndex(const int3& v, int i) {
    return ((int*)(&v))[i];
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(int3& v, int i, int x) {
    ((int*)(&v))[i] = x;
}
/* int4 functions */
/******************************************************************************/
/** additional constructors
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int s) {
    return make_int4(s, s, s, s);
}
// float -> int conversion truncates toward zero.
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const float4& a) {
    return make_int4((int)a.x, (int)a.y, (int)a.z, (int)a.w);
}
/** @} */
/** negate */
SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator-(const int4& a) {
    return make_int4(-a.x, -a.y, -a.z, -a.w);
}
/** component-wise min */
SUTIL_INLINE SUTIL_HOSTDEVICE int4 min(const int4& a, const int4& b) {
    return make_int4(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z),
                     min(a.w, b.w));
}
/** component-wise max */
SUTIL_INLINE SUTIL_HOSTDEVICE int4 max(const int4& a, const int4& b) {
    return make_int4(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z),
                     max(a.w, b.w));
}
/** add
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator+(const int4& a, const int4& b) {
    return make_int4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(int4& a, const int4& b) {
    a.x += b.x;
    a.y += b.y;
    a.z += b.z;
    a.w += b.w;
}
/** @} */
/** subtract
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator-(const int4& a, const int4& b) {
    return make_int4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(int4& a, const int4& b) {
    a.x -= b.x;
    a.y -= b.y;
    a.z -= b.z;
    a.w -= b.w;
}
/** @} */
/** multiply
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator*(const int4& a, const int4& b) {
    return make_int4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator*(const int4& a, const int s) {
    return make_int4(a.x * s, a.y * s, a.z * s, a.w * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator*(const int s, const int4& a) {
    return make_int4(a.x * s, a.y * s, a.z * s, a.w * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(int4& a, const int s) {
    a.x *= s;
    a.y *= s;
    a.z *= s;
    a.w *= s;
}
/** @} */
/** divide — integer division truncates toward zero; dividing by zero is undefined
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator/(const int4& a, const int4& b) {
    return make_int4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator/(const int4& a, const int s) {
    return make_int4(a.x / s, a.y / s, a.z / s, a.w / s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int4 operator/(const int s, const int4& a) {
    return make_int4(s / a.x, s / a.y, s / a.z, s / a.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(int4& a, const int s) {
    a.x /= s;
    a.y /= s;
    a.z /= s;
    a.w /= s;
}
/** @} */
/** clamp each component to [a, b]
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE int4 clamp(const int4& v,
                                         const int a,
                                         const int b) {
    return make_int4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b),
                     clamp(v.w, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE int4 clamp(const int4& v,
                                         const int4& a,
                                         const int4& b) {
    return make_int4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y),
                     clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w));
}
/** @} */
/** equality
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const int4& a, const int4& b) {
    return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const int4& a, const int4& b) {
    return a.x != b.x || a.y != b.y || a.z != b.z || a.w != b.w;
}
/** @} */
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE int getByIndex(const int4& v, int i) {
    return ((int*)(&v))[i];
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(int4& v, int i, int x) {
    ((int*)(&v))[i] = x;
}
/* uint functions */
/******************************************************************************/
/** clamp f to [a, b] */
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int clamp(const unsigned int f,
                                                 const unsigned int a,
                                                 const unsigned int b) {
    return max(a, min(f, b));
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int getByIndex(const uint1& v,
                                                      unsigned int i) {
    return ((unsigned int*)(&v))[i];
}
/** If used on the device, this could place 'v' in local memory.
 * NOTE(review): index is 'int' here but 'unsigned int' in getByIndex — confirm
 * whether this asymmetry is intentional. */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(uint1& v, int i, unsigned int x) {
    ((unsigned int*)(&v))[i] = x;
}
/* uint2 functions */
/******************************************************************************/
/** additional constructors
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 make_uint2(const unsigned int s) {
    return make_uint2(s, s);
}
// NOTE: converting a negative float to unsigned int is undefined behavior.
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 make_uint2(const float2& a) {
    return make_uint2((unsigned int)a.x, (unsigned int)a.y);
}
/** @} */
/** component-wise min */
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 min(const uint2& a, const uint2& b) {
    return make_uint2(min(a.x, b.x), min(a.y, b.y));
}
/** component-wise max */
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 max(const uint2& a, const uint2& b) {
    return make_uint2(max(a.x, b.x), max(a.y, b.y));
}
/** add
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator+(const uint2& a, const uint2& b) {
    return make_uint2(a.x + b.x, a.y + b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(uint2& a, const uint2& b) {
    a.x += b.x;
    a.y += b.y;
}
/** @} */
/** subtract — unsigned arithmetic wraps around on underflow
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator-(const uint2& a, const uint2& b) {
    return make_uint2(a.x - b.x, a.y - b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator-(const uint2& a,
                                              const unsigned int b) {
    return make_uint2(a.x - b, a.y - b);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(uint2& a, const uint2& b) {
    a.x -= b.x;
    a.y -= b.y;
}
/** @} */
/** multiply
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator*(const uint2& a, const uint2& b) {
    return make_uint2(a.x * b.x, a.y * b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator*(const uint2& a,
                                              const unsigned int s) {
    return make_uint2(a.x * s, a.y * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 operator*(const unsigned int s,
                                              const uint2& a) {
    return make_uint2(a.x * s, a.y * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(uint2& a, const unsigned int s) {
    a.x *= s;
    a.y *= s;
}
/** @} */
/** clamp each component to [a, b]
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 clamp(const uint2& v,
                                          const unsigned int a,
                                          const unsigned int b) {
    return make_uint2(clamp(v.x, a, b), clamp(v.y, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 clamp(const uint2& v,
                                          const uint2& a,
                                          const uint2& b) {
    return make_uint2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y));
}
/** @} */
/** equality
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const uint2& a, const uint2& b) {
    return a.x == b.x && a.y == b.y;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const uint2& a, const uint2& b) {
    return a.x != b.x || a.y != b.y;
}
/** @} */
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int getByIndex(const uint2& v,
                                                      unsigned int i) {
    return ((unsigned int*)(&v))[i];
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(uint2& v, int i, unsigned int x) {
    ((unsigned int*)(&v))[i] = x;
}
/* uint3 functions */
/******************************************************************************/
/** additional constructors
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 make_uint3(const unsigned int s) {
    return make_uint3(s, s, s);
}
// NOTE: converting a negative float to unsigned int is undefined behavior.
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 make_uint3(const float3& a) {
    return make_uint3((unsigned int)a.x, (unsigned int)a.y, (unsigned int)a.z);
}
/** @} */
/** component-wise min */
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 min(const uint3& a, const uint3& b) {
    return make_uint3(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z));
}
/** component-wise max */
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 max(const uint3& a, const uint3& b) {
    return make_uint3(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z));
}
/** add
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator+(const uint3& a, const uint3& b) {
    return make_uint3(a.x + b.x, a.y + b.y, a.z + b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(uint3& a, const uint3& b) {
    a.x += b.x;
    a.y += b.y;
    a.z += b.z;
}
/** @} */
/** subtract — unsigned arithmetic wraps around on underflow
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator-(const uint3& a, const uint3& b) {
    return make_uint3(a.x - b.x, a.y - b.y, a.z - b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(uint3& a, const uint3& b) {
    a.x -= b.x;
    a.y -= b.y;
    a.z -= b.z;
}
/** @} */
/** multiply
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator*(const uint3& a, const uint3& b) {
    return make_uint3(a.x * b.x, a.y * b.y, a.z * b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator*(const uint3& a,
                                              const unsigned int s) {
    return make_uint3(a.x * s, a.y * s, a.z * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator*(const unsigned int s,
                                              const uint3& a) {
    return make_uint3(a.x * s, a.y * s, a.z * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(uint3& a, const unsigned int s) {
    a.x *= s;
    a.y *= s;
    a.z *= s;
}
/** @} */
/** divide — integer division truncates; dividing by zero is undefined
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator/(const uint3& a, const uint3& b) {
    return make_uint3(a.x / b.x, a.y / b.y, a.z / b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator/(const uint3& a,
                                              const unsigned int s) {
    return make_uint3(a.x / s, a.y / s, a.z / s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 operator/(const unsigned int s,
                                              const uint3& a) {
    return make_uint3(s / a.x, s / a.y, s / a.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(uint3& a, const unsigned int s) {
    a.x /= s;
    a.y /= s;
    a.z /= s;
}
/** @} */
/** clamp each component to [a, b]
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 clamp(const uint3& v,
                                          const unsigned int a,
                                          const unsigned int b) {
    return make_uint3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 clamp(const uint3& v,
                                          const uint3& a,
                                          const uint3& b) {
    return make_uint3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y),
                      clamp(v.z, a.z, b.z));
}
/** @} */
/** equality
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const uint3& a, const uint3& b) {
    return a.x == b.x && a.y == b.y && a.z == b.z;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const uint3& a, const uint3& b) {
    return a.x != b.x || a.y != b.y || a.z != b.z;
}
/** @} */
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int getByIndex(const uint3& v,
                                                      unsigned int i) {
    return ((unsigned int*)(&v))[i];
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(uint3& v, int i, unsigned int x) {
    ((unsigned int*)(&v))[i] = x;
}
/* uint4 functions */
/******************************************************************************/
/** additional constructors
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const unsigned int s) {
    return make_uint4(s, s, s, s);
}
// NOTE: converting a negative float to unsigned int is undefined behavior.
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const float4& a) {
    return make_uint4((unsigned int)a.x, (unsigned int)a.y, (unsigned int)a.z,
                      (unsigned int)a.w);
}
/** @} */
/** component-wise min
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 min(const uint4& a, const uint4& b) {
    return make_uint4(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z),
                      min(a.w, b.w));
}
/** @} */
/** component-wise max
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 max(const uint4& a, const uint4& b) {
    return make_uint4(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z),
                      max(a.w, b.w));
}
/** @} */
/** add
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator+(const uint4& a, const uint4& b) {
    return make_uint4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(uint4& a, const uint4& b) {
    a.x += b.x;
    a.y += b.y;
    a.z += b.z;
    a.w += b.w;
}
/** @} */
/** subtract — unsigned arithmetic wraps around on underflow
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator-(const uint4& a, const uint4& b) {
    return make_uint4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(uint4& a, const uint4& b) {
    a.x -= b.x;
    a.y -= b.y;
    a.z -= b.z;
    a.w -= b.w;
}
/** @} */
/** multiply
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator*(const uint4& a, const uint4& b) {
    return make_uint4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator*(const uint4& a,
                                              const unsigned int s) {
    return make_uint4(a.x * s, a.y * s, a.z * s, a.w * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator*(const unsigned int s,
                                              const uint4& a) {
    return make_uint4(a.x * s, a.y * s, a.z * s, a.w * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(uint4& a, const unsigned int s) {
    a.x *= s;
    a.y *= s;
    a.z *= s;
    a.w *= s;
}
/** @} */
/** divide — integer division truncates; dividing by zero is undefined
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator/(const uint4& a, const uint4& b) {
    return make_uint4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator/(const uint4& a,
                                              const unsigned int s) {
    return make_uint4(a.x / s, a.y / s, a.z / s, a.w / s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 operator/(const unsigned int s,
                                              const uint4& a) {
    return make_uint4(s / a.x, s / a.y, s / a.z, s / a.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(uint4& a, const unsigned int s) {
    a.x /= s;
    a.y /= s;
    a.z /= s;
    a.w /= s;
}
/** @} */
/** clamp each component to [a, b]
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 clamp(const uint4& v,
                                          const unsigned int a,
                                          const unsigned int b) {
    return make_uint4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b),
                      clamp(v.w, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 clamp(const uint4& v,
                                          const uint4& a,
                                          const uint4& b) {
    return make_uint4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y),
                      clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w));
}
/** @} */
/** equality
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const uint4& a, const uint4& b) {
    return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const uint4& a, const uint4& b) {
    return a.x != b.x || a.y != b.y || a.z != b.z || a.w != b.w;
}
/** @} */
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned int getByIndex(const uint4& v,
                                                      unsigned int i) {
    return ((unsigned int*)(&v))[i];
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(uint4& v, int i, unsigned int x) {
    ((unsigned int*)(&v))[i] = x;
}
/* long long functions */
/******************************************************************************/
/** clamp f to [a, b] */
SUTIL_INLINE SUTIL_HOSTDEVICE long long clamp(const long long f,
                                              const long long a,
                                              const long long b) {
    return max(a, min(f, b));
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE long long getByIndex(const longlong1& v, int i) {
    return ((long long*)(&v))[i];
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(longlong1& v,
                                              int i,
                                              long long x) {
    ((long long*)(&v))[i] = x;
}
/* longlong2 functions */
/******************************************************************************/
/** additional constructors
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 make_longlong2(const long long s) {
    return make_longlong2(s, s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 make_longlong2(const float2& a) {
    // BUG FIX: previously cast through int(), which truncated the usable
    // range to 32 bits. Cast directly to long long, matching
    // make_longlong3(const float3&) and make_longlong4(const float4&).
    return make_longlong2((long long)a.x, (long long)a.y);
}
/** @} */
/** negate */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator-(const longlong2& a) {
    return make_longlong2(-a.x, -a.y);
}
/** component-wise min */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 min(const longlong2& a,
                                            const longlong2& b) {
    return make_longlong2(min(a.x, b.x), min(a.y, b.y));
}
/** component-wise max */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 max(const longlong2& a,
                                            const longlong2& b) {
    return make_longlong2(max(a.x, b.x), max(a.y, b.y));
}
/** add
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator+(const longlong2& a,
                                                  const longlong2& b) {
    return make_longlong2(a.x + b.x, a.y + b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(longlong2& a,
                                              const longlong2& b) {
    a.x += b.x;
    a.y += b.y;
}
/** @} */
/** subtract
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator-(const longlong2& a,
                                                  const longlong2& b) {
    return make_longlong2(a.x - b.x, a.y - b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator-(const longlong2& a,
                                                  const long long b) {
    return make_longlong2(a.x - b, a.y - b);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(longlong2& a,
                                              const longlong2& b) {
    a.x -= b.x;
    a.y -= b.y;
}
/** @} */
/** multiply
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator*(const longlong2& a,
                                                  const longlong2& b) {
    return make_longlong2(a.x * b.x, a.y * b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator*(const longlong2& a,
                                                  const long long s) {
    return make_longlong2(a.x * s, a.y * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 operator*(const long long s,
                                                  const longlong2& a) {
    return make_longlong2(a.x * s, a.y * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(longlong2& a, const long long s) {
    a.x *= s;
    a.y *= s;
}
/** @} */
/** clamp each component to [a, b]
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 clamp(const longlong2& v,
                                              const long long a,
                                              const long long b) {
    return make_longlong2(clamp(v.x, a, b), clamp(v.y, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 clamp(const longlong2& v,
                                              const longlong2& a,
                                              const longlong2& b) {
    return make_longlong2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y));
}
/** @} */
/** equality
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const longlong2& a,
                                              const longlong2& b) {
    return a.x == b.x && a.y == b.y;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const longlong2& a,
                                              const longlong2& b) {
    return a.x != b.x || a.y != b.y;
}
/** @} */
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE long long getByIndex(const longlong2& v, int i) {
    return ((long long*)(&v))[i];
}
/** If used on the device, this could place 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(longlong2& v,
                                              int i,
                                              long long x) {
    ((long long*)(&v))[i] = x;
}
/* longlong3 functions */
/******************************************************************************/
/** additional constructors
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 make_longlong3(const long long s) {
    return make_longlong3(s, s, s);
}
// float -> long long conversion truncates toward zero.
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 make_longlong3(const float3& a) {
    return make_longlong3((long long)a.x, (long long)a.y, (long long)a.z);
}
/** @} */
/** negate */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator-(const longlong3& a) {
    return make_longlong3(-a.x, -a.y, -a.z);
}
/** component-wise min */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 min(const longlong3& a,
                                            const longlong3& b) {
    return make_longlong3(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z));
}
/** component-wise max */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 max(const longlong3& a,
                                            const longlong3& b) {
    return make_longlong3(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z));
}
/** add
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator+(const longlong3& a,
                                                  const longlong3& b) {
    return make_longlong3(a.x + b.x, a.y + b.y, a.z + b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(longlong3& a,
                                              const longlong3& b) {
    a.x += b.x;
    a.y += b.y;
    a.z += b.z;
}
/** @} */
/** subtract
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator-(const longlong3& a,
                                                  const longlong3& b) {
    return make_longlong3(a.x - b.x, a.y - b.y, a.z - b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(longlong3& a,
                                              const longlong3& b) {
    a.x -= b.x;
    a.y -= b.y;
    a.z -= b.z;
}
/** @} */
/** multiply
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator*(const longlong3& a,
                                                  const longlong3& b) {
    return make_longlong3(a.x * b.x, a.y * b.y, a.z * b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator*(const longlong3& a,
                                                  const long long s) {
    return make_longlong3(a.x * s, a.y * s, a.z * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator*(const long long s,
                                                  const longlong3& a) {
    return make_longlong3(a.x * s, a.y * s, a.z * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(longlong3& a, const long long s) {
    a.x *= s;
    a.y *= s;
    a.z *= s;
}
/** @} */
/** divide — integer division truncates toward zero; dividing by zero is undefined
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator/(const longlong3& a,
                                                  const longlong3& b) {
    return make_longlong3(a.x / b.x, a.y / b.y, a.z / b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator/(const longlong3& a,
                                                  const long long s) {
    return make_longlong3(a.x / s, a.y / s, a.z / s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 operator/(const long long s,
                                                  const longlong3& a) {
    return make_longlong3(s / a.x, s / a.y, s / a.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(longlong3& a, const long long s) {
    a.x /= s;
    a.y /= s;
    a.z /= s;
}
/** @} */
/** clamp each component to [a, b]
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 clamp(const longlong3& v,
                                              const long long a,
                                              const long long b) {
    return make_longlong3(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 clamp(const longlong3& v,
                                              const longlong3& a,
                                              const longlong3& b) {
    return make_longlong3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y),
                          clamp(v.z, a.z, b.z));
}
/** @} */
/** equality
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const longlong3& a,
                                              const longlong3& b) {
    return a.x == b.x && a.y == b.y && a.z == b.z;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const longlong3& a,
                                              const longlong3& b) {
    return a.x != b.x || a.y != b.y || a.z != b.z;
}
/** @} */
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE long long getByIndex(const longlong3& v, int i) {
return ((long long*)(&v))[i];
}
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(longlong3& v, int i, long long x) {
    // Fixed: 'x' was declared as int, which silently truncated 64-bit values.
    // Every sibling overload (e.g. the longlong4 variant) takes long long;
    // widening the parameter is backward compatible for all callers.
    ((long long*)(&v))[i] = x;
}
/* longlong4 functions */
/******************************************************************************/
/** additional constructors
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const long long s) {
return make_longlong4(s, s, s, s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const float4& a) {
return make_longlong4((long long)a.x, (long long)a.y, (long long)a.z,
(long long)a.w);
}
/** @} */
/** negate */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator-(const longlong4& a) {
return make_longlong4(-a.x, -a.y, -a.z, -a.w);
}
/** min */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 min(const longlong4& a,
const longlong4& b) {
return make_longlong4(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z),
min(a.w, b.w));
}
/** max */
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 max(const longlong4& a,
const longlong4& b) {
return make_longlong4(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z),
max(a.w, b.w));
}
/** add
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator+(const longlong4& a,
const longlong4& b) {
return make_longlong4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(longlong4& a,
const longlong4& b) {
a.x += b.x;
a.y += b.y;
a.z += b.z;
a.w += b.w;
}
/** @} */
/** subtract
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator-(const longlong4& a,
const longlong4& b) {
return make_longlong4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(longlong4& a,
const longlong4& b) {
a.x -= b.x;
a.y -= b.y;
a.z -= b.z;
a.w -= b.w;
}
/** @} */
/** multiply
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator*(const longlong4& a,
const longlong4& b) {
return make_longlong4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator*(const longlong4& a,
const long long s) {
return make_longlong4(a.x * s, a.y * s, a.z * s, a.w * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator*(const long long s,
const longlong4& a) {
return make_longlong4(a.x * s, a.y * s, a.z * s, a.w * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(longlong4& a, const long long s) {
a.x *= s;
a.y *= s;
a.z *= s;
a.w *= s;
}
/** @} */
/** divide
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator/(const longlong4& a,
const longlong4& b) {
return make_longlong4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator/(const longlong4& a,
const long long s) {
return make_longlong4(a.x / s, a.y / s, a.z / s, a.w / s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 operator/(const long long s,
const longlong4& a) {
return make_longlong4(s / a.x, s / a.y, s / a.z, s / a.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(longlong4& a, const long long s) {
a.x /= s;
a.y /= s;
a.z /= s;
a.w /= s;
}
/** @} */
/** clamp
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 clamp(const longlong4& v,
const long long a,
const long long b) {
return make_longlong4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b),
clamp(v.w, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 clamp(const longlong4& v,
const longlong4& a,
const longlong4& b) {
return make_longlong4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y),
clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w));
}
/** @} */
/** equality
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const longlong4& a,
const longlong4& b) {
return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const longlong4& a,
const longlong4& b) {
return a.x != b.x || a.y != b.y || a.z != b.z || a.w != b.w;
}
/** @} */
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE long long getByIndex(const longlong4& v, int i) {
return ((long long*)(&v))[i];
}
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(longlong4& v,
int i,
long long x) {
((long long*)(&v))[i] = x;
}
/* ulonglong functions */
/******************************************************************************/
/** clamp */
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long clamp(
    const unsigned long long f,
    const unsigned long long a,
    const unsigned long long b) {
    // Clamp f into [a, b]: cap at the upper bound first, then raise to the
    // lower bound, so that a wins when a > b — identical to max(a, min(f, b)).
    const unsigned long long capped = min(f, b);
    return max(a, capped);
}
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long getByIndex(const ulonglong1& v,
unsigned int i) {
return ((unsigned long long*)(&v))[i];
}
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(ulonglong1& v,
int i,
unsigned long long x) {
((unsigned long long*)(&v))[i] = x;
}
/* ulonglong2 functions */
/******************************************************************************/
/** additional constructors
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2
make_ulonglong2(const unsigned long long s) {
return make_ulonglong2(s, s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 make_ulonglong2(const float2& a) {
return make_ulonglong2((unsigned long long)a.x, (unsigned long long)a.y);
}
/** @} */
/** min */
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 min(const ulonglong2& a,
const ulonglong2& b) {
return make_ulonglong2(min(a.x, b.x), min(a.y, b.y));
}
/** max */
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 max(const ulonglong2& a,
const ulonglong2& b) {
return make_ulonglong2(max(a.x, b.x), max(a.y, b.y));
}
/** add
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator+(const ulonglong2& a,
const ulonglong2& b) {
return make_ulonglong2(a.x + b.x, a.y + b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(ulonglong2& a,
const ulonglong2& b) {
a.x += b.x;
a.y += b.y;
}
/** @} */
/** subtract
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator-(const ulonglong2& a,
const ulonglong2& b) {
return make_ulonglong2(a.x - b.x, a.y - b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator-(const ulonglong2& a,
const unsigned long long b) {
return make_ulonglong2(a.x - b, a.y - b);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(ulonglong2& a,
const ulonglong2& b) {
a.x -= b.x;
a.y -= b.y;
}
/** @} */
/** multiply
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator*(const ulonglong2& a,
                                                   const ulonglong2& b) {
    return make_ulonglong2(a.x * b.x, a.y * b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator*(const ulonglong2& a,
                                                   const unsigned long long s) {
    return make_ulonglong2(a.x * s, a.y * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator*(const unsigned long long s,
                                                   const ulonglong2& a) {
    return make_ulonglong2(a.x * s, a.y * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(ulonglong2& a,
                                              const unsigned long long s) {
    a.x *= s;
    a.y *= s;
}
/** @} */
/** divide
 * @{
 */
// Added for consistency: every other integer vector type in this header
// (longlong3/4, ulonglong3/4) provides the full operator/ family, but
// ulonglong2 was missing it. Purely additive, backward compatible.
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator/(const ulonglong2& a,
                                                   const ulonglong2& b) {
    return make_ulonglong2(a.x / b.x, a.y / b.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator/(const ulonglong2& a,
                                                   const unsigned long long s) {
    return make_ulonglong2(a.x / s, a.y / s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 operator/(const unsigned long long s,
                                                   const ulonglong2& a) {
    return make_ulonglong2(s / a.x, s / a.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(ulonglong2& a,
                                              const unsigned long long s) {
    a.x /= s;
    a.y /= s;
}
/** @} */
/** clamp
 * @{
 */
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 clamp(const ulonglong2& v,
                                               const unsigned long long a,
                                               const unsigned long long b) {
    return make_ulonglong2(clamp(v.x, a, b), clamp(v.y, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 clamp(const ulonglong2& v,
                                               const ulonglong2& a,
                                               const ulonglong2& b) {
    return make_ulonglong2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y));
}
/** @} */
/** equality
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const ulonglong2& a,
const ulonglong2& b) {
return a.x == b.x && a.y == b.y;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const ulonglong2& a,
const ulonglong2& b) {
return a.x != b.x || a.y != b.y;
}
/** @} */
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long getByIndex(const ulonglong2& v,
unsigned int i) {
return ((unsigned long long*)(&v))[i];
}
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(ulonglong2& v,
int i,
unsigned long long x) {
((unsigned long long*)(&v))[i] = x;
}
/* ulonglong3 functions */
/******************************************************************************/
/** additional constructors
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3
make_ulonglong3(const unsigned long long s) {
return make_ulonglong3(s, s, s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 make_ulonglong3(const float3& a) {
return make_ulonglong3((unsigned long long)a.x, (unsigned long long)a.y,
(unsigned long long)a.z);
}
/** @} */
/** min */
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 min(const ulonglong3& a,
const ulonglong3& b) {
return make_ulonglong3(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z));
}
/** max */
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 max(const ulonglong3& a,
const ulonglong3& b) {
return make_ulonglong3(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z));
}
/** add
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator+(const ulonglong3& a,
const ulonglong3& b) {
return make_ulonglong3(a.x + b.x, a.y + b.y, a.z + b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(ulonglong3& a,
const ulonglong3& b) {
a.x += b.x;
a.y += b.y;
a.z += b.z;
}
/** @} */
/** subtract
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator-(const ulonglong3& a,
const ulonglong3& b) {
return make_ulonglong3(a.x - b.x, a.y - b.y, a.z - b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(ulonglong3& a,
const ulonglong3& b) {
a.x -= b.x;
a.y -= b.y;
a.z -= b.z;
}
/** @} */
/** multiply
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator*(const ulonglong3& a,
const ulonglong3& b) {
return make_ulonglong3(a.x * b.x, a.y * b.y, a.z * b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator*(const ulonglong3& a,
const unsigned long long s) {
return make_ulonglong3(a.x * s, a.y * s, a.z * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator*(const unsigned long long s,
const ulonglong3& a) {
return make_ulonglong3(a.x * s, a.y * s, a.z * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(ulonglong3& a,
const unsigned long long s) {
a.x *= s;
a.y *= s;
a.z *= s;
}
/** @} */
/** divide
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator/(const ulonglong3& a,
const ulonglong3& b) {
return make_ulonglong3(a.x / b.x, a.y / b.y, a.z / b.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator/(const ulonglong3& a,
const unsigned long long s) {
return make_ulonglong3(a.x / s, a.y / s, a.z / s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 operator/(const unsigned long long s,
const ulonglong3& a) {
return make_ulonglong3(s / a.x, s / a.y, s / a.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(ulonglong3& a,
const unsigned long long s) {
a.x /= s;
a.y /= s;
a.z /= s;
}
/** @} */
/** clamp
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 clamp(const ulonglong3& v,
const unsigned long long a,
const unsigned long long b) {
return make_ulonglong3(clamp(v.x, a, b), clamp(v.y, a, b),
clamp(v.z, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 clamp(const ulonglong3& v,
const ulonglong3& a,
const ulonglong3& b) {
return make_ulonglong3(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y),
clamp(v.z, a.z, b.z));
}
/** @} */
/** equality
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const ulonglong3& a,
const ulonglong3& b) {
return a.x == b.x && a.y == b.y && a.z == b.z;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const ulonglong3& a,
const ulonglong3& b) {
return a.x != b.x || a.y != b.y || a.z != b.z;
}
/** @} */
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long getByIndex(const ulonglong3& v,
unsigned int i) {
return ((unsigned long long*)(&v))[i];
}
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(ulonglong3& v,
int i,
unsigned long long x) {
((unsigned long long*)(&v))[i] = x;
}
/* ulonglong4 functions */
/******************************************************************************/
/** additional constructors
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4
make_ulonglong4(const unsigned long long s) {
return make_ulonglong4(s, s, s, s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 make_ulonglong4(const float4& a) {
return make_ulonglong4((unsigned long long)a.x, (unsigned long long)a.y,
(unsigned long long)a.z, (unsigned long long)a.w);
}
/** @} */
/** min
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 min(const ulonglong4& a,
const ulonglong4& b) {
return make_ulonglong4(min(a.x, b.x), min(a.y, b.y), min(a.z, b.z),
min(a.w, b.w));
}
/** @} */
/** max
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 max(const ulonglong4& a,
const ulonglong4& b) {
return make_ulonglong4(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z),
max(a.w, b.w));
}
/** @} */
/** add
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator+(const ulonglong4& a,
const ulonglong4& b) {
return make_ulonglong4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator+=(ulonglong4& a,
const ulonglong4& b) {
a.x += b.x;
a.y += b.y;
a.z += b.z;
a.w += b.w;
}
/** @} */
/** subtract
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator-(const ulonglong4& a,
const ulonglong4& b) {
return make_ulonglong4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator-=(ulonglong4& a,
const ulonglong4& b) {
a.x -= b.x;
a.y -= b.y;
a.z -= b.z;
a.w -= b.w;
}
/** @} */
/** multiply
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator*(const ulonglong4& a,
const ulonglong4& b) {
return make_ulonglong4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator*(const ulonglong4& a,
const unsigned long long s) {
return make_ulonglong4(a.x * s, a.y * s, a.z * s, a.w * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator*(const unsigned long long s,
const ulonglong4& a) {
return make_ulonglong4(a.x * s, a.y * s, a.z * s, a.w * s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator*=(ulonglong4& a,
const unsigned long long s) {
a.x *= s;
a.y *= s;
a.z *= s;
a.w *= s;
}
/** @} */
/** divide
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator/(const ulonglong4& a,
const ulonglong4& b) {
return make_ulonglong4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator/(const ulonglong4& a,
const unsigned long long s) {
return make_ulonglong4(a.x / s, a.y / s, a.z / s, a.w / s);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 operator/(const unsigned long long s,
const ulonglong4& a) {
return make_ulonglong4(s / a.x, s / a.y, s / a.z, s / a.w);
}
SUTIL_INLINE SUTIL_HOSTDEVICE void operator/=(ulonglong4& a,
const unsigned long long s) {
a.x /= s;
a.y /= s;
a.z /= s;
a.w /= s;
}
/** @} */
/** clamp
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 clamp(const ulonglong4& v,
const unsigned long long a,
const unsigned long long b) {
return make_ulonglong4(clamp(v.x, a, b), clamp(v.y, a, b), clamp(v.z, a, b),
clamp(v.w, a, b));
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 clamp(const ulonglong4& v,
const ulonglong4& a,
const ulonglong4& b) {
return make_ulonglong4(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y),
clamp(v.z, a.z, b.z), clamp(v.w, a.w, b.w));
}
/** @} */
/** equality
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator==(const ulonglong4& a,
const ulonglong4& b) {
return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w;
}
SUTIL_INLINE SUTIL_HOSTDEVICE bool operator!=(const ulonglong4& a,
const ulonglong4& b) {
return a.x != b.x || a.y != b.y || a.z != b.z || a.w != b.w;
}
/** @} */
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE unsigned long long getByIndex(const ulonglong4& v,
unsigned int i) {
return ((unsigned long long*)(&v))[i];
}
/** If used on the device, this could place the 'v' in local memory */
SUTIL_INLINE SUTIL_HOSTDEVICE void setByIndex(ulonglong4& v,
int i,
unsigned long long x) {
((unsigned long long*)(&v))[i] = x;
}
/******************************************************************************/
/** Narrowing functions
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE int2 make_int2(const int3& v0) {
return make_int2(v0.x, v0.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int2 make_int2(const int4& v0) {
return make_int2(v0.x, v0.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int3 make_int3(const int4& v0) {
return make_int3(v0.x, v0.y, v0.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 make_uint2(const uint3& v0) {
return make_uint2(v0.x, v0.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint2 make_uint2(const uint4& v0) {
return make_uint2(v0.x, v0.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 make_uint3(const uint4& v0) {
return make_uint3(v0.x, v0.y, v0.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 make_longlong2(const longlong3& v0) {
return make_longlong2(v0.x, v0.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong2 make_longlong2(const longlong4& v0) {
return make_longlong2(v0.x, v0.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 make_longlong3(const longlong4& v0) {
return make_longlong3(v0.x, v0.y, v0.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 make_ulonglong2(const ulonglong3& v0) {
return make_ulonglong2(v0.x, v0.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong2 make_ulonglong2(const ulonglong4& v0) {
return make_ulonglong2(v0.x, v0.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3 make_ulonglong3(const ulonglong4& v0) {
return make_ulonglong3(v0.x, v0.y, v0.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 make_float2(const float3& v0) {
return make_float2(v0.x, v0.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float2 make_float2(const float4& v0) {
return make_float2(v0.x, v0.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const float4& v0) {
return make_float3(v0.x, v0.y, v0.z);
}
/** @} */
/** Assemble functions from smaller vectors
* @{
*/
SUTIL_INLINE SUTIL_HOSTDEVICE int3 make_int3(const int v0, const int2& v1) {
return make_int3(v0, v1.x, v1.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int3 make_int3(const int2& v0, const int v1) {
return make_int3(v0.x, v0.y, v1);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int v0,
const int v1,
const int2& v2) {
return make_int4(v0, v1, v2.x, v2.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int v0,
const int2& v1,
const int v2) {
return make_int4(v0, v1.x, v1.y, v2);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int2& v0,
const int v1,
const int v2) {
return make_int4(v0.x, v0.y, v1, v2);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int v0, const int3& v1) {
return make_int4(v0, v1.x, v1.y, v1.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int3& v0, const int v1) {
return make_int4(v0.x, v0.y, v0.z, v1);
}
SUTIL_INLINE SUTIL_HOSTDEVICE int4 make_int4(const int2& v0, const int2& v1) {
return make_int4(v0.x, v0.y, v1.x, v1.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 make_uint3(const unsigned int v0,
const uint2& v1) {
return make_uint3(v0, v1.x, v1.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint3 make_uint3(const uint2& v0,
const unsigned int v1) {
return make_uint3(v0.x, v0.y, v1);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const unsigned int v0,
const unsigned int v1,
const uint2& v2) {
return make_uint4(v0, v1, v2.x, v2.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const unsigned int v0,
const uint2& v1,
const unsigned int v2) {
return make_uint4(v0, v1.x, v1.y, v2);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const uint2& v0,
const unsigned int v1,
const unsigned int v2) {
return make_uint4(v0.x, v0.y, v1, v2);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const unsigned int v0,
const uint3& v1) {
return make_uint4(v0, v1.x, v1.y, v1.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const uint3& v0,
const unsigned int v1) {
return make_uint4(v0.x, v0.y, v0.z, v1);
}
SUTIL_INLINE SUTIL_HOSTDEVICE uint4 make_uint4(const uint2& v0,
const uint2& v1) {
return make_uint4(v0.x, v0.y, v1.x, v1.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 make_longlong3(const long long v0,
const longlong2& v1) {
return make_longlong3(v0, v1.x, v1.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong3 make_longlong3(const longlong2& v0,
const long long v1) {
return make_longlong3(v0.x, v0.y, v1);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const long long v0,
const long long v1,
const longlong2& v2) {
return make_longlong4(v0, v1, v2.x, v2.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const long long v0,
const longlong2& v1,
const long long v2) {
return make_longlong4(v0, v1.x, v1.y, v2);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const longlong2& v0,
const long long v1,
const long long v2) {
return make_longlong4(v0.x, v0.y, v1, v2);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const long long v0,
const longlong3& v1) {
return make_longlong4(v0, v1.x, v1.y, v1.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const longlong3& v0,
const long long v1) {
return make_longlong4(v0.x, v0.y, v0.z, v1);
}
SUTIL_INLINE SUTIL_HOSTDEVICE longlong4 make_longlong4(const longlong2& v0,
const longlong2& v1) {
return make_longlong4(v0.x, v0.y, v1.x, v1.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3
make_ulonglong3(const unsigned long long v0, const ulonglong2& v1) {
return make_ulonglong3(v0, v1.x, v1.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong3
make_ulonglong3(const ulonglong2& v0, const unsigned long long v1) {
return make_ulonglong3(v0.x, v0.y, v1);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4
make_ulonglong4(const unsigned long long v0,
const unsigned long long v1,
const ulonglong2& v2) {
return make_ulonglong4(v0, v1, v2.x, v2.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4
make_ulonglong4(const unsigned long long v0,
const ulonglong2& v1,
const unsigned long long v2) {
return make_ulonglong4(v0, v1.x, v1.y, v2);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4
make_ulonglong4(const ulonglong2& v0,
const unsigned long long v1,
const unsigned long long v2) {
return make_ulonglong4(v0.x, v0.y, v1, v2);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4
make_ulonglong4(const unsigned long long v0, const ulonglong3& v1) {
return make_ulonglong4(v0, v1.x, v1.y, v1.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4
make_ulonglong4(const ulonglong3& v0, const unsigned long long v1) {
return make_ulonglong4(v0.x, v0.y, v0.z, v1);
}
SUTIL_INLINE SUTIL_HOSTDEVICE ulonglong4 make_ulonglong4(const ulonglong2& v0,
const ulonglong2& v1) {
return make_ulonglong4(v0.x, v0.y, v1.x, v1.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const float2& v0,
const float v1) {
return make_float3(v0.x, v0.y, v1);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float3 make_float3(const float v0,
const float2& v1) {
return make_float3(v0, v1.x, v1.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float v0,
const float v1,
const float2& v2) {
return make_float4(v0, v1, v2.x, v2.y);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float v0,
const float2& v1,
const float v2) {
return make_float4(v0, v1.x, v1.y, v2);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float2& v0,
const float v1,
const float v2) {
return make_float4(v0.x, v0.y, v1, v2);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float v0,
const float3& v1) {
return make_float4(v0, v1.x, v1.y, v1.z);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float3& v0,
const float v1) {
return make_float4(v0.x, v0.y, v0.z, v1);
}
SUTIL_INLINE SUTIL_HOSTDEVICE float4 make_float4(const float2& v0,
const float2& v1) {
return make_float4(v0.x, v0.y, v1.x, v1.y);
}
/** @} */
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/CMake/FindOptiX.cmake | CMake | #
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
if (TARGET OptiX::OptiX)
return()
endif()
# Emit a status message unless the caller asked for quiet mode.
# NOTE(review): this tests whether OptiX_FIND_QUIETLY is DEFINED rather than
# truthy; confirm that matches how find_package(OptiX QUIET) sets it.
macro(OptiX_config_message)
  if (NOT DEFINED OptiX_FIND_QUIETLY)
    message(${ARGN})
  endif()
endmacro()
# Locate the OptiX distribution. Search relative to the SDK first, then look in
# the system. OptiX 7+ is header-only, so finding include/optix.h identifies
# the installation root.
find_path(OptiX_ROOT_DIR NAMES include/optix.h PATHS ${OptiX_INSTALL_DIR})
include(FindPackageHandleStandardArgs)
# Standard bookkeeping: sets OptiX_FOUND and honors the REQUIRED/QUIET
# arguments that were passed to find_package(OptiX ...).
find_package_handle_standard_args(OptiX
  FOUND_VAR OptiX_FOUND
  REQUIRED_VARS
    OptiX_ROOT_DIR
  REASON_FAILURE_MESSAGE
    "OptiX installation not found on CMAKE_PREFIX_PATH (include/optix.h)"
)
if (NOT OptiX_FOUND)
  set(OptiX_NOT_FOUND_MESSAGE "Unable to find OptiX, please add your OptiX installation to CMAKE_PREFIX_PATH")
  return()
endif()
set(OptiX_INCLUDE_DIR ${OptiX_ROOT_DIR}/include)
# Header-only dependency: expose an INTERFACE imported target that carries
# only the include directory (no libraries to link).
add_library(OptiX::OptiX INTERFACE IMPORTED)
target_include_directories(OptiX::OptiX INTERFACE ${OptiX_INCLUDE_DIR})
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/main.cpp | C++ | // Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <pybind11/numpy.h>
#include <pybind11/operators.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
#define NOMINMAX
#include <cuda_runtime.h>
#include <optix.h>
#include <optix_denoiser_tiling.h>
#include <optix_function_table_definition.h>
#include <optix_stack_size.h>
#include <optix_stubs.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <stdexcept>
namespace py = pybind11;
#define PYOPTIX_CHECK(call) \
do { \
OptixResult res = call; \
if (res != OPTIX_SUCCESS) \
throw std::runtime_error(optixGetErrorString(res)); \
} while (0)
#define PYOPTIX_CHECK_LOG(call) \
do { \
OptixResult res = call; \
if (res != OPTIX_SUCCESS) \
throw std::runtime_error(std::string(optixGetErrorString(res)) + \
": " + log_buf); \
} while (0)
#define COMMA ,
#if OPTIX_VERSION >= 70100
#define IF_OPTIX71(code) code
#define IF_OPTIX71_ELSE(code0, code1) code0
#else
#define IF_OPTIX71(code)
#define IF_OPTIX71_ELSE(code0, code1) code1
#endif
#if OPTIX_VERSION >= 70200
#define IF_OPTIX72(code) code
#else
#define IF_OPTIX72(code)
#endif
#if OPTIX_VERSION >= 70300
#define IF_OPTIX73(code) code
#else
#define IF_OPTIX73(code)
#endif
#if OPTIX_VERSION >= 70400
#define IF_OPTIX74(code) code
#else
#define IF_OPTIX74(code)
#endif
#if OPTIX_VERSION >= 70700
#define IF_OPTIX77(code) code
#else
#define IF_OPTIX77(code)
#endif
namespace pyoptix {
void context_log_cb(unsigned int level,
const char* tag,
const char* message,
void* cbdata);
void convertBuildInputs(py::list build_inputs_in,
std::vector<OptixBuildInput>& build_inputs);
//------------------------------------------------------------------------------
//
// Opaque type struct wrappers
//
//------------------------------------------------------------------------------
// Thin value wrappers around the opaque OptiX handle types so pybind11 can
// expose them as Python classes. Equality compares the raw handles only.
struct DeviceContext {
    OptixDeviceContext deviceContext = 0;
    // Kept alive here so the C log callback can still reach the Python
    // callable for the lifetime of the context.
    py::object logCallbackFunction;
};
bool operator==(const DeviceContext& a, const DeviceContext& b) {
    return a.deviceContext == b.deviceContext;
}
struct Module {
    OptixModule module = 0;
};
bool operator==(const Module& a, const Module& b) {
    return a.module == b.module;
}
struct ProgramGroup {
    OptixProgramGroup programGroup = 0;
};
bool operator==(const ProgramGroup& a, const ProgramGroup& b) {
    return a.programGroup == b.programGroup;
}
struct Pipeline {
    OptixPipeline pipeline = 0;
};
bool operator==(const Pipeline& a, const Pipeline& b) {
    return a.pipeline == b.pipeline;
}
struct Denoiser {
    OptixDenoiser denoiser = 0;
};
bool operator==(const Denoiser& a, const Denoiser& b) {
    return a.denoiser == b.denoiser;
}
//------------------------------------------------------------------------------
//
// Proxy objects to modify some functionality in the optix param structs
//
//------------------------------------------------------------------------------
// Proxy for OptixDeviceContextOptions that keeps the Python log-callback
// object alive and routes OptiX log messages through pyoptix::context_log_cb.
// The validation-mode parameter only exists when building against
// OptiX >= 7.2 (see the IF_OPTIX72 macro).
struct DeviceContextOptions {
    DeviceContextOptions(
        py::object log_callback_function,
        int32_t log_callback_level IF_OPTIX72(
            COMMA OptixDeviceContextValidationMode validation_mode)) {
        logCallbackFunction = log_callback_function;
        // Only install the C trampoline when a Python callable was provided;
        // its PyObject* is handed back to us via logCallbackData.
        if (!logCallbackFunction.is_none()) {
            options.logCallbackFunction = pyoptix::context_log_cb;
            options.logCallbackData = logCallbackFunction.ptr();
        }
        options.logCallbackLevel = log_callback_level;
        IF_OPTIX72(options.validationMode = validation_mode;)
    }
    // Log callback needs additional backing
    py::object logCallbackFunction;
    OptixDeviceContextOptions options{};
};
// Python-side proxy for OptixBuildInputTriangleArray.
//
// The OptiX struct stores raw pointers into caller-owned arrays
// (vertexBuffers, flags). This wrapper owns that storage as std::vector
// members and re-points the struct at it in sync(), which must be called
// before build_input is handed to OptiX.
struct BuildInputTriangleArray {
    BuildInputTriangleArray(
        const py::list& vertexBuffers_,  // list of CUdeviceptr
        OptixVertexFormat vertexFormat,
        unsigned int vertexStrideInBytes,
        CUdeviceptr indexBuffer,
        unsigned int numIndexTriplets,
        OptixIndicesFormat indexFormat,
        unsigned int indexStrideInBytes,
        CUdeviceptr preTransform,
        const py::list& flags_,  // list of uint32_t
        unsigned int numSbtRecords,
        CUdeviceptr sbtIndexOffsetBuffer,
        unsigned int sbtIndexOffsetSizeInBytes,
        unsigned int sbtIndexOffsetStrideInBytes,
        unsigned int primitiveIndexOffset
            IF_OPTIX71(COMMA OptixTransformFormat transformFormat)) {
        memset(&build_input, 0, sizeof(OptixBuildInputTriangleArray));
        vertexBuffers = vertexBuffers_.cast<std::vector<CUdeviceptr>>();
        build_input.vertexFormat = vertexFormat;
        build_input.vertexStrideInBytes = vertexStrideInBytes;
        build_input.indexBuffer = indexBuffer;
        build_input.numIndexTriplets = numIndexTriplets;
        build_input.indexFormat = indexFormat;
        build_input.indexStrideInBytes = indexStrideInBytes;
        build_input.preTransform = preTransform;
        flags = flags_.cast<std::vector<unsigned int>>();
        // Fixed: numSbtRecords was assigned twice (once here and once after
        // primitiveIndexOffset); the redundant second assignment is removed.
        build_input.numSbtRecords = numSbtRecords;
        build_input.sbtIndexOffsetBuffer = sbtIndexOffsetBuffer;
        build_input.sbtIndexOffsetSizeInBytes = sbtIndexOffsetSizeInBytes;
        build_input.sbtIndexOffsetStrideInBytes = sbtIndexOffsetStrideInBytes;
        build_input.primitiveIndexOffset = primitiveIndexOffset;
        // NOTE(review): build_input.numVertices is left at 0 here; presumably
        // it is set through a property binding elsewhere — confirm.
        IF_OPTIX71(build_input.transformFormat = transformFormat;)
    }
    // Re-point the OptiX struct at the vectors owned by this wrapper.
    void sync() {
        build_input.vertexBuffers = vertexBuffers.data();
        build_input.flags = flags.data();
    }

    std::vector<unsigned int> flags;
    std::vector<CUdeviceptr> vertexBuffers;
    OptixBuildInputTriangleArray build_input{};
};
#if OPTIX_VERSION >= 70200
// Python-side proxy for OptixBuildInputCurveArray (curves require
// OptiX >= 7.2). Like the triangle wrapper, it owns the buffer-pointer
// arrays as std::vector members; sync() must be called to point the OptiX
// struct at that storage before use.
struct BuildInputCurveArray {
    BuildInputCurveArray(OptixPrimitiveType curveType,
                         unsigned int numPrimitives,
                         const py::list& vertexBuffers_,  // list of CUdeviceptr
                         unsigned int numVertices,
                         unsigned int vertexStrideInBytes,
                         const py::list& widthBuffers_,   // list of CUdeviceptr
                         unsigned int widthStrideInBytes,
                         const py::list& normalBuffers_,  // list of CUdeviceptr
                         unsigned int normalStrideInBytes,
                         CUdeviceptr indexBuffer,
                         unsigned int indexStrideInBytes,
                         unsigned int flag,
                         unsigned int primitiveIndexOffset) {
        // Zero the struct first so fields not covered by the parameters
        // (and the buffer pointers until sync()) are well-defined.
        memset(&build_input, 0, sizeof(OptixBuildInputCurveArray));
        build_input.curveType = curveType;
        build_input.numPrimitives = numPrimitives;
        vertexBuffers = vertexBuffers_.cast<std::vector<CUdeviceptr>>();
        build_input.numVertices = numVertices;
        build_input.vertexStrideInBytes = vertexStrideInBytes;
        widthBuffers = widthBuffers_.cast<std::vector<CUdeviceptr>>();
        build_input.widthStrideInBytes = widthStrideInBytes;
        normalBuffers = normalBuffers_.cast<std::vector<CUdeviceptr>>();
        build_input.normalStrideInBytes = normalStrideInBytes;
        build_input.indexBuffer = indexBuffer;
        build_input.indexStrideInBytes = indexStrideInBytes;
        build_input.flag = flag;
        build_input.primitiveIndexOffset = primitiveIndexOffset;
    }
    // Re-point the OptiX struct at the vectors owned by this wrapper.
    void sync() {
        build_input.vertexBuffers = vertexBuffers.data();
        build_input.widthBuffers = widthBuffers.data();
        build_input.normalBuffers = normalBuffers.data();
    }
    std::vector<CUdeviceptr> vertexBuffers;
    std::vector<CUdeviceptr> widthBuffers;
    std::vector<CUdeviceptr> normalBuffers;
    OptixBuildInputCurveArray build_input{};
};
#endif  // OPTIX_VERSION >= 70200
// Python-facing wrapper for OptixBuildInputCustomPrimitiveArray (AABB-based
// custom primitives).  AABB pointers and per-SBT-record flags are copied into
// owned vectors; sync() must re-point the raw struct at them before use.
struct BuildInputCustomPrimitiveArray {
    BuildInputCustomPrimitiveArray(
        const py::list& aabbBuffers_,  // list of CUdeviceptr
        unsigned int numPrimitives,
        unsigned int strideInBytes,
        const py::list& flags_,  // list of uint32_t
        unsigned int numSbtRecords,
        CUdeviceptr sbtIndexOffsetBuffer,
        unsigned int sbtIndexOffsetSizeInBytes,
        unsigned int sbtIndexOffsetStrideInBytes,
        unsigned int primitiveIndexOffset) {
        aabbBuffers = aabbBuffers_.cast<std::vector<CUdeviceptr>>();
        build_input.numPrimitives = numPrimitives;
        build_input.strideInBytes = strideInBytes;
        flags = flags_.cast<std::vector<unsigned int>>();
        build_input.numSbtRecords = numSbtRecords;
        build_input.sbtIndexOffsetBuffer = sbtIndexOffsetBuffer;
        build_input.sbtIndexOffsetSizeInBytes = sbtIndexOffsetSizeInBytes;
        build_input.sbtIndexOffsetStrideInBytes = sbtIndexOffsetStrideInBytes;
        build_input.primitiveIndexOffset = primitiveIndexOffset;
    }
    // Refresh the raw pointers into the owned vectors (required after any
    // copy/move of this wrapper and before passing build_input to OptiX).
    void sync() {
        build_input.aabbBuffers = aabbBuffers.data();
        build_input.flags = flags.data();
    }
    std::vector<unsigned int> flags;
    std::vector<CUdeviceptr> aabbBuffers;
    OptixBuildInputCustomPrimitiveArray build_input{};
};
// Python-facing wrapper for OptixBuildInputInstanceArray.  Exactly one of
// `instances` (array of OptixInstance) or `instancePointers` (array of
// pointers to OptixInstance) may be non-zero; the matching build-input type
// is recorded alongside the device pointer.
struct BuildInputInstanceArray {
    BuildInputInstanceArray(CUdeviceptr instances,
                            CUdeviceptr instancePointers,
                            unsigned int numInstances) {
        if (instances && instancePointers)
            throw std::runtime_error(
                "BuildInputInstanceArray created with both instances and "
                "instance pointers");
        // The setters record both the pointer and the build-input type; the
        // previous unconditional `build_input.instances = instances;` before
        // these calls was redundant and has been removed.
        if (instances) setInstances(instances);
        if (instancePointers) setInstancePointers(instancePointers);
        build_input.numInstances = numInstances;
    }
    void setInstances(CUdeviceptr instances) {
        build_type = OPTIX_BUILD_INPUT_TYPE_INSTANCES;
        build_input.instances = instances;
    }
    void setInstancePointers(CUdeviceptr instances) {
        build_type = OPTIX_BUILD_INPUT_TYPE_INSTANCE_POINTERS;
        build_input.instances = instances;
    }
    OptixBuildInputType build_type = OPTIX_BUILD_INPUT_TYPE_INSTANCES;
    OptixBuildInputInstanceArray build_input{};
};
// Python-facing wrapper for OptixInstance.
struct Instance {
    Instance(const py::list& transform,  // 12 floats (3x4 transform)
             uint32_t instanceId,
             uint32_t sbtOffset,
             uint32_t visibilityMask,
             uint32_t flags,
             OptixTraversableHandle traversableHandle) {
        instance.instanceId = instanceId;
        instance.sbtOffset = sbtOffset;
        instance.visibilityMask = visibilityMask;
        instance.flags = flags;
        instance.traversableHandle = traversableHandle;
        setTransform(transform);
    }
    // Copy a 12-element transform into instance.transform.
    // Raises std::runtime_error when the list is not exactly length 12.
    void setTransform(const py::list& val) {
        auto transform = val.cast<std::vector<float>>();
        if (transform.size() != 12)
            throw std::runtime_error(
                "Instance ctor: transform array must be length 12");
        // std::copy replaces the int-indexed loop (avoids the
        // signed/unsigned comparison against size()).
        std::copy(transform.begin(), transform.end(), instance.transform);
    }
    OptixInstance instance{};
};
// Python-facing wrapper for OptixMotionOptions (motion-blur key count,
// flags, and [timeBegin, timeEnd] interval).
struct MotionOptions {
    MotionOptions() {}
    MotionOptions(const OptixMotionOptions& other) { options = other; }
    MotionOptions(uint32_t numKeys,
                  uint32_t flags,
                  float timeBegin,
                  float timeEnd) {
        options.numKeys = numKeys;
        options.flags = flags;
        options.timeBegin = timeBegin;
        options.timeEnd = timeEnd;
    }
    OptixMotionOptions options{};
};
// Python-facing wrapper for OptixAccelEmitDesc: a device pointer to receive
// an emitted property (e.g. compacted size) and the property type.
struct AccelEmitDesc {
    AccelEmitDesc(CUdeviceptr result, OptixAccelPropertyType type) {
        desc.result = result;
        desc.type = type;
    }
    OptixAccelEmitDesc desc{};
};
// Python-facing wrapper for OptixStaticTransform: a child traversable plus a
// 3x4 transform and its inverse, serializable via getBytes() for upload to
// device memory.
struct StaticTransform {
    StaticTransform(OptixTraversableHandle child,
                    const py::list& transform_,
                    const py::list& invTransform) {
        transform.child = child;
        setTransform(transform_);
        setInvTransform(invTransform);
    }
    // Raises std::runtime_error when the list is not exactly length 12.
    void setTransform(const py::list& val) {
        auto transform_ = val.cast<std::vector<float>>();
        if (transform_.size() != 12)
            // Message previously said "Instance ctor" (copy-paste error).
            throw std::runtime_error(
                "StaticTransform ctor: transform array must be length 12");
        std::copy(transform_.begin(), transform_.end(), transform.transform);
    }
    // Raises std::runtime_error when the list is not exactly length 12.
    void setInvTransform(const py::list& val) {
        auto invTransform = val.cast<std::vector<float>>();
        if (invTransform.size() != 12)
            // Message previously said "Instance ctor" (copy-paste error).
            throw std::runtime_error(
                "StaticTransform ctor: invTransform array must be length 12");
        std::copy(invTransform.begin(), invTransform.end(),
                  transform.invTransform);
    }
    // Return the raw OptixStaticTransform bytes (for cudaMemcpy to device).
    py::bytes getBytes() const {
        const char* transform_chars = reinterpret_cast<const char*>(&transform);
        return std::string(transform_chars,
                           transform_chars + sizeof(OptixStaticTransform));
    }
    OptixStaticTransform transform{};
};
// Python-facing wrapper for OptixMatrixMotionTransform.  The fixed struct
// only holds the first two 3x4 key matrices (24 floats); any additional keys
// are kept in extra_transform and appended by getBytes().
struct MatrixMotionTransform {
    MatrixMotionTransform(OptixTraversableHandle child,
                          OptixMotionOptions motionOptions,
                          const py::list& transform) {
        mtransform.child = child;
        mtransform.motionOptions = motionOptions;
        if (transform.size()) setTransform(transform);
    }
    // Accepts N*12 floats with N >= 2.  The first 24 go into the embedded
    // array; the remainder is stored for serialization.
    void setTransform(const py::list& val) {
        auto transform = val.cast<std::vector<float>>();
        if (transform.size() < 24)
            throw std::runtime_error(
                "Transform array must be at least length 24");
        if (transform.size() % 12)
            throw std::runtime_error(
                "Transform array length must be multiple of 12");
        memcpy(mtransform.transform, transform.data(), sizeof(float) * 24);
        extra_transform.assign(transform.begin() + 24, transform.end());
    }
    // Serialize the struct followed by the extra motion keys as raw bytes
    // (matches the variable-length device layout OptiX expects).
    py::bytes getBytes() const {
        // TODO: optimize this
        const char* mtransform_chars =
            reinterpret_cast<const char*>(&mtransform);
        const char* extra_transform_chars =
            reinterpret_cast<const char*>(extra_transform.data());
        std::vector<char> data(
            mtransform_chars,
            mtransform_chars + sizeof(OptixMatrixMotionTransform));
        data.insert(
            data.end(), extra_transform_chars,
            extra_transform_chars + sizeof(float) * extra_transform.size());
        return std::string(data.begin(), data.end());
    }
    OptixMatrixMotionTransform mtransform{};
    std::vector<float> extra_transform;
};
// Python-facing wrapper for OptixSRTData (scale/shear, pivot, quaternion
// rotation, translation components of an SRT motion key).  Parameter order
// mirrors the OptixSRTData field order.
struct SRTData {
    SRTData(float sx,
            float a,
            float b,
            float pvx,
            float sy,
            float c,
            float pvy,
            float sz,
            float pvz,
            float qx,
            float qy,
            float qz,
            float qw,
            float tx,
            float ty,
            float tz) {
        data.sx = sx;
        data.a = a;
        data.b = b;
        data.pvx = pvx;
        data.sy = sy;
        data.c = c;
        data.pvy = pvy;
        data.sz = sz;
        data.pvz = pvz;
        data.qx = qx;
        data.qy = qy;
        data.qz = qz;
        data.qw = qw;
        data.tx = tx;
        data.ty = ty;
        data.tz = tz;
    }
    OptixSRTData data{};
};
// Python-facing wrapper for OptixPipelineCompileOptions.  The launch-params
// variable name is copied into an owned std::string; sync() must be called
// to re-point the raw struct at it before use with the OptiX API.
struct PipelineCompileOptions {
    PipelineCompileOptions(
        bool usesMotionBlur,
        uint32_t traversableGraphFlags,
        int32_t numPayloadValues,
        int32_t numAttributeValues,
        uint32_t exceptionFlags,
        const char* pipelineLaunchParamsVariableName_
            IF_OPTIX71(COMMA int32_t usesPrimitiveTypeFlags)) {
        options.usesMotionBlur = usesMotionBlur;
        options.traversableGraphFlags = traversableGraphFlags;
        options.numPayloadValues = numPayloadValues;
        options.numAttributeValues = numAttributeValues;
        options.exceptionFlags = exceptionFlags;
        IF_OPTIX71(options.usesPrimitiveTypeFlags = usesPrimitiveTypeFlags;)
        if (pipelineLaunchParamsVariableName_)
            pipelineLaunchParamsVariableName =
                pipelineLaunchParamsVariableName_;
    }
    // Refresh the raw c_str pointer (required after any copy/move).
    void sync() {
        options.pipelineLaunchParamsVariableName =
            pipelineLaunchParamsVariableName.c_str();
    }
    // Strings need extra backing
    std::string pipelineLaunchParamsVariableName;
    OptixPipelineCompileOptions options{};
};
// Python-facing wrapper for OptixPipelineLinkOptions.  The debugLevel field
// was removed from the OptiX API in 7.7, hence the version guard.
struct PipelineLinkOptions {
    PipelineLinkOptions(unsigned int maxTraceDepth
#if OPTIX_VERSION < 70700
                        COMMA OptixCompileDebugLevel debugLevel
#endif
    ) {
        options.maxTraceDepth = maxTraceDepth;
#if OPTIX_VERSION < 70700
        options.debugLevel = debugLevel;
#endif
    }
    OptixPipelineLinkOptions options{};
};
// Python-facing wrapper for OptixShaderBindingTable.  All record pointers
// are raw CUdeviceptr values owned by the caller; this struct only copies
// the layout description.
struct ShaderBindingTable {
    ShaderBindingTable(CUdeviceptr raygenRecord,
                       CUdeviceptr exceptionRecord,
                       CUdeviceptr missRecordBase,
                       unsigned int missRecordStrideInBytes,
                       unsigned int missRecordCount,
                       CUdeviceptr hitgroupRecordBase,
                       unsigned int hitgroupRecordStrideInBytes,
                       unsigned int hitgroupRecordCount,
                       CUdeviceptr callablesRecordBase,
                       unsigned int callablesRecordStrideInBytes,
                       unsigned int callablesRecordCount) {
        sbt.raygenRecord = raygenRecord;
        sbt.exceptionRecord = exceptionRecord;
        sbt.missRecordBase = missRecordBase;
        sbt.missRecordStrideInBytes = missRecordStrideInBytes;
        sbt.missRecordCount = missRecordCount;
        sbt.hitgroupRecordBase = hitgroupRecordBase;
        sbt.hitgroupRecordStrideInBytes = hitgroupRecordStrideInBytes;
        sbt.hitgroupRecordCount = hitgroupRecordCount;
        sbt.callablesRecordBase = callablesRecordBase;
        sbt.callablesRecordStrideInBytes = callablesRecordStrideInBytes;
        sbt.callablesRecordCount = callablesRecordCount;
    }
    OptixShaderBindingTable sbt{};
};
// Python-facing wrapper for OptixStackSizes (continuation/direct stack sizes
// per program type: RG, MS, CH, AH, IS, CC, DC).
struct StackSizes {
    StackSizes() {}
    StackSizes(unsigned int cssRG,
               unsigned int cssMS,
               unsigned int cssCH,
               unsigned int cssAH,
               unsigned int cssIS,
               unsigned int cssCC,
               unsigned int dssDC) {
        ss.cssRG = cssRG;
        ss.cssMS = cssMS;
        ss.cssCH = cssCH;
        ss.cssAH = cssAH;
        ss.cssIS = cssIS;
        ss.cssCC = cssCC;
        ss.dssDC = dssDC;
    }
    OptixStackSizes ss{};
};
#if OPTIX_VERSION >= 70200
// Python-facing wrapper for OptixModuleCompileBoundValueEntry.  The bound
// value bytes and annotation are held in owned storage; sync() must be
// called before the raw entry is used so its pointers reference that
// storage (copy/move leave entry's pointers stale until the next sync()).
struct ModuleCompileBoundValueEntry {
    ModuleCompileBoundValueEntry(size_t pipelineParamOffsetInBytes,
                                 const py::buffer& boundValue,
                                 const std::string& annotation) {
        entry.pipelineParamOffsetInBytes = pipelineParamOffsetInBytes;
        setBoundValue(boundValue);
        setAnnotation(annotation);
    }
    ModuleCompileBoundValueEntry(const ModuleCompileBoundValueEntry& other) {
        value = other.value;
        annotation = other.annotation;
        entry = other.entry;
    }
    ModuleCompileBoundValueEntry(ModuleCompileBoundValueEntry&& other) {
        value = std::move(other.value);
        annotation = std::move(other.annotation);
        entry = other.entry;
    }
    // Copy a 1-D python buffer's bytes into the owned value vector.
    void setBoundValue(const py::buffer& val) {
        py::buffer_info binfo = val.request();
        if (binfo.ndim != 1)
            throw std::runtime_error(
                "Multi-dimensional array passed as value for "
                "optix.ModuleCompileBoundValueEntry.boundValue");
        size_t byte_size = binfo.itemsize * binfo.shape[0];
        const std::byte* bytes = reinterpret_cast<const std::byte*>(binfo.ptr);
        value.clear();
        std::copy(bytes, bytes + byte_size, std::back_inserter(value));
    }
    void setAnnotation(const std::string& val) { annotation = val; }
    // Point the raw entry at the owned annotation/value storage.
    void sync() {
        entry.annotation = annotation.c_str();
        entry.sizeInBytes = value.size();
        entry.boundValuePtr = value.data();
    }
    OptixModuleCompileBoundValueEntry entry{};
    std::string annotation;
    std::vector<std::byte> value;
};
#endif  // OPTIX_VERSION >= 70200
#if OPTIX_VERSION >= 70400
// Python-facing wrapper for OptixPayloadType: a list of per-payload-slot
// semantics flags.  sync() must be called before the raw struct is used.
struct PayloadType {
    PayloadType() {}
    PayloadType(const py::list& payload_semantics) {
        setPayloadSemantics(payload_semantics);
    }
    void setPayloadSemantics(const py::list& val) {
        payload_semantics = val.cast<std::vector<uint32_t>>();
    }
    // Point the raw struct at the owned semantics vector.
    void sync() {
        payload_type.numPayloadValues = payload_semantics.size();
        if (!payload_semantics.empty())
            payload_type.payloadSemantics = payload_semantics.data();
        else
            payload_type.payloadSemantics = nullptr;
    }
    OptixPayloadType payload_type{};
    std::vector<uint32_t> payload_semantics;
};
#endif  // OPTIX_VERSION >= 70400
// Python-facing wrapper for OptixModuleCompileOptions, including the
// version-gated bound values (7.2+) and payload types (7.4+).
struct ModuleCompileOptions {
    ModuleCompileOptions(
        int32_t maxRegisterCount,
        OptixCompileOptimizationLevel optLevel,
        OptixCompileDebugLevel debugLevel IF_OPTIX72(
            COMMA std::vector<pyoptix::ModuleCompileBoundValueEntry>&&
                bound_values)
            IF_OPTIX74(COMMA std::vector<pyoptix::PayloadType>&&
                           payload_types)) {
        memset(&options, 0, sizeof(OptixModuleCompileOptions));
        options.maxRegisterCount = maxRegisterCount;
        options.optLevel = optLevel;
        options.debugLevel = debugLevel;
        IF_OPTIX72(pyboundValues = std::move(bound_values);)
        IF_OPTIX74(pypayloadTypes = std::move(payload_types);)
    }
    // Flatten the wrapper vectors into raw OptiX arrays and point `options`
    // at them.  BUG FIX: a stray `return;` at the top of this function made
    // the whole body dead code, so boundValues/payloadTypes were silently
    // never propagated to the OptiX struct.
    void sync() {
#if OPTIX_VERSION >= 70200
        boundValues.clear();
        for (auto& pybve : pyboundValues) {
            pybve.sync();
            boundValues.push_back(pybve.entry);
        }
        options.boundValues =
            boundValues.empty() ? nullptr : boundValues.data();
        options.numBoundValues = static_cast<uint32_t>(boundValues.size());
#endif
#if OPTIX_VERSION >= 70400
        payloadTypes.clear();
        for (auto& pypt : pypayloadTypes) {
            pypt.sync();
            payloadTypes.push_back(pypt.payload_type);
        }
        options.payloadTypes =
            payloadTypes.empty() ? nullptr : payloadTypes.data();
        options.numPayloadTypes = static_cast<uint32_t>(payloadTypes.size());
#endif
    }
    OptixModuleCompileOptions options{};
#if OPTIX_VERSION >= 70200
    std::vector<pyoptix::ModuleCompileBoundValueEntry> pyboundValues;
    std::vector<OptixModuleCompileBoundValueEntry> boundValues;
#endif
#if OPTIX_VERSION >= 70400
    std::vector<pyoptix::PayloadType> pypayloadTypes;
    std::vector<OptixPayloadType> payloadTypes;
#endif
};
#if OPTIX_VERSION >= 70100
// Python-facing wrapper for OptixBuiltinISOptions (selects a built-in
// intersection module, e.g. for curves or spheres).
struct BuiltinISOptions {
    BuiltinISOptions(
        OptixPrimitiveType builtinISModuleType,
        int usesMotionBlur IF_OPTIX74(COMMA unsigned int buildFlags)
            IF_OPTIX74(COMMA unsigned int curveEndcapFlags)) {
        options.builtinISModuleType = builtinISModuleType;
        options.usesMotionBlur = usesMotionBlur;
        IF_OPTIX74(options.buildFlags = buildFlags;)
        IF_OPTIX74(options.curveEndcapFlags = curveEndcapFlags;)
    }
    OptixBuiltinISOptions options{};
};
#endif
// Python-facing wrapper for OptixProgramGroupDesc.  Exactly one program
// group kind is selected, inferred first from whichever entry-function name
// is provided, then (possibly overriding) from whichever module is set.
// Entry-name strings are copied into owned std::strings; programGroupCreate
// points the raw desc at them before calling into OptiX.
struct ProgramGroupDesc {
    ProgramGroupDesc(uint32_t flags,
                     const char* raygenEntryFunctionName,
                     const pyoptix::Module raygenModule,
                     const char* missEntryFunctionName,
                     const pyoptix::Module missModule,
                     const char* exceptionEntryFunctionName,
                     const pyoptix::Module exceptionModule,
                     const char* callablesEntryFunctionNameDC,
                     const pyoptix::Module callablesModuleDC,
                     const char* callablesEntryFunctionNameCC,
                     const pyoptix::Module callablesModuleCC,
                     const char* hitgroupEntryFunctionNameCH,
                     const pyoptix::Module hitgroupModuleCH,
                     const char* hitgroupEntryFunctionNameAH,
                     const pyoptix::Module hitgroupModuleAH,
                     const char* hitgroupEntryFunctionNameIS,
                     const pyoptix::Module hitgroupModuleIS) {
        program_group_desc.flags = flags;
        // Kind from the entry-function names.
        if (raygenEntryFunctionName) {
            program_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
            entryFunctionName0 = raygenEntryFunctionName;
        } else if (missEntryFunctionName) {
            program_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
            entryFunctionName0 = missEntryFunctionName;
        } else if (exceptionEntryFunctionName) {
            program_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_EXCEPTION;
            entryFunctionName0 = exceptionEntryFunctionName;
        } else if (callablesEntryFunctionNameDC ||
                   callablesEntryFunctionNameCC) {
            program_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
            entryFunctionName0 = callablesEntryFunctionNameDC
                                     ? callablesEntryFunctionNameDC
                                     : "";
            entryFunctionName1 = callablesEntryFunctionNameCC
                                     ? callablesEntryFunctionNameCC
                                     : "";
        } else if (hitgroupEntryFunctionNameCH || hitgroupEntryFunctionNameAH ||
                   hitgroupEntryFunctionNameIS) {
            program_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
            entryFunctionName0 = hitgroupEntryFunctionNameCH
                                     ? hitgroupEntryFunctionNameCH
                                     : "";
            entryFunctionName1 = hitgroupEntryFunctionNameAH
                                     ? hitgroupEntryFunctionNameAH
                                     : "";
            entryFunctionName2 = hitgroupEntryFunctionNameIS
                                     ? hitgroupEntryFunctionNameIS
                                     : "";
        }
        // Kind/module from whichever module handle is set.
        // NOTE(review): if names and modules indicate different kinds, the
        // module wins -- confirm callers always pass a consistent pair.
        if (raygenModule.module) {
            program_group_desc.raygen.module = raygenModule.module;
            program_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
        } else if (missModule.module) {
            program_group_desc.miss.module = missModule.module;
            program_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_MISS;
        } else if (exceptionModule.module) {
            program_group_desc.exception.module = exceptionModule.module;
            program_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_EXCEPTION;
        } else if (callablesModuleDC.module || callablesModuleCC.module) {
            program_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
            // BUG FIX: moduleCC was previously gated on
            // callablesModuleDC.module, so a CC-only callable lost its
            // module.  The redundant `x ? x : nullptr` ternaries are also
            // collapsed (a null module handle is already nullptr).
            program_group_desc.callables.moduleDC = callablesModuleDC.module;
            program_group_desc.callables.moduleCC = callablesModuleCC.module;
        } else if (hitgroupModuleCH.module || hitgroupModuleAH.module ||
                   hitgroupModuleIS.module) {
            program_group_desc.kind = OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
            program_group_desc.hitgroup.moduleCH = hitgroupModuleCH.module;
            program_group_desc.hitgroup.moduleAH = hitgroupModuleAH.module;
            program_group_desc.hitgroup.moduleIS = hitgroupModuleIS.module;
        }
    }
    std::string entryFunctionName0;
    std::string entryFunctionName1;
    std::string entryFunctionName2;
    OptixProgramGroupDesc program_group_desc{};
};
#if OPTIX_VERSION >= 70400
// Python-facing wrapper for OptixProgramGroupOptions (optional payload type
// for the group).  options.payloadType points at this object's own copy.
// NOTE(review): copying this struct leaves options.payloadType pointing at
// the source object's member -- confirm instances are not copied after
// setPayloadType, or call setPayloadType again after a copy.
struct ProgramGroupOptions {
    ProgramGroupOptions() {}
    ProgramGroupOptions(const pyoptix::PayloadType& payload_type) {
        setPayloadType(payload_type);
    }
    void setPayloadType(const pyoptix::PayloadType& payload_type_) {
        payload_type = payload_type_.payload_type;
        if (payload_type.numPayloadValues > 0)
            options.payloadType = &payload_type;
        else
            options.payloadType = nullptr;
    }
    OptixPayloadType payload_type{};
    OptixProgramGroupOptions options{};
};
#endif  // OPTIX_VERSION >= 70400
//------------------------------------------------------------------------------
//
// Helpers
//
//------------------------------------------------------------------------------
// Size of the stack buffers used to capture OptiX compile/link log output.
constexpr size_t LOG_BUFFER_MAX_SIZE = 2048u;
// Trampoline passed to OptiX as the C log callback: recovers the python
// callable stashed in cbdata (borrowed, not owned) and invokes it with
// (level, tag, message).
void context_log_cb(unsigned int level,
                    const char* tag,
                    const char* message,
                    void* cbdata) {
    py::object cb = py::reinterpret_borrow<py::object>(
        reinterpret_cast<PyObject*>(cbdata));
    cb(level, tag, message);
}
// Translate a python list of pyoptix build-input wrappers into the raw
// OptixBuildInput array expected by the accel entry points.  Each wrapper's
// sync() is called so its internal pointers are valid; the wrappers must
// therefore outlive `build_inputs`.  Raises std::runtime_error on an
// unrecognized element type.
void convertBuildInputs(py::list build_inputs_in,
                        std::vector<OptixBuildInput>& build_inputs) {
    build_inputs.resize(build_inputs_in.size());
    int32_t idx = 0;
    for (auto list_elem : build_inputs_in) {
        if (py::isinstance<pyoptix::BuildInputTriangleArray>(list_elem)) {
            pyoptix::BuildInputTriangleArray& tri_array =
                list_elem.cast<pyoptix::BuildInputTriangleArray&>();
            tri_array.sync();
            build_inputs[idx].type = OPTIX_BUILD_INPUT_TYPE_TRIANGLES;
            build_inputs[idx].triangleArray = tri_array.build_input;
        }
// BUG FIX: BuildInputCurveArray is only declared for OPTIX_VERSION >= 70200,
// but this branch was guarded with >= 70100, breaking the build on OptiX 7.1.
#if OPTIX_VERSION >= 70200
        else if (py::isinstance<pyoptix::BuildInputCurveArray>(list_elem)) {
            pyoptix::BuildInputCurveArray& curve_array =
                list_elem.cast<pyoptix::BuildInputCurveArray&>();
            curve_array.sync();
            build_inputs[idx].type = OPTIX_BUILD_INPUT_TYPE_CURVES;
            build_inputs[idx].curveArray = curve_array.build_input;
        }
#endif
        else if (py::isinstance<pyoptix::BuildInputCustomPrimitiveArray>(
                     list_elem)) {
            pyoptix::BuildInputCustomPrimitiveArray& cp_array =
                list_elem.cast<pyoptix::BuildInputCustomPrimitiveArray&>();
            cp_array.sync();
            build_inputs[idx].type = OPTIX_BUILD_INPUT_TYPE_CUSTOM_PRIMITIVES;
            build_inputs[idx].customPrimitiveArray = cp_array.build_input;
        } else if (py::isinstance<pyoptix::BuildInputInstanceArray>(
                       list_elem)) {
            pyoptix::BuildInputInstanceArray& inst_array =
                list_elem.cast<pyoptix::BuildInputInstanceArray&>();
            build_inputs[idx].type = inst_array.build_type;
            build_inputs[idx].instanceArray = inst_array.build_input;
        } else {
            // NOTE(review): message mentions accelComputeMemoryUsage, but
            // accelBuild also routes through here.
            throw std::runtime_error(
                "Context.accelComputeMemoryUsage called with non-build "
                "input types"
                " in buildInputs param");
        }
        ++idx;
    }
}
// Return the raw bytes of a vector<T> as a python bytes object.
template <typename T>
py::bytes makeBytes(const std::vector<T>& v) {
    return py::bytes(reinterpret_cast<const char*>(v.data()),
                     v.size() * sizeof(T));
}
//------------------------------------------------------------------------------
//
// OptiX API error checked wrappers
//
//------------------------------------------------------------------------------
// Load the OptiX function table; must succeed before any other API call.
void init() { PYOPTIX_CHECK(optixInit()); }
// Decompose OPTIX_VERSION (e.g. 70400 -> (7, 4, 0)) into a python tuple.
py::tuple version() {
    const unsigned int v = OPTIX_VERSION;
    return py::make_tuple(v / 10000, (v / 100) % 100, v % 100);
}
// Return the symbolic name of an OptixResult code.
const char* getErrorName(OptixResult result) {
    return optixGetErrorName(result);
}
// Return the human-readable description of an OptixResult code.
const char* getErrorString(OptixResult result) {
    return optixGetErrorString(result);
}
// Create an OptiX device context on the given CUDA context (0 = current).
// The python log callback is stored on the wrapper to keep it alive for the
// lifetime of the context.
pyoptix::DeviceContext deviceContextCreate(
    uintptr_t fromContext, const pyoptix::DeviceContextOptions& options) {
    pyoptix::DeviceContext ctx{};
    ctx.logCallbackFunction = options.logCallbackFunction;
    PYOPTIX_CHECK(
        optixDeviceContextCreate(reinterpret_cast<CUcontext>(fromContext),
                                 &options.options, &(ctx.deviceContext)));
    return ctx;
}
// Destroy the underlying OptiX device context.
void deviceContextDestroy(pyoptix::DeviceContext context) {
    PYOPTIX_CHECK(optixDeviceContextDestroy(context.deviceContext));
}
// Query a device property and return it as a python int.  All properties
// handled here are uint32_t-sized; anything else raises.
py::object deviceContextGetProperty(pyoptix::DeviceContext context,
                                    OptixDeviceProperty property) {
    switch (property) {
        // uint32_t
        case OPTIX_DEVICE_PROPERTY_LIMIT_MAX_TRACE_DEPTH:
        case OPTIX_DEVICE_PROPERTY_LIMIT_MAX_TRAVERSABLE_GRAPH_DEPTH:
        case OPTIX_DEVICE_PROPERTY_LIMIT_MAX_PRIMITIVES_PER_GAS:
        case OPTIX_DEVICE_PROPERTY_LIMIT_MAX_INSTANCES_PER_IAS:
        case OPTIX_DEVICE_PROPERTY_RTCORE_VERSION:
        case OPTIX_DEVICE_PROPERTY_LIMIT_MAX_INSTANCE_ID:
        case OPTIX_DEVICE_PROPERTY_LIMIT_NUM_BITS_INSTANCE_VISIBILITY_MASK:
        case OPTIX_DEVICE_PROPERTY_LIMIT_MAX_SBT_RECORDS_PER_GAS:
        case OPTIX_DEVICE_PROPERTY_LIMIT_MAX_SBT_OFFSET: {
            uint32_t value = 0u;
            PYOPTIX_CHECK(optixDeviceContextGetProperty(
                context.deviceContext, property, &value, sizeof(uint32_t)));
            return py::int_(value);
        }
        default: {
            throw std::runtime_error(
                "Unrecognized optix.DeviceProperty passed to "
                "DeviceContext.getProperty()");
        }
    }
}
// Install (or clear, when callbackFunction is None) the python log callback.
// The callable is stored on the wrapper so the raw PyObject* handed to OptiX
// stays alive.
void deviceContextSetLogCallback(pyoptix::DeviceContext context,
                                 py::object callbackFunction,
                                 uint32_t callbackLevel) {
    context.logCallbackFunction = callbackFunction;
    OptixLogCallback cb = nullptr;
    void* cb_data = nullptr;
    if (!context.logCallbackFunction.is_none()) {
        cb = context_log_cb;
        cb_data = context.logCallbackFunction.ptr();
    }
    PYOPTIX_CHECK(optixDeviceContextSetLogCallback(context.deviceContext, cb,
                                                   cb_data, callbackLevel));
}
// Enable or disable the OptiX disk cache for this context.
void deviceContextSetCacheEnabled(pyoptix::DeviceContext context, int enabled) {
    PYOPTIX_CHECK(
        optixDeviceContextSetCacheEnabled(context.deviceContext, enabled));
}
// Set the directory used by the OptiX disk cache.
void deviceContextSetCacheLocation(pyoptix::DeviceContext context,
                                   const char* location) {
    PYOPTIX_CHECK(optixDeviceContextSetCacheLocation(context.deviceContext,
                                                     location));
}
// Set the low/high water marks (bytes) for disk-cache garbage collection.
void deviceContextSetCacheDatabaseSizes(pyoptix::DeviceContext context,
                                        size_t lowWaterMark,
                                        size_t highWaterMark) {
    PYOPTIX_CHECK(optixDeviceContextSetCacheDatabaseSizes(
        context.deviceContext, lowWaterMark, highWaterMark));
}
// Return whether the OptiX disk cache is enabled for this context.
py::bool_ deviceContextGetCacheEnabled(pyoptix::DeviceContext context) {
    int32_t enabled = 0;
    PYOPTIX_CHECK(
        optixDeviceContextGetCacheEnabled(context.deviceContext, &enabled));
    return py::bool_(enabled);
}
// Return the disk-cache directory (truncated to 1023 chars + NUL).
py::str deviceContextGetCacheLocation(pyoptix::DeviceContext context) {
    constexpr size_t locationSize = 1024u;
    char location[locationSize];
    PYOPTIX_CHECK(optixDeviceContextGetCacheLocation(context.deviceContext,
                                                     location, locationSize));
    return py::str(location);
}
// Return the (lowWaterMark, highWaterMark) disk-cache sizes as a tuple.
py::tuple deviceContextGetCacheDatabaseSizes(pyoptix::DeviceContext context) {
    size_t lowWaterMark;
    size_t highWaterMark;
    PYOPTIX_CHECK(optixDeviceContextGetCacheDatabaseSizes(
        context.deviceContext, &lowWaterMark, &highWaterMark));
    return py::make_tuple(lowWaterMark, highWaterMark);
}
// TODO: return log like other funcs
// Create an OptiX pipeline from already-created program groups.  The OptiX
// compile/link log is written into logString (out-param).
pyoptix::Pipeline pipelineCreate(
    pyoptix::DeviceContext context,
    const pyoptix::PipelineCompileOptions& pipelineCompileOptions,
    const pyoptix::PipelineLinkOptions& pipelineLinkOptions,
    const py::list& programGroups,
    std::string& logString) {
    // Unwrap the python ProgramGroup objects into raw handles.
    std::vector<OptixProgramGroup> pgs;
    for (const auto list_elem : programGroups) {
        pyoptix::ProgramGroup pygroup = list_elem.cast<pyoptix::ProgramGroup>();
        pgs.push_back(pygroup.programGroup);
    }
    size_t log_buf_size = LOG_BUFFER_MAX_SIZE;
    char log_buf[LOG_BUFFER_MAX_SIZE];
    log_buf[0] = '\0';
    pyoptix::Pipeline pipeline{};
    PYOPTIX_CHECK_LOG(optixPipelineCreate(
        context.deviceContext, &pipelineCompileOptions.options,
        &pipelineLinkOptions.options, pgs.data(),
        static_cast<uint32_t>(pgs.size()), log_buf, &log_buf_size,
        &pipeline.pipeline));
    logString = log_buf;
    return pipeline;
}
// Destroy the underlying OptiX pipeline.
void pipelineDestroy(pyoptix::Pipeline pipeline) {
    PYOPTIX_CHECK(optixPipelineDestroy(pipeline.pipeline));
}
// Configure the pipeline's stack sizes (see optixPipelineSetStackSize docs
// for how the three stack values and max graph depth interact).
void pipelineSetStackSize(pyoptix::Pipeline pipeline,
                          unsigned int directCallableStackSizeFromTraversal,
                          unsigned int directCallableStackSizeFromState,
                          unsigned int continuationStackSize,
                          unsigned int maxTraversableGraphDepth) {
    PYOPTIX_CHECK(optixPipelineSetStackSize(
        pipeline.pipeline, directCallableStackSizeFromTraversal,
        directCallableStackSizeFromState, continuationStackSize,
        maxTraversableGraphDepth));
}
// Compile a PTX (or, from 7.7, generic input) string into an OptiX module.
// Returns (Module, log string).  The API entry point was renamed from
// optixModuleCreateFromPTX to optixModuleCreate in OptiX 7.7.
#if OPTIX_VERSION < 70700
py::tuple moduleCreateFromPTX(
#else
py::tuple moduleCreate(
#endif
    const pyoptix::DeviceContext& context,
    pyoptix::ModuleCompileOptions& moduleCompileOptions,
    pyoptix::PipelineCompileOptions& pipelineCompileOptions,
    const std::string& PTX) {
    size_t log_buf_size = LOG_BUFFER_MAX_SIZE;
    char log_buf[LOG_BUFFER_MAX_SIZE];
    log_buf[0] = '\0';
    // Re-point internal raw pointers before handing the structs to OptiX.
    moduleCompileOptions.sync();
    pipelineCompileOptions.sync();
    pyoptix::Module module;
    PYOPTIX_CHECK_LOG(
#if OPTIX_VERSION < 70700
        optixModuleCreateFromPTX(
#else
        optixModuleCreate(
#endif
            context.deviceContext, &moduleCompileOptions.options,
            &pipelineCompileOptions.options, PTX.c_str(),
            static_cast<size_t>(PTX.size() + 1), log_buf, &log_buf_size,
            &module.module));
    return py::make_tuple(module, py::str(log_buf));
}
// Destroy the underlying OptiX module.
void moduleDestroy(pyoptix::Module module) {
    PYOPTIX_CHECK(optixModuleDestroy(module.module));
}
#if OPTIX_VERSION >= 70100
// Fetch one of OptiX's built-in intersection modules (curves/spheres etc.)
// matching the given compile options.
pyoptix::Module builtinISModuleGet(
    const pyoptix::DeviceContext& context,
    pyoptix::ModuleCompileOptions& moduleCompileOptions,
    pyoptix::PipelineCompileOptions& pipelineCompileOptions,
    const pyoptix::BuiltinISOptions& builtinISOptions) {
    // Re-point internal raw pointers before handing the structs to OptiX.
    moduleCompileOptions.sync();
    pipelineCompileOptions.sync();
    pyoptix::Module module;
    PYOPTIX_CHECK(optixBuiltinISModuleGet(
        context.deviceContext, &moduleCompileOptions.options,
        &pipelineCompileOptions.options, &builtinISOptions.options,
        &module.module));
    return module;
}
#endif  // OPTIX_VERSION >= 70100
// Query the stack sizes used by a program group (OptiX 7.7+ additionally
// requires the pipeline the group will be used with).
pyoptix::StackSizes programGroupGetStackSize(
    pyoptix::ProgramGroup programGroup
        IF_OPTIX77(COMMA pyoptix::Pipeline pipeline)) {
    pyoptix::StackSizes sizes;
    PYOPTIX_CHECK(optixProgramGroupGetStackSize(
        programGroup.programGroup,
        &sizes.ss IF_OPTIX77(COMMA pipeline.pipeline)));
    return sizes;
}
// Create program groups from a list of pyoptix::ProgramGroupDesc wrappers.
// Returns (list of ProgramGroup, log string).  Before calling OptiX, each
// desc's raw entry-function-name pointers are re-pointed at the wrapper's
// owned std::strings (empty string -> nullptr), per the group's kind.
py::tuple programGroupCreate(
    pyoptix::DeviceContext context,
    const py::list& programDescriptions IF_OPTIX74(
        COMMA std::optional<pyoptix::ProgramGroupOptions> options)) {
    size_t log_buf_size = LOG_BUFFER_MAX_SIZE;
    char log_buf[LOG_BUFFER_MAX_SIZE];
    log_buf[0] = '\0';
    std::vector<OptixProgramGroupDesc> program_groups_descs;
    for (auto list_elem : programDescriptions) {
        pyoptix::ProgramGroupDesc& pydesc =
            list_elem.cast<pyoptix::ProgramGroupDesc&>();
        switch (pydesc.program_group_desc.kind) {
            case OPTIX_PROGRAM_GROUP_KIND_RAYGEN:
                pydesc.program_group_desc.raygen.entryFunctionName =
                    !pydesc.entryFunctionName0.empty()
                        ? pydesc.entryFunctionName0.c_str()
                        : nullptr;
                break;
            case OPTIX_PROGRAM_GROUP_KIND_MISS:
                pydesc.program_group_desc.miss.entryFunctionName =
                    !pydesc.entryFunctionName0.empty()
                        ? pydesc.entryFunctionName0.c_str()
                        : nullptr;
                break;
            case OPTIX_PROGRAM_GROUP_KIND_EXCEPTION:
                pydesc.program_group_desc.exception.entryFunctionName =
                    !pydesc.entryFunctionName0.empty()
                        ? pydesc.entryFunctionName0.c_str()
                        : nullptr;
                break;
            case OPTIX_PROGRAM_GROUP_KIND_HITGROUP:
                // Name slots 0/1/2 map to CH/AH/IS respectively.
                pydesc.program_group_desc.hitgroup.entryFunctionNameCH =
                    !pydesc.entryFunctionName0.empty()
                        ? pydesc.entryFunctionName0.c_str()
                        : nullptr;
                pydesc.program_group_desc.hitgroup.entryFunctionNameAH =
                    !pydesc.entryFunctionName1.empty()
                        ? pydesc.entryFunctionName1.c_str()
                        : nullptr;
                pydesc.program_group_desc.hitgroup.entryFunctionNameIS =
                    !pydesc.entryFunctionName2.empty()
                        ? pydesc.entryFunctionName2.c_str()
                        : nullptr;
                break;
            case OPTIX_PROGRAM_GROUP_KIND_CALLABLES:
                // Name slots 0/1 map to DC/CC respectively.
                pydesc.program_group_desc.callables.entryFunctionNameDC =
                    !pydesc.entryFunctionName0.empty()
                        ? pydesc.entryFunctionName0.c_str()
                        : nullptr;
                pydesc.program_group_desc.callables.entryFunctionNameCC =
                    !pydesc.entryFunctionName1.empty()
                        ? pydesc.entryFunctionName1.c_str()
                        : nullptr;
                break;
        }
        program_groups_descs.push_back(pydesc.program_group_desc);
    }
    std::vector<OptixProgramGroup> program_groups(programDescriptions.size());
#if OPTIX_VERSION < 70400
    const OptixProgramGroupOptions opts{};
#else
    const OptixProgramGroupOptions opts = options.has_value()
                                              ? options.value().options
                                              : OptixProgramGroupOptions{};
#endif
    PYOPTIX_CHECK_LOG(optixProgramGroupCreate(
        context.deviceContext, program_groups_descs.data(),
        static_cast<uint32_t>(program_groups_descs.size()), &opts, log_buf,
        &log_buf_size, program_groups.data()));
    py::list pygroups;
    for (auto& group : program_groups)
        pygroups.append(pyoptix::ProgramGroup{group});
    return py::make_tuple(pygroups, py::str(log_buf));
}
// Destroy the underlying OptiX program group.
void programGroupDestroy(pyoptix::ProgramGroup programGroup) {
    PYOPTIX_CHECK(optixProgramGroupDestroy(programGroup.programGroup));
}
// Launch the pipeline over a width x height x depth grid on the given CUDA
// stream.  pipelineParams is a device pointer to the launch-params struct.
void launch(pyoptix::Pipeline pipeline,
            uintptr_t stream,
            CUdeviceptr pipelineParams,
            size_t pipelineParamsSize,
            const pyoptix::ShaderBindingTable& sbt,
            uint32_t width,
            uint32_t height,
            uint32_t depth) {
    PYOPTIX_CHECK(optixLaunch(pipeline.pipeline,
                              reinterpret_cast<CUstream>(stream),
                              pipelineParams, pipelineParamsSize, &sbt.sbt,
                              width, height, depth));
}
// Write the program group's SBT record header into the start of the given
// python buffer (in place).
void sbtRecordPackHeader(pyoptix::ProgramGroup programGroup,
                         py::buffer sbtRecord) {
    py::buffer_info binfo = sbtRecord.request();
    // TODO: sanity check buffer
    PYOPTIX_CHECK(
        optixSbtRecordPackHeader(programGroup.programGroup, binfo.ptr));
}
// Pack the program group's SBT record header and return it as raw bytes.
py::bytes sbtRecordGetHeader(pyoptix::ProgramGroup programGroup) {
    std::string header(OPTIX_SBT_RECORD_HEADER_SIZE, '\0');
    PYOPTIX_CHECK(
        optixSbtRecordPackHeader(programGroup.programGroup, header.data()));
    return py::bytes(header);
}
// Compute the temp/output buffer sizes needed to build an accel structure
// from the given build inputs.  accelOptions and buildInputs must have the
// same length (one options entry per input).
OptixAccelBufferSizes accelComputeMemoryUsage(pyoptix::DeviceContext context,
                                              const py::list& accelOptions,
                                              const py::list& buildInputs) {
    const uint32_t num_inputs = buildInputs.size();
    if (accelOptions.size() != num_inputs)
        throw std::runtime_error(
            "Context.accelComputeMemoryUsage called with mismatched number "
            "of accel options and build inputs");
    auto accel_options =
        accelOptions.cast<std::vector<OptixAccelBuildOptions>>();
    std::vector<OptixBuildInput> build_inputs;
    convertBuildInputs(buildInputs, build_inputs);
    OptixAccelBufferSizes bufferSizes{};
    PYOPTIX_CHECK(optixAccelComputeMemoryUsage(
        context.deviceContext, accel_options.data(), build_inputs.data(),
        num_inputs, &bufferSizes));
    return bufferSizes;
}
// Build an acceleration structure on the given stream and return its
// traversable handle.  accelOptions and buildInputs must have equal length;
// emittedProperties optionally receives properties (e.g. compacted size).
OptixTraversableHandle accelBuild(
    pyoptix::DeviceContext context,
    uintptr_t stream,
    const py::list& accelOptions,  // AccelBuildOptions
    const py::list& buildInputs,   //
    CUdeviceptr tempBuffer,
    size_t tempBufferSizeInBytes,
    CUdeviceptr outputBuffer,
    size_t outputBufferSizeInBytes,
    const py::list& emittedProperties  // AccelEmitDesc
) {
    const uint32_t num_inputs = buildInputs.size();
    if (accelOptions.size() != num_inputs)
        throw std::runtime_error(
            "Context.accelComputeMemoryUsage called with mismatched number "
            "of accel options and build inputs");
    auto accel_options =
        accelOptions.cast<std::vector<OptixAccelBuildOptions>>();
    std::vector<OptixBuildInput> build_inputs;
    convertBuildInputs(buildInputs, build_inputs);
    // Flatten the python AccelEmitDesc wrappers into raw descriptors.
    const uint32_t num_properties = emittedProperties.size();
    const auto emitted_properties_temp =
        emittedProperties.cast<std::vector<pyoptix::AccelEmitDesc>>();
    std::vector<OptixAccelEmitDesc> emitted_properties;
    for (auto desc : emitted_properties_temp)
        emitted_properties.push_back(desc.desc);
    OptixTraversableHandle output_handle;
    PYOPTIX_CHECK(optixAccelBuild(
        context.deviceContext, reinterpret_cast<CUstream>(stream),
        accel_options.data(), build_inputs.data(), num_inputs, tempBuffer,
        tempBufferSizeInBytes, outputBuffer, outputBufferSizeInBytes,
        &output_handle,
        emitted_properties.empty() ? nullptr : emitted_properties.data(),
        num_properties));
    return output_handle;
}
// OptiX 7.6 renamed OptixAccelRelocationInfo to OptixRelocationInfo; this
// macro selects the correct struct name for the SDK version being compiled
// against so the relocation wrappers below can share one spelling.
#if OPTIX_VERSION < 70600
#define RELOCATION_INFO OptixAccelRelocationInfo
#else
#define RELOCATION_INFO OptixRelocationInfo
#endif
// Queries the relocation info for an accel, which callers later pass to
// accelCheckRelocationCompatibility / accelRelocate.
RELOCATION_INFO accelGetRelocationInfo(pyoptix::DeviceContext context,
                                       OptixTraversableHandle handle) {
    // Value-initialized for consistency with the other out-structs in this
    // file (e.g. OptixAccelBufferSizes bufferSizes{}).
    RELOCATION_INFO info{};
    PYOPTIX_CHECK(
        optixAccelGetRelocationInfo(context.deviceContext, handle, &info));
    return info;
}
// Returns True when the accel described by 'info' may be relocated onto the
// given device context.
py::bool_ accelCheckRelocationCompatibility(pyoptix::DeviceContext context,
                                            const RELOCATION_INFO* info) {
    int is_compatible = 0;
    // The entry point lost its "Accel" prefix in OptiX 7.6.
    PYOPTIX_CHECK(
#if OPTIX_VERSION < 70600
        optixAccelCheckRelocationCompatibility(
#else
        optixCheckRelocationCompatibility(
#endif
            context.deviceContext, info, &is_compatible));
    return py::bool_(is_compatible);
}
// Wraps optixAccelRelocate: makes an accel that was copied to 'targetAccel'
// usable on this context, using the relocation info captured from the source
// accel, and returns the handle of the relocated accel.  The middle
// parameters differ by SDK version: pre-7.6 takes raw instance traversable
// handles, 7.6+ takes OptixRelocateInput descriptors.
OptixTraversableHandle accelRelocate(pyoptix::DeviceContext context,
                                     uintptr_t stream,
                                     const RELOCATION_INFO* info,
#if OPTIX_VERSION < 70600
                                     CUdeviceptr instanceTraversableHandles,
                                     size_t numInstanceTraversableHandles,
#else
                                     const OptixRelocateInput* relocateInputs,
                                     size_t numRelocateInputs,
#endif
                                     CUdeviceptr targetAccel,
                                     size_t targetAccelSizeInBytes
) {
    OptixTraversableHandle targetHandle;
    PYOPTIX_CHECK(optixAccelRelocate(
        context.deviceContext, reinterpret_cast<CUstream>(stream), info,
#if OPTIX_VERSION < 70600
        instanceTraversableHandles, numInstanceTraversableHandles,
#else
        relocateInputs, numRelocateInputs,
#endif
        targetAccel, targetAccelSizeInBytes, &targetHandle));
    return targetHandle;
}
// Copies the accel identified by 'inputHandle' into 'outputBuffer' in
// compacted form and returns the handle of the compacted accel.
OptixTraversableHandle accelCompact(pyoptix::DeviceContext context,
                                    uintptr_t stream,
                                    OptixTraversableHandle inputHandle,
                                    CUdeviceptr outputBuffer,
                                    size_t outputBufferSizeInBytes) {
    const CUstream cuda_stream = reinterpret_cast<CUstream>(stream);
    OptixTraversableHandle compacted_handle;
    PYOPTIX_CHECK(optixAccelCompact(context.deviceContext, cuda_stream,
                                    inputHandle, outputBuffer,
                                    outputBufferSizeInBytes,
                                    &compacted_handle));
    return compacted_handle;
}
// Wraps a raw device pointer as an OptixTraversableHandle of the given
// traversable type.
OptixTraversableHandle convertPointerToTraversableHandle(
    pyoptix::DeviceContext onDevice,
    CUdeviceptr pointer,
    OptixTraversableType traversableType) {
    OptixTraversableHandle handle;
    PYOPTIX_CHECK(optixConvertPointerToTraversableHandle(
        onDevice.deviceContext, pointer, traversableType, &handle));
    return handle;
}
#if OPTIX_VERSION >= 70300
// Creates a denoiser (7.3+ API: the model kind is fixed at creation time).
pyoptix::Denoiser denoiserCreate(pyoptix::DeviceContext context,
                                 OptixDenoiserModelKind modelKind,
                                 const OptixDenoiserOptions* options) {
    pyoptix::Denoiser denoiser;
    PYOPTIX_CHECK(optixDenoiserCreate(context.deviceContext, modelKind, options,
                                      &denoiser.denoiser));
    return denoiser;
}
#else
// Creates a denoiser (pre-7.3 API: the model is selected afterwards via
// denoiserSetModel).
pyoptix::Denoiser denoiserCreate(pyoptix::DeviceContext context,
                                 const OptixDenoiserOptions* options) {
    pyoptix::Denoiser denoiser;
    PYOPTIX_CHECK(optixDenoiserCreate(context.deviceContext, options,
                                      &denoiser.denoiser));
    return denoiser;
}
#endif
#if OPTIX_VERSION < 70300
// Selects the denoising model (built-in kind plus optional model data).
// Only exists pre-7.3; from 7.3 on the model kind is passed to
// denoiserCreate instead (see above).
void denoiserSetModel(pyoptix::Denoiser denoiser,
                      OptixDenoiserModelKind kind,
                      void* data,
                      size_t sizeInBytes) {
    PYOPTIX_CHECK(
        optixDenoiserSetModel(denoiser.denoiser, kind, data, sizeInBytes));
}
#endif
// Destroys the underlying OptiX denoiser handle.
void denoiserDestroy(pyoptix::Denoiser denoiser) {
    PYOPTIX_CHECK(optixDenoiserDestroy(denoiser.denoiser));
}
// Queries the state/scratch buffer sizes the denoiser requires for the
// given maximum output resolution.
OptixDenoiserSizes denoiserComputeMemoryResources(
    const pyoptix::Denoiser denoiser,
    unsigned int outputWidth,
    unsigned int outputHeight) {
    // Value-initialized for consistency with the other out-structs in this
    // file (e.g. OptixAccelBufferSizes bufferSizes{}).
    OptixDenoiserSizes returnSizes{};
    PYOPTIX_CHECK(optixDenoiserComputeMemoryResources(
        denoiser.denoiser, outputWidth, outputHeight, &returnSizes));
    return returnSizes;
}
// Initializes the denoiser state for inputs up to the given resolution,
// binding the caller-allocated state and scratch device buffers.
void denoiserSetup(pyoptix::Denoiser denoiser,
                   uintptr_t stream,
                   unsigned int inputWidth,
                   unsigned int inputHeight,
                   CUdeviceptr denoiserState,
                   size_t denoiserStateSizeInBytes,
                   CUdeviceptr scratch,
                   size_t scratchSizeInBytes) {
    const CUstream cuda_stream = reinterpret_cast<CUstream>(stream);
    PYOPTIX_CHECK(optixDenoiserSetup(denoiser.denoiser, cuda_stream,
                                     inputWidth, inputHeight, denoiserState,
                                     denoiserStateSizeInBytes, scratch,
                                     scratchSizeInBytes));
}
#if OPTIX_VERSION >= 70300
// Runs the denoiser (7.3+ API: one guide layer plus an array of
// input/output layers).
void denoiserInvoke(pyoptix::Denoiser denoiser,
                    uintptr_t stream,
                    const OptixDenoiserParams* params,
                    CUdeviceptr denoiserState,
                    size_t denoiserStateSizeInBytes,
                    const OptixDenoiserGuideLayer* guideLayer,
                    const OptixDenoiserLayer* layers,
                    unsigned int numLayers,
                    unsigned int inputOffsetX,
                    unsigned int inputOffsetY,
                    CUdeviceptr scratch,
                    size_t scratchSizeInBytes) {
    PYOPTIX_CHECK(optixDenoiserInvoke(
        denoiser.denoiser, reinterpret_cast<CUstream>(stream), params,
        denoiserState, denoiserStateSizeInBytes, guideLayer, layers,
        numLayers, inputOffsetX, inputOffsetY, scratch,
        scratchSizeInBytes));
}
#else
// Runs the denoiser (pre-7.3 API: flat input-layer array and a single
// output layer).
void denoiserInvoke(pyoptix::Denoiser denoiser,
                    uintptr_t stream,
                    const OptixDenoiserParams* params,
                    CUdeviceptr denoiserState,
                    size_t denoiserStateSizeInBytes,
                    const OptixImage2D* inputLayers,
                    unsigned int numInputLayers,
                    unsigned int inputOffsetX,
                    unsigned int inputOffsetY,
                    const OptixImage2D* outputLayer,
                    CUdeviceptr scratch,
                    size_t scratchSizeInBytes) {
    PYOPTIX_CHECK(optixDenoiserInvoke(
        denoiser.denoiser, reinterpret_cast<CUstream>(stream), params,
        denoiserState, denoiserStateSizeInBytes, inputLayers,
        numInputLayers, inputOffsetX, inputOffsetY, outputLayer, scratch,
        scratchSizeInBytes));
}
#endif
// Computes an intensity value for 'inputImage', writing the result to the
// device pointer 'outputIntensity'.
void denoiserComputeIntensity(pyoptix::Denoiser denoiser,
                              uintptr_t stream,
                              const OptixImage2D* inputImage,
                              CUdeviceptr outputIntensity,
                              CUdeviceptr scratch,
                              size_t scratchSizeInBytes) {
    const CUstream cuda_stream = reinterpret_cast<CUstream>(stream);
    PYOPTIX_CHECK(optixDenoiserComputeIntensity(denoiser.denoiser,
                                                cuda_stream, inputImage,
                                                outputIntensity, scratch,
                                                scratchSizeInBytes));
}
#if OPTIX_VERSION >= 70200
// Computes the average color of 'inputImage', writing the result to the
// device pointer 'outputAverageColor'.  Available from OptiX 7.2.
void denoiserComputeAverageColor(pyoptix::Denoiser denoiser,
                                 uintptr_t stream,
                                 const OptixImage2D* inputImage,
                                 CUdeviceptr outputAverageColor,
                                 CUdeviceptr scratch,
                                 size_t scratchSizeInBytes) {
    const CUstream cuda_stream = reinterpret_cast<CUstream>(stream);
    PYOPTIX_CHECK(optixDenoiserComputeAverageColor(denoiser.denoiser,
                                                   cuda_stream, inputImage,
                                                   outputAverageColor,
                                                   scratch,
                                                   scratchSizeInBytes));
}
#endif
// Runs the denoiser over the input split into overlapping tiles, via the
// optixUtilDenoiserInvokeTiled helper.
void denoiserInvokeTiled(pyoptix::Denoiser denoiser,
                         uintptr_t stream,
                         const OptixDenoiserParams* params,
                         CUdeviceptr denoiserState,
                         size_t denoiserStateSizeInBytes,
                         const OptixDenoiserGuideLayer* guideLayer,
                         const py::list& layers,
                         CUdeviceptr scratch,
                         size_t scratchSizeInBytes,
                         unsigned int overlapWindowSizeInPixels,
                         unsigned int tileWidth,
                         unsigned int tileHeight) {
    auto layer_vec = layers.cast<std::vector<OptixDenoiserLayer>>();
    const CUstream cuda_stream = reinterpret_cast<CUstream>(stream);
    PYOPTIX_CHECK(optixUtilDenoiserInvokeTiled(
        denoiser.denoiser, cuda_stream, params, denoiserState,
        denoiserStateSizeInBytes, guideLayer, layer_vec.data(),
        layer_vec.size(), scratch, scratchSizeInBytes,
        overlapWindowSizeInPixels, tileWidth, tileHeight));
}
//------------------------------------------------------------------------------
//
// optix util wrappers
//
//------------------------------------------------------------------------------
namespace util {
// Accumulates the stack-size usage of 'programGroup' into 'stackSizes'.
// OptiX 7.7+ additionally requires the owning pipeline.
void accumulateStackSizes(pyoptix::ProgramGroup programGroup,
                          pyoptix::StackSizes& stackSizes IF_OPTIX77(
                              COMMA pyoptix::Pipeline pipeline)) {
    PYOPTIX_CHECK(optixUtilAccumulateStackSizes(
        programGroup.programGroup,
        &stackSizes.ss IF_OPTIX77(COMMA pipeline.pipeline)));
}

// Returns (directCallableStackSizeFromTraversal,
// directCallableStackSizeFromState, continuationStackSize) computed from
// the accumulated stack sizes and the given trace/callable depths.
py::tuple computeStackSizes(const pyoptix::StackSizes& stackSizes,
                            unsigned int maxTraceDepth,
                            unsigned int maxCCDepth,
                            unsigned int maxDCDepth) {
    uint32_t directCallableStackSizeFromTraversal;
    uint32_t directCallableStackSizeFromState;
    uint32_t continuationStackSize;
    PYOPTIX_CHECK(optixUtilComputeStackSizes(
        &stackSizes.ss, maxTraceDepth, maxCCDepth, maxDCDepth,
        &directCallableStackSizeFromTraversal,
        &directCallableStackSizeFromState, &continuationStackSize));
    return py::make_tuple(directCallableStackSizeFromTraversal,
                          directCallableStackSizeFromState,
                          continuationStackSize);
}

// Like computeStackSizes, but with separate direct-callable depths for
// calls made from traversal vs. from RG/MS/CH/AH/IS state.
py::tuple computeStackSizesDCSplit(const pyoptix::StackSizes& stackSizes,
                                   unsigned int dssDCFromTraversal,
                                   unsigned int dssDCFromState,
                                   unsigned int maxTraceDepth,
                                   unsigned int maxCCDepth,
                                   unsigned int maxDCDepthFromTraversal,
                                   unsigned int maxDCDepthFromState) {
    unsigned int directCallableStackSizeFromTraversal;
    unsigned int directCallableStackSizeFromState;
    unsigned int continuationStackSize;
    PYOPTIX_CHECK(optixUtilComputeStackSizesDCSplit(
        &stackSizes.ss, dssDCFromTraversal, dssDCFromState, maxTraceDepth,
        maxCCDepth, maxDCDepthFromTraversal, maxDCDepthFromState,
        &directCallableStackSizeFromTraversal,
        &directCallableStackSizeFromState, &continuationStackSize));
    return py::make_tuple(directCallableStackSizeFromTraversal,
                          directCallableStackSizeFromState,
                          continuationStackSize);
}

// Like computeStackSizes, but with an explicit continuation-stack size for
// the continuation-callable call tree ('cssCCTree').
py::tuple computeStackSizesCssCCTree(const pyoptix::StackSizes& stackSizes,
                                     unsigned int cssCCTree,
                                     unsigned int maxTraceDepth,
                                     unsigned int maxDCDepth) {
    unsigned int directCallableStackSizeFromTraversal;
    unsigned int directCallableStackSizeFromState;
    unsigned int continuationStackSize;
    PYOPTIX_CHECK(optixUtilComputeStackSizesCssCCTree(
        &stackSizes.ss, cssCCTree, maxTraceDepth, maxDCDepth,
        &directCallableStackSizeFromTraversal,
        &directCallableStackSizeFromState, &continuationStackSize));
    return py::make_tuple(directCallableStackSizeFromTraversal,
                          directCallableStackSizeFromState,
                          continuationStackSize);
}

// Stack sizes for the simple path-tracer pattern: one raygen, two miss
// programs, and two closest-hit program lists.
py::tuple computeStackSizesSimplePathTracer(
    pyoptix::ProgramGroup programGroupRG,
    pyoptix::ProgramGroup programGroupMS1,
    py::list programGroupCH1,
    pyoptix::ProgramGroup programGroupMS2,
    py::list programGroupCH2 IF_OPTIX77(COMMA pyoptix::Pipeline pipeline)) {
    unsigned int directCallableStackSizeFromTraversal;
    unsigned int directCallableStackSizeFromState;
    unsigned int continuationStackSize;
    // Unwrap the pyoptix wrappers into raw OptixProgramGroup handles.
    const auto ch1_wrappers =
        programGroupCH1.cast<std::vector<pyoptix::ProgramGroup>>();
    const auto ch2_wrappers =
        programGroupCH2.cast<std::vector<pyoptix::ProgramGroup>>();
    std::vector<OptixProgramGroup> ch1;
    std::vector<OptixProgramGroup> ch2;
    ch1.reserve(ch1_wrappers.size());
    ch2.reserve(ch2_wrappers.size());
    // Both loops iterate by const reference (the second previously copied
    // each wrapper by value).
    for (const auto& pypg : ch1_wrappers) ch1.push_back(pypg.programGroup);
    for (const auto& pypg : ch2_wrappers) ch2.push_back(pypg.programGroup);
    PYOPTIX_CHECK(optixUtilComputeStackSizesSimplePathTracer(
        programGroupRG.programGroup, programGroupMS1.programGroup,
        ch1.data(), ch1.size(), programGroupMS2.programGroup, ch2.data(),
        ch2.size(), &directCallableStackSizeFromTraversal,
        &directCallableStackSizeFromState,
        &continuationStackSize IF_OPTIX77(COMMA pipeline.pipeline)));
    return py::make_tuple(directCallableStackSizeFromTraversal,
                          directCallableStackSizeFromState,
                          continuationStackSize);
}
} // end namespace util
//------------------------------------------------------------------------------
//
// optix API additions for python bindings
//
//------------------------------------------------------------------------------
// Serializes a host-side object into the byte layout OptiX expects on the
// device.  Supports MatrixMotionTransform and non-empty lists of Instance;
// anything else yields empty bytes.
// NOTE(review): the StaticTransform branch is empty and falls through to
// the empty-bytes return — confirm whether it should serialize its payload
// like MatrixMotionTransform does.
py::bytes getDeviceRepresentation(py::object obj) {
    if (py::isinstance<pyoptix::MatrixMotionTransform>(obj)) {
        const pyoptix::MatrixMotionTransform& xform =
            obj.cast<pyoptix::MatrixMotionTransform>();
        return xform.getBytes();
    }
    // TODO: pyoptix::SRTMotionTransform is not handled yet.
    if (py::isinstance<pyoptix::StaticTransform>(obj)) {
        // Intentionally(?) unhandled; falls through to empty bytes below.
    } else if (py::isinstance<py::list>(obj)) {
        const py::list& obj_list = obj.cast<py::list>();
        if (obj_list.size() == 0)
            throw std::runtime_error("Invalid input: Empty list");
        if (!py::isinstance<pyoptix::Instance>(obj_list[0]))
            throw std::runtime_error(
                "Input list contains unsupported object type");
        // Homogeneous list of Instance: pack the raw OptixInstance structs.
        std::vector<OptixInstance> instances;
        instances.reserve(obj_list.size());
        for (auto list_elem : obj_list) {
            if (!py::isinstance<pyoptix::Instance>(list_elem))
                throw std::runtime_error(
                    "Input list contains mixed object types");
            instances.push_back(
                list_elem.cast<pyoptix::Instance>().instance);
        }
        return makeBytes(instances);
    }
    return py::bytes("");
}
} // end namespace pyoptix
PYBIND11_MODULE(optix, m) {
m.doc() = R"pbdoc(
OptiX API
-----------------------
.. currentmodule:: optix
.. autosummary::
:toctree: _generate
)pbdoc";
cudaFree(0); // Init CUDA runtime
pyoptix::init();
//---------------------------------------------------------------------------
//
// Module Methods
//
//---------------------------------------------------------------------------
m.def("version", &pyoptix::version);
m.def("deviceContextCreate", &pyoptix::deviceContextCreate);
m.def("getErrorName", &pyoptix::getErrorName);
m.def("getErrorString", &pyoptix::getErrorString);
m.def("launch", &pyoptix::launch);
m.def("sbtRecordPackHeader", &pyoptix::sbtRecordPackHeader);
m.def("sbtRecordGetHeader", &pyoptix::sbtRecordGetHeader);
m.def("convertPointerToTraversableHandle",
&pyoptix::convertPointerToTraversableHandle);
m.def("getDeviceRepresentation", &pyoptix::getDeviceRepresentation);
//--------------------------------------------------------------------------
//
// Structs for interfacing with CUDA
//
//--------------------------------------------------------------------------
auto m_util = m.def_submodule("util", nullptr /*TODO: docstring*/);
m_util.def("accumulateStackSizes", &pyoptix::util::accumulateStackSizes);
m_util.def("computeStackSizes", &pyoptix::util::computeStackSizes);
m_util.def("computeStackSizesDCSplit",
&pyoptix::util::computeStackSizesDCSplit);
m_util.def("computeStackSizesCssCCTree",
&pyoptix::util::computeStackSizesCssCCTree);
m_util.def("computeStackSizesSimplePathTracer",
&pyoptix::util::computeStackSizesSimplePathTracer);
//--------------------------------------------------------------------------
//
// defines
//
//--------------------------------------------------------------------------
m.attr("SBT_RECORD_HEADER_SIZE") = OPTIX_SBT_RECORD_HEADER_SIZE;
m.attr("SBT_RECORD_ALIGNMENT") = OPTIX_SBT_RECORD_ALIGNMENT;
m.attr("ACCEL_BUFFER_BYTE_ALIGNMENT") = OPTIX_ACCEL_BUFFER_BYTE_ALIGNMENT;
m.attr("INSTANCE_BYTE_ALIGNMENT") = OPTIX_INSTANCE_BYTE_ALIGNMENT;
m.attr("AABB_BUFFER_BYTE_ALIGNMENT") = OPTIX_AABB_BUFFER_BYTE_ALIGNMENT;
m.attr("GEOMETRY_TRANSFORM_BYTE_ALIGNMENT") =
OPTIX_GEOMETRY_TRANSFORM_BYTE_ALIGNMENT;
m.attr("TRANSFORM_BYTE_ALIGNMENT") = OPTIX_TRANSFORM_BYTE_ALIGNMENT;
m.attr("COMPILE_DEFAULT_MAX_REGISTER_COUNT") =
OPTIX_COMPILE_DEFAULT_MAX_REGISTER_COUNT;
//--------------------------------------------------------------------------
//
// Enumerations
//
//--------------------------------------------------------------------------
py::enum_<OptixResult>(m, "Result", py::arithmetic())
.value("SUCCESS", OPTIX_SUCCESS)
.value("ERROR_INVALID_VALUE", OPTIX_ERROR_INVALID_VALUE)
.value("ERROR_HOST_OUT_OF_MEMORY", OPTIX_ERROR_HOST_OUT_OF_MEMORY)
.value("ERROR_INVALID_OPERATION", OPTIX_ERROR_INVALID_OPERATION)
.value("ERROR_FILE_IO_ERROR", OPTIX_ERROR_FILE_IO_ERROR)
.value("ERROR_INVALID_FILE_FORMAT", OPTIX_ERROR_INVALID_FILE_FORMAT)
.value("ERROR_DISK_CACHE_INVALID_PATH",
OPTIX_ERROR_DISK_CACHE_INVALID_PATH)
.value("ERROR_DISK_CACHE_PERMISSION_ERROR",
OPTIX_ERROR_DISK_CACHE_PERMISSION_ERROR)
.value("ERROR_DISK_CACHE_DATABASE_ERROR",
OPTIX_ERROR_DISK_CACHE_DATABASE_ERROR)
.value("ERROR_DISK_CACHE_INVALID_DATA",
OPTIX_ERROR_DISK_CACHE_INVALID_DATA)
.value("ERROR_LAUNCH_FAILURE", OPTIX_ERROR_LAUNCH_FAILURE)
.value("ERROR_INVALID_DEVICE_CONTEXT",
OPTIX_ERROR_INVALID_DEVICE_CONTEXT)
.value("ERROR_CUDA_NOT_INITIALIZED",
OPTIX_ERROR_CUDA_NOT_INITIALIZED)
#if OPTIX_VERSION >= 70200
.value("ERROR_VALIDATION_FAILURE", OPTIX_ERROR_VALIDATION_FAILURE)
#endif
#if OPTIX_VERSION >= 70700
.value("ERROR_INVALID_INPUT", OPTIX_ERROR_INVALID_INPUT)
#else
.value("ERROR_INVALID_PTX", OPTIX_ERROR_INVALID_PTX)
#endif
.value("ERROR_INVALID_LAUNCH_PARAMETER",
OPTIX_ERROR_INVALID_LAUNCH_PARAMETER)
.value("ERROR_INVALID_PAYLOAD_ACCESS",
OPTIX_ERROR_INVALID_PAYLOAD_ACCESS)
.value("ERROR_INVALID_ATTRIBUTE_ACCESS",
OPTIX_ERROR_INVALID_ATTRIBUTE_ACCESS)
.value("ERROR_INVALID_FUNCTION_USE",
OPTIX_ERROR_INVALID_FUNCTION_USE)
.value("ERROR_INVALID_FUNCTION_ARGUMENTS",
OPTIX_ERROR_INVALID_FUNCTION_ARGUMENTS)
.value("ERROR_PIPELINE_OUT_OF_CONSTANT_MEMORY",
OPTIX_ERROR_PIPELINE_OUT_OF_CONSTANT_MEMORY)
.value("ERROR_PIPELINE_LINK_ERROR", OPTIX_ERROR_PIPELINE_LINK_ERROR)
.value("ERROR_INTERNAL_COMPILER_ERROR",
OPTIX_ERROR_INTERNAL_COMPILER_ERROR)
.value("ERROR_DENOISER_MODEL_NOT_SET",
OPTIX_ERROR_DENOISER_MODEL_NOT_SET)
.value("ERROR_DENOISER_NOT_INITIALIZED",
OPTIX_ERROR_DENOISER_NOT_INITIALIZED)
#if OPTIX_VERSION >= 70600
.value("ERROR_NOT_COMPATIBLE", OPTIX_ERROR_NOT_COMPATIBLE)
#else
.value("ERROR_ACCEL_NOT_COMPATIBLE",
OPTIX_ERROR_ACCEL_NOT_COMPATIBLE)
#endif
.value("ERROR_NOT_SUPPORTED", OPTIX_ERROR_NOT_SUPPORTED)
.value("ERROR_UNSUPPORTED_ABI_VERSION",
OPTIX_ERROR_UNSUPPORTED_ABI_VERSION)
.value("ERROR_FUNCTION_TABLE_SIZE_MISMATCH",
OPTIX_ERROR_FUNCTION_TABLE_SIZE_MISMATCH)
.value("ERROR_INVALID_ENTRY_FUNCTION_OPTIONS",
OPTIX_ERROR_INVALID_ENTRY_FUNCTION_OPTIONS)
.value("ERROR_LIBRARY_NOT_FOUND", OPTIX_ERROR_LIBRARY_NOT_FOUND)
.value("ERROR_ENTRY_SYMBOL_NOT_FOUND",
OPTIX_ERROR_ENTRY_SYMBOL_NOT_FOUND)
#if OPTIX_VERSION >= 70200
.value("ERROR_LIBRARY_UNLOAD_FAILURE",
OPTIX_ERROR_LIBRARY_UNLOAD_FAILURE)
#endif
.value("ERROR_CUDA_ERROR", OPTIX_ERROR_CUDA_ERROR)
.value("ERROR_INTERNAL_ERROR", OPTIX_ERROR_INTERNAL_ERROR)
.value("ERROR_UNKNOWN", OPTIX_ERROR_UNKNOWN)
.export_values();
py::enum_<OptixDeviceProperty>(m, "DeviceProperty", py::arithmetic())
.value("DEVICE_PROPERTY_LIMIT_MAX_TRACE_DEPTH",
OPTIX_DEVICE_PROPERTY_LIMIT_MAX_TRACE_DEPTH)
.value("DEVICE_PROPERTY_LIMIT_MAX_TRAVERSABLE_GRAPH_DEPTH",
OPTIX_DEVICE_PROPERTY_LIMIT_MAX_TRAVERSABLE_GRAPH_DEPTH)
.value("DEVICE_PROPERTY_LIMIT_MAX_PRIMITIVES_PER_GAS",
OPTIX_DEVICE_PROPERTY_LIMIT_MAX_PRIMITIVES_PER_GAS)
.value("DEVICE_PROPERTY_LIMIT_MAX_INSTANCES_PER_IAS",
OPTIX_DEVICE_PROPERTY_LIMIT_MAX_INSTANCES_PER_IAS)
.value("DEVICE_PROPERTY_RTCORE_VERSION",
OPTIX_DEVICE_PROPERTY_RTCORE_VERSION)
.value("DEVICE_PROPERTY_LIMIT_MAX_INSTANCE_ID",
OPTIX_DEVICE_PROPERTY_LIMIT_MAX_INSTANCE_ID)
.value("DEVICE_PROPERTY_LIMIT_NUM_BITS_INSTANCE_VISIBILITY_MASK",
OPTIX_DEVICE_PROPERTY_LIMIT_NUM_BITS_INSTANCE_VISIBILITY_MASK)
.value("DEVICE_PROPERTY_LIMIT_MAX_SBT_RECORDS_PER_GAS",
OPTIX_DEVICE_PROPERTY_LIMIT_MAX_SBT_RECORDS_PER_GAS)
.value("DEVICE_PROPERTY_LIMIT_MAX_SBT_OFFSET",
OPTIX_DEVICE_PROPERTY_LIMIT_MAX_SBT_OFFSET)
.export_values();
#if OPTIX_VERSION >= 70200
py::enum_<OptixDeviceContextValidationMode>(
m, "DeviceContextValidationMode", py::arithmetic())
.value("DEVICE_CONTEXT_VALIDATION_MODE_OFF",
OPTIX_DEVICE_CONTEXT_VALIDATION_MODE_OFF)
.value("DEVICE_CONTEXT_VALIDATION_MODE_ALL",
OPTIX_DEVICE_CONTEXT_VALIDATION_MODE_ALL)
.export_values();
#endif
py::enum_<OptixGeometryFlags>(m, "GeometryFlags", py::arithmetic())
.value("GEOMETRY_FLAG_NONE", OPTIX_GEOMETRY_FLAG_NONE)
.value("GEOMETRY_FLAG_DISABLE_ANYHIT",
OPTIX_GEOMETRY_FLAG_DISABLE_ANYHIT)
.value("GEOMETRY_FLAG_REQUIRE_SINGLE_ANYHIT_CALL",
OPTIX_GEOMETRY_FLAG_REQUIRE_SINGLE_ANYHIT_CALL)
.export_values();
py::enum_<OptixHitKind>(m, "HitKind", py::arithmetic())
.value("HIT_KIND_TRIANGLE_FRONT_FACE",
OPTIX_HIT_KIND_TRIANGLE_FRONT_FACE)
.value("HIT_KIND_TRIANGLE_BACK_FACE",
OPTIX_HIT_KIND_TRIANGLE_BACK_FACE)
.export_values();
py::enum_<OptixIndicesFormat>(m, "IndicesFormat", py::arithmetic())
IF_OPTIX71(.value("INDICES_FORMAT_NONE", OPTIX_INDICES_FORMAT_NONE))
.value("INDICES_FORMAT_UNSIGNED_SHORT3",
OPTIX_INDICES_FORMAT_UNSIGNED_SHORT3)
.value("INDICES_FORMAT_UNSIGNED_INT3",
OPTIX_INDICES_FORMAT_UNSIGNED_INT3)
.export_values();
py::enum_<OptixVertexFormat>(m, "VertexFormat", py::arithmetic())
IF_OPTIX71(.value("VERTEX_FORMAT_NONE", OPTIX_VERTEX_FORMAT_NONE))
.value("VERTEX_FORMAT_FLOAT3", OPTIX_VERTEX_FORMAT_FLOAT3)
.value("VERTEX_FORMAT_FLOAT2", OPTIX_VERTEX_FORMAT_FLOAT2)
.value("VERTEX_FORMAT_HALF3", OPTIX_VERTEX_FORMAT_HALF3)
.value("VERTEX_FORMAT_HALF2", OPTIX_VERTEX_FORMAT_HALF2)
.value("VERTEX_FORMAT_SNORM16_3",
OPTIX_VERTEX_FORMAT_SNORM16_3)
.value("VERTEX_FORMAT_SNORM16_2",
OPTIX_VERTEX_FORMAT_SNORM16_2)
.export_values();
#if OPTIX_VERSION >= 70100
py::enum_<OptixTransformFormat>(m, "TransformFormat", py::arithmetic())
.value("TRANSFORM_FORMAT_NONE", OPTIX_TRANSFORM_FORMAT_NONE)
.value("TRANSFORM_FORMAT_MATRIX_FLOAT12",
OPTIX_TRANSFORM_FORMAT_MATRIX_FLOAT12)
.export_values();
py::enum_<OptixPrimitiveType>(m, "PrimitiveType", py::arithmetic())
.value("PRIMITIVE_TYPE_CUSTOM", OPTIX_PRIMITIVE_TYPE_CUSTOM)
.value("PRIMITIVE_TYPE_ROUND_QUADRATIC_BSPLINE",
OPTIX_PRIMITIVE_TYPE_ROUND_QUADRATIC_BSPLINE)
.value("PRIMITIVE_TYPE_ROUND_CUBIC_BSPLINE",
OPTIX_PRIMITIVE_TYPE_ROUND_CUBIC_BSPLINE)
.value("PRIMITIVE_TYPE_ROUND_LINEAR",
OPTIX_PRIMITIVE_TYPE_ROUND_LINEAR)
.value("PRIMITIVE_TYPE_TRIANGLE", OPTIX_PRIMITIVE_TYPE_TRIANGLE)
.export_values();
py::enum_<OptixPrimitiveTypeFlags>(m, "PmitiveTypeFlags", py::arithmetic())
.value("PRIMITIVE_TYPE_FLAGS_CUSTOM",
OPTIX_PRIMITIVE_TYPE_FLAGS_CUSTOM)
.value("PRIMITIVE_TYPE_FLAGS_ROUND_QUADRATIC_BSPLINE",
OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_QUADRATIC_BSPLINE)
.value("PRIMITIVE_TYPE_FLAGS_ROUND_CUBIC_BSPLINE",
OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_CUBIC_BSPLINE)
.value("PRIMITIVE_TYPE_FLAGS_ROUND_LINEAR",
OPTIX_PRIMITIVE_TYPE_FLAGS_ROUND_LINEAR)
.value("PRIMITIVE_TYPE_FLAGS_TRIANGLE",
OPTIX_PRIMITIVE_TYPE_FLAGS_TRIANGLE)
.export_values();
#endif
py::enum_<OptixBuildInputType>(m, "BuildInputType", py::arithmetic())
.value("BUILD_INPUT_TYPE_TRIANGLES",
OPTIX_BUILD_INPUT_TYPE_TRIANGLES)
.value("BUILD_INPUT_TYPE_CUSTOM_PRIMITIVES",
OPTIX_BUILD_INPUT_TYPE_CUSTOM_PRIMITIVES)
.value("BUILD_INPUT_TYPE_INSTANCES",
OPTIX_BUILD_INPUT_TYPE_INSTANCES)
.value("BUILD_INPUT_TYPE_INSTANCE_POINTERS",
OPTIX_BUILD_INPUT_TYPE_INSTANCE_POINTERS)
IF_OPTIX71(.value("BUILD_INPUT_TYPE_CURVES",
OPTIX_BUILD_INPUT_TYPE_CURVES))
.export_values();
py::enum_<OptixInstanceFlags>(m, "InstanceFlags", py::arithmetic())
.value("INSTANCE_FLAG_NONE", OPTIX_INSTANCE_FLAG_NONE)
.value("INSTANCE_FLAG_DISABLE_TRIANGLE_FACE_CULLING",
OPTIX_INSTANCE_FLAG_DISABLE_TRIANGLE_FACE_CULLING)
.value("INSTANCE_FLAG_FLIP_TRIANGLE_FACING",
OPTIX_INSTANCE_FLAG_FLIP_TRIANGLE_FACING)
.value("INSTANCE_FLAG_DISABLE_ANYHIT",
OPTIX_INSTANCE_FLAG_DISABLE_ANYHIT)
.value("INSTANCE_FLAG_ENFORCE_ANYHIT",
OPTIX_INSTANCE_FLAG_ENFORCE_ANYHIT)
#if OPTIX_VERSION < 70400
.value("INSTANCE_FLAG_DISABLE_TRANSFORM",
OPTIX_INSTANCE_FLAG_DISABLE_TRANSFORM)
#endif
.export_values();
py::enum_<OptixBuildFlags>(m, "BuildFlags", py::arithmetic())
.value("BUILD_FLAG_NONE", OPTIX_BUILD_FLAG_NONE)
.value("BUILD_FLAG_ALLOW_UPDATE", OPTIX_BUILD_FLAG_ALLOW_UPDATE)
.value("BUILD_FLAG_ALLOW_COMPACTION",
OPTIX_BUILD_FLAG_ALLOW_COMPACTION)
.value("BUILD_FLAG_PREFER_FAST_TRACE",
OPTIX_BUILD_FLAG_PREFER_FAST_TRACE)
.value("BUILD_FLAG_PREFER_FAST_BUILD",
OPTIX_BUILD_FLAG_PREFER_FAST_BUILD)
.value("BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS",
OPTIX_BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS)
.export_values();
py::enum_<OptixBuildOperation>(m, "BuildOperation", py::arithmetic())
.value("BUILD_OPERATION_BUILD", OPTIX_BUILD_OPERATION_BUILD)
.value("BUILD_OPERATION_UPDATE", OPTIX_BUILD_OPERATION_UPDATE)
.export_values();
py::enum_<OptixMotionFlags>(m, "MotionFlags", py::arithmetic())
.value("MOTION_FLAG_NONE", OPTIX_MOTION_FLAG_NONE)
.value("MOTION_FLAG_START_VANISH", OPTIX_MOTION_FLAG_START_VANISH)
.value("MOTION_FLAG_END_VANISH", OPTIX_MOTION_FLAG_END_VANISH)
.export_values();
py::enum_<OptixAccelPropertyType>(m, "AccelPropertyType", py::arithmetic())
.value("PROPERTY_TYPE_COMPACTED_SIZE",
OPTIX_PROPERTY_TYPE_COMPACTED_SIZE)
.value("PROPERTY_TYPE_AABBS", OPTIX_PROPERTY_TYPE_AABBS)
.export_values();
py::enum_<OptixTraversableType>(m, "TraversableType", py::arithmetic())
.value("TRAVERSABLE_TYPE_STATIC_TRANSFORM",
OPTIX_TRAVERSABLE_TYPE_STATIC_TRANSFORM)
.value("TRAVERSABLE_TYPE_MATRIX_MOTION_TRANSFORM",
OPTIX_TRAVERSABLE_TYPE_MATRIX_MOTION_TRANSFORM)
.value("TRAVERSABLE_TYPE_SRT_MOTION_TRANSFORM",
OPTIX_TRAVERSABLE_TYPE_SRT_MOTION_TRANSFORM)
.export_values();
py::enum_<OptixPixelFormat>(m, "PixelFormat", py::arithmetic())
.value("PIXEL_FORMAT_HALF3", OPTIX_PIXEL_FORMAT_HALF3)
.value("PIXEL_FORMAT_HALF4", OPTIX_PIXEL_FORMAT_HALF4)
.value("PIXEL_FORMAT_FLOAT3", OPTIX_PIXEL_FORMAT_FLOAT3)
.value("PIXEL_FORMAT_FLOAT4", OPTIX_PIXEL_FORMAT_FLOAT4)
.value("PIXEL_FORMAT_UCHAR3", OPTIX_PIXEL_FORMAT_UCHAR3)
.value("PIXEL_FORMAT_UCHAR4", OPTIX_PIXEL_FORMAT_UCHAR4)
.export_values();
py::enum_<OptixDenoiserModelKind>(m, "DenoiserModelKind", py::arithmetic())
#if OPTIX_VERSION < 70300
.value("DENOISER_MODEL_KIND_USER", OPTIX_DENOISER_MODEL_KIND_USER)
#endif
.value("DENOISER_MODEL_KIND_LDR", OPTIX_DENOISER_MODEL_KIND_LDR)
.value("DENOISER_MODEL_KIND_HDR", OPTIX_DENOISER_MODEL_KIND_HDR)
.value("DENOISER_MODEL_KIND_AOV", OPTIX_DENOISER_MODEL_KIND_AOV)
IF_OPTIX73(.value("DENOISER_MODEL_KIND_TEMPORAL",
OPTIX_DENOISER_MODEL_KIND_AOV))
IF_OPTIX74(.value("DENOISER_MODEL_KIND_TEMPORAL_"
"AOV",
OPTIX_DENOISER_MODEL_KIND_AOV))
.export_values();
py::enum_<OptixRayFlags>(m, "RayFlags", py::arithmetic())
.value("RAY_FLAG_NONE", OPTIX_RAY_FLAG_NONE)
.value("RAY_FLAG_DISABLE_ANYHIT", OPTIX_RAY_FLAG_DISABLE_ANYHIT)
.value("RAY_FLAG_ENFORCE_ANYHIT", OPTIX_RAY_FLAG_ENFORCE_ANYHIT)
.value("RAY_FLAG_TERMINATE_ON_FIRST_HIT",
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT)
.value("RAY_FLAG_DISABLE_CLOSESTHIT",
OPTIX_RAY_FLAG_DISABLE_CLOSESTHIT)
.value("RAY_FLAG_CULL_BACK_FACING_TRIANGLES",
OPTIX_RAY_FLAG_CULL_BACK_FACING_TRIANGLES)
.value("RAY_FLAG_CULL_FRONT_FACING_TRIANGLES",
OPTIX_RAY_FLAG_CULL_FRONT_FACING_TRIANGLES)
.value("RAY_FLAG_CULL_DISABLED_ANYHIT",
OPTIX_RAY_FLAG_CULL_DISABLED_ANYHIT)
.value("RAY_FLAG_CULL_ENFORCED_ANYHIT",
OPTIX_RAY_FLAG_CULL_ENFORCED_ANYHIT)
.export_values();
py::enum_<OptixTransformType>(m, "TransformType", py::arithmetic())
.value("TRANSFORM_TYPE_NONE", OPTIX_TRANSFORM_TYPE_NONE)
.value("TRANSFORM_TYPE_STATIC_TRANSFORM",
OPTIX_TRANSFORM_TYPE_STATIC_TRANSFORM)
.value("TRANSFORM_TYPE_MATRIX_MOTION_TRANSFORM",
OPTIX_TRANSFORM_TYPE_MATRIX_MOTION_TRANSFORM)
.value("TRANSFORM_TYPE_SRT_MOTION_TRANSFORM",
OPTIX_TRANSFORM_TYPE_SRT_MOTION_TRANSFORM)
.value("TRANSFORM_TYPE_INSTANCE", OPTIX_TRANSFORM_TYPE_INSTANCE)
.export_values();
py::enum_<OptixTraversableGraphFlags>(m, "TraversableGraphFlags",
py::arithmetic())
.value("TRAVERSABLE_GRAPH_FLAG_ALLOW_ANY",
OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_ANY)
.value("TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS",
OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS)
.value("TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_LEVEL_INSTANCING",
OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_LEVEL_INSTANCING)
.export_values();
py::enum_<OptixCompileOptimizationLevel>(m, "CompileOptimizationLevel",
py::arithmetic())
.value("COMPILE_OPTIMIZATION_DEFAULT",
OPTIX_COMPILE_OPTIMIZATION_DEFAULT)
.value("COMPILE_OPTIMIZATION_LEVEL_0",
OPTIX_COMPILE_OPTIMIZATION_LEVEL_0)
.value("COMPILE_OPTIMIZATION_LEVEL_1",
OPTIX_COMPILE_OPTIMIZATION_LEVEL_1)
.value("COMPILE_OPTIMIZATION_LEVEL_2",
OPTIX_COMPILE_OPTIMIZATION_LEVEL_2)
.value("COMPILE_OPTIMIZATION_LEVEL_3",
OPTIX_COMPILE_OPTIMIZATION_LEVEL_3)
.export_values();
py::enum_<OptixCompileDebugLevel>(m, "CompileDebugLevel", py::arithmetic())
IF_OPTIX71(.value("COMPILE_DEBUG_LEVEL_DEFAULT",
OPTIX_COMPILE_DEBUG_LEVEL_DEFAULT))
.value("COMPILE_DEBUG_LEVEL_NONE",
OPTIX_COMPILE_DEBUG_LEVEL_NONE)
#if OPTIX_VERSION < 70400
.value("COMPILE_DEBUG_LEVEL_LINEINFO",
OPTIX_COMPILE_DEBUG_LEVEL_LINEINFO)
.value("COMPILE_DEBUG_LEVEL_FULL",
OPTIX_COMPILE_DEBUG_LEVEL_FULL)
#else
.value("COMPILE_DEBUG_LEVEL_MINIMAL",
OPTIX_COMPILE_DEBUG_LEVEL_MINIMAL)
.value("COMPILE_DEBUG_LEVEL_MODERATE",
OPTIX_COMPILE_DEBUG_LEVEL_MODERATE)
.value("COMPILE_DEBUG_LEVEL_FULL",
OPTIX_COMPILE_DEBUG_LEVEL_FULL)
#endif
.export_values();
#if OPTIX_VERSION >= 70400
py::enum_<OptixPayloadTypeID>(m, "PayloadTypeID", py::arithmetic())
.value("PAYLOAD_TYPE_DEFAULT", OPTIX_PAYLOAD_TYPE_DEFAULT)
.value("PAYLOAD_TYPE_ID_0", OPTIX_PAYLOAD_TYPE_ID_0)
.value("PAYLOAD_TYPE_ID_1", OPTIX_PAYLOAD_TYPE_ID_1)
.value("PAYLOAD_TYPE_ID_2", OPTIX_PAYLOAD_TYPE_ID_2)
.value("PAYLOAD_TYPE_ID_3", OPTIX_PAYLOAD_TYPE_ID_3)
.value("PAYLOAD_TYPE_ID_4", OPTIX_PAYLOAD_TYPE_ID_4)
.value("PAYLOAD_TYPE_ID_5", OPTIX_PAYLOAD_TYPE_ID_5)
.value("PAYLOAD_TYPE_ID_6", OPTIX_PAYLOAD_TYPE_ID_6)
.value("PAYLOAD_TYPE_ID_7", OPTIX_PAYLOAD_TYPE_ID_7)
.export_values();
py::enum_<OptixPayloadSemantics>(m, "PayloadSemantics", py::arithmetic())
.value("PAYLOAD_SEMANTICS_TRACE_CALLER_NONE",
OPTIX_PAYLOAD_SEMANTICS_TRACE_CALLER_NONE)
.value("PAYLOAD_SEMANTICS_TRACE_CALLER_READ",
OPTIX_PAYLOAD_SEMANTICS_TRACE_CALLER_READ)
.value("PAYLOAD_SEMANTICS_TRACE_CALLER_WRITE",
OPTIX_PAYLOAD_SEMANTICS_TRACE_CALLER_WRITE)
.value("PAYLOAD_SEMANTICS_TRACE_CALLER_READ_WRITE",
OPTIX_PAYLOAD_SEMANTICS_TRACE_CALLER_READ_WRITE)
.value("PAYLOAD_SEMANTICS_CH_NONE", OPTIX_PAYLOAD_SEMANTICS_CH_NONE)
.value("PAYLOAD_SEMANTICS_CH_READ", OPTIX_PAYLOAD_SEMANTICS_CH_READ)
.value("PAYLOAD_SEMANTICS_CH_WRITE",
OPTIX_PAYLOAD_SEMANTICS_CH_WRITE)
.value("PAYLOAD_SEMANTICS_CH_READ_WRITE",
OPTIX_PAYLOAD_SEMANTICS_CH_READ_WRITE)
.value("PAYLOAD_SEMANTICS_MS_NONE", OPTIX_PAYLOAD_SEMANTICS_MS_NONE)
.value("PAYLOAD_SEMANTICS_MS_READ", OPTIX_PAYLOAD_SEMANTICS_MS_READ)
.value("PAYLOAD_SEMANTICS_MS_WRITE",
OPTIX_PAYLOAD_SEMANTICS_MS_WRITE)
.value("PAYLOAD_SEMANTICS_MS_READ_WRITE",
OPTIX_PAYLOAD_SEMANTICS_MS_WRITE)
.value("PAYLOAD_SEMANTICS_AH_NONE", OPTIX_PAYLOAD_SEMANTICS_AH_NONE)
.value("PAYLOAD_SEMANTICS_AH_READ", OPTIX_PAYLOAD_SEMANTICS_AH_READ)
.value("PAYLOAD_SEMANTICS_AH_WRITE",
OPTIX_PAYLOAD_SEMANTICS_AH_WRITE)
.value("PAYLOAD_SEMANTICS_AH_READ_WRITE",
OPTIX_PAYLOAD_SEMANTICS_AH_READ_WRITE)
.value("PAYLOAD_SEMANTICS_IS_NONE", OPTIX_PAYLOAD_SEMANTICS_IS_NONE)
.value("PAYLOAD_SEMANTICS_IS_READ", OPTIX_PAYLOAD_SEMANTICS_IS_READ)
.value("PAYLOAD_SEMANTICS_IS_WRITE",
OPTIX_PAYLOAD_SEMANTICS_IS_WRITE)
.value("PAYLOAD_SEMANTICS_IS_READ_WRITE",
OPTIX_PAYLOAD_SEMANTICS_IS_READ_WRITE)
.export_values();
#endif // OPTIX_VERSION >= 70400
py::enum_<OptixProgramGroupKind>(m, "ProgramGroupKind", py::arithmetic())
.value("PROGRAM_GROUP_KIND_RAYGEN", OPTIX_PROGRAM_GROUP_KIND_RAYGEN)
.value("PROGRAM_GROUP_KIND_MISS", OPTIX_PROGRAM_GROUP_KIND_MISS)
.value("PROGRAM_GROUP_KIND_EXCEPTION",
OPTIX_PROGRAM_GROUP_KIND_EXCEPTION)
.value("PROGRAM_GROUP_KIND_HITGROUP",
OPTIX_PROGRAM_GROUP_KIND_HITGROUP)
.value("PROGRAM_GROUP_KIND_CALLABLES",
OPTIX_PROGRAM_GROUP_KIND_CALLABLES)
.export_values();
py::enum_<OptixProgramGroupFlags>(m, "ProgramGroupFlags", py::arithmetic())
.value("PROGRAM_GROUP_FLAGS_NONE", OPTIX_PROGRAM_GROUP_FLAGS_NONE)
.export_values();
py::enum_<OptixExceptionCodes>(m, "ExceptionCodes", py::arithmetic())
.value("EXCEPTION_CODE_STACK_OVERFLOW",
OPTIX_EXCEPTION_CODE_STACK_OVERFLOW)
.value("EXCEPTION_CODE_TRACE_DEPTH_EXCEEDED",
OPTIX_EXCEPTION_CODE_TRACE_DEPTH_EXCEEDED)
.value("EXCEPTION_CODE_TRAVERSAL_DEPTH_EXCEEDED",
OPTIX_EXCEPTION_CODE_TRAVERSAL_DEPTH_EXCEEDED)
.value("EXCEPTION_CODE_TRAVERSAL_INVALID_TRAVERSABLE",
OPTIX_EXCEPTION_CODE_TRAVERSAL_INVALID_TRAVERSABLE)
.value("EXCEPTION_CODE_TRAVERSAL_INVALID_MISS_SBT",
OPTIX_EXCEPTION_CODE_TRAVERSAL_INVALID_MISS_SBT)
.value("EXCEPTION_CODE_TRAVERSAL_INVALID_HIT_SBT",
OPTIX_EXCEPTION_CODE_TRAVERSAL_INVALID_HIT_SBT)
#if OPTIX_VERSION >= 70100
.value("EXCEPTION_CODE_UNSUPPORTED_PRIMITIVE_TYPE",
OPTIX_EXCEPTION_CODE_UNSUPPORTED_PRIMITIVE_TYPE)
.value("EXCEPTION_CODE_INVALID_RAY",
OPTIX_EXCEPTION_CODE_INVALID_RAY)
.value("EXCEPTION_CODE_CALLABLE_PARAMETER_MISMATCH",
OPTIX_EXCEPTION_CODE_CALLABLE_PARAMETER_MISMATCH)
.value("EXCEPTION_CODE_BUILTIN_IS_MISMATCH",
OPTIX_EXCEPTION_CODE_BUILTIN_IS_MISMATCH)
.value("EXCEPTION_CODE_UNSUPPORTED_SINGLE_LEVEL_GAS",
OPTIX_EXCEPTION_CODE_UNSUPPORTED_SINGLE_LEVEL_GAS)
#endif
#if OPTIX_VERSION >= 70200
.value("EXCEPTION_CODE_CALLABLE_INVALID_SBT",
OPTIX_EXCEPTION_CODE_CALLABLE_INVALID_SBT)
.value("EXCEPTION_CODE_CALLABLE_NO_DC_SBT_RECORD",
OPTIX_EXCEPTION_CODE_CALLABLE_NO_DC_SBT_RECORD)
.value("EXCEPTION_CODE_CALLABLE_NO_CC_SBT_RECORD",
OPTIX_EXCEPTION_CODE_CALLABLE_NO_CC_SBT_RECORD)
#endif
.export_values();
py::enum_<OptixExceptionFlags>(m, "ExceptionFlags", py::arithmetic())
.value("EXCEPTION_FLAG_NONE", OPTIX_EXCEPTION_FLAG_NONE)
.value("EXCEPTION_FLAG_STACK_OVERFLOW",
OPTIX_EXCEPTION_FLAG_STACK_OVERFLOW)
.value("EXCEPTION_FLAG_TRACE_DEPTH",
OPTIX_EXCEPTION_FLAG_TRACE_DEPTH)
.value("EXCEPTION_FLAG_USER", OPTIX_EXCEPTION_FLAG_USER)
.value("EXCEPTION_FLAG_DEBUG", OPTIX_EXCEPTION_FLAG_DEBUG)
.export_values();
py::enum_<OptixQueryFunctionTableOptions>(m, "QueryFunctionTableOptions",
py::arithmetic())
.value("QUERY_FUNCTION_TABLE_OPTION_DUMMY",
OPTIX_QUERY_FUNCTION_TABLE_OPTION_DUMMY)
.export_values();
    //---------------------------------------------------------------------------
    //
    // Opaque types
    //
    //---------------------------------------------------------------------------
    // Thin handle wrappers around the OptiX opaque object types. Each method
    // forwards to the corresponding pyoptix free function; operator== is
    // exposed so handles can be compared from Python.
    py::class_<pyoptix::DeviceContext>(m, "DeviceContext")
        .def("destroy", &pyoptix::deviceContextDestroy)
        .def("getProperty", &pyoptix::deviceContextGetProperty)
        .def("setLogCallback", &pyoptix::deviceContextSetLogCallback)
        .def("setCacheEnabled", &pyoptix::deviceContextSetCacheEnabled)
        .def("setCacheLocation", &pyoptix::deviceContextSetCacheLocation)
        .def("setCacheDatabaseSizes",
             &pyoptix::deviceContextSetCacheDatabaseSizes)
        .def("getCacheEnabled", &pyoptix::deviceContextGetCacheEnabled)
        .def("getCacheLocation", &pyoptix::deviceContextGetCacheLocation)
        .def("getCacheDatabaseSizes",
             &pyoptix::deviceContextGetCacheDatabaseSizes)
        .def("pipelineCreate", &pyoptix::pipelineCreate)
// The module-creation entry point was renamed between OptiX 7.6 and 7.7,
// hence the version split below.
#if OPTIX_VERSION < 70700
        .def("moduleCreateFromPTX", &pyoptix::moduleCreateFromPTX)
#else
        .def("moduleCreate", &pyoptix::moduleCreate)
#endif
        IF_OPTIX71(.def("builtinISModuleGet",
                        &pyoptix::builtinISModuleGet))
        .def("programGroupCreate", &pyoptix::programGroupCreate,
             py::arg("programDescriptions") = py::list()
                 IF_OPTIX74(COMMA py::arg("options") = py::none()))
        .def("accelComputeMemoryUsage", &pyoptix::accelComputeMemoryUsage)
        .def("accelBuild", &pyoptix::accelBuild)
        .def("accelGetRelocationInfo", &pyoptix::accelGetRelocationInfo)
        .def("accelCheckRelocationCompatibility",
             &pyoptix::accelCheckRelocationCompatibility)
        .def("accelRelocate", &pyoptix::accelRelocate)
        .def("accelCompact", &pyoptix::accelCompact)
        .def("denoiserCreate", &pyoptix::denoiserCreate)
        .def(py::self == py::self);
    py::class_<pyoptix::Module>(m, "Module")
        .def("destroy", &pyoptix::moduleDestroy)
        .def(py::self == py::self);
    py::class_<pyoptix::ProgramGroup>(m, "ProgramGroup")
        .def("getStackSize", &pyoptix::programGroupGetStackSize)
        .def("destroy", &pyoptix::programGroupDestroy)
        .def(py::self == py::self);
    py::class_<pyoptix::Pipeline>(m, "Pipeline")
        .def("destroy", &pyoptix::pipelineDestroy)
        .def("setStackSize", &pyoptix::pipelineSetStackSize)
        .def(py::self == py::self);
    py::class_<pyoptix::Denoiser>(m, "Denoiser")
// setModel only exists before OptiX 7.3.
#if OPTIX_VERSION < 70300
        .def("setModel", &pyoptix::denoiserSetModel)
#endif
        .def("destroy", &pyoptix::denoiserDestroy)
        .def("computeMemoryResources",
             &pyoptix::denoiserComputeMemoryResources)
        .def("setup", &pyoptix::denoiserSetup)
        .def("invoke", &pyoptix::denoiserInvoke)
        .def("computeIntensity", &pyoptix::denoiserComputeIntensity)
        .def("invokeTiled", &pyoptix::denoiserInvokeTiled)
        IF_OPTIX73(.def("computeAverageColor",
                        &pyoptix::denoiserComputeAverageColor))
        .def(py::self == py::self);
    //---------------------------------------------------------------------------
    //
    // Param types
    //
    //---------------------------------------------------------------------------
    // Wrapper around OptixDeviceContextOptions. The Python log callback
    // object is kept alive on the wrapper; the native struct is pointed at
    // the pyoptix::context_log_cb trampoline with the Python callable
    // passed through as the user-data pointer.
    py::class_<pyoptix::DeviceContextOptions>(m, "DeviceContextOptions")
        .def(py::init<py::object, int32_t,
                      OptixDeviceContextValidationMode>(),
             py::arg("logCallbackFunction") = py::none(),
             py::arg("logCallbackLevel") = 0,
             IF_OPTIX72(py::arg("validationMode") =
                            OPTIX_DEVICE_CONTEXT_VALIDATION_MODE_OFF))
        .def_property(
            "logCallbackFunction",
            [](const pyoptix::DeviceContextOptions& self) {
                return self.logCallbackFunction;
            },
            [](pyoptix::DeviceContextOptions& self, py::object val) {
                // Store the Python callable and route the native callback
                // through the C trampoline, with the callable as data.
                self.logCallbackFunction = val;
                self.options.logCallbackFunction =
                    pyoptix::context_log_cb;
                self.options.logCallbackData = val.ptr();
            })
        .def_property(
            "logCallbackLevel",
            [](const pyoptix::DeviceContextOptions& self) {
                return self.options.logCallbackLevel;
            },
            [](pyoptix::DeviceContextOptions& self, int32_t val) {
                self.options.logCallbackLevel = val;
            })
// validationMode is only available from OptiX 7.2 on.
#if OPTIX_VERSION >= 70200
        .def_property(
            "validationMode",
            [](const pyoptix::DeviceContextOptions& self) {
                return self.options.validationMode;
            },
            [](pyoptix::DeviceContextOptions& self,
               OptixDeviceContextValidationMode val) {
                self.options.validationMode = val;
            })
#endif
        ;
    // Wrapper around OptixBuildInputTriangleArray. List-valued fields
    // (vertexBuffers, flags) are stored on the wrapper as std::vector copies
    // of the Python lists; scalar fields write straight through to the
    // native build_input struct.
    py::class_<pyoptix::BuildInputTriangleArray>(m, "BuildInputTriangleArray")
        .def(py::init<const py::list&, OptixVertexFormat, unsigned int,
                      CUdeviceptr, unsigned int, OptixIndicesFormat,
                      unsigned int, CUdeviceptr, const py::list&,
                      unsigned int, CUdeviceptr, unsigned int, unsigned int,
                      unsigned int IF_OPTIX71(
                          COMMA OptixTransformFormat)>(),
             py::arg("vertexBuffers_") = py::list(),  // list of CUdeviceptr
             // Pre-7.1 has no *_NONE enumerators; fall back to raw zero.
             py::arg("vertexFormat") = IF_OPTIX71_ELSE(
                 OPTIX_VERTEX_FORMAT_NONE,
                 static_cast<OptixVertexFormat>(0x0000u)),
             py::arg("vertexStrideInBytes") = 0u,
             py::arg("indexBuffer") = 0u, py::arg("numIndexTriplets") = 0u,
             py::arg("indexFormat") = IF_OPTIX71_ELSE(
                 OPTIX_INDICES_FORMAT_NONE,
                 static_cast<OptixIndicesFormat>(0x0000u)),
             py::arg("indexStrideInBytes") = 0u,
             py::arg("preTransform") = 0u,
             py::arg("flags_") = py::list(),  // list of uint32_t
             py::arg("numSbtRecords") = 0u,
             py::arg("sbtIndexOffsetBuffer") = 0u,
             py::arg("sbtIndexOffsetSizeInBytes") = 0u,
             py::arg("sbtIndexOffsetStrideInBytes") = 0u,
             py::arg("primitiveIndexOffset") =
                 0u IF_OPTIX71(COMMA py::arg("transformFormat") =
                                   OPTIX_TRANSFORM_FORMAT_NONE))
        .def_property(
            "vertexBuffers",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return py::cast(self.vertexBuffers);
            },
            [](pyoptix::BuildInputTriangleArray& self, py::list& val) {
                self.vertexBuffers =
                    val.cast<std::vector<CUdeviceptr>>();
            })
        .def_property(
            "numVertices",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.numVertices;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               unsigned int val) {
                self.build_input.numVertices = val;
            })
        .def_property(
            "vertexFormat",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.vertexFormat;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               OptixVertexFormat val) {
                self.build_input.vertexFormat = val;
            })
        .def_property(
            "vertexStrideInBytes",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.vertexStrideInBytes;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               unsigned int val) {
                self.build_input.vertexStrideInBytes = val;
            })
        .def_property(
            "indexBuffer",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.indexBuffer;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               CUdeviceptr val) { self.build_input.indexBuffer = val; })
        .def_property(
            "numIndexTriplets",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.numIndexTriplets;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               unsigned int val) {
                self.build_input.numIndexTriplets = val;
            })
        .def_property(
            "indexFormat",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.indexFormat;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               OptixIndicesFormat val) {
                self.build_input.indexFormat = val;
            })
        .def_property(
            "indexStrideInBytes",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.indexStrideInBytes;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               unsigned int val) {
                self.build_input.indexStrideInBytes = val;
            })
        .def_property(
            "preTransform",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.preTransform;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               CUdeviceptr val) {
                self.build_input.preTransform = val;
            })
        .def_property(
            "flags",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return py::cast(self.flags);
            },
            [](pyoptix::BuildInputTriangleArray& self, py::list& val) {
                self.flags = val.cast<std::vector<unsigned int>>();
            })
        .def_property(
            "numSbtRecords",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.numSbtRecords;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               unsigned int val) {
                self.build_input.numSbtRecords = val;
            })
        .def_property(
            "sbtIndexOffsetBuffer",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.sbtIndexOffsetBuffer;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               CUdeviceptr val) {
                self.build_input.sbtIndexOffsetBuffer = val;
            })
        .def_property(
            "sbtIndexOffsetSizeInBytes",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.sbtIndexOffsetSizeInBytes;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               unsigned int val) {
                self.build_input.sbtIndexOffsetSizeInBytes = val;
            })
        .def_property(
            "sbtIndexOffsetStrideInBytes",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.sbtIndexOffsetStrideInBytes;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               unsigned int val) {
                self.build_input.sbtIndexOffsetStrideInBytes = val;
            })
        .def_property(
            "primitiveIndexOffset",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.primitiveIndexOffset;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               unsigned int val) {
                self.build_input.primitiveIndexOffset = val;
            })
// transformFormat only exists from OptiX 7.1 on.
#if OPTIX_VERSION >= 70100
        .def_property(
            "transformFormat",
            [](const pyoptix::BuildInputTriangleArray& self) {
                return self.build_input.transformFormat;
            },
            [](pyoptix::BuildInputTriangleArray& self,
               OptixTransformFormat val) {
                self.build_input.transformFormat = val;
            })
#endif
        ;
// NOTE(review): guard is strictly greater than 70100, i.e. OptiX 7.2+,
// even though curves first appeared in 7.1 — presumably because the 7.1
// curve struct differs; confirm against the 7.1 headers if 7.1 support
// is needed.
#if OPTIX_VERSION > 70100
    // Wrapper around OptixBuildInputCurveArray. Buffer-list fields
    // (vertexBuffers, widthBuffers, normalBuffers) are cached on the
    // wrapper; scalar fields write through to the native build_input.
    py::class_<pyoptix::BuildInputCurveArray>(m, "BuildInputCurveArray")
        .def(py::init<OptixPrimitiveType, unsigned int, const py::list&,
                      unsigned int, unsigned int, const py::list&,
                      unsigned int, const py::list&, unsigned int,
                      CUdeviceptr, unsigned int, unsigned int,
                      unsigned int>(),
             py::arg("curveType") = OPTIX_PRIMITIVE_TYPE_ROUND_LINEAR,
             py::arg("numPrimitives") = 0u,
             py::arg("vertexBuffers") = py::list(),  // list of CUdeviceptr
             py::arg("numVertices") = 0u,
             py::arg("vertexStrideInBytes") = 0u,
             py::arg("widthBuffers") = py::list(),  // list of CUdeviceptr
             py::arg("widthStrideInBytes") = 0u,
             py::arg("normalBuffers") = py::list(),  // list of CUdeviceptr
             py::arg("normalStrideInBytes") = 0u,
             py::arg("indexBuffer") = 0llu,
             py::arg("indexStrideInBytes") = 0u, py::arg("flag") = 0u,
             py::arg("primitiveIndexOffset") = 0u)
        .def_property(
            "curveType",
            [](const pyoptix::BuildInputCurveArray& self) {
                return self.build_input.curveType;
            },
            [](pyoptix::BuildInputCurveArray& self,
               OptixPrimitiveType val) {
                self.build_input.curveType = val;
            })
        .def_property(
            "numPrimitives",
            [](const pyoptix::BuildInputCurveArray& self) {
                return self.build_input.numPrimitives;
            },
            [](pyoptix::BuildInputCurveArray& self, unsigned int val) {
                self.build_input.numPrimitives = val;
            })
        .def_property(
            "vertexBuffers",
            [](const pyoptix::BuildInputCurveArray& self) {
                return py::cast(self.vertexBuffers);
            },
            [](pyoptix::BuildInputCurveArray& self, py::list& val) {
                self.vertexBuffers =
                    val.cast<std::vector<CUdeviceptr>>();
            })
        .def_property(
            "numVertices",
            [](const pyoptix::BuildInputCurveArray& self) {
                return self.build_input.numVertices;
            },
            [](pyoptix::BuildInputCurveArray& self, unsigned int val) {
                self.build_input.numVertices = val;
            })
        .def_property(
            "vertexStrideInBytes",
            [](const pyoptix::BuildInputCurveArray& self) {
                return self.build_input.vertexStrideInBytes;
            },
            [](pyoptix::BuildInputCurveArray& self, unsigned int val) {
                self.build_input.vertexStrideInBytes = val;
            })
        .def_property(
            "widthBuffers",
            [](const pyoptix::BuildInputCurveArray& self) {
                return py::cast(self.widthBuffers);
            },
            [](pyoptix::BuildInputCurveArray& self, py::list& val) {
                self.widthBuffers =
                    val.cast<std::vector<CUdeviceptr>>();
            })
        .def_property(
            "widthStrideInBytes",
            [](const pyoptix::BuildInputCurveArray& self) {
                return self.build_input.widthStrideInBytes;
            },
            [](pyoptix::BuildInputCurveArray& self, unsigned int val) {
                self.build_input.widthStrideInBytes = val;
            })
        .def_property(
            "normalBuffers",
            [](const pyoptix::BuildInputCurveArray& self) {
                return py::cast(self.normalBuffers);
            },
            [](pyoptix::BuildInputCurveArray& self, py::list& val) {
                self.normalBuffers =
                    val.cast<std::vector<CUdeviceptr>>();
            })
        .def_property(
            "normalStrideInBytes",
            [](const pyoptix::BuildInputCurveArray& self) {
                return self.build_input.normalStrideInBytes;
            },
            [](pyoptix::BuildInputCurveArray& self, unsigned int val) {
                self.build_input.normalStrideInBytes = val;
            })
        .def_property(
            "indexBuffer",
            [](const pyoptix::BuildInputCurveArray& self) {
                return self.build_input.indexBuffer;
            },
            [](pyoptix::BuildInputCurveArray& self, CUdeviceptr val) {
                self.build_input.indexBuffer = val;
            })
        .def_property(
            "indexStrideInBytes",
            [](const pyoptix::BuildInputCurveArray& self) {
                return self.build_input.indexStrideInBytes;
            },
            [](pyoptix::BuildInputCurveArray& self, unsigned int val) {
                self.build_input.indexStrideInBytes = val;
            })
        .def_property(
            "flag",
            [](const pyoptix::BuildInputCurveArray& self) {
                return self.build_input.flag;
            },
            [](pyoptix::BuildInputCurveArray& self, unsigned int val) {
                self.build_input.flag = val;
            })
        .def_property(
            "primitiveIndexOffset",
            [](const pyoptix::BuildInputCurveArray& self) {
                return self.build_input.primitiveIndexOffset;
            },
            [](pyoptix::BuildInputCurveArray& self, unsigned int val) {
                self.build_input.primitiveIndexOffset = val;
            });
#endif  // OPTIX_VERSION > 70100
    /* NOTE: Not very useful in python host-side
    py::class_<OptixAabb>(m, "Aabb")
        .def( py::init([]() { return std::unique_ptr<OptixAabb>(new OptixAabb{}
    ); } ) ) .def_readwrite( "minX", &OptixAabb::minX ) .def_readwrite( "minY",
    &OptixAabb::minY ) .def_readwrite( "minZ", &OptixAabb::minZ )
        .def_readwrite( "maxX", &OptixAabb::maxX )
        .def_readwrite( "maxY", &OptixAabb::maxY )
        .def_readwrite( "maxZ", &OptixAabb::maxZ )
    ;
    */
    // Wrapper around OptixBuildInputCustomPrimitiveArray. aabbBuffers and
    // flags are cached on the wrapper as std::vector copies of the Python
    // lists; scalar fields write through to the native build_input.
    py::class_<pyoptix::BuildInputCustomPrimitiveArray>(
        m, "BuildInputCustomPrimitiveArray")
        .def(py::init<const py::list&, unsigned int, unsigned int,
                      const py::list&, unsigned int, CUdeviceptr,
                      unsigned int, unsigned int, unsigned int>(),
             py::arg("aabbBuffers") = py::list(),
             py::arg("numPrimitives") = 0u, py::arg("strideInBytes") = 0u,
             py::arg("flags") = py::list(), py::arg("numSbtRecords") = 0u,
             py::arg("sbtIndexOffsetBuffer") = 0u,
             py::arg("sbtIndexOffsetSizeInBytes") = 0u,
             py::arg("sbtIndexOffsetStrideInBytes") = 0u,
             py::arg("primitiveIndexOffset") = 0u)
        .def_property(
            "aabbBuffers",
            [](const pyoptix::BuildInputCustomPrimitiveArray& self) {
                return py::cast(self.aabbBuffers);
            },
            [](pyoptix::BuildInputCustomPrimitiveArray& self,
               py::list& val) {
                self.aabbBuffers = val.cast<std::vector<CUdeviceptr>>();
            })
        .def_property(
            "numPrimitives",
            [](const pyoptix::BuildInputCustomPrimitiveArray& self) {
                return self.build_input.numPrimitives;
            },
            [](pyoptix::BuildInputCustomPrimitiveArray& self,
               unsigned int val) {
                self.build_input.numPrimitives = val;
            })
        .def_property(
            "strideInBytes",
            [](const pyoptix::BuildInputCustomPrimitiveArray& self) {
                return self.build_input.strideInBytes;
            },
            [](pyoptix::BuildInputCustomPrimitiveArray& self,
               unsigned int val) {
                self.build_input.strideInBytes = val;
            })
        .def_property(
            "flags",
            [](const pyoptix::BuildInputCustomPrimitiveArray& self) {
                return py::cast(self.flags);
            },
            [](pyoptix::BuildInputCustomPrimitiveArray& self,
               py::list& val) {
                self.flags = val.cast<std::vector<unsigned int>>();
            })
        .def_property(
            "numSbtRecords",
            [](const pyoptix::BuildInputCustomPrimitiveArray& self) {
                return self.build_input.numSbtRecords;
            },
            [](pyoptix::BuildInputCustomPrimitiveArray& self,
               unsigned int val) {
                self.build_input.numSbtRecords = val;
            })
        .def_property(
            "sbtIndexOffsetBuffer",
            [](const pyoptix::BuildInputCustomPrimitiveArray& self) {
                return self.build_input.sbtIndexOffsetBuffer;
            },
            [](pyoptix::BuildInputCustomPrimitiveArray& self,
               CUdeviceptr val) {
                self.build_input.sbtIndexOffsetBuffer = val;
            })
        .def_property(
            "sbtIndexOffsetSizeInBytes",
            [](const pyoptix::BuildInputCustomPrimitiveArray& self) {
                return self.build_input.sbtIndexOffsetSizeInBytes;
            },
            [](pyoptix::BuildInputCustomPrimitiveArray& self,
               unsigned int val) {
                self.build_input.sbtIndexOffsetSizeInBytes = val;
            })
        .def_property(
            "sbtIndexOffsetStrideInBytes",
            [](const pyoptix::BuildInputCustomPrimitiveArray& self) {
                return self.build_input.sbtIndexOffsetStrideInBytes;
            },
            [](pyoptix::BuildInputCustomPrimitiveArray& self,
               unsigned int val) {
                self.build_input.sbtIndexOffsetStrideInBytes = val;
            })
        .def_property(
            "primitiveIndexOffset",
            [](const pyoptix::BuildInputCustomPrimitiveArray& self) {
                return self.build_input.primitiveIndexOffset;
            },
            [](pyoptix::BuildInputCustomPrimitiveArray& self,
               unsigned int val) {
                self.build_input.primitiveIndexOffset = val;
            });
    // Wrapper around OptixBuildInputInstanceArray. Setters go through
    // self.setInstances()/setInstancePointers(), which also select the
    // build-input variant on the wrapper side.
    py::class_<pyoptix::BuildInputInstanceArray>(m, "BuildInputInstanceArray")
        .def(py::init<CUdeviceptr, CUdeviceptr, unsigned int>(),
             py::arg("instances") = 0u, py::arg("instancePointers") = 0u,
             py::arg("numInstances") = 0u)
        .def_property(
            "instances",
            [](const pyoptix::BuildInputInstanceArray& self) {
                return self.build_input.instances;
            },
            [](pyoptix::BuildInputInstanceArray& self,
               CUdeviceptr val) { self.setInstances(val); })
        .def_property(
            "instancePointers",
            // NOTE(review): this getter reads build_input.instances, same
            // as the "instances" getter above. That may be intentional if
            // both array variants share the single device-pointer field of
            // the native struct — confirm against setInstancePointers().
            [](const pyoptix::BuildInputInstanceArray& self) {
                return self.build_input.instances;
            },
            [](pyoptix::BuildInputInstanceArray& self,
               CUdeviceptr val) { self.setInstancePointers(val); })
        .def_property(
            "numInstances",
            [](const pyoptix::BuildInputInstanceArray& self) {
                return self.build_input.numInstances;
            },
            [](pyoptix::BuildInputInstanceArray& self,
               unsigned int val) {
                self.build_input.numInstances = val;
            });
    /* NOTE: Wrapper type OptixBuildInput not used in python bindings
    py::class_<OptixBuildInput>(m, "BuildInput")
        .def( py::init([]() { return std::unique_ptr<OptixBuildInput>(new
    OptixBuildInput{} ); } ) ) .def_readwrite( "type", &OptixBuildInput::type )
        .def_readwrite( "triangleArray", &OptixBuildInput::triangleArray )
    #if OPTIX_VERSION >= 70100
        .def_readwrite( "curveArray", &OptixBuildInput::curveArray )
        .def_readwrite( "customPrimitiveArray",
    &OptixBuildInput::customPrimitiveArray ) #else .def_readwrite( "aabbArray",
    &OptixBuildInput::aabbArray ) #endif .def_readwrite( "instanceArray",
    &OptixBuildInput::instanceArray )
    ;
    */
    // Wrapper around OptixInstance. The 12-float transform list is copied
    // into the native struct via setTransform(); the remaining fields write
    // through directly.
    py::class_<pyoptix::Instance>(m, "Instance")
        .def(py::init<const py::list&,  // 12 floats
                      uint32_t, uint32_t, uint32_t, uint32_t,
                      OptixTraversableHandle>(),
             py::arg("transform") = py::list(), py::arg("instanceId") = 0u,
             py::arg("sbtOffset") = 0u, py::arg("visibilityMask") = 0u,
             py::arg("flags") = 0u, py::arg("traversableHandle") = 0u)
        .def_property(
            "transform",
            [](const pyoptix::Instance& self) {
                return py::cast(self.instance.transform);
            },
            // nullptr,
            [](pyoptix::Instance& self, const py::list& val) {
                self.setTransform(val);
            })
        .def_property(
            "instanceId",
            [](const pyoptix::Instance& self) {
                return self.instance.instanceId;
            },
            [](pyoptix::Instance& self, uint32_t val) {
                self.instance.instanceId = val;
            })
        .def_property(
            "sbtOffset",
            [](const pyoptix::Instance& self) {
                return self.instance.sbtOffset;
            },
            [](pyoptix::Instance& self, uint32_t val) {
                self.instance.sbtOffset = val;
            })
        .def_property(
            "visibilityMask",
            [](const pyoptix::Instance& self) {
                return self.instance.visibilityMask;
            },
            [](pyoptix::Instance& self, uint32_t val) {
                self.instance.visibilityMask = val;
            })
        .def_property(
            "flags",
            [](const pyoptix::Instance& self) {
                return self.instance.flags;
            },
            [](pyoptix::Instance& self, uint32_t val) {
                self.instance.flags = val;
            })
        .def_property(
            "traversableHandle",
            [](const pyoptix::Instance& self) {
                return self.instance.traversableHandle;
            },
            [](pyoptix::Instance& self, OptixTraversableHandle val) {
                self.instance.traversableHandle = val;
            });
    // OptixMotionOptions is a plain struct, bound directly with a
    // keyword-argument factory constructor and read/write fields.
    py::class_<OptixMotionOptions>(m, "MotionOptions")
        .def(py::init([](uint32_t numKeys, uint32_t flags, float timeBegin,
                         float timeEnd) {
                 auto opts = std::unique_ptr<OptixMotionOptions>(
                     new OptixMotionOptions{});
                 opts->numKeys = numKeys;
                 opts->flags = flags;
                 opts->timeBegin = timeBegin;
                 opts->timeEnd = timeEnd;
                 return opts;
             }
             ),
             py::arg("numKeys") = 0u, py::arg("flags") = 0u,
             py::arg("timeBegin") = 0.0f, py::arg("timeEnd") = 0.0f)
        .def_readwrite("numKeys", &OptixMotionOptions::numKeys)
        .def_readwrite("flags", &OptixMotionOptions::flags)
        .def_readwrite("timeBegin", &OptixMotionOptions::timeBegin)
        .def_readwrite("timeEnd", &OptixMotionOptions::timeEnd);
    // OptixAccelBuildOptions: plain struct, same binding pattern.
    py::class_<OptixAccelBuildOptions>(m, "AccelBuildOptions")
        .def(py::init([](unsigned int buildFlags,
                         OptixBuildOperation operation,
                         const OptixMotionOptions& motionOptions) {
                 auto opts = std::unique_ptr<OptixAccelBuildOptions>(
                     new OptixAccelBuildOptions{});
                 opts->buildFlags = buildFlags;
                 opts->operation = operation;
                 opts->motionOptions = motionOptions;
                 return opts;
             }),
             py::arg("buildFlags") = 0,
             py::arg("operation") = OPTIX_BUILD_OPERATION_BUILD,
             py::arg("motionOptions") = OptixMotionOptions{})
        .def_readwrite("buildFlags", &OptixAccelBuildOptions::buildFlags)
        .def_readwrite("operation", &OptixAccelBuildOptions::operation)
        .def_readwrite("motionOptions",
                       &OptixAccelBuildOptions::motionOptions);
    // OptixAccelBufferSizes is an output struct: fields are read-only.
    py::class_<OptixAccelBufferSizes>(m, "AccelBufferSizes")
        .def(py::init([]() {
            return std::unique_ptr<OptixAccelBufferSizes>(
                new OptixAccelBufferSizes{});
        }))
        .def_readonly("outputSizeInBytes",
                      &OptixAccelBufferSizes::outputSizeInBytes)
        .def_readonly("tempSizeInBytes",
                      &OptixAccelBufferSizes::tempSizeInBytes)
        .def_readonly("tempUpdateSizeInBytes",
                      &OptixAccelBufferSizes::tempUpdateSizeInBytes);
    // Wrapper around OptixAccelEmitDesc; fields write through to the
    // native desc struct.
    py::class_<pyoptix::AccelEmitDesc>(m, "AccelEmitDesc")
        .def(py::init<CUdeviceptr, OptixAccelPropertyType>(),
             py::arg("result") = 0u, py::arg("type") = 0u)
        .def_property(
            "result",
            [](const pyoptix::AccelEmitDesc& self) {
                return self.desc.result;
            },
            [](pyoptix::AccelEmitDesc& self, CUdeviceptr val) {
                self.desc.result = val;
            })
        .def_property(
            "type",
            [](const pyoptix::AccelEmitDesc& self) {
                return self.desc.type;
            },
            [](pyoptix::AccelEmitDesc& self,
               OptixAccelPropertyType val) { self.desc.type = val; });
// The relocation-info struct was renamed in OptiX 7.6
// (OptixAccelRelocationInfo -> OptixRelocationInfo).
#if OPTIX_VERSION < 70600
    py::class_<OptixAccelRelocationInfo>(m, "AccelRelocationInfo")
        .def(py::init([]() {
            return std::unique_ptr<OptixAccelRelocationInfo>(
                new OptixAccelRelocationInfo{});
        }))
        // NB: info field is internal only so not making accessible
        ;
#else
    py::class_<OptixRelocationInfo>(m, "RelocationInfo").def(py::init([]() {
        return std::unique_ptr<OptixRelocationInfo>(new OptixRelocationInfo{});
    }))
        // NB: info field is internal only so not making accessible
        ;
#endif
    // Wrapper around OptixStaticTransform. The 12-float transform and
    // inverse-transform lists are copied in via setTransform() /
    // setInvTransform(); getBytes() serializes the native struct.
    py::class_<pyoptix::StaticTransform>(m, "StaticTransform")
        .def(py::init<OptixTraversableHandle,
                      const py::list&,  // 12 floats
                      const py::list&   // 12 floats
                      >(),
             py::arg("child") = 0u, py::arg("transform") = py::list(),
             py::arg("invTransform") = py::list())
        .def_property(
            "child",
            [](const pyoptix::StaticTransform& self) {
                return self.transform.child;
            },
            [](pyoptix::StaticTransform& self,
               OptixTraversableHandle val) {
                self.transform.child = val;
            })
        .def_property(
            "transform",
            [](const pyoptix::StaticTransform& self) {
                return py::cast(self.transform.transform);
            },
            // nullptr,
            [](pyoptix::StaticTransform& self, const py::list& val) {
                self.setTransform(val);
            })
        .def_property(
            "invTransform",
            [](const pyoptix::StaticTransform& self) {
                return py::cast(self.transform.invTransform);
            },
            // nullptr,
            [](pyoptix::StaticTransform& self, const py::list& val) {
                self.setInvTransform(val);
            })
        .def("getBytes", &pyoptix::StaticTransform::getBytes);
    // Wrapper around OptixMatrixMotionTransform. The transform property is
    // write-only (getter is nullptr): the flexible-length key array cannot
    // be read back through this binding.
    py::class_<pyoptix::MatrixMotionTransform>(m, "MatrixMotionTransform")
        .def(py::init<OptixTraversableHandle,
                      // pyoptix::MotionOptions,
                      OptixMotionOptions,
                      const py::list&  // N*12 floats where N >= 2
                      >(),
             py::arg("child") = 0u,
             py::arg("motionOptions") = OptixMotionOptions{},
             py::arg("transform") = py::list())
        .def_property(
            "child",
            [](const pyoptix::MatrixMotionTransform& self) {
                return self.mtransform.child;
            },
            [](pyoptix::MatrixMotionTransform& self,
               OptixTraversableHandle val) {
                self.mtransform.child = val;
            })
        .def_property(
            "motionOptions",
            [](const pyoptix::MatrixMotionTransform& self) {
                // Return a copy so Python cannot mutate the native
                // struct through the returned object.
                return OptixMotionOptions(
                    self.mtransform.motionOptions);
            },
            [](pyoptix::MatrixMotionTransform& self,
               const OptixMotionOptions& val) {
                self.mtransform.motionOptions = val;
            })
        .def_property("transform", nullptr,
                      [](pyoptix::MatrixMotionTransform& self,
                         const py::list& val) { self.setTransform(val); })
        .def("getBytes", &pyoptix::MatrixMotionTransform::getBytes);
    // KEITH
    // NOTE(review): these SRT bindings look unfinished (see the marker
    // above): SRTData exposes only "tz" and SRTMotionTransform exposes only
    // "child" and "motionOptions" — the remaining struct fields are not
    // accessible from Python. TODO: complete the field bindings.
    py::class_<OptixSRTData>(m, "SRTData")
        .def(py::init([]() {
            return std::unique_ptr<OptixSRTData>(new OptixSRTData{});
        }))
        .def_readwrite("tz", &OptixSRTData::tz);
    py::class_<OptixSRTMotionTransform>(m, "SRTMotionTransform")
        .def(py::init([]() {
            return std::unique_ptr<OptixSRTMotionTransform>(
                new OptixSRTMotionTransform{});
        }))
        .def_readwrite("child", &OptixSRTMotionTransform::child)
        .def_readwrite("motionOptions",
                       &OptixSRTMotionTransform::motionOptions);
    // OptixImage2D: plain struct describing a denoiser image buffer.
    py::class_<OptixImage2D>(m, "Image2D")
        .def(py::init([]() {
            return std::unique_ptr<OptixImage2D>(new OptixImage2D{});
        }))
        .def_readwrite("data", &OptixImage2D::data)
        .def_readwrite("width", &OptixImage2D::width)
        .def_readwrite("height", &OptixImage2D::height)
        .def_readwrite("rowStrideInBytes", &OptixImage2D::rowStrideInBytes)
        .def_readwrite("pixelStrideInBytes",
                       &OptixImage2D::pixelStrideInBytes)
        .def_readwrite("format", &OptixImage2D::format);
// The denoiser API was restructured in OptiX 7.3: guide layers replaced the
// old inputKind-based options, hence the two distinct binding sets below.
#if OPTIX_VERSION >= 70300
    py::class_<OptixDenoiserOptions>(m, "DenoiserOptions")
        .def(py::init([]() {
            return std::unique_ptr<OptixDenoiserOptions>(
                new OptixDenoiserOptions{});
        }))
        .def_readwrite("guideAlbedo", &OptixDenoiserOptions::guideAlbedo)
        .def_readwrite("guideNormal", &OptixDenoiserOptions::guideNormal);
    py::class_<OptixDenoiserLayer>(m, "DenoiserLayer")
        .def(py::init([]() {
            return std::unique_ptr<OptixDenoiserLayer>(
                new OptixDenoiserLayer{});
        }))
        .def_readwrite("input", &OptixDenoiserLayer::input)
        .def_readwrite("previousOutput",
                       &OptixDenoiserLayer::previousOutput)
        .def_readwrite("output", &OptixDenoiserLayer::output);
    py::class_<OptixDenoiserGuideLayer>(m, "DenoiserGuideLayer")
        .def(py::init([]() {
            return std::unique_ptr<OptixDenoiserGuideLayer>(
                new OptixDenoiserGuideLayer{});
        }))
        .def_readwrite("albedo", &OptixDenoiserGuideLayer::albedo)
        .def_readwrite("normal", &OptixDenoiserGuideLayer::normal)
        .def_readwrite("flow", &OptixDenoiserGuideLayer::flow);
#elif OPTIX_VERSION <= 70200
    py::class_<OptixDenoiserOptions>(m, "DenoiserOptions")
        .def(py::init([]() {
            return std::unique_ptr<OptixDenoiserOptions>(
                new OptixDenoiserOptions{});
        }))
        .def_readwrite("inputKind", &OptixDenoiserOptions::inputKind);
#endif
    py::class_<OptixDenoiserParams>(m, "DenoiserParams")
        .def(py::init([]() {
            return std::unique_ptr<OptixDenoiserParams>(
                new OptixDenoiserParams{});
        }))
        .def_readwrite("denoiseAlpha", &OptixDenoiserParams::denoiseAlpha)
        .def_readwrite("hdrIntensity", &OptixDenoiserParams::hdrIntensity)
        .def_readwrite("blendFactor", &OptixDenoiserParams::blendFactor)
        IF_OPTIX72(.def_readwrite(
            "hdrAverageColor",
            &OptixDenoiserParams::hdrAverageColor));
    // OptixDenoiserSizes: the scratch-size field names changed after 7.0,
    // so the bindings are split on OPTIX_VERSION.
    py::class_<OptixDenoiserSizes>(m, "DenoiserSizes")
        .def(py::init([]() {
            return std::unique_ptr<OptixDenoiserSizes>(
                new OptixDenoiserSizes{});
        }))
        .def_readwrite("stateSizeInBytes",
                       &OptixDenoiserSizes::stateSizeInBytes)
#if OPTIX_VERSION > 70000
        .def_readwrite("withOverlapScratchSizeInBytes",
                       &OptixDenoiserSizes::withOverlapScratchSizeInBytes)
        .def_readwrite(
            "withoutOverlapScratchSizeInBytes",
            &OptixDenoiserSizes::withoutOverlapScratchSizeInBytes)
#else
        .def_readwrite("minimumScratchSizeInBytes",
                       &OptixDenoiserSizes::minimumScratchSizeInBytes)
        .def_readwrite("recommendedScratchSizeInBytes",
                       &OptixDenoiserSizes::recommendedScratchSizeInBytes)
#endif
        .def_readwrite("overlapWindowSizeInPixels",
                       &OptixDenoiserSizes::overlapWindowSizeInPixels);
// Bound-value specialization entries were introduced in OptiX 7.2.
#if OPTIX_VERSION >= 70200
    // Wrapper around OptixModuleCompileBoundValueEntry. boundValue is
    // write-only (raw buffer copied via setBoundValue); sizeInBytes is
    // derived from the buffer and therefore read-only.
    py::class_<pyoptix::ModuleCompileBoundValueEntry>(
        m, "ModuleCompileBoundValueEntry")
        .def(py::init<size_t, py::buffer, const std::string&>(),
             py::arg("pipelineParamOffsetInBytes") = 0u,
             py::arg("boundValue") = py::bytes(),
             py::arg("annotation") = "")
        .def_property(
            "pipelineParamOffsetInBytes",
            [](const pyoptix::ModuleCompileBoundValueEntry& self) {
                return self.entry.pipelineParamOffsetInBytes;
            },
            [](pyoptix::ModuleCompileBoundValueEntry& self,
               size_t val) {
                self.entry.pipelineParamOffsetInBytes = val;
            })
        .def_property_readonly(
            "sizeInBytes",
            [](const pyoptix::ModuleCompileBoundValueEntry& self) {
                return self.entry.sizeInBytes;
            })
        .def_property("boundValue",
                      //[](const pyoptix::ModuleCompileBoundValueEntry&
                      // self) { return self.boundValue; },
                      nullptr,
                      [](pyoptix::ModuleCompileBoundValueEntry& self,
                         py::buffer val) { self.setBoundValue(val); })
        .def_property(
            "annotation",
            [](const pyoptix::ModuleCompileBoundValueEntry& self) {
                return self.annotation;
            },
            [](pyoptix::ModuleCompileBoundValueEntry& self,
               std::string&& val) {
                self.setAnnotation(std::move(val));
            });
#endif  // OPTIX_VERSION >= 70200
// Payload types were introduced in OptiX 7.4.
#if OPTIX_VERSION >= 70400
    // Wrapper around OptixPayloadType, constructed from a Python list of
    // per-payload-value semantics flags.
    //
    // NOTE(review): the original binding additionally exposed a
    // "pipelineParamOffsetInBytes" property whose getter/setter lambdas
    // were copy-pasted from ModuleCompileBoundValueEntry and took
    // pyoptix::ModuleCompileBoundValueEntry as the self type. On a
    // py::class_<pyoptix::PayloadType> that self-cast can never succeed at
    // runtime, and OptixPayloadType has no such field, so the mis-bound
    // property is removed here.
    py::class_<pyoptix::PayloadType>(m, "PayloadType")
        .def(py::init<py::list>(), py::arg("payloadSemantics") = py::list())
        .def_property("payloadSemantics",
                      // Getter intentionally disabled: the list is
                      // write-only and forwarded to setPayloadSemantics().
                      //[](const pyoptix::PayloadType& self)
                      //{ return self.payloadSemantics; },
                      nullptr,
                      [](pyoptix::PayloadType& self, py::list val) {
                          self.setPayloadSemantics(val);
                      });
#endif  // OPTIX_VERSION >= 70400
    // Wrapper around OptixModuleCompileOptions. boundValues (7.2+) and
    // payloadTypes (7.4+) are write-only: reading them back would expose
    // the wrapper's internal vectors, which is explicitly disabled below.
    py::class_<pyoptix::ModuleCompileOptions>(m, "ModuleCompileOptions")
        .def(py::init<
                 int32_t, OptixCompileOptimizationLevel,
                 OptixCompileDebugLevel IF_OPTIX72(
                     COMMA std::vector<
                         pyoptix::
                             ModuleCompileBoundValueEntry>&&)
                     IF_OPTIX74(COMMA std::vector<
                                pyoptix::PayloadType>&&)>(),
             py::arg("maxRegisterCount") = 0u,
             py::arg("optLevel") = OPTIX_COMPILE_OPTIMIZATION_DEFAULT,
             // DEBUG_LEVEL_DEFAULT only exists from 7.1 on; older
             // versions fall back to LINEINFO.
             py::arg("debugLevel") = IF_OPTIX71_ELSE(
                 OPTIX_COMPILE_DEBUG_LEVEL_DEFAULT,
                 OPTIX_COMPILE_DEBUG_LEVEL_LINEINFO)
                 IF_OPTIX72(
                     COMMA py::arg("boundValues") = std::vector<
                         pyoptix::
                             ModuleCompileBoundValueEntry>())
                     IF_OPTIX74(
                         COMMA py::arg("payloadTypes") = std::
                             vector<pyoptix::
                                        PayloadType>()))
        .def_property(
            "maxRegisterCount",
            [](const pyoptix::ModuleCompileOptions& self) {
                return self.options.maxRegisterCount;
            },
            [](pyoptix::ModuleCompileOptions& self, int32_t val) {
                self.options.maxRegisterCount = val;
            })
        .def_property(
            "optLevel",
            [](const pyoptix::ModuleCompileOptions& self) {
                return self.options.optLevel;
            },
            [](pyoptix::ModuleCompileOptions& self,
               OptixCompileOptimizationLevel val) {
                self.options.optLevel = val;
            })
        .def_property(
            "debugLevel",
            [](const pyoptix::ModuleCompileOptions& self) {
                return self.options.debugLevel;
            },
            [](pyoptix::ModuleCompileOptions& self,
               OptixCompileDebugLevel val) {
                self.options.debugLevel = val;
            })
#if OPTIX_VERSION >= 70200
        .def_property(
            "boundValues",
            // This doesnt do what you probably want it to so disable it
            //[](const pyoptix::ModuleCompileOptions& self)
            //{ return self.boundValues; },
            nullptr,
            [](pyoptix::ModuleCompileOptions& self,
               std::vector<pyoptix::ModuleCompileBoundValueEntry>&&
                   val) { self.pyboundValues = std::move(val); })
#endif
#if OPTIX_VERSION >= 70400
        .def_property(
            "payloadTypes",
            // This doesnt do what you probably want it to so disable it
            //[](const pyoptix::PayloadType& self)
            //{ return self.payloadTypes; },
            nullptr,
            [](pyoptix::ModuleCompileOptions& self,
               std::vector<pyoptix::PayloadType>&& val) {
                self.pypayloadTypes = std::move(val);
            })
#endif
        ;
    // Python wrapper for OptixProgramGroupDesc. A single Python class covers
    // all program-group kinds; assigning any kind-specific module/entry-point
    // property also stamps program_group_desc.kind, so the last-touched kind
    // wins. Entry-point name strings are owned as std::strings in
    // entryFunctionName0/1/2; the slot assignment mirrors the union layout:
    //   slot 0: raygen / miss / exception / callables-DC / hitgroup-CH
    //   slot 1: callables-CC / hitgroup-AH
    //   slot 2: hitgroup-IS
    // NOTE(review): because slots are shared across kinds, reading e.g.
    // missEntryFunctionName after setting raygenEntryFunctionName returns the
    // same string — presumably intentional since only one kind is active.
    py::class_<pyoptix::ProgramGroupDesc>(m, "ProgramGroupDesc")
        .def(py::init<uint32_t,
                      const char*,           // raygenEntryFunctionName
                      const pyoptix::Module, // raygenModule
                      const char*,           // missEntryFunctionName
                      const pyoptix::Module, // missModule
                      const char*,           // exceptionEntryFunctionName
                      const pyoptix::Module, // exceptionModule
                      const char*,           // callablesEntryFunctionNameDC
                      const pyoptix::Module, // callablesModuleDC
                      const char*,           // callablesEntryFunctionNameCC
                      const pyoptix::Module, // callablesModuleCC
                      const char*,           // hitgroupEntryFunctionNameCH
                      const pyoptix::Module, // hitgroupModuleCH
                      const char*,           // hitgroupEntryFunctionNameAH
                      const pyoptix::Module, // hitgroupModuleAH
                      const char*,           // hitgroupEntryFunctionNameIS
                      const pyoptix::Module  // hitgroupModuleIS
                      >(),
             py::arg("flags") = 0u,
             py::arg("raygenEntryFunctionName") = nullptr,
             py::arg("raygenModule") = pyoptix::Module{},
             py::arg("missEntryFunctionName") = nullptr,
             py::arg("missModule") = pyoptix::Module{},
             py::arg("exceptionEntryFunctionName") = nullptr,
             py::arg("exceptionModule") = pyoptix::Module{},
             py::arg("callablesEntryFunctionNameDC") = nullptr,
             py::arg("callablesModuleDC") = pyoptix::Module{},
             py::arg("callablesEntryFunctionNameCC") = nullptr,
             py::arg("callablesModuleCC") = pyoptix::Module{},
             py::arg("hitgroupEntryFunctionNameCH") = nullptr,
             py::arg("hitgroupModuleCH") = pyoptix::Module{},
             py::arg("hitgroupEntryFunctionNameAH") = nullptr,
             py::arg("hitgroupModuleAH") = pyoptix::Module{},
             py::arg("hitgroupEntryFunctionNameIS") = nullptr,
             py::arg("hitgroupModuleIS") = pyoptix::Module{})
        .def_property(
            "flags",
            [](pyoptix::ProgramGroupDesc& self) {
                return self.program_group_desc.flags;
            },
            [](pyoptix::ProgramGroupDesc& self, uint32_t flags) {
                self.program_group_desc.flags = flags;
            })
        // --- raygen (sets kind = RAYGEN) ---
        .def_property(
            "raygenModule",
            [](pyoptix::ProgramGroupDesc& self) {
                return pyoptix::Module{
                    self.program_group_desc.raygen.module};
            },
            [](pyoptix::ProgramGroupDesc& self,
               const pyoptix::Module& module) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
                self.program_group_desc.raygen.module = module.module;
            })
        .def_property(
            "raygenEntryFunctionName",
            [](pyoptix::ProgramGroupDesc& self) {
                return self.entryFunctionName0;
            },
            [](pyoptix::ProgramGroupDesc& self,
               const std::string& name) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_RAYGEN;
                self.entryFunctionName0 = name;
            })
        // --- miss (sets kind = MISS) ---
        .def_property(
            "missModule",
            [](pyoptix::ProgramGroupDesc& self) {
                return pyoptix::Module{
                    self.program_group_desc.miss.module};
            },
            [](pyoptix::ProgramGroupDesc& self,
               const pyoptix::Module& module) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_MISS;
                self.program_group_desc.miss.module = module.module;
            })
        .def_property(
            "missEntryFunctionName",
            [](pyoptix::ProgramGroupDesc& self) {
                return self.entryFunctionName0;
            },
            [](pyoptix::ProgramGroupDesc& self,
               const std::string& name) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_MISS;
                self.entryFunctionName0 = name;
            })
        // --- exception (sets kind = EXCEPTION) ---
        .def_property(
            "exceptionModule",
            [](pyoptix::ProgramGroupDesc& self) {
                return pyoptix::Module{
                    self.program_group_desc.exception.module};
            },
            [](pyoptix::ProgramGroupDesc& self,
               const pyoptix::Module& module) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_EXCEPTION;
                self.program_group_desc.exception.module =
                    module.module;
            })
        .def_property(
            "exceptionEntryFunctionName",
            [](pyoptix::ProgramGroupDesc& self) {
                return self.entryFunctionName0;
            },
            [](pyoptix::ProgramGroupDesc& self,
               const std::string& name) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_EXCEPTION;
                self.entryFunctionName0 = name;
            })
        // --- callables (sets kind = CALLABLES; DC uses slot 0, CC slot 1) ---
        .def_property(
            "callablesModuleDC",
            [](pyoptix::ProgramGroupDesc& self) {
                return pyoptix::Module{
                    self.program_group_desc.callables.moduleDC};
            },
            [](pyoptix::ProgramGroupDesc& self,
               const pyoptix::Module& module) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
                self.program_group_desc.callables.moduleDC =
                    module.module;
            })
        .def_property(
            "callablesEntryFunctionNameDC",
            [](pyoptix::ProgramGroupDesc& self) {
                return self.entryFunctionName0;
            },
            [](pyoptix::ProgramGroupDesc& self,
               const std::string& name) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
                self.entryFunctionName0 = name;
            })
        .def_property(
            "callablesModuleCC",
            [](pyoptix::ProgramGroupDesc& self) {
                return pyoptix::Module{
                    self.program_group_desc.callables.moduleCC};
            },
            [](pyoptix::ProgramGroupDesc& self,
               const pyoptix::Module& module) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
                self.program_group_desc.callables.moduleCC =
                    module.module;
            })
        .def_property(
            "callablesEntryFunctionNameCC",
            [](pyoptix::ProgramGroupDesc& self) {
                return self.entryFunctionName1;
            },
            [](pyoptix::ProgramGroupDesc& self,
               const std::string& name) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_CALLABLES;
                self.entryFunctionName1 = name;
            })
        // --- hitgroup (sets kind = HITGROUP; CH slot 0, AH slot 1, IS slot 2) ---
        .def_property(
            "hitgroupModuleCH",
            [](pyoptix::ProgramGroupDesc& self) {
                return pyoptix::Module{
                    self.program_group_desc.hitgroup.moduleCH};
            },
            [](pyoptix::ProgramGroupDesc& self,
               const pyoptix::Module& module) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
                self.program_group_desc.hitgroup.moduleCH =
                    module.module;
            })
        .def_property(
            "hitgroupEntryFunctionNameCH",
            [](pyoptix::ProgramGroupDesc& self) {
                return self.entryFunctionName0;
            },
            [](pyoptix::ProgramGroupDesc& self,
               const std::string& name) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
                self.entryFunctionName0 = name;
            })
        .def_property(
            "hitgroupModuleAH",
            [](pyoptix::ProgramGroupDesc& self) {
                return pyoptix::Module{
                    self.program_group_desc.hitgroup.moduleAH};
            },
            [](pyoptix::ProgramGroupDesc& self,
               const pyoptix::Module& module) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
                self.program_group_desc.hitgroup.moduleAH =
                    module.module;
            })
        .def_property(
            "hitgroupEntryFunctionNameAH",
            [](pyoptix::ProgramGroupDesc& self) {
                return self.entryFunctionName1;
            },
            [](pyoptix::ProgramGroupDesc& self,
               const std::string& name) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
                self.entryFunctionName1 = name;
            })
        .def_property(
            "hitgroupModuleIS",
            [](pyoptix::ProgramGroupDesc& self) {
                return pyoptix::Module{
                    self.program_group_desc.hitgroup.moduleIS};
            },
            [](pyoptix::ProgramGroupDesc& self,
               const pyoptix::Module& module) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
                self.program_group_desc.hitgroup.moduleIS =
                    module.module;
            })
        .def_property(
            "hitgroupEntryFunctionNameIS",
            [](pyoptix::ProgramGroupDesc& self) {
                return self.entryFunctionName2;
            },
            [](pyoptix::ProgramGroupDesc& self,
               const std::string& name) {
                self.program_group_desc.kind =
                    OPTIX_PROGRAM_GROUP_KIND_HITGROUP;
                self.entryFunctionName2 = name;
            });
#if OPTIX_VERSION >= 70400
    // OptixProgramGroupOptions wrapper (payload types were added in 7.4).
    // payloadType is write-only: the raw pointer inside the native struct
    // would dangle if returned to Python, so the getter is disabled and
    // setPayloadType() keeps the pointed-to storage alive on the wrapper.
    py::class_<pyoptix::ProgramGroupOptions>(m, "ProgramGroupOptions")
        .def(py::init<pyoptix::PayloadType>(),
             py::arg("payloadType") = pyoptix::PayloadType{})
        .def_property(
            "payloadType",
            // This doesnt do what you probably want it to so disable it
            //[](const pyoptix::ProgramGroupOptions& self)
            //{ return self.payload_type; },
            nullptr,
            [](pyoptix::ProgramGroupOptions& self,
               const pyoptix::PayloadType& payload_type) {
                self.setPayloadType(payload_type);
            });
#endif // OPTIX_VERSION >= 70400
    // OptixPipelineCompileOptions wrapper. IF_OPTIX71/COMMA are preprocessor
    // helpers that splice the usesPrimitiveTypeFlags ctor parameter in only
    // when building against OptiX >= 7.1.
    py::class_<pyoptix::PipelineCompileOptions>(m, "PipelineCompileOptions")
        .def(py::init<bool, uint32_t, int32_t, int32_t, uint32_t,
                      const char * IF_OPTIX71(COMMA int32_t)>(),
             py::arg("usesMotionBlur") = 0,
             py::arg("traversableGraphFlags") = 0u,
             py::arg("numPayloadValues") = 0,
             py::arg("numAttributeValues") = 0,
             py::arg("exceptionFlags") = 0u,
             py::arg("pipelineLaunchParamsVariableName") =
                 nullptr IF_OPTIX71(
                     COMMA py::arg("usesPrimitiveTypeFlags") = 0))
        .def_property(
            "usesMotionBlur",
            [](const pyoptix::PipelineCompileOptions& self) {
                return self.options.usesMotionBlur;
            },
            [](pyoptix::PipelineCompileOptions& self, bool val) {
                self.options.usesMotionBlur = val;
            })
        .def_property(
            "traversableGraphFlags",
            [](const pyoptix::PipelineCompileOptions& self) {
                return self.options.traversableGraphFlags;
            },
            [](pyoptix::PipelineCompileOptions& self, uint32_t val) {
                self.options.traversableGraphFlags = val;
            })
        .def_property(
            "numPayloadValues",
            [](const pyoptix::PipelineCompileOptions& self) {
                return self.options.numPayloadValues;
            },
            [](pyoptix::PipelineCompileOptions& self, int val) {
                self.options.numPayloadValues = val;
            })
        .def_property(
            "numAttributeValues",
            [](const pyoptix::PipelineCompileOptions& self) {
                return self.options.numAttributeValues;
            },
            [](pyoptix::PipelineCompileOptions& self, int val) {
                self.options.numAttributeValues = val;
            })
        .def_property(
            "exceptionFlags",
            [](const pyoptix::PipelineCompileOptions& self) {
                return self.options.exceptionFlags;
            },
            [](pyoptix::PipelineCompileOptions& self, uint32_t val) {
                self.options.exceptionFlags = val;
            })
        // The launch-params name is stored as an owned string on the wrapper
        // (not the raw const char* in the native struct), so def_readwrite is
        // safe here.
        .def_readwrite("pipelineLaunchParamsVariableName",
                       &pyoptix::PipelineCompileOptions::
                           pipelineLaunchParamsVariableName)
#if OPTIX_VERSION >= 70100
        .def_property(
            "usesPrimitiveTypeFlags",
            [](const pyoptix::PipelineCompileOptions& self) {
                return self.options.usesPrimitiveTypeFlags;
            },
            [](pyoptix::PipelineCompileOptions& self, uint32_t val) {
                self.options.usesPrimitiveTypeFlags = val;
            })
#endif
        ;
    // OptixPipelineLinkOptions wrapper. debugLevel was removed from the
    // native struct in OptiX 7.7, hence the version guards; before 7.1 the
    // DEFAULT enumerator did not exist, so LINEINFO is used as the fallback
    // default.
    py::class_<pyoptix::PipelineLinkOptions>(m, "PipelineLinkOptions")
        .def(py::init<uint32_t
#if OPTIX_VERSION < 70700
                      COMMA OptixCompileDebugLevel
#endif
                      >(),
             py::arg("maxTraceDepth") = 0u
#if OPTIX_VERSION < 70700
                 COMMA py::arg("debugLevel") =
                     IF_OPTIX71_ELSE(OPTIX_COMPILE_DEBUG_LEVEL_DEFAULT,
                                     OPTIX_COMPILE_DEBUG_LEVEL_LINEINFO)
#endif
             )
        .def_property(
            "maxTraceDepth",
            [](const pyoptix::PipelineLinkOptions& self) {
                return self.options.maxTraceDepth;
            },
            [](pyoptix::PipelineLinkOptions& self, uint32_t val) {
                self.options.maxTraceDepth = val;
            })
#if OPTIX_VERSION < 70700
        .def_property(
            "debugLevel",
            [](const pyoptix::PipelineLinkOptions& self) {
                return self.options.debugLevel;
            },
            [](pyoptix::PipelineLinkOptions& self,
               OptixCompileDebugLevel val) {
                self.options.debugLevel = val;
            })
#endif
        ;
    // OptixShaderBindingTable wrapper: plain pass-through accessors for the
    // record base addresses (CUdeviceptr), strides and counts. No ownership
    // is taken of the device memory the pointers refer to.
    py::class_<pyoptix::ShaderBindingTable>(m, "ShaderBindingTable")
        .def(py::init<CUdeviceptr, CUdeviceptr, CUdeviceptr, uint32_t,
                      uint32_t, CUdeviceptr, uint32_t, uint32_t,
                      CUdeviceptr, uint32_t, uint32_t>(),
             py::arg("raygenRecord") = 0, py::arg("exceptionRecord") = 0,
             py::arg("missRecordBase") = 0,
             py::arg("missRecordStrideInBytes") = 0,
             py::arg("missRecordCount") = 0,
             py::arg("hitgroupRecordBase") = 0,
             py::arg("hitgroupRecordStrideInBytes") = 0,
             py::arg("hitgroupRecordCount") = 0,
             py::arg("callablesRecordBase") = 0,
             py::arg("callablesRecordStrideInBytes") = 0,
             py::arg("callablesRecordCount") = 0)
        .def_property(
            "raygenRecord",
            [](const pyoptix::ShaderBindingTable& self) {
                return self.sbt.raygenRecord;
            },
            [](pyoptix::ShaderBindingTable& self, CUdeviceptr val) {
                self.sbt.raygenRecord = val;
            })
        .def_property(
            "exceptionRecord",
            [](const pyoptix::ShaderBindingTable& self) {
                return self.sbt.exceptionRecord;
            },
            [](pyoptix::ShaderBindingTable& self, CUdeviceptr val) {
                self.sbt.exceptionRecord = val;
            })
        .def_property(
            "missRecordBase",
            [](const pyoptix::ShaderBindingTable& self) {
                return self.sbt.missRecordBase;
            },
            [](pyoptix::ShaderBindingTable& self, CUdeviceptr val) {
                self.sbt.missRecordBase = val;
            })
        .def_property(
            "missRecordStrideInBytes",
            [](const pyoptix::ShaderBindingTable& self) {
                return self.sbt.missRecordStrideInBytes;
            },
            [](pyoptix::ShaderBindingTable& self, uint32_t val) {
                self.sbt.missRecordStrideInBytes = val;
            })
        .def_property(
            "missRecordCount",
            [](const pyoptix::ShaderBindingTable& self) {
                return self.sbt.missRecordCount;
            },
            [](pyoptix::ShaderBindingTable& self, uint32_t val) {
                self.sbt.missRecordCount = val;
            })
        .def_property(
            "hitgroupRecordBase",
            [](const pyoptix::ShaderBindingTable& self) {
                return self.sbt.hitgroupRecordBase;
            },
            [](pyoptix::ShaderBindingTable& self, CUdeviceptr val) {
                self.sbt.hitgroupRecordBase = val;
            })
        .def_property(
            "hitgroupRecordStrideInBytes",
            [](const pyoptix::ShaderBindingTable& self) {
                return self.sbt.hitgroupRecordStrideInBytes;
            },
            [](pyoptix::ShaderBindingTable& self, uint32_t val) {
                self.sbt.hitgroupRecordStrideInBytes = val;
            })
        .def_property(
            "hitgroupRecordCount",
            [](const pyoptix::ShaderBindingTable& self) {
                return self.sbt.hitgroupRecordCount;
            },
            [](pyoptix::ShaderBindingTable& self, uint32_t val) {
                self.sbt.hitgroupRecordCount = val;
            })
        .def_property(
            "callablesRecordBase",
            [](const pyoptix::ShaderBindingTable& self) {
                return self.sbt.callablesRecordBase;
            },
            [](pyoptix::ShaderBindingTable& self, CUdeviceptr val) {
                self.sbt.callablesRecordBase = val;
            })
        .def_property(
            "callablesRecordStrideInBytes",
            [](const pyoptix::ShaderBindingTable& self) {
                return self.sbt.callablesRecordStrideInBytes;
            },
            [](pyoptix::ShaderBindingTable& self, uint32_t val) {
                self.sbt.callablesRecordStrideInBytes = val;
            })
        .def_property(
            "callablesRecordCount",
            [](const pyoptix::ShaderBindingTable& self) {
                return self.sbt.callablesRecordCount;
            },
            [](pyoptix::ShaderBindingTable& self, uint32_t val) {
                self.sbt.callablesRecordCount = val;
            });
py::class_<pyoptix::StackSizes>(m, "StackSizes")
.def(py::init<uint32_t, uint32_t, uint32_t, uint32_t, uint32_t,
uint32_t, uint32_t>(),
py::arg("cssRG") = 0, py::arg("cssMS") = 0,
py::arg("cssCH") = 0, py::arg("cssAH") = 0,
py::arg("cssIS") = 0, py::arg("cssCC") = 0,
py::arg("dssDC") = 0)
.def_property(
"cssRG",
[](const pyoptix::StackSizes& self) {
return self.ss.cssRG;
},
[](pyoptix::StackSizes& self, uint32_t val) {
self.ss.cssRG = val;
})
.def_property(
"cssMS",
[](const pyoptix::StackSizes& self) {
return self.ss.cssMS;
},
[](pyoptix::StackSizes& self, uint32_t val) {
self.ss.cssMS = val;
})
.def_property(
"cssCH",
[](const pyoptix::StackSizes& self) {
return self.ss.cssCH;
},
[](pyoptix::StackSizes& self, uint32_t val) {
self.ss.cssCH = val;
})
.def_property(
"cssAH",
[](const pyoptix::StackSizes& self) {
return self.ss.cssAH;
},
[](pyoptix::StackSizes& self, uint32_t val) {
self.ss.cssAH = val;
})
.def_property(
"cssIS",
[](const pyoptix::StackSizes& self) {
return self.ss.cssIS;
},
[](pyoptix::StackSizes& self, uint32_t val) {
self.ss.cssIS = val;
})
.def_property(
"cssCC",
[](const pyoptix::StackSizes& self) {
return self.ss.cssCC;
},
[](pyoptix::StackSizes& self, uint32_t val) {
self.ss.cssCC = val;
})
.def_property(
"cssRG",
[](const pyoptix::StackSizes& self) {
return self.ss.cssRG;
},
[](pyoptix::StackSizes& self, uint32_t val) {
self.ss.cssRG = val;
});
#if OPTIX_VERSION >= 70100
    // OptixBuiltinISOptions wrapper (built-in intersection shaders arrived in
    // OptiX 7.1; buildFlags/curveEndcapFlags were added in 7.4, hence the
    // IF_OPTIX74 ctor splices and the nested version guard below).
    py::class_<pyoptix::BuiltinISOptions>(m, "BuiltinISOptions")
        .def(py::init<OptixPrimitiveType,
                      bool IF_OPTIX74(COMMA uint32_t)
                          IF_OPTIX74(COMMA uint32_t)>(),
             py::arg("builtinISModuleType") = OPTIX_PRIMITIVE_TYPE_TRIANGLE,
             py::arg("usesMotionBlur") = false IF_OPTIX74(
                 COMMA py::arg("buildFlags") = 0)
                 IF_OPTIX74(COMMA py::arg("curveEndcapFlags") = 0))
        .def_property(
            "builtinISModuleType",
            [](const pyoptix::BuiltinISOptions& self) {
                return self.options.builtinISModuleType;
            },
            [](pyoptix::BuiltinISOptions& self,
               OptixPrimitiveType val) {
                self.options.builtinISModuleType = val;
            })
        .def_property(
            "usesMotionBlur",
            [](const pyoptix::BuiltinISOptions& self) {
                return self.options.usesMotionBlur;
            },
            [](pyoptix::BuiltinISOptions& self, bool val) {
                self.options.usesMotionBlur = val;
            })
#if OPTIX_VERSION >= 70400
        .def_property(
            "buildFlags",
            [](const pyoptix::BuiltinISOptions& self) {
                return self.options.buildFlags;
            },
            [](pyoptix::BuiltinISOptions& self, uint32_t val) {
                self.options.buildFlags = val;
            })
        .def_property(
            "curveEndcapFlags",
            [](const pyoptix::BuiltinISOptions& self) {
                return self.options.curveEndcapFlags;
            },
            [](pyoptix::BuiltinISOptions& self, uint32_t val) {
                self.options.curveEndcapFlags = val;
            })
#endif // OPTIX_VERSION >= 70400
        ;
#endif // OPTIX_VERSION >= 70100
    // Opaque wrapper around the native 64-bit OptixTraversableHandle; the
    // factory init simply value-initializes (zero) the handle. The handle is
    // filled in by accel-build calls elsewhere.
    py::class_<OptixTraversableHandle>(m, "TraversableHandle")
        .def(py::init([]() {
            return std::unique_ptr<OptixTraversableHandle>(
                new OptixTraversableHandle{});
        }));
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/docs/_static/css/custom.css | CSS | .highlight .go {
color: #707070;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/docs/benchmark.py | Python | import datetime as dt
import os
import random
nfns = 4 # Functions per class
nargs = 4 # Arguments per function
def generate_dummy_code_pybind11(nclasses=10, nfns=4, nargs=4):
    """Generate C++ source for a dummy pybind11 extension module.

    Emits ``nclasses`` forward-declared classes, each with ``nfns`` member
    functions taking ``nargs`` pointer arguments, plus the matching pybind11
    bindings. Argument/return class indices are drawn from the global
    ``random`` state, so seed ``random`` for reproducible output.

    Args:
        nclasses: Number of classes to generate.
        nfns: Member functions per class (default matches the old module
            global ``nfns``).
        nargs: Arguments per function (default matches the old module
            global ``nargs``).

    Returns:
        The complete C++ translation unit as a single string.
    """
    decl = ""
    bindings = ""
    # Forward declarations so every member function can reference any class.
    for cl in range(nclasses):
        decl += f"class cl{cl:03};\n"
    decl += "\n"
    for cl in range(nclasses):
        # BUG FIX: this line emitted "class {cl:03} {" (e.g. "class 000 {"),
        # dropping the "cl" prefix used by the forward declarations and the
        # bindings, so the generated C++ could never compile.
        decl += f"class cl{cl:03} {{\n"
        decl += "public:\n"
        bindings += f'    py::class_<cl{cl:03}>(m, "cl{cl:03}")\n'
        for fn in range(nfns):
            ret = random.randint(0, nclasses - 1)
            params = [random.randint(0, nclasses - 1) for i in range(nargs)]
            decl += f"    cl{ret:03} *fn_{fn:03}("
            decl += ", ".join(f"cl{p:03} *" for p in params)
            decl += ");\n"
            bindings += f'        .def("fn_{fn:03}", &cl{cl:03}::fn_{fn:03})\n'
        decl += "};\n\n"
        bindings += "    ;\n"

    result = "#include <pybind11/pybind11.h>\n\n"
    result += "namespace py = pybind11;\n\n"
    result += decl + "\n"
    result += "PYBIND11_MODULE(example, m) {\n"
    result += bindings
    result += "}"
    return result
def generate_dummy_code_boost(nclasses=10, nfns=4, nargs=4):
    """Generate C++ source for a dummy Boost.Python extension module.

    Boost counterpart of :func:`generate_dummy_code_pybind11`: emits
    ``nclasses`` classes with ``nfns`` member functions of ``nargs`` pointer
    arguments each, plus Boost.Python bindings. Uses the global ``random``
    state; seed it for reproducible output.

    Args:
        nclasses: Number of classes to generate.
        nfns: Member functions per class (default matches the old module
            global ``nfns``).
        nargs: Arguments per function (default matches the old module
            global ``nargs``).

    Returns:
        The complete C++ translation unit as a single string.
    """
    decl = ""
    bindings = ""
    for cl in range(nclasses):
        decl += f"class cl{cl:03};\n"
    decl += "\n"
    for cl in range(nclasses):
        # f-string form of the old '"class cl%03i {\n" % cl' — identical
        # output, consistent with generate_dummy_code_pybind11.
        decl += f"class cl{cl:03} {{\n"
        decl += "public:\n"
        bindings += f'    py::class_<cl{cl:03}>("cl{cl:03}")\n'
        for fn in range(nfns):
            ret = random.randint(0, nclasses - 1)
            params = [random.randint(0, nclasses - 1) for i in range(nargs)]
            decl += f"    cl{ret:03} *fn_{fn:03}("
            decl += ", ".join(f"cl{p:03} *" for p in params)
            decl += ");\n"
            bindings += f'        .def("fn_{fn:03}", &cl{cl:03}::fn_{fn:03}, py::return_value_policy<py::manage_new_object>())\n'
        decl += "};\n\n"
        bindings += "    ;\n"

    result = "#include <boost/python.hpp>\n\n"
    result += "namespace py = boost::python;\n\n"
    result += decl + "\n"
    result += "BOOST_PYTHON_MODULE(example) {\n"
    result += bindings
    result += "}"
    return result
# Benchmark driver: for each generator, compile modules of exponentially
# growing size (1..512 classes) and print a C-style table of
# {function_count, compile_seconds, binary_bytes} rows.
# NOTE(review): the g++ flags (-undefined dynamic_lookup, the framework
# include path) are macOS-specific — presumably intentional for the original
# measurement machine; confirm before running elsewhere.
for codegen in [generate_dummy_code_pybind11, generate_dummy_code_boost]:
    print("{")
    for i in range(0, 10):
        nclasses = 2**i
        # Regenerate and overwrite the same translation unit each round.
        with open("test.cpp", "w") as f:
            f.write(codegen(nclasses))
        n1 = dt.datetime.now()
        os.system(
            "g++ -Os -shared -rdynamic -undefined dynamic_lookup "
            "-fvisibility=hidden -std=c++14 test.cpp -I include "
            "-I /System/Library/Frameworks/Python.framework/Headers -o test.so"
        )
        n2 = dt.datetime.now()
        elapsed = (n2 - n1).total_seconds()
        size = os.stat("test.so").st_size
        print("   {%i, %f, %i}," % (nclasses * nfns, elapsed, size))
    print("}")
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/docs/conf.py | Python | #!/usr/bin/env python3
#
# pybind11 documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 11 19:23:48 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import re
import subprocess
import sys
from pathlib import Path
# Absolute path of the docs/ directory; used by prepare()/clean_up() below.
DIR = Path(__file__).parent.resolve()

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "breathe",
    "sphinx_copybutton",
    "sphinxcontrib.rsvgconverter",
    "sphinxcontrib.moderncmakedomain",
]

# breathe consumes the doxygen XML produced by generate_doxygen_xml() below.
breathe_projects = {"pybind11": ".build/doxygenxml/"}
breathe_default_project = "pybind11"
breathe_domain_by_extension = {"h": "cpp"}

# Add any paths that contain templates here, relative to this directory.
templates_path = [".templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "pybind11"
copyright = "2017, Wenzel Jakob"
author = "Wenzel Jakob"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.

# Read the listed version by executing the package's _version.py (avoids
# importing the package itself at docs-build time).
with open("../pybind11/_version.py") as f:
    code = compile(f.read(), "../pybind11/_version.py", "exec")
loc = {}
exec(code, loc)

# The full version, including alpha/beta/rc tags.
version = loc["__version__"]

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on language = None and treats it as "en";
# consider setting "en" explicitly when the Sphinx pin allows.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [".build", "release.rst"]

# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "any"

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'monokai'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<version> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

html_css_files = [
    "css/custom.css",
]

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = "pybind11doc"

# -- Options for LaTeX output ---------------------------------------------

latex_engine = "pdflatex"

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    #
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    #
    # Additional stuff for the LaTeX preamble.
    # remove blank pages (between the title page and the TOC, etc.)
    "classoptions": ",openany,oneside",
    # Unicode declarations map characters appearing in the docs (arrows, pi,
    # emoji placeholders) onto printable LaTeX equivalents.
    "preamble": r"""
\usepackage{fontawesome}
\usepackage{textgreek}
\DeclareUnicodeCharacter{00A0}{}
\DeclareUnicodeCharacter{2194}{\faArrowsH}
\DeclareUnicodeCharacter{1F382}{\faBirthdayCake}
\DeclareUnicodeCharacter{1F355}{\faAdjust}
\DeclareUnicodeCharacter{0301}{'}
\DeclareUnicodeCharacter{03C0}{\textpi}

""",
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, "pybind11.tex", "pybind11 Documentation", "Wenzel Jakob", "manual"),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = 'pybind11-logo.png'

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pybind11", "pybind11 Documentation", [author], 1)]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "pybind11",
        "pybind11 Documentation",
        author,
        "pybind11",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# Default to C++ for unprefixed roles and code-block highlighting.
primary_domain = "cpp"

highlight_language = "cpp"
def generate_doxygen_xml(app):
    """Run doxygen in the Sphinx config dir to produce XML for breathe.

    Connected to the ``builder-inited`` event in :func:`setup`. Doxygen
    failures are reported on stderr but do not abort the Sphinx build
    (best-effort, as before).

    Args:
        app: The Sphinx application object (only ``confdir`` is used).
    """
    build_dir = os.path.join(app.confdir, ".build")
    # makedirs(exist_ok=True) avoids the check-then-create race of the old
    # os.path.exists() / os.mkdir() pair (and also creates parents if needed).
    os.makedirs(build_dir, exist_ok=True)

    try:
        # Probe for the doxygen binary first; raises OSError if missing.
        subprocess.call(["doxygen", "--version"])
        retcode = subprocess.call(["doxygen"], cwd=app.confdir)
        if retcode < 0:
            # Negative return codes indicate termination by a signal.
            sys.stderr.write(f"doxygen error code: {-retcode}\n")
    except OSError as e:
        sys.stderr.write(f"doxygen execution failed: {e}\n")
def prepare(app):
    """Copy the repository README into the docs tree as readme.rst.

    For the LaTeX builder the badge header is stripped and top-level section
    underlines are demoted to bold text, since they cannot nest inside
    index.rst.
    """
    contents = (DIR.parent / "README.rst").read_text()

    if app.builder.name == "latex":
        # Keep everything from the ".. start" marker onward (drops badges).
        contents = contents[contents.find(r".. start") :]

        # Demote "Title\n----" / "Title\n~~~~" headings to **Title**.
        contents = re.sub(r"^(.*)\n[-~]{3,}$", r"**\1**", contents, flags=re.MULTILINE)

    (DIR / "readme.rst").write_text(contents)
def clean_up(app, exception):  # noqa: ARG001
    """Delete the readme.rst that prepare() generated (build-finished hook)."""
    generated = DIR / "readme.rst"
    generated.unlink()
def setup(app):
    """Sphinx extension entry point: wire up doxygen and README handling."""
    # Connection order matters: the doxygen XML must exist before breathe
    # (triggered by the same event) can consume it.
    hooks = (
        ("builder-inited", generate_doxygen_xml),  # build doxygen XML
        ("builder-inited", prepare),               # copy the readme in
        ("build-finished", clean_up),              # remove generated readme
    )
    for event, callback in hooks:
        app.connect(event, callback)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/attr.h | C/C++ Header | /*
pybind11/attr.h: Infrastructure for processing custom
type and function attributes
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
#include "cast.h"
#include <functional>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
/// \addtogroup annotations
/// @{
/// Annotation for methods (carries the owning class so the method can be
/// registered against it)
struct is_method {
    handle class_;
    explicit is_method(const handle &c) : class_(c) {}
};

/// Annotation for setters (marks the second callable of a property pair)
struct is_setter {};

/// Annotation for operators
struct is_operator {};

/// Annotation for classes that cannot be subclassed
struct is_final {};

/// Annotation for parent scope
struct scope {
    handle value;
    explicit scope(const handle &s) : value(s) {}
};

/// Annotation for documentation
struct doc {
    const char *value;
    explicit doc(const char *value) : value(value) {}
};

/// Annotation for function names
struct name {
    const char *value;
    explicit name(const char *value) : value(value) {}
};

/// Annotation indicating that a function is an overload associated with a given "sibling"
struct sibling {
    handle value;
    explicit sibling(const handle &value) : value(value.ptr()) {}
};

/// Annotation indicating that a class derives from another given type
template <typename T>
struct base {
    PYBIND11_DEPRECATED(
        "base<T>() was deprecated in favor of specifying 'T' as a template argument to class_")
    base() = default;
};

/// Keep patient alive while nurse lives (argument indices; 0 = return value,
/// 1 = first argument / self)
template <size_t Nurse, size_t Patient>
struct keep_alive {};

/// Annotation indicating that a class is involved in a multiple inheritance relationship
struct multiple_inheritance {};

/// Annotation which enables dynamic attributes, i.e. adds `__dict__` to a class
struct dynamic_attr {};

/// Annotation which enables the buffer protocol for a type
struct buffer_protocol {};

/// Annotation which requests that a special metaclass is created for a type
struct metaclass {
    handle value;

    PYBIND11_DEPRECATED("py::metaclass() is no longer required. It's turned on by default now.")
    metaclass() = default;

    /// Override pybind11's default metaclass
    explicit metaclass(handle value) : value(value) {}
};

/// Specifies a custom callback with signature `void (PyHeapTypeObject*)` that
/// may be used to customize the Python type.
///
/// The callback is invoked immediately before `PyType_Ready`.
///
/// Note: This is an advanced interface, and uses of it may require changes to
/// work with later versions of pybind11.  You may wish to consult the
/// implementation of `make_new_python_type` in `detail/classes.h` to understand
/// the context in which the callback will be run.
struct custom_type_setup {
    using callback = std::function<void(PyHeapTypeObject *heap_type)>;

    explicit custom_type_setup(callback value) : value(std::move(value)) {}

    callback value;
};

/// Annotation that marks a class as local to the module:
struct module_local {
    const bool value;
    constexpr explicit module_local(bool v = true) : value(v) {}
};

/// Annotation to mark enums as an arithmetic type
struct arithmetic {};

/// Mark a function for addition at the beginning of the existing overload chain instead of the end
struct prepend {};
/** \rst
    A call policy which places one or more guard variables (``Ts...``) around the function call.
    For example, this definition:
    .. code-block:: cpp
        m.def("foo", foo, py::call_guard<T>());
    is equivalent to the following pseudocode:
    .. code-block:: cpp
        m.def("foo", [](args...) {
            T scope_guard;
            return foo(args...); // forwarded arguments
        });
 \endrst */
template <typename... Ts>
struct call_guard;
/// Base case: no guard types, nothing to construct around the call.
template <>
struct call_guard<> {
    using type = detail::void_type;
};
/// Single guard: `type` is the guard itself.
template <typename T>
struct call_guard<T> {
    static_assert(std::is_default_constructible<T>::value,
                  "The guard type must be default constructible");
    using type = T;
};
/// Recursive case: nest the remaining guards inside `type`, so guards are
/// default-constructed left-to-right and destroyed in reverse order.
template <typename T, typename... Ts>
struct call_guard<T, Ts...> {
    struct type {
        T guard{}; // Compose multiple guard types with left-to-right default-constructor order
        typename call_guard<Ts...>::type next{};
    };
};
/// @} annotations
PYBIND11_NAMESPACE_BEGIN(detail)
/* Forward declarations */
enum op_id : int;
enum op_type : int;
struct undefined_t;
template <op_id id, op_type ot, typename L = undefined_t, typename R = undefined_t>
struct op_;
/// Forward declaration; implements the keep_alive<Nurse, Patient> call policy
/// (used by the process_attribute specialization further below).
void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret);
/// Internal data structure which holds metadata about a keyword argument
struct argument_record {
    const char *name;  ///< Argument name
    const char *descr; ///< Human-readable version of the argument value
    handle value;      ///< Associated Python object
    bool convert : 1;  ///< True if the argument is allowed to convert when loading
    bool none : 1;     ///< True if None is allowed when loading
    argument_record(const char *name, const char *descr, handle value, bool convert, bool none)
        : name(name), descr(descr), value(value), convert(convert), none(none) {}
};
/// Internal data structure which holds metadata about a bound function (signature, overloads,
/// etc.)
struct function_record {
    function_record()
        : is_constructor(false), is_new_style_constructor(false), is_stateless(false),
          is_operator(false), is_method(false), is_setter(false), has_args(false),
          has_kwargs(false), prepend(false) {}
    /// Function name
    char *name = nullptr; /* why no C++ strings? They generate heavier code.. */
    /// User-specified documentation string
    char *doc = nullptr;
    /// Human-readable version of the function signature
    char *signature = nullptr;
    /// List of registered keyword arguments
    std::vector<argument_record> args;
    /// Pointer to lambda function which converts arguments and performs the actual call
    handle (*impl)(function_call &) = nullptr;
    /// Storage for the wrapped function pointer and captured data, if any
    void *data[3] = {};
    /// Pointer to custom destructor for 'data' (if needed)
    void (*free_data)(function_record *ptr) = nullptr;
    /// Return value policy associated with this function
    return_value_policy policy = return_value_policy::automatic;
    /// True if name == '__init__'
    bool is_constructor : 1;
    /// True if this is a new-style `__init__` defined in `detail/init.h`
    bool is_new_style_constructor : 1;
    /// True if this is a stateless function pointer
    bool is_stateless : 1;
    /// True if this is an operator (__add__), etc.
    bool is_operator : 1;
    /// True if this is a method
    bool is_method : 1;
    /// True if this is a setter
    bool is_setter : 1;
    /// True if the function has a '*args' argument
    bool has_args : 1;
    /// True if the function has a '**kwargs' argument
    bool has_kwargs : 1;
    /// True if this function is to be inserted at the beginning of the overload resolution chain
    bool prepend : 1;
    /// Number of arguments (including py::args and/or py::kwargs, if present).
    /// NOTE(review): intentionally not initialized in the constructor — presumably
    /// always assigned during function construction before use; confirm in cpp_function.
    std::uint16_t nargs;
    /// Number of leading positional arguments, which are terminated by a py::args or py::kwargs
    /// argument or by a py::kw_only annotation.
    std::uint16_t nargs_pos = 0;
    /// Number of leading arguments (counted in `nargs`) that are positional-only
    std::uint16_t nargs_pos_only = 0;
    /// Python method object
    PyMethodDef *def = nullptr;
    /// Python handle to the parent scope (a class or a module)
    handle scope;
    /// Python handle to the sibling function representing an overload chain
    handle sibling;
    /// Pointer to next overload
    function_record *next = nullptr;
};
/// Special data structure which (temporarily) holds metadata about a bound class
struct type_record {
    PYBIND11_NOINLINE type_record()
        : multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false),
          default_holder(true), module_local(false), is_final(false) {}
    /// Handle to the parent scope
    handle scope;
    /// Name of the class
    const char *name = nullptr;
    /// Pointer to RTTI type_info data structure
    const std::type_info *type = nullptr;
    /// How large is the underlying C++ type?
    size_t type_size = 0;
    /// What is the alignment of the underlying C++ type?
    size_t type_align = 0;
    /// How large is the type's holder?
    size_t holder_size = 0;
    /// The global operator new can be overridden with a class-specific variant
    void *(*operator_new)(size_t) = nullptr;
    /// Function pointer to class_<..>::init_instance
    void (*init_instance)(instance *, const void *) = nullptr;
    /// Function pointer to class_<..>::dealloc
    void (*dealloc)(detail::value_and_holder &) = nullptr;
    /// List of base classes of the newly created type
    list bases;
    /// Optional docstring
    const char *doc = nullptr;
    /// Custom metaclass (optional)
    handle metaclass;
    /// Custom type setup.
    custom_type_setup::callback custom_type_setup_callback;
    /// Multiple inheritance marker
    bool multiple_inheritance : 1;
    /// Does the class manage a __dict__?
    bool dynamic_attr : 1;
    /// Does the class implement the buffer protocol?
    bool buffer_protocol : 1;
    /// Is the default (unique_ptr) holder type used?
    bool default_holder : 1;
    /// Is the class definition local to the module shared object?
    bool module_local : 1;
    /// Is the class inheritable from python classes?
    bool is_final : 1;
    /// Record `base` as a base class of the type being described. `caster`
    /// (may be null) is appended to the base's implicit_casts list for this type.
    PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *) ) {
        auto *base_info = detail::get_type_info(base, false);
        if (!base_info) {
            // The base class must already be registered with pybind11.
            std::string tname(base.name());
            detail::clean_type_id(tname);
            pybind11_fail("generic_type: type \"" + std::string(name)
                          + "\" referenced unknown base type \"" + tname + "\"");
        }
        if (default_holder != base_info->default_holder) {
            // Base and derived class must agree on using the default holder or not.
            std::string tname(base.name());
            detail::clean_type_id(tname);
            pybind11_fail("generic_type: type \"" + std::string(name) + "\" "
                          + (default_holder ? "does not have" : "has")
                          + " a non-default holder type while its base \"" + tname + "\" "
                          + (base_info->default_holder ? "does not" : "does"));
        }
        bases.append((PyObject *) base_info->type);
        // If the base manages a __dict__, this type must enable dynamic attributes too.
#if PY_VERSION_HEX < 0x030B0000
        dynamic_attr |= base_info->type->tp_dictoffset != 0;
#else
        dynamic_attr |= (base_info->type->tp_flags & Py_TPFLAGS_MANAGED_DICT) != 0;
#endif
        if (caster) {
            base_info->implicit_casts.emplace_back(type, caster);
        }
    }
};
/// Pre-size the per-call argument buffers using the record's argument count.
inline function_call::function_call(const function_record &f, handle p) : func(f), parent(p) {
    args.reserve(f.nargs);
    args_convert.reserve(f.nargs);
}
/// Tag for a new-style `__init__` defined in `detail/init.h`
struct is_new_style_constructor {};
/**
 * Partial template specializations to process custom attributes provided to
 * cpp_function_ and class_. These are either used to initialize the respective
 * fields in the type_record and function_record data structures or executed at
 * runtime to deal with custom call policies (e.g. keep_alive).
 */
template <typename T, typename SFINAE = void>
struct process_attribute;
/// Base class providing no-op handlers; specializations override only what they need.
template <typename T>
struct process_attribute_default {
    /// Default implementation: do nothing
    static void init(const T &, function_record *) {}
    static void init(const T &, type_record *) {}
    static void precall(function_call &) {}
    static void postcall(function_call &, handle) {}
};
/// Process an attribute specifying the function's name
template <>
struct process_attribute<name> : process_attribute_default<name> {
    static void init(const name &n, function_record *r) { r->name = const_cast<char *>(n.value); }
};
/// Process an attribute specifying the function's docstring
template <>
struct process_attribute<doc> : process_attribute_default<doc> {
    static void init(const doc &n, function_record *r) { r->doc = const_cast<char *>(n.value); }
};
/// Process an attribute specifying the function's docstring (provided as a C-style string)
template <>
struct process_attribute<const char *> : process_attribute_default<const char *> {
    static void init(const char *d, function_record *r) { r->doc = const_cast<char *>(d); }
    static void init(const char *d, type_record *r) { r->doc = d; }
};
template <>
struct process_attribute<char *> : process_attribute<const char *> {};
/// Process an attribute indicating the function's return value policy
template <>
struct process_attribute<return_value_policy> : process_attribute_default<return_value_policy> {
    static void init(const return_value_policy &p, function_record *r) { r->policy = p; }
};
/// Process an attribute which indicates that this is an overloaded function associated with a
/// given sibling
template <>
struct process_attribute<sibling> : process_attribute_default<sibling> {
    static void init(const sibling &s, function_record *r) { r->sibling = s.value; }
};
/// Process an attribute which indicates that this function is a method
template <>
struct process_attribute<is_method> : process_attribute_default<is_method> {
    static void init(const is_method &s, function_record *r) {
        r->is_method = true;
        r->scope = s.class_;
    }
};
/// Process an attribute which indicates that this function is a setter
template <>
struct process_attribute<is_setter> : process_attribute_default<is_setter> {
    static void init(const is_setter &, function_record *r) { r->is_setter = true; }
};
/// Process an attribute which indicates the parent scope of a method
template <>
struct process_attribute<scope> : process_attribute_default<scope> {
    static void init(const scope &s, function_record *r) { r->scope = s.value; }
};
/// Process an attribute which indicates that this function is an operator
template <>
struct process_attribute<is_operator> : process_attribute_default<is_operator> {
    static void init(const is_operator &, function_record *r) { r->is_operator = true; }
};
/// Process the tag marking a new-style `__init__` (see detail/init.h)
template <>
struct process_attribute<is_new_style_constructor>
    : process_attribute_default<is_new_style_constructor> {
    static void init(const is_new_style_constructor &, function_record *r) {
        r->is_new_style_constructor = true;
    }
};
/// Reject an unnamed argument that appears after the positional section has
/// been closed off by a py::kw_only() annotation or a py::args argument.
inline void check_kw_only_arg(const arg &a, function_record *r) {
    const bool past_positional_section = r->args.size() > r->nargs_pos;
    const bool is_unnamed = (a.name == nullptr) || (a.name[0] == '\0');
    if (past_positional_section && is_unnamed) {
        pybind11_fail("arg(): cannot specify an unnamed argument after a kw_only() annotation or "
                      "args() argument");
    }
}
/// Ensure a method's argument list begins with an implicit "self" record;
/// does nothing for free functions or when arguments were already recorded.
inline void append_self_arg_if_needed(function_record *r) {
    if (!r->is_method || !r->args.empty()) {
        return;
    }
    r->args.emplace_back("self", nullptr, handle(), /*convert=*/true, /*none=*/false);
}
/// Process a keyword argument attribute (*without* a default value)
template <>
struct process_attribute<arg> : process_attribute_default<arg> {
    static void init(const arg &a, function_record *r) {
        // Methods get an implicit leading "self" argument record.
        append_self_arg_if_needed(r);
        r->args.emplace_back(a.name, nullptr, handle(), !a.flag_noconvert, a.flag_none);
        // Unnamed arguments are not allowed after kw_only()/args().
        check_kw_only_arg(a, r);
    }
};
/// Process a keyword argument attribute (*with* a default value)
template <>
struct process_attribute<arg_v> : process_attribute_default<arg_v> {
    static void init(const arg_v &a, function_record *r) {
        // Methods get an implicit leading "self" argument record; use the shared
        // helper so this stays consistent with process_attribute<arg>::init
        // (previously duplicated inline here).
        append_self_arg_if_needed(r);
        if (!a.value) {
            // The default value could not be converted to a Python object,
            // typically because its type has not been registered yet.
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
            // Build a description of the offending argument for the error message.
            std::string descr("'");
            if (a.name) {
                descr += std::string(a.name) + ": ";
            }
            descr += a.type + "'";
            if (r->is_method) {
                if (r->name) {
                    descr += " in method '" + (std::string) str(r->scope) + "."
                             + (std::string) r->name + "'";
                } else {
                    descr += " in method of '" + (std::string) str(r->scope) + "'";
                }
            } else if (r->name) {
                descr += " in function '" + (std::string) r->name + "'";
            }
            pybind11_fail("arg(): could not convert default argument " + descr
                          + " into a Python object (type not registered yet?)");
#else
            pybind11_fail("arg(): could not convert default argument "
                          "into a Python object (type not registered yet?). "
                          "#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for "
                          "more information.");
#endif
        }
        // inc_ref(): the argument record keeps its own reference to the default value.
        r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none);
        check_kw_only_arg(a, r);
    }
};
/// Process a keyword-only-arguments-follow pseudo argument
template <>
struct process_attribute<kw_only> : process_attribute_default<kw_only> {
    static void init(const kw_only &, function_record *r) {
        append_self_arg_if_needed(r);
        if (r->has_args && r->nargs_pos != static_cast<std::uint16_t>(r->args.size())) {
            pybind11_fail("Mismatched args() and kw_only(): they must occur at the same relative "
                          "argument location (or omit kw_only() entirely)");
        }
        // Everything recorded so far is positional; later arguments are keyword-only.
        r->nargs_pos = static_cast<std::uint16_t>(r->args.size());
    }
};
/// Process a positional-only-argument maker
template <>
struct process_attribute<pos_only> : process_attribute_default<pos_only> {
    static void init(const pos_only &, function_record *r) {
        append_self_arg_if_needed(r);
        // Everything recorded so far is positional-only.
        r->nargs_pos_only = static_cast<std::uint16_t>(r->args.size());
        if (r->nargs_pos_only > r->nargs_pos) {
            pybind11_fail("pos_only(): cannot follow a py::args() argument");
        }
        // It also can't follow a kw_only, but a static_assert in pybind11.h checks that
    }
};
/// Process a parent class attribute. Single inheritance only (class_ itself already guarantees
/// that)
template <typename T>
struct process_attribute<T, enable_if_t<is_pyobject<T>::value>>
    : process_attribute_default<handle> {
    static void init(const handle &h, type_record *r) { r->bases.append(h); }
};
/// Process a parent class attribute (deprecated, does not support multiple inheritance)
template <typename T>
struct process_attribute<base<T>> : process_attribute_default<base<T>> {
    static void init(const base<T> &, type_record *r) { r->add_base(typeid(T), nullptr); }
};
/// Process a multiple inheritance attribute
template <>
struct process_attribute<multiple_inheritance> : process_attribute_default<multiple_inheritance> {
    static void init(const multiple_inheritance &, type_record *r) {
        r->multiple_inheritance = true;
    }
};
/// Process a dynamic-attributes attribute (adds `__dict__` to the class)
template <>
struct process_attribute<dynamic_attr> : process_attribute_default<dynamic_attr> {
    static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; }
};
/// Process a custom type-setup callback attribute
template <>
struct process_attribute<custom_type_setup> {
    static void init(const custom_type_setup &value, type_record *r) {
        r->custom_type_setup_callback = value.value;
    }
};
/// Process a final-class attribute (class cannot be subclassed)
template <>
struct process_attribute<is_final> : process_attribute_default<is_final> {
    static void init(const is_final &, type_record *r) { r->is_final = true; }
};
/// Process a buffer-protocol attribute
template <>
struct process_attribute<buffer_protocol> : process_attribute_default<buffer_protocol> {
    static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; }
};
/// Process a custom-metaclass attribute
template <>
struct process_attribute<metaclass> : process_attribute_default<metaclass> {
    static void init(const metaclass &m, type_record *r) { r->metaclass = m.value; }
};
/// Process a module-local attribute
template <>
struct process_attribute<module_local> : process_attribute_default<module_local> {
    static void init(const module_local &l, type_record *r) { r->module_local = l.value; }
};
/// Process a 'prepend' attribute, putting this at the beginning of the overload chain
template <>
struct process_attribute<prepend> : process_attribute_default<prepend> {
    static void init(const prepend &, function_record *r) { r->prepend = true; }
};
/// Process an 'arithmetic' attribute for enums (does nothing here)
template <>
struct process_attribute<arithmetic> : process_attribute_default<arithmetic> {};
/// call_guard needs no record initialization; it is consumed via extract_guard_t below.
template <typename... Ts>
struct process_attribute<call_guard<Ts...>> : process_attribute_default<call_guard<Ts...>> {};
/**
 * Process a keep_alive call policy -- invokes keep_alive_impl during the
 * pre-call handler if both Nurse, Patient != 0 and use the post-call handler
 * otherwise
 */
template <size_t Nurse, size_t Patient>
struct process_attribute<keep_alive<Nurse, Patient>>
    : public process_attribute_default<keep_alive<Nurse, Patient>> {
    // Both indices non-zero: tie lifetimes before the call (ret is not needed).
    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
    static void precall(function_call &call) {
        keep_alive_impl(Nurse, Patient, call, handle());
    }
    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
    static void postcall(function_call &, handle) {}
    // Index 0 refers to the return value, so the policy must run after the call.
    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
    static void precall(function_call &) {}
    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
    static void postcall(function_call &call, handle ret) {
        keep_alive_impl(Nurse, Patient, call, ret);
    }
};
/// Recursively iterate over variadic template arguments
template <typename... Args>
struct process_attributes {
    static void init(const Args &...args, function_record *r) {
        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r);
        PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r);
        // Braced-init-list expansion guarantees left-to-right evaluation order.
        using expander = int[];
        (void) expander{
            0, ((void) process_attribute<typename std::decay<Args>::type>::init(args, r), 0)...};
    }
    static void init(const Args &...args, type_record *r) {
        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r);
        PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r);
        using expander = int[];
        (void) expander{0,
                        (process_attribute<typename std::decay<Args>::type>::init(args, r), 0)...};
    }
    static void precall(function_call &call) {
        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call);
        using expander = int[];
        (void) expander{0,
                        (process_attribute<typename std::decay<Args>::type>::precall(call), 0)...};
    }
    static void postcall(function_call &call, handle fn_ret) {
        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call, fn_ret);
        PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(fn_ret);
        using expander = int[];
        (void) expander{
            0, (process_attribute<typename std::decay<Args>::type>::postcall(call, fn_ret), 0)...};
    }
};
template <typename T>
using is_call_guard = is_instantiation<call_guard, T>;
/// Extract the ``type`` from the first `call_guard` in `Extras...` (or `void_type` if none found)
template <typename... Extra>
using extract_guard_t = typename exactly_one_t<is_call_guard, call_guard<>, Extra...>::type;
/// Check the number of named arguments at compile time
template <typename... Extra,
          size_t named = constexpr_sum(std::is_base_of<arg, Extra>::value...),
          size_t self = constexpr_sum(std::is_same<is_method, Extra>::value...)>
constexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) {
    PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(nargs, has_args, has_kwargs);
    // Either no arguments are named, or every argument is accounted for.
    return named == 0 || (self + named + size_t(has_args) + size_t(has_kwargs)) == nargs;
}
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/buffer_info.h | C/C++ Header | /*
pybind11/buffer_info.h: Python buffer object interface
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Default, C-style (row-major) strides: the last dimension is contiguous and
// each outer stride is the product of all inner extents times the item size.
inline std::vector<ssize_t> c_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
    const size_t ndim = shape.size();
    std::vector<ssize_t> strides(ndim, itemsize);
    // Accumulate from the innermost dimension outward; the loop body never runs
    // for ndim <= 1, so no explicit emptiness guard is required.
    for (size_t i = ndim; i > 1; --i) {
        strides[i - 2] = strides[i - 1] * shape[i - 1];
    }
    return strides;
}
// F-style (column-major) strides; default when constructing an array_t with
// `ExtraFlags & f_style`. The first dimension is contiguous and each later
// stride is the running product of the preceding extents times the item size.
inline std::vector<ssize_t> f_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
    std::vector<ssize_t> strides;
    strides.reserve(shape.size());
    ssize_t running = itemsize;
    for (const ssize_t extent : shape) {
        strides.push_back(running);
        running *= extent;
    }
    return strides;
}
/// Trait that checks whether a buffer's item format/size matches a C++ type T;
/// specializations are defined at the bottom of this header.
template <typename T, typename SFINAE = void>
struct compare_buffer_info;
PYBIND11_NAMESPACE_END(detail)
/// Information record describing a Python buffer object
struct buffer_info {
    void *ptr = nullptr;          // Pointer to the underlying storage
    ssize_t itemsize = 0;         // Size of individual items in bytes
    ssize_t size = 0;             // Total number of entries
    std::string format;           // For homogeneous buffers, this should be set to
                                  // format_descriptor<T>::format()
    ssize_t ndim = 0;             // Number of dimensions
    std::vector<ssize_t> shape;   // Shape of the tensor (1 entry per dimension)
    std::vector<ssize_t> strides; // Number of bytes between adjacent entries
                                  // (for each per dimension)
    bool readonly = false;        // flag to indicate if the underlying storage may be written to
    buffer_info() = default;
    /// Fully explicit constructor; `size` is derived as the product of all extents.
    buffer_info(void *ptr,
                ssize_t itemsize,
                const std::string &format,
                ssize_t ndim,
                detail::any_container<ssize_t> shape_in,
                detail::any_container<ssize_t> strides_in,
                bool readonly = false)
        : ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim),
          shape(std::move(shape_in)), strides(std::move(strides_in)), readonly(readonly) {
        if (ndim != (ssize_t) shape.size() || ndim != (ssize_t) strides.size()) {
            pybind11_fail("buffer_info: ndim doesn't match shape and/or strides length");
        }
        for (size_t i = 0; i < (size_t) ndim; ++i) {
            size *= shape[i];
        }
    }
    /// Typed constructor: itemsize/format/ndim are deduced from T and the shape.
    template <typename T>
    buffer_info(T *ptr,
                detail::any_container<ssize_t> shape_in,
                detail::any_container<ssize_t> strides_in,
                bool readonly = false)
        : buffer_info(private_ctr_tag(),
                      ptr,
                      sizeof(T),
                      format_descriptor<T>::format(),
                      static_cast<ssize_t>(shape_in->size()),
                      std::move(shape_in),
                      std::move(strides_in),
                      readonly) {}
    /// One-dimensional, contiguous untyped buffer.
    buffer_info(void *ptr,
                ssize_t itemsize,
                const std::string &format,
                ssize_t size,
                bool readonly = false)
        : buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}, readonly) {}
    /// One-dimensional, contiguous typed buffer.
    template <typename T>
    buffer_info(T *ptr, ssize_t size, bool readonly = false)
        : buffer_info(ptr, sizeof(T), format_descriptor<T>::format(), size, readonly) {}
    /// One-dimensional typed buffer over const data (read-only by default).
    template <typename T>
    buffer_info(const T *ptr, ssize_t size, bool readonly = true)
        : buffer_info(
            const_cast<T *>(ptr), sizeof(T), format_descriptor<T>::format(), size, readonly) {}
    /// Wrap an existing Py_buffer; when `ownview` is true, this object releases
    /// and deletes the view in its destructor.
    explicit buffer_info(Py_buffer *view, bool ownview = true)
        : buffer_info(
            view->buf,
            view->itemsize,
            view->format,
            view->ndim,
            {view->shape, view->shape + view->ndim},
            /* Though buffer::request() requests PyBUF_STRIDES, ctypes objects
             * ignore this flag and return a view with NULL strides.
             * When strides are NULL, build them manually. */
            view->strides
                ? std::vector<ssize_t>(view->strides, view->strides + view->ndim)
                : detail::c_strides({view->shape, view->shape + view->ndim}, view->itemsize),
            (view->readonly != 0)) {
        // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
        this->m_view = view;
        // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
        this->ownview = ownview;
    }
    // Non-copyable: the record may uniquely own a Py_buffer view.
    buffer_info(const buffer_info &) = delete;
    buffer_info &operator=(const buffer_info &) = delete;
    buffer_info(buffer_info &&other) noexcept { (*this) = std::move(other); }
    /// Move assignment: swapping m_view/ownview (rather than overwriting) means
    /// any view this object previously owned ends up in `rhs` and is released
    /// by rhs's destructor.
    buffer_info &operator=(buffer_info &&rhs) noexcept {
        ptr = rhs.ptr;
        itemsize = rhs.itemsize;
        size = rhs.size;
        format = std::move(rhs.format);
        ndim = rhs.ndim;
        shape = std::move(rhs.shape);
        strides = std::move(rhs.strides);
        std::swap(m_view, rhs.m_view);
        std::swap(ownview, rhs.ownview);
        readonly = rhs.readonly;
        return *this;
    }
    /// Release and delete the wrapped Py_buffer only if we own it.
    ~buffer_info() {
        if (m_view && ownview) {
            PyBuffer_Release(m_view);
            delete m_view;
        }
    }
    Py_buffer *view() const { return m_view; }
    Py_buffer *&view() { return m_view; }
    /* True if the buffer item type is equivalent to `T`. */
    // To define "equivalent" by example:
    // `buffer_info::item_type_is_equivalent_to<int>(b)` and
    // `buffer_info::item_type_is_equivalent_to<long>(b)` may both be true
    // on some platforms, but `int` and `unsigned` will never be equivalent.
    // For the ground truth, please inspect `detail::compare_buffer_info<>`.
    template <typename T>
    bool item_type_is_equivalent_to() const {
        return detail::compare_buffer_info<T>::compare(*this);
    }
private:
    // Tag type disambiguating the delegating constructor below from the public ones.
    struct private_ctr_tag {};
    buffer_info(private_ctr_tag,
                void *ptr,
                ssize_t itemsize,
                const std::string &format,
                ssize_t ndim,
                detail::any_container<ssize_t> &&shape_in,
                detail::any_container<ssize_t> &&strides_in,
                bool readonly)
        : buffer_info(
            ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in), readonly) {}
    Py_buffer *m_view = nullptr; // Wrapped Python buffer view, if any
    bool ownview = false;        // True if the destructor must release m_view
};
PYBIND11_NAMESPACE_BEGIN(detail)
/// Generic fallback: require an exact match of format string and item size.
template <typename T, typename SFINAE>
struct compare_buffer_info {
    static bool compare(const buffer_info &b) {
        // NOLINTNEXTLINE(bugprone-sizeof-expression) Needed for `PyObject *`
        return b.format == format_descriptor<T>::format() && b.itemsize == (ssize_t) sizeof(T);
    }
};
/// Integral types: besides the exact format code, also accept "l"/"L" when
/// sizeof(T) == sizeof(long) and "n"/"N" when sizeof(T) == sizeof(size_t)
/// (signed/unsigned spelling chosen by std::is_unsigned).
template <typename T>
struct compare_buffer_info<T, detail::enable_if_t<std::is_integral<T>::value>> {
    static bool compare(const buffer_info &b) {
        return (size_t) b.itemsize == sizeof(T)
               && (b.format == format_descriptor<T>::value
                   || ((sizeof(T) == sizeof(long))
                       && b.format == (std::is_unsigned<T>::value ? "L" : "l"))
                   || ((sizeof(T) == sizeof(size_t))
                       && b.format == (std::is_unsigned<T>::value ? "N" : "n")));
    }
};
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/cast.h | C/C++ Header | /*
pybind11/cast.h: Partial template specializations to cast between
C++ and Python types
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
#include "detail/descr.h"
#include "detail/type_caster_base.h"
#include "detail/typeid.h"
#include "pytypes.h"
#include <array>
#include <cstring>
#include <functional>
#include <iosfwd>
#include <iterator>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
PYBIND11_NAMESPACE_BEGIN(detail)
/// Primary type caster template; the unspecialized case inherits the generic
/// polymorphic machinery from type_caster_base.
template <typename type, typename SFINAE = void>
class type_caster : public type_caster_base<type> {};
/// Selects the caster for intrinsic_t<type> (presumably the cv/ref/pointer-stripped
/// type — see detail/common for intrinsic_t).
template <typename type>
using make_caster = type_caster<intrinsic_t<type>>;
// Shortcut for calling a caster's `cast_op_type` cast operator for casting a type_caster to a T
template <typename T>
typename make_caster<T>::template cast_op_type<T> cast_op(make_caster<T> &caster) {
    return caster.operator typename make_caster<T>::template cast_op_type<T>();
}
/// Rvalue overload: invokes the cast operator for the rvalue-reference-qualified
/// operator type so the value can be moved out of the caster.
template <typename T>
typename make_caster<T>::template cast_op_type<typename std::add_rvalue_reference<T>::type>
cast_op(make_caster<T> &&caster) {
    return std::move(caster).operator typename make_caster<T>::
        template cast_op_type<typename std::add_rvalue_reference<T>::type>();
}
/// Caster for std::reference_wrapper<T>: delegates loading to T's caster and
/// casts by passing the wrapped object's address (never taking ownership).
template <typename type>
class type_caster<std::reference_wrapper<type>> {
private:
    using caster_t = make_caster<type>;
    caster_t subcaster;
    using reference_t = type &;
    using subcaster_cast_op_type = typename caster_t::template cast_op_type<reference_t>;
    static_assert(
        std::is_same<typename std::remove_const<type>::type &, subcaster_cast_op_type>::value
            || std::is_same<reference_t, subcaster_cast_op_type>::value,
        "std::reference_wrapper<T> caster requires T to have a caster with an "
        "`operator T &()` or `operator const T &()`");
public:
    bool load(handle src, bool convert) { return subcaster.load(src, convert); }
    static constexpr auto name = caster_t::name;
    static handle
    cast(const std::reference_wrapper<type> &src, return_value_policy policy, handle parent) {
        // It is definitely wrong to take ownership of this pointer, so mask that rvp
        if (policy == return_value_policy::take_ownership
            || policy == return_value_policy::automatic) {
            policy = return_value_policy::automatic_reference;
        }
        return caster_t::cast(&src.get(), policy, parent);
    }
    template <typename T>
    using cast_op_type = std::reference_wrapper<type>;
    explicit operator std::reference_wrapper<type>() { return cast_op<type &>(subcaster); }
};
/// Boilerplate macro for simple type casters: declares the `value` member and
/// Python-side `name`, pointer/lvalue/rvalue conversion operators, and a
/// `cast()` overload for pointer sources that returns None for null and honors
/// return_value_policy::take_ownership by deleting the source after casting.
/// (No comments inside the macro body: `//` before a line continuation would
/// swallow the backslash.)
#define PYBIND11_TYPE_CASTER(type, py_name)                                                       \
protected:                                                                                        \
    type value;                                                                                   \
                                                                                                  \
public:                                                                                           \
    static constexpr auto name = py_name;                                                         \
    template <typename T_,                                                                        \
              ::pybind11::detail::enable_if_t<                                                    \
                  std::is_same<type, ::pybind11::detail::remove_cv_t<T_>>::value,                 \
                  int>                                                                            \
              = 0>                                                                                \
    static ::pybind11::handle cast(                                                               \
        T_ *src, ::pybind11::return_value_policy policy, ::pybind11::handle parent) {             \
        if (!src)                                                                                 \
            return ::pybind11::none().release();                                                  \
        if (policy == ::pybind11::return_value_policy::take_ownership) {                          \
            auto h = cast(std::move(*src), policy, parent);                                       \
            delete src;                                                                           \
            return h;                                                                             \
        }                                                                                         \
        return cast(*src, policy, parent);                                                        \
    }                                                                                             \
    operator type *() { return &value; } /* NOLINT(bugprone-macro-parentheses) */                 \
    operator type &() { return value; } /* NOLINT(bugprone-macro-parentheses) */                  \
    operator type &&() && { return std::move(value); } /* NOLINT(bugprone-macro-parentheses) */   \
    template <typename T_>                                                                        \
    using cast_op_type = ::pybind11::detail::movable_cast_op_type<T_>
/// True for the character types underlying std::string and its wide/unicode variants.
template <typename CharT>
using is_std_char_type = any_of<std::is_same<CharT, char>, /* std::string */
#if defined(PYBIND11_HAS_U8STRING)
                                std::is_same<CharT, char8_t>, /* std::u8string */
#endif
                                std::is_same<CharT, char16_t>, /* std::u16string */
                                std::is_same<CharT, char32_t>, /* std::u32string */
                                std::is_same<CharT, wchar_t> /* std::wstring */
                                >;
// Type caster for all arithmetic C++ types (integral and floating-point), excluding the
// character types covered by is_std_char_type (those go through the string/char casters).
template <typename T>
struct type_caster<T, enable_if_t<std::is_arithmetic<T>::value && !is_std_char_type<T>::value>> {
    // Intermediate "Python-side" C type used during conversion:
    //   floating-point T -> double; signed integral -> long or long long (whichever holds T);
    //   unsigned integral -> unsigned counterpart of the signed choice.
    using _py_type_0 = conditional_t<sizeof(T) <= sizeof(long), long, long long>;
    using _py_type_1 = conditional_t<std::is_signed<T>::value,
                                     _py_type_0,
                                     typename std::make_unsigned<_py_type_0>::type>;
    using py_type = conditional_t<std::is_floating_point<T>::value, double, _py_type_1>;

public:
    // Load a Python int/float into `value`. With convert=false only exact-type (or
    // __index__-compatible, for integers) inputs are accepted; with convert=true
    // a fallback through PyNumber_Float/PyNumber_Long is attempted on failure.
    bool load(handle src, bool convert) {
        py_type py_value;

        if (!src) {
            return false;
        }

#if !defined(PYPY_VERSION)
        auto index_check = [](PyObject *o) { return PyIndex_Check(o); };
#else
        // In PyPy 7.3.3, `PyIndex_Check` is implemented by calling `__index__`,
        // while CPython only considers the existence of `nb_index`/`__index__`.
        auto index_check = [](PyObject *o) { return hasattr(o, "__index__"); };
#endif

        if (std::is_floating_point<T>::value) {
            // Floats accept any number when converting; otherwise only exact floats.
            if (convert || PyFloat_Check(src.ptr())) {
                py_value = (py_type) PyFloat_AsDouble(src.ptr());
            } else {
                return false;
            }
        } else if (PyFloat_Check(src.ptr())
                   || (!convert && !PYBIND11_LONG_CHECK(src.ptr()) && !index_check(src.ptr()))) {
            // Integral targets never implicitly accept floats; without convert they also
            // require an int (or an object implementing __index__).
            return false;
        } else {
            handle src_or_index = src;
            // PyPy: 7.3.7's 3.8 does not implement PyLong_*'s __index__ calls.
#if PY_VERSION_HEX < 0x03080000 || defined(PYPY_VERSION)
            object index;
            if (!PYBIND11_LONG_CHECK(src.ptr())) { // So: index_check(src.ptr())
                index = reinterpret_steal<object>(PyNumber_Index(src.ptr()));
                if (!index) {
                    PyErr_Clear();
                    if (!convert)
                        return false;
                } else {
                    src_or_index = index;
                }
            }
#endif
            if (std::is_unsigned<py_type>::value) {
                py_value = as_unsigned<py_type>(src_or_index.ptr());
            } else { // signed integer:
                py_value = sizeof(T) <= sizeof(long)
                               ? (py_type) PyLong_AsLong(src_or_index.ptr())
                               : (py_type) PYBIND11_LONG_AS_LONGLONG(src_or_index.ptr());
            }
        }

        // Python API reported an error
        bool py_err = py_value == (py_type) -1 && PyErr_Occurred();

        // Check to see if the conversion is valid (integers should match exactly)
        // Signed/unsigned checks happen elsewhere
        if (py_err
            || (std::is_integral<T>::value && sizeof(py_type) != sizeof(T)
                && py_value != (py_type) (T) py_value)) {
            PyErr_Clear();
            if (py_err && convert && (PyNumber_Check(src.ptr()) != 0)) {
                // Last resort in convert mode: coerce through the numeric protocol and retry
                // non-converting on the result.
                auto tmp = reinterpret_steal<object>(std::is_floating_point<T>::value
                                                         ? PyNumber_Float(src.ptr())
                                                         : PyNumber_Long(src.ptr()));
                PyErr_Clear();
                return load(tmp, false);
            }
            return false;
        }

        value = (T) py_value;
        return true;
    }

    // The cast() overloads below select, via SFINAE, the narrowest CPython constructor
    // that can represent U without loss.
    template <typename U = T>
    static typename std::enable_if<std::is_floating_point<U>::value, handle>::type
    cast(U src, return_value_policy /* policy */, handle /* parent */) {
        return PyFloat_FromDouble((double) src);
    }

    template <typename U = T>
    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_signed<U>::value
                                       && (sizeof(U) <= sizeof(long)),
                                   handle>::type
    cast(U src, return_value_policy /* policy */, handle /* parent */) {
        return PYBIND11_LONG_FROM_SIGNED((long) src);
    }

    template <typename U = T>
    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_unsigned<U>::value
                                       && (sizeof(U) <= sizeof(unsigned long)),
                                   handle>::type
    cast(U src, return_value_policy /* policy */, handle /* parent */) {
        return PYBIND11_LONG_FROM_UNSIGNED((unsigned long) src);
    }

    template <typename U = T>
    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_signed<U>::value
                                       && (sizeof(U) > sizeof(long)),
                                   handle>::type
    cast(U src, return_value_policy /* policy */, handle /* parent */) {
        return PyLong_FromLongLong((long long) src);
    }

    template <typename U = T>
    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_unsigned<U>::value
                                       && (sizeof(U) > sizeof(unsigned long)),
                                   handle>::type
    cast(U src, return_value_policy /* policy */, handle /* parent */) {
        return PyLong_FromUnsignedLongLong((unsigned long long) src);
    }

    PYBIND11_TYPE_CASTER(T, const_name<std::is_integral<T>::value>("int", "float"));
};
// Caster base for "value-less" C++ types: the only accepted Python input is `None`,
// and casting back to Python always produces `None`. Used below for `void_type`
// and `std::nullptr_t`.
template <typename T>
struct void_caster {
public:
    // Succeed exactly when `src` is a valid handle referring to Python `None`;
    // no C++ value needs to be stored.
    bool load(handle src, bool) { return src && src.is_none(); }
    // Any value of T maps to `None` (ownership transferred via release()).
    static handle cast(T, return_value_policy /* policy */, handle /* parent */) {
        return none().release();
    }
    PYBIND11_TYPE_CASTER(T, const_name("None"));
};

template <>
class type_caster<void_type> : public void_caster<void_type> {};
// Caster for `void *`: accepts None (-> nullptr), capsules, and registered C++
// instances (extracting their raw pointer); casts back as a capsule, or None for null.
template <>
class type_caster<void> : public type_caster<void_type> {
public:
    using type_caster<void_type>::cast;

    bool load(handle h, bool) {
        if (!h) {
            return false;
        }
        if (h.is_none()) {
            // None maps to a null pointer.
            value = nullptr;
            return true;
        }

        /* Check if this is a capsule */
        if (isinstance<capsule>(h)) {
            value = reinterpret_borrow<capsule>(h);
            return true;
        }

        /* Check if this is a C++ type */
        const auto &bases = all_type_info((PyTypeObject *) type::handle_of(h).ptr());
        if (bases.size() == 1) { // Only allowing loading from a single-value type
            value = values_and_holders(reinterpret_cast<instance *>(h.ptr())).begin()->value_ptr();
            return true;
        }

        /* Fail */
        return false;
    }

    static handle cast(const void *ptr, return_value_policy /* policy */, handle /* parent */) {
        if (ptr) {
            return capsule(ptr).release();
        }
        return none().release();
    }

    template <typename T>
    using cast_op_type = void *&;
    explicit operator void *&() { return value; }

    static constexpr auto name = const_name("capsule");

private:
    void *value = nullptr;
};
// std::nullptr_t round-trips through Python `None` (see void_caster above).
template <>
class type_caster<std::nullptr_t> : public void_caster<std::nullptr_t> {};
// Caster for `bool`. Exact True/False always load; in convert mode (or for
// numpy.bool_) anything with a working truth protocol is accepted.
template <>
class type_caster<bool> {
public:
    bool load(handle src, bool convert) {
        if (!src) {
            return false;
        }
        // Fast path: identity comparison against the True/False singletons.
        if (src.ptr() == Py_True) {
            value = true;
            return true;
        }
        if (src.ptr() == Py_False) {
            value = false;
            return true;
        }
        if (convert || (std::strcmp("numpy.bool_", Py_TYPE(src.ptr())->tp_name) == 0)) {
            // (allow non-implicit conversion for numpy booleans)

            Py_ssize_t res = -1;
            if (src.is_none()) {
                res = 0; // None is implicitly converted to False
            }
#if defined(PYPY_VERSION)
            // On PyPy, check that "__bool__" attr exists
            else if (hasattr(src, PYBIND11_BOOL_ATTR)) {
                res = PyObject_IsTrue(src.ptr());
            }
#else
            // Alternate approach for CPython: this does the same as the above, but optimized
            // using the CPython API so as to avoid an unneeded attribute lookup.
            else if (auto *tp_as_number = src.ptr()->ob_type->tp_as_number) {
                if (PYBIND11_NB_BOOL(tp_as_number)) {
                    res = (*PYBIND11_NB_BOOL(tp_as_number))(src.ptr());
                }
            }
#endif
            if (res == 0 || res == 1) {
                value = (res != 0);
                return true;
            }
            // res == -1 means the truth-protocol call raised; discard that error.
            PyErr_Clear();
        }
        return false;
    }
    static handle cast(bool src, return_value_policy /* policy */, handle /* parent */) {
        return handle(src ? Py_True : Py_False).inc_ref();
    }
    PYBIND11_TYPE_CASTER(bool, const_name("bool"));
};
// Helper class for UTF-{8,16,32} C++ stl strings:
// Loads Python str (decoding to the char width of StringType) and, for 1-byte chars,
// raw bytes/bytearray; casts back by decoding the C++ buffer as UTF-N.
// IsView=true keeps the temporary encoded object alive for string_view targets.
template <typename StringType, bool IsView = false>
struct string_caster {
    using CharT = typename StringType::value_type;

    // Simplify life by being able to assume standard char sizes (the standard only guarantees
    // minimums, but Python requires exact sizes)
    static_assert(!std::is_same<CharT, char>::value || sizeof(CharT) == 1,
                  "Unsupported char size != 1");
#if defined(PYBIND11_HAS_U8STRING)
    static_assert(!std::is_same<CharT, char8_t>::value || sizeof(CharT) == 1,
                  "Unsupported char8_t size != 1");
#endif
    static_assert(!std::is_same<CharT, char16_t>::value || sizeof(CharT) == 2,
                  "Unsupported char16_t size != 2");
    static_assert(!std::is_same<CharT, char32_t>::value || sizeof(CharT) == 4,
                  "Unsupported char32_t size != 4");
    // wchar_t can be either 16 bits (Windows) or 32 (everywhere else)
    static_assert(!std::is_same<CharT, wchar_t>::value || sizeof(CharT) == 2 || sizeof(CharT) == 4,
                  "Unsupported wchar_t size != 2/4");

    // Bit width of the target encoding, derived from the char type: 8, 16, or 32.
    static constexpr size_t UTF_N = 8 * sizeof(CharT);

    bool load(handle src, bool) {
        handle load_src = src;
        if (!src) {
            return false;
        }
        if (!PyUnicode_Check(load_src.ptr())) {
            // Non-str input: only raw bytes/bytearray are accepted, and only for char.
            return load_raw(load_src);
        }

        // For UTF-8 we avoid the need for a temporary `bytes` object by using
        // `PyUnicode_AsUTF8AndSize`.
        if (UTF_N == 8) {
            Py_ssize_t size = -1;
            const auto *buffer
                = reinterpret_cast<const CharT *>(PyUnicode_AsUTF8AndSize(load_src.ptr(), &size));
            if (!buffer) {
                PyErr_Clear();
                return false;
            }
            value = StringType(buffer, static_cast<size_t>(size));
            return true;
        }

        auto utfNbytes
            = reinterpret_steal<object>(PyUnicode_AsEncodedString(load_src.ptr(),
                                                                  UTF_N == 8    ? "utf-8"
                                                                  : UTF_N == 16 ? "utf-16"
                                                                                : "utf-32",
                                                                  nullptr));
        if (!utfNbytes) {
            PyErr_Clear();
            return false;
        }

        const auto *buffer
            = reinterpret_cast<const CharT *>(PYBIND11_BYTES_AS_STRING(utfNbytes.ptr()));
        size_t length = (size_t) PYBIND11_BYTES_SIZE(utfNbytes.ptr()) / sizeof(CharT);
        // Skip BOM for UTF-16/32
        if (UTF_N > 8) {
            buffer++;
            length--;
        }
        value = StringType(buffer, length);

        // If we're loading a string_view we need to keep the encoded Python object alive:
        if (IsView) {
            loader_life_support::add_patient(utfNbytes);
        }

        return true;
    }

    static handle
    cast(const StringType &src, return_value_policy /* policy */, handle /* parent */) {
        const char *buffer = reinterpret_cast<const char *>(src.data());
        auto nbytes = ssize_t(src.size() * sizeof(CharT));
        handle s = decode_utfN(buffer, nbytes);
        if (!s) {
            throw error_already_set();
        }
        return s;
    }

    PYBIND11_TYPE_CASTER(StringType, const_name(PYBIND11_STRING_NAME));

private:
    static handle decode_utfN(const char *buffer, ssize_t nbytes) {
#if !defined(PYPY_VERSION)
        return UTF_N == 8    ? PyUnicode_DecodeUTF8(buffer, nbytes, nullptr)
               : UTF_N == 16 ? PyUnicode_DecodeUTF16(buffer, nbytes, nullptr, nullptr)
                             : PyUnicode_DecodeUTF32(buffer, nbytes, nullptr, nullptr);
#else
        // PyPy segfaults when on PyUnicode_DecodeUTF16 (and possibly on PyUnicode_DecodeUTF32 as
        // well), so bypass the whole thing by just passing the encoding as a string value, which
        // works properly:
        return PyUnicode_Decode(buffer,
                                nbytes,
                                UTF_N == 8    ? "utf-8"
                                : UTF_N == 16 ? "utf-16"
                                              : "utf-32",
                                nullptr);
#endif
    }

    // When loading into a std::string or char*, accept a bytes/bytearray object as-is (i.e.
    // without any encoding/decoding attempt). For other C++ char sizes this is a no-op.
    // The unicode path in load() above, which supports loading from a str, doesn't take this
    // path.
    template <typename C = CharT>
    bool load_raw(enable_if_t<std::is_same<C, char>::value, handle> src) {
        if (PYBIND11_BYTES_CHECK(src.ptr())) {
            // We were passed raw bytes; accept it into a std::string or char*
            // without any encoding attempt.
            const char *bytes = PYBIND11_BYTES_AS_STRING(src.ptr());
            if (!bytes) {
                pybind11_fail("Unexpected PYBIND11_BYTES_AS_STRING() failure.");
            }
            value = StringType(bytes, (size_t) PYBIND11_BYTES_SIZE(src.ptr()));
            return true;
        }
        if (PyByteArray_Check(src.ptr())) {
            // We were passed a bytearray; accept it into a std::string or char*
            // without any encoding attempt.
            const char *bytearray = PyByteArray_AsString(src.ptr());
            if (!bytearray) {
                pybind11_fail("Unexpected PyByteArray_AsString() failure.");
            }
            value = StringType(bytearray, (size_t) PyByteArray_Size(src.ptr()));
            return true;
        }

        return false;
    }

    // Wide-char overload: raw bytes/bytearray are never accepted.
    template <typename C = CharT>
    bool load_raw(enable_if_t<!std::is_same<C, char>::value, handle>) {
        return false;
    }
};
// Hook string_caster up as the caster for std::basic_string over any supported char type.
template <typename CharT, class Traits, class Allocator>
struct type_caster<std::basic_string<CharT, Traits, Allocator>,
                   enable_if_t<is_std_char_type<CharT>::value>>
    : string_caster<std::basic_string<CharT, Traits, Allocator>> {};

#ifdef PYBIND11_HAS_STRING_VIEW
// std::basic_string_view uses IsView=true so the decoded buffer stays alive
// for the duration of the call (see loader_life_support in string_caster::load).
template <typename CharT, class Traits>
struct type_caster<std::basic_string_view<CharT, Traits>,
                   enable_if_t<is_std_char_type<CharT>::value>>
    : string_caster<std::basic_string_view<CharT, Traits>, true> {};
#endif
// Type caster for C-style strings. We basically use a std::string type caster, but also add the
// ability to use None as a nullptr char* (which the string caster doesn't allow).
template <typename CharT>
struct type_caster<CharT, enable_if_t<is_std_char_type<CharT>::value>> {
    using StringType = std::basic_string<CharT>;
    using StringCaster = make_caster<StringType>;
    StringCaster str_caster;     // delegate that does the actual str <-> string work
    bool none = false;           // set when the Python input was None
    CharT one_char = 0;          // storage for the single-character conversion below

public:
    bool load(handle src, bool convert) {
        if (!src) {
            return false;
        }
        if (src.is_none()) {
            // Defer accepting None to other overloads (if we aren't in convert mode):
            if (!convert) {
                return false;
            }
            none = true;
            return true;
        }
        return str_caster.load(src, convert);
    }

    static handle cast(const CharT *src, return_value_policy policy, handle parent) {
        if (src == nullptr) {
            return pybind11::none().release();
        }
        return StringCaster::cast(StringType(src), policy, parent);
    }

    static handle cast(CharT src, return_value_policy policy, handle parent) {
        if (std::is_same<char, CharT>::value) {
            // A single char value 0-255 maps directly to a 1-character str via latin-1.
            handle s = PyUnicode_DecodeLatin1((const char *) &src, 1, nullptr);
            if (!s) {
                throw error_already_set();
            }
            return s;
        }
        return StringCaster::cast(StringType(1, src), policy, parent);
    }

    explicit operator CharT *() {
        return none ? nullptr : const_cast<CharT *>(static_cast<StringType &>(str_caster).c_str());
    }
    // Conversion to a single character; throws unless the loaded string encodes exactly
    // one character representable in CharT.
    explicit operator CharT &() {
        if (none) {
            throw value_error("Cannot convert None to a character");
        }

        auto &value = static_cast<StringType &>(str_caster);
        size_t str_len = value.size();
        if (str_len == 0) {
            throw value_error("Cannot convert empty string to a character");
        }

        // If we're in UTF-8 mode, we have two possible failures: one for a unicode character that
        // is too high, and one for multiple unicode characters (caught later), so we need to
        // figure out how long the first encoded character is in bytes to distinguish between these
        // two errors. We also want to allow unicode characters U+0080 through U+00FF, as
        // those can fit into a single char value.
        if (StringCaster::UTF_N == 8 && str_len > 1 && str_len <= 4) {
            auto v0 = static_cast<unsigned char>(value[0]);
            // low bits only: 0-127
            // 0b110xxxxx - start of 2-byte sequence
            // 0b1110xxxx - start of 3-byte sequence
            // 0b11110xxx - start of 4-byte sequence
            size_t char0_bytes = (v0 & 0x80) == 0      ? 1
                                 : (v0 & 0xE0) == 0xC0 ? 2
                                 : (v0 & 0xF0) == 0xE0 ? 3
                                                       : 4;

            if (char0_bytes == str_len) {
                // If we have a 128-255 value, we can decode it into a single char:
                if (char0_bytes == 2 && (v0 & 0xFC) == 0xC0) { // 0b110000xx 0b10xxxxxx
                    one_char = static_cast<CharT>(((v0 & 3) << 6)
                                                  + (static_cast<unsigned char>(value[1]) & 0x3F));
                    return one_char;
                }
                // Otherwise we have a single character, but it's > U+00FF
                throw value_error("Character code point not in range(0x100)");
            }
        }

        // UTF-16 is much easier: we can only have a surrogate pair for values above U+FFFF, thus a
        // surrogate pair with total length 2 instantly indicates a range error (but not a "your
        // string was too long" error).
        else if (StringCaster::UTF_N == 16 && str_len == 2) {
            one_char = static_cast<CharT>(value[0]);
            if (one_char >= 0xD800 && one_char < 0xE000) {
                throw value_error("Character code point not in range(0x10000)");
            }
        }

        if (str_len != 1) {
            throw value_error("Expected a character, but multi-character string found");
        }

        one_char = value[0];
        return one_char;
    }

    static constexpr auto name = const_name(PYBIND11_STRING_NAME);
    template <typename _T>
    using cast_op_type = pybind11::detail::cast_op_type<_T>;
};
// Base implementation for std::tuple and std::pair
// Loads any Python sequence of exactly `size` items (each element through its own
// sub-caster) and casts back to a Python tuple.
template <template <typename...> class Tuple, typename... Ts>
class tuple_caster {
    using type = Tuple<Ts...>;
    static constexpr auto size = sizeof...(Ts);
    using indices = make_index_sequence<size>;

public:
    bool load(handle src, bool convert) {
        if (!isinstance<sequence>(src)) {
            return false;
        }
        const auto seq = reinterpret_borrow<sequence>(src);
        if (seq.size() != size) {
            return false;
        }
        return load_impl(seq, convert, indices{});
    }

    template <typename T>
    static handle cast(T &&src, return_value_policy policy, handle parent) {
        return cast_impl(std::forward<T>(src), policy, parent, indices{});
    }

    // copied from the PYBIND11_TYPE_CASTER macro
    template <typename T>
    static handle cast(T *src, return_value_policy policy, handle parent) {
        if (!src) {
            return none().release();
        }
        if (policy == return_value_policy::take_ownership) {
            auto h = cast(std::move(*src), policy, parent);
            delete src;
            return h;
        }
        return cast(*src, policy, parent);
    }

    static constexpr auto name
        = const_name("Tuple[") + concat(make_caster<Ts>::name...) + const_name("]");

    template <typename T>
    using cast_op_type = type;

    // lvalue/rvalue conversions: move the element sub-casters' results when *this is an rvalue.
    explicit operator type() & { return implicit_cast(indices{}); }
    explicit operator type() && { return std::move(*this).implicit_cast(indices{}); }

protected:
    template <size_t... Is>
    type implicit_cast(index_sequence<Is...>) & {
        return type(cast_op<Ts>(std::get<Is>(subcasters))...);
    }
    template <size_t... Is>
    type implicit_cast(index_sequence<Is...>) && {
        return type(cast_op<Ts>(std::move(std::get<Is>(subcasters)))...);
    }

    static constexpr bool load_impl(const sequence &, bool, index_sequence<>) { return true; }

    // Load every element; fails fast on the first element whose sub-caster rejects it.
    template <size_t... Is>
    bool load_impl(const sequence &seq, bool convert, index_sequence<Is...>) {
#ifdef __cpp_fold_expressions
        if ((... || !std::get<Is>(subcasters).load(seq[Is], convert))) {
            return false;
        }
#else
        for (bool r : {std::get<Is>(subcasters).load(seq[Is], convert)...}) {
            if (!r) {
                return false;
            }
        }
#endif
        return true;
    }

    /* Implementation: Convert a C++ tuple into a Python tuple */
    template <typename T, size_t... Is>
    static handle
    cast_impl(T &&src, return_value_policy policy, handle parent, index_sequence<Is...>) {
        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(src, policy, parent);
        PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(policy, parent);
        // Cast all elements first so a failure anywhere leaves no partially-built tuple.
        std::array<object, size> entries{{reinterpret_steal<object>(
            make_caster<Ts>::cast(std::get<Is>(std::forward<T>(src)), policy, parent))...}};
        for (const auto &entry : entries) {
            if (!entry) {
                return handle();
            }
        }
        tuple result(size);
        int counter = 0;
        for (auto &entry : entries) {
            PyTuple_SET_ITEM(result.ptr(), counter++, entry.release().ptr());
        }
        return result.release();
    }

    Tuple<make_caster<Ts>...> subcasters;
};

template <typename T1, typename T2>
class type_caster<std::pair<T1, T2>> : public tuple_caster<std::pair, T1, T2> {};

template <typename... Ts>
class type_caster<std::tuple<Ts...>> : public tuple_caster<std::tuple, Ts...> {};
/// Helper class which abstracts away certain actions. Users can provide specializations for
/// custom holders, but it's only necessary if the type has a non-standard interface.
template <typename T>
struct holder_helper {
    // Extract the raw pointer from a holder; the trailing return type makes this
    // SFINAE-friendly for holders without a .get() member.
    static auto get(const T &p) -> decltype(p.get()) { return p.get(); }
};
/// Type caster for holder types like std::shared_ptr, etc.
/// The SFINAE hook is provided to help work around the current lack of support
/// for smart-pointer interoperability. Please consider it an implementation
/// detail that may change in the future, as formal support for smart-pointer
/// interoperability is added into pybind11.
template <typename type, typename holder_type, typename SFINAE = void>
struct copyable_holder_caster : public type_caster_base<type> {
public:
    using base = type_caster_base<type>;
    static_assert(std::is_base_of<base, type_caster<type>>::value,
                  "Holder classes are only supported for custom types");
    using base::base;
    using base::cast;
    using base::typeinfo;
    using base::value;

    bool load(handle src, bool convert) {
        return base::template load_impl<copyable_holder_caster<type, holder_type>>(src, convert);
    }

    explicit operator type *() { return this->value; }
    // static_cast works around compiler error with MSVC 17 and CUDA 10.2
    // see issue #2180
    explicit operator type &() { return *(static_cast<type *>(this->value)); }
    explicit operator holder_type *() { return std::addressof(holder); }
    explicit operator holder_type &() { return holder; }

    static handle cast(const holder_type &src, return_value_policy, handle) {
        const auto *ptr = holder_helper<holder_type>::get(src);
        return type_caster_base<type>::cast_holder(ptr, &src);
    }

protected:
    friend class type_caster_generic;
    // Reject loading a custom holder from an instance that uses the default holder.
    void check_holder_compat() {
        if (typeinfo->default_holder) {
            throw cast_error("Unable to load a custom holder type from a default-holder instance");
        }
    }

    // Copy both the raw pointer and the (already-constructed) holder out of the instance.
    bool load_value(value_and_holder &&v_h) {
        if (v_h.holder_constructed()) {
            value = v_h.value_ptr();
            holder = v_h.template holder<holder_type>();
            return true;
        }
        throw cast_error("Unable to cast from non-held to held instance (T& to Holder<T>) "
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
                         "(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for "
                         "type information)");
#else
                         "of type '"
                         + type_id<holder_type>() + "''");
#endif
    }

    // Implicit casts are only possible when holder_type can be aliased onto a different
    // pointee type (e.g. shared_ptr's aliasing constructor); otherwise fail.
    template <typename T = holder_type,
              detail::enable_if_t<!std::is_constructible<T, const T &, type *>::value, int> = 0>
    bool try_implicit_casts(handle, bool) {
        return false;
    }

    template <typename T = holder_type,
              detail::enable_if_t<std::is_constructible<T, const T &, type *>::value, int> = 0>
    bool try_implicit_casts(handle src, bool convert) {
        for (auto &cast : typeinfo->implicit_casts) {
            copyable_holder_caster sub_caster(*cast.first);
            if (sub_caster.load(src, convert)) {
                value = cast.second(sub_caster.value);
                holder = holder_type(sub_caster.holder, (type *) value);
                return true;
            }
        }
        return false;
    }

    static bool try_direct_conversions(handle) { return false; }

    holder_type holder;
};

/// Specialize for the common std::shared_ptr, so users don't need to
template <typename T>
class type_caster<std::shared_ptr<T>> : public copyable_holder_caster<T, std::shared_ptr<T>> {};
/// Type caster for holder types like std::unique_ptr.
/// Please consider the SFINAE hook an implementation detail, as explained
/// in the comment for the copyable_holder_caster.
/// Note: cast-only (no load) — move-only holders cannot be loaded from Python.
template <typename type, typename holder_type, typename SFINAE = void>
struct move_only_holder_caster {
    static_assert(std::is_base_of<type_caster_base<type>, type_caster<type>>::value,
                  "Holder classes are only supported for custom types");

    static handle cast(holder_type &&src, return_value_policy, handle) {
        auto *ptr = holder_helper<holder_type>::get(src);
        return type_caster_base<type>::cast_holder(ptr, std::addressof(src));
    }
    static constexpr auto name = type_caster_base<type>::name;
};

template <typename type, typename deleter>
class type_caster<std::unique_ptr<type, deleter>>
    : public move_only_holder_caster<type, std::unique_ptr<type, deleter>> {};

// Dispatch to the copyable or move-only caster depending on whether the holder
// is copy-constructible.
template <typename type, typename holder_type>
using type_caster_holder = conditional_t<is_copy_constructible<holder_type>::value,
                                         copyable_holder_caster<type, holder_type>,
                                         move_only_holder_caster<type, holder_type>>;
// Trait controlling whether a holder is constructed even when the instance isn't
// held by Python; users opt in via the second macro argument below.
template <typename T, bool Value = false>
struct always_construct_holder {
    static constexpr bool value = Value;
};

/// Create a specialization for custom holder types (silently ignores std::shared_ptr)
#define PYBIND11_DECLARE_HOLDER_TYPE(type, holder_type, ...)                                      \
    PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)                                                  \
    namespace detail {                                                                            \
    template <typename type>                                                                      \
    struct always_construct_holder<holder_type> : always_construct_holder<void, ##__VA_ARGS__> {  \
    };                                                                                            \
    template <typename type>                                                                      \
    class type_caster<holder_type, enable_if_t<!is_shared_ptr<holder_type>::value>>               \
        : public type_caster_holder<type, holder_type> {};                                        \
    }                                                                                             \
    PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)

// PYBIND11_DECLARE_HOLDER_TYPE holder types:
// Detect whether `holder` is a declared holder for `base` by checking that its
// type_caster derives from the corresponding type_caster_holder.
template <typename base, typename holder>
struct is_holder_type
    : std::is_base_of<detail::type_caster_holder<base, holder>, detail::type_caster<holder>> {};

// Specialization for always-supported unique_ptr holders:
template <typename base, typename deleter>
struct is_holder_type<base, std::unique_ptr<base, deleter>> : std::true_type {};
// Compile-time Python-facing name for pyobject wrapper types, used in generated
// signatures/docstrings. The primary template falls back to const_name<T>().
template <typename T>
struct handle_type_name {
    static constexpr auto name = const_name<T>();
};
template <>
struct handle_type_name<bool_> {
    static constexpr auto name = const_name("bool");
};
template <>
struct handle_type_name<bytes> {
    static constexpr auto name = const_name(PYBIND11_BYTES_NAME);
};
template <>
struct handle_type_name<int_> {
    static constexpr auto name = const_name("int");
};
template <>
struct handle_type_name<iterable> {
    static constexpr auto name = const_name("Iterable");
};
template <>
struct handle_type_name<iterator> {
    static constexpr auto name = const_name("Iterator");
};
template <>
struct handle_type_name<float_> {
    static constexpr auto name = const_name("float");
};
template <>
struct handle_type_name<none> {
    static constexpr auto name = const_name("None");
};
template <>
struct handle_type_name<args> {
    static constexpr auto name = const_name("*args");
};
template <>
struct handle_type_name<kwargs> {
    static constexpr auto name = const_name("**kwargs");
};
// Pass-through caster for pybind11's own Python object wrappers (handle, object,
// list, dict, ...). Loading is just a (checked) reference borrow; no conversion occurs.
template <typename type>
struct pyobject_caster {
    template <typename T = type, enable_if_t<std::is_same<T, handle>::value, int> = 0>
    pyobject_caster() : value() {}

    // `type` may not be default constructible (e.g. frozenset, anyset).  Initializing `value`
    // to a nil handle is safe since it will only be accessed if `load` succeeds.
    template <typename T = type, enable_if_t<std::is_base_of<object, T>::value, int> = 0>
    pyobject_caster() : value(reinterpret_steal<type>(handle())) {}

    // handle accepts anything non-null, without touching refcounts.
    template <typename T = type, enable_if_t<std::is_same<T, handle>::value, int> = 0>
    bool load(handle src, bool /* convert */) {
        value = src;
        return static_cast<bool>(value);
    }

    // Concrete wrapper types require an isinstance check before borrowing.
    template <typename T = type, enable_if_t<std::is_base_of<object, T>::value, int> = 0>
    bool load(handle src, bool /* convert */) {
        if (!isinstance<type>(src)) {
            return false;
        }
        value = reinterpret_borrow<type>(src);
        return true;
    }

    static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) {
        return src.inc_ref();
    }
    PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name);
};

template <typename T>
class type_caster<T, enable_if_t<is_pyobject<T>::value>> : public pyobject_caster<T> {};
// Our conditions for enabling moving are quite restrictive:
// At compile time:
// - T needs to be a non-const, non-pointer, non-reference type
// - type_caster<T>::operator T&() must exist
// - the type must be move constructible (obviously)
// At run-time:
// - if the type is non-copy-constructible, the object must be the sole owner of the type (i.e. it
//   must have ref_count() == 1)
// If any of the above are not satisfied, we fall back to copying.
template <typename T>
using move_is_plain_type
    = satisfies_none_of<T, std::is_void, std::is_pointer, std::is_reference, std::is_const>;
// move_always: T is movable but NOT copyable, so moving is the only option.
template <typename T, typename SFINAE = void>
struct move_always : std::false_type {};
template <typename T>
struct move_always<
    T,
    enable_if_t<
        all_of<move_is_plain_type<T>,
               negation<is_copy_constructible<T>>,
               is_move_constructible<T>,
               std::is_same<decltype(std::declval<make_caster<T>>().operator T &()), T &>>::value>>
    : std::true_type {};
// move_if_unreferenced: T is both movable and copyable; move only when the Python
// object holds the sole reference (checked at run time).
template <typename T, typename SFINAE = void>
struct move_if_unreferenced : std::false_type {};
template <typename T>
struct move_if_unreferenced<
    T,
    enable_if_t<
        all_of<move_is_plain_type<T>,
               negation<move_always<T>>,
               is_move_constructible<T>,
               std::is_same<decltype(std::declval<make_caster<T>>().operator T &()), T &>>::value>>
    : std::true_type {};
template <typename T>
using move_never = none_of<move_always<T>, move_if_unreferenced<T>>;
// Detect whether returning a `type` from a cast on type's type_caster is going to result in a
// reference or pointer to a local variable of the type_caster.  Basically, only
// non-reference/pointer `type`s and reference/pointers from a type_caster_generic are safe;
// everything else returns a reference/pointer to a local variable.
template <typename type>
using cast_is_temporary_value_reference
    = bool_constant<(std::is_reference<type>::value || std::is_pointer<type>::value)
                    && !std::is_base_of<type_caster_generic, make_caster<type>>::value
                    && !std::is_same<intrinsic_t<type>, void>::value>;

// When a value returned from a C++ function is being cast back to Python, we almost always want to
// force `policy = move`, regardless of the return value policy the function/method was declared
// with.
template <typename Return, typename SFINAE = void>
struct return_value_policy_override {
    // Default: leave the declared policy untouched.
    static return_value_policy policy(return_value_policy p) { return p; }
};

template <typename Return>
struct return_value_policy_override<
    Return,
    detail::enable_if_t<std::is_base_of<type_caster_generic, make_caster<Return>>::value, void>> {
    // For generic (registered) types returned by value, override to `move`; references
    // and pointers keep the declared policy.
    static return_value_policy policy(return_value_policy p) {
        return !std::is_lvalue_reference<Return>::value && !std::is_pointer<Return>::value
                   ? return_value_policy::move
                   : p;
    }
};
// Basic python -> C++ casting; throws if casting fails
// Runs the caster in convert mode (load(..., true)); on failure raises cast_error
// naming the offending Python type (and the C++ target type in detailed-error builds).
template <typename T, typename SFINAE>
type_caster<T, SFINAE> &load_type(type_caster<T, SFINAE> &conv, const handle &handle) {
    static_assert(!detail::is_pyobject<T>::value,
                  "Internal error: type_caster should only be used for C++ types");
    if (!conv.load(handle, true)) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
        throw cast_error(
            "Unable to cast Python instance of type "
            + str(type::handle_of(handle)).cast<std::string>()
            + " to C++ type '?' (#define "
              "PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
#else
        throw cast_error("Unable to cast Python instance of type "
                         + str(type::handle_of(handle)).cast<std::string>() + " to C++ type '"
                         + type_id<T>() + "'");
#endif
    }
    return conv;
}
// Wrapper around the above that also constructs and returns a type_caster
template <typename T>
make_caster<T> load_type(const handle &handle) {
    make_caster<T> conv;
    load_type(conv, handle);
    return conv;
}
PYBIND11_NAMESPACE_END(detail)
// pytype -> C++ type
// Borrowed-handle overload: converts through the type caster; static_asserts that T
// is not a reference/pointer into a caster-local temporary.
template <typename T,
          detail::enable_if_t<!detail::is_pyobject<T>::value
                                  && !detail::is_same_ignoring_cvref<T, PyObject *>::value,
                              int>
          = 0>
T cast(const handle &handle) {
    using namespace detail;
    static_assert(!cast_is_temporary_value_reference<T>::value,
                  "Unable to cast type to reference: value is local to type caster");
    return cast_op<T>(load_type<T>(handle));
}

// pytype -> pytype (calls converting constructor)
template <typename T, detail::enable_if_t<detail::is_pyobject<T>::value, int> = 0>
T cast(const handle &handle) {
    return T(reinterpret_borrow<object>(handle));
}

// Note that `cast<PyObject *>(obj)` increments the reference count of `obj`.
// This is necessary for the case that `obj` is a temporary, and could
// not possibly be different, given
// 1. the established convention that the passed `handle` is borrowed, and
// 2. we don't want to force all generic code using `cast<T>()` to special-case
//    handling of `T` = `PyObject *` (to increment the reference count there).
// It is the responsibility of the caller to ensure that the reference count
// is decremented.
template <typename T,
          typename Handle,
          detail::enable_if_t<detail::is_same_ignoring_cvref<T, PyObject *>::value
                                  && detail::is_same_ignoring_cvref<Handle, handle>::value,
                              int>
          = 0>
T cast(Handle &&handle) {
    return handle.inc_ref().ptr();
}

// To optimize way an inc_ref/dec_ref cycle:
// An `object` rvalue already owns a reference, so it can be released directly
// instead of incrementing and later decrementing.
template <typename T,
          typename Object,
          detail::enable_if_t<detail::is_same_ignoring_cvref<T, PyObject *>::value
                                  && detail::is_same_ignoring_cvref<Object, object>::value,
                              int>
          = 0>
T cast(Object &&obj) {
    return obj.release().ptr();
}
// C++ type -> py::object
// Resolves the two `automatic` policies before delegating to the type caster:
// pointers -> take_ownership/reference, lvalue refs -> copy, rvalues -> move.
template <typename T, detail::enable_if_t<!detail::is_pyobject<T>::value, int> = 0>
object cast(T &&value,
            return_value_policy policy = return_value_policy::automatic_reference,
            handle parent = handle()) {
    using no_ref_T = typename std::remove_reference<T>::type;
    if (policy == return_value_policy::automatic) {
        policy = std::is_pointer<no_ref_T>::value     ? return_value_policy::take_ownership
                 : std::is_lvalue_reference<T>::value ? return_value_policy::copy
                                                      : return_value_policy::move;
    } else if (policy == return_value_policy::automatic_reference) {
        policy = std::is_pointer<no_ref_T>::value     ? return_value_policy::reference
                 : std::is_lvalue_reference<T>::value ? return_value_policy::copy
                                                      : return_value_policy::move;
    }
    return reinterpret_steal<object>(
        detail::make_caster<T>::cast(std::forward<T>(value), policy, parent));
}
// Member-function sugar: h.cast<T>() forwards to the free pybind11::cast<T>(h).
template <typename T>
T handle::cast() const {
    return pybind11::cast<T>(*this);
}
// cast<void>() is a no-op (nothing to convert).
template <>
inline void handle::cast() const {
    return;
}
// Move the C++ value out of a Python object. Only legal when the Python object holds
// the sole reference; otherwise the moved-from state would be observable elsewhere.
template <typename T>
detail::enable_if_t<!detail::move_never<T>::value, T> move(object &&obj) {
    if (obj.ref_count() > 1) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
        throw cast_error(
            "Unable to cast Python " + str(type::handle_of(obj)).cast<std::string>()
            + " instance to C++ rvalue: instance has multiple references"
              " (#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
#else
        throw cast_error("Unable to move from Python "
                         + str(type::handle_of(obj)).cast<std::string>() + " instance to C++ "
                         + type_id<T>() + " instance: instance has multiple references");
#endif
    }

    // Move into a temporary and return that, because the reference may be a local value of `conv`
    T ret = std::move(detail::load_type<T>(obj).operator T &());
    return ret;
}
// Calling cast() on an rvalue calls pybind11::cast with the object rvalue, which does:
// - If we have to move (because T has no copy constructor), do it.  This will fail if the moved
//   object has multiple references, but trying to copy will fail to compile.
// - If both movable and copyable, check ref count: if 1, move; otherwise copy
// - Otherwise (not movable), copy.
template <typename T>
detail::enable_if_t<!detail::is_pyobject<T>::value && detail::move_always<T>::value, T>
cast(object &&object) {
    return move<T>(std::move(object));
}
template <typename T>
detail::enable_if_t<!detail::is_pyobject<T>::value && detail::move_if_unreferenced<T>::value, T>
cast(object &&object) {
    if (object.ref_count() > 1) {
        // Shared elsewhere: fall back to the copying (lvalue) cast.
        return cast<T>(object);
    }
    return move<T>(std::move(object));
}
template <typename T>
detail::enable_if_t<!detail::is_pyobject<T>::value && detail::move_never<T>::value, T>
cast(object &&object) {
    return cast<T>(object);
}

// pytype rvalue -> pytype (calls converting constructor)
template <typename T>
detail::enable_if_t<detail::is_pyobject<T>::value, T> cast(object &&object) {
    return T(std::move(object));
}

// Member-function sugar on object: lvalue casts copy, rvalue casts may move.
template <typename T>
T object::cast() const & {
    return pybind11::cast<T>(*this);
}
template <typename T>
T object::cast() && {
    return pybind11::cast<T>(std::move(*this));
}
// cast<void>() is a no-op in both value categories.
template <>
inline void object::cast() const & {
    return;
}
template <>
inline void object::cast() && {
    return;
}
PYBIND11_NAMESPACE_BEGIN(detail)
// Declared in pytypes.h:
template <typename T, enable_if_t<!is_pyobject<T>::value, int>>
object object_or_cast(T &&o) {
    return pybind11::cast(std::forward<T>(o));
}
// Placeholder type for the unneeded (and dead code) static variable in the
// PYBIND11_OVERRIDE_OVERRIDE macro
struct override_unused {};
// Selects the caster used by the override machinery: a real make_caster when the
// return type is a reference into a temporary value (the caster must outlive the
// call), otherwise the empty placeholder above.
template <typename ret_type>
using override_caster_t = conditional_t<cast_is_temporary_value_reference<ret_type>::value,
                                        make_caster<ret_type>,
                                        override_unused>;
// Trampoline use: for reference/pointer types to value-converted values, we do a value cast, then
// store the result in the given variable. For other types, this is a no-op.
template <typename T>
enable_if_t<cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&o,
                                                                     make_caster<T> &caster) {
    return cast_op<T>(load_type(caster, o));
}
// Non-temporary-reference case: this overload must never actually run; it exists
// only so the dead branch compiles.
template <typename T>
enable_if_t<!cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&,
                                                                      override_unused &) {
    pybind11_fail("Internal error: cast_ref fallback invoked");
}
// Trampoline use: Having a pybind11::cast with an invalid reference type is going to
// static_assert, even though if it's in dead code, so we provide a "trampoline" to pybind11::cast
// that only does anything in cases where pybind11::cast is valid.
template <typename T>
enable_if_t<cast_is_temporary_value_reference<T>::value, T> cast_safe(object &&) {
    pybind11_fail("Internal error: cast_safe fallback invoked");
}
// void result: nothing to cast.
template <typename T>
enable_if_t<std::is_void<T>::value, void> cast_safe(object &&) {}
// The live case: a normal, valid pybind11::cast.
template <typename T>
enable_if_t<detail::none_of<cast_is_temporary_value_reference<T>, std::is_void<T>>::value, T>
cast_safe(object &&o) {
    return pybind11::cast<T>(std::move(o));
}
PYBIND11_NAMESPACE_END(detail)
// The overloads could coexist, i.e. the #if is not strictly speaking needed,
// but it is an easy minor optimization.
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
// Release builds: short message that tells the user how to enable details.
inline cast_error cast_error_unable_to_convert_call_arg(const std::string &name) {
    return cast_error("Unable to convert call argument '" + name
                      + "' to Python object (#define "
                        "PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
}
#else
// Detailed builds: additionally report the C++ type of the offending argument.
inline cast_error cast_error_unable_to_convert_call_arg(const std::string &name,
                                                        const std::string &type) {
    return cast_error("Unable to convert call argument '" + name + "' of type '" + type
                      + "' to Python object");
}
#endif
// Zero-argument overload: produce an empty Python tuple.
template <return_value_policy policy = return_value_policy::automatic_reference>
tuple make_tuple() {
    return tuple(0);
}
// Convert each C++ argument to a Python object via its type caster and pack the
// results into a new tuple; throws cast_error if any conversion fails.
template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>
tuple make_tuple(Args &&...args_) {
    constexpr size_t size = sizeof...(Args);
    std::array<object, size> args{{reinterpret_steal<object>(
        detail::make_caster<Args>::cast(std::forward<Args>(args_), policy, nullptr))...}};
    // A null entry means the corresponding caster failed; report it by position.
    for (size_t i = 0; i < args.size(); i++) {
        if (!args[i]) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
            throw cast_error_unable_to_convert_call_arg(std::to_string(i));
#else
            std::array<std::string, size> argtypes{{type_id<Args>()...}};
            throw cast_error_unable_to_convert_call_arg(std::to_string(i), argtypes[i]);
#endif
        }
    }
    tuple result(size);
    int counter = 0;
    for (auto &arg_value : args) {
        // PyTuple_SET_ITEM steals the reference we release here.
        PyTuple_SET_ITEM(result.ptr(), counter++, arg_value.release().ptr());
    }
    return result;
}
/// \ingroup annotations
/// Annotation for arguments
struct arg {
    /// Constructs an argument with the name of the argument; if null or omitted, this is a
    /// positional argument.
    constexpr explicit arg(const char *name = nullptr)
        : name(name), flag_noconvert(false), flag_none(true) {}
    /// Assign a value to this argument (produces an arg_v carrying the default).
    template <typename T>
    arg_v operator=(T &&value) const;
    /// Indicate that the type should not be converted in the type caster
    arg &noconvert(bool flag = true) {
        flag_noconvert = flag;
        return *this;
    }
    /// Indicates that the argument should/shouldn't allow None (e.g. for nullable pointer args)
    arg &none(bool flag = true) {
        flag_none = flag;
        return *this;
    }
    const char *name;        ///< If non-null, this is a named kwargs argument
    bool flag_noconvert : 1; ///< If set, do not allow conversion (requires a supporting type
                             ///< caster!)
    bool flag_none : 1;      ///< If set (the default), allow None to be passed to this argument
};
/// \ingroup annotations
/// Annotation for arguments with values
struct arg_v : arg {
private:
    // Delegated-to constructor: converts the default value to a Python object via
    // the type caster, storing it in `value`.
    template <typename T>
    arg_v(arg &&base, T &&x, const char *descr = nullptr)
        : arg(base), value(reinterpret_steal<object>(detail::make_caster<T>::cast(
                         std::forward<T>(x), return_value_policy::automatic, {}))),
          descr(descr)
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
          ,
          type(type_id<T>())
#endif
    {
        // Workaround! See:
        // https://github.com/pybind/pybind11/issues/2336
        // https://github.com/pybind/pybind11/pull/2685#issuecomment-731286700
        if (PyErr_Occurred()) {
            PyErr_Clear();
        }
    }
public:
    /// Direct construction with name, default, and description
    template <typename T>
    arg_v(const char *name, T &&x, const char *descr = nullptr)
        : arg_v(arg(name), std::forward<T>(x), descr) {}
    /// Called internally when invoking `py::arg("a") = value`
    template <typename T>
    arg_v(const arg &base, T &&x, const char *descr = nullptr)
        : arg_v(arg(base), std::forward<T>(x), descr) {}
    /// Same as `arg::noconvert()`, but returns *this as arg_v&, not arg&
    arg_v &noconvert(bool flag = true) {
        arg::noconvert(flag);
        return *this;
    }
    /// Same as `arg::nonone()`, but returns *this as arg_v&, not arg&
    arg_v &none(bool flag = true) {
        arg::none(flag);
        return *this;
    }
    /// The default value
    object value;
    /// The (optional) description of the default value
    const char *descr;
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
    /// The C++ type name of the default value (only available when compiled in debug mode)
    std::string type;
#endif
};
/// \ingroup annotations
/// Annotation indicating that all following arguments are keyword-only; the is the equivalent of
/// an unnamed '*' argument
struct kw_only {};
/// \ingroup annotations
/// Annotation indicating that all previous arguments are positional-only; the is the equivalent of
/// an unnamed '/' argument (in Python 3.8)
struct pos_only {};
// Out-of-line definition of arg::operator= (declared above): binds a default
// value to the named argument, yielding an arg_v.
template <typename T>
arg_v arg::operator=(T &&value) const {
    return {*this, std::forward<T>(value)};
}
/// Alias for backward compatibility -- to be removed in version 2.0
template <typename /*unused*/>
using arg_t = arg_v;
inline namespace literals {
/** \rst
    String literal version of `arg`
 \endrst */
constexpr arg operator"" _a(const char *name, size_t) { return arg(name); }
} // namespace literals
PYBIND11_NAMESPACE_BEGIN(detail)
// Detect the py::kw_only / py::pos_only marker annotations among Args.
template <typename T>
using is_kw_only = std::is_same<intrinsic_t<T>, kw_only>;
template <typename T>
using is_pos_only = std::is_same<intrinsic_t<T>, pos_only>;
// forward declaration (definition in attr.h)
struct function_record;
/// Internal data associated with a single function call
struct function_call {
    function_call(const function_record &f, handle p); // Implementation in attr.h
    /// The function data:
    const function_record &func;
    /// Arguments passed to the function:
    std::vector<handle> args;
    /// The `convert` value the arguments should be loaded with
    std::vector<bool> args_convert;
    /// Extra references for the optional `py::args` and/or `py::kwargs` arguments (which, if
    /// present, are also in `args` but without a reference).
    object args_ref, kwargs_ref;
    /// The parent, if any
    handle parent;
    /// If this is a call to an initializer, this argument contains `self`
    handle init_self;
};
/// Helper class which loads arguments for C++ functions called from Python
template <typename... Args>
class argument_loader {
    using indices = make_index_sequence<sizeof...(Args)>;
    template <typename Arg>
    using argument_is_args = std::is_same<intrinsic_t<Arg>, args>;
    template <typename Arg>
    using argument_is_kwargs = std::is_same<intrinsic_t<Arg>, kwargs>;
    // Get kwargs argument position, or -1 if not present:
    static constexpr auto kwargs_pos = constexpr_last<argument_is_kwargs, Args...>();
    static_assert(kwargs_pos == -1 || kwargs_pos == (int) sizeof...(Args) - 1,
                  "py::kwargs is only permitted as the last argument of a function");
public:
    static constexpr bool has_kwargs = kwargs_pos != -1;
    // py::args argument position; -1 if not present.
    static constexpr int args_pos = constexpr_last<argument_is_args, Args...>();
    static_assert(args_pos == -1 || args_pos == constexpr_first<argument_is_args, Args...>(),
                  "py::args cannot be specified more than once");
    static constexpr auto arg_names = concat(type_descr(make_caster<Args>::name)...);
    // Load every Python argument of `call` into its corresponding C++ type caster;
    // returns false as soon as any caster rejects its argument.
    bool load_args(function_call &call) { return load_impl_sequence(call, indices{}); }
    // Invoke `f` with the loaded, cast arguments (non-void return).
    template <typename Return, typename Guard, typename Func>
    // NOLINTNEXTLINE(readability-const-return-type)
    enable_if_t<!std::is_void<Return>::value, Return> call(Func &&f) && {
        return std::move(*this).template call_impl<remove_cv_t<Return>>(
            std::forward<Func>(f), indices{}, Guard{});
    }
    // void-returning overload: yields void_type so callers can handle "no result"
    // uniformly with the value-returning case.
    template <typename Return, typename Guard, typename Func>
    enable_if_t<std::is_void<Return>::value, void_type> call(Func &&f) && {
        std::move(*this).template call_impl<remove_cv_t<Return>>(
            std::forward<Func>(f), indices{}, Guard{});
        return void_type();
    }
private:
    static bool load_impl_sequence(function_call &, index_sequence<>) { return true; }
    template <size_t... Is>
    bool load_impl_sequence(function_call &call, index_sequence<Is...>) {
#ifdef __cpp_fold_expressions
        // Fold expression short-circuits on the first caster that fails to load.
        if ((... || !std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is]))) {
            return false;
        }
#else
        // Pre-C++17 fallback: all loads run eagerly via the braced-init pack expansion.
        for (bool r : {std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is])...}) {
            if (!r) {
                return false;
            }
        }
#endif
        return true;
    }
    template <typename Return, typename Func, size_t... Is, typename Guard>
    Return call_impl(Func &&f, index_sequence<Is...>, Guard &&) && {
        return std::forward<Func>(f)(cast_op<Args>(std::move(std::get<Is>(argcasters)))...);
    }
    std::tuple<make_caster<Args>...> argcasters;
};
/// Helper class which collects only positional arguments for a Python function call.
/// A fancier version below can collect any argument, but this one is optimal for simple calls.
template <return_value_policy policy>
class simple_collector {
public:
    // Convert each C++ value to a Python object and store them all in one tuple.
    template <typename... Ts>
    explicit simple_collector(Ts &&...values)
        : m_args(pybind11::make_tuple<policy>(std::forward<Ts>(values)...)) {}
    const tuple &args() const & { return m_args; }
    dict kwargs() const { return {}; }
    tuple args() && { return std::move(m_args); }
    /// Call a Python function and pass the collected arguments
    object call(PyObject *ptr) const {
        PyObject *result = PyObject_CallObject(ptr, m_args.ptr());
        if (!result) {
            throw error_already_set();
        }
        return reinterpret_steal<object>(result);
    }
private:
    tuple m_args;
};
/// Helper class which collects positional, keyword, * and ** arguments for a Python function call
template <return_value_policy policy>
class unpacking_collector {
public:
    template <typename... Ts>
    explicit unpacking_collector(Ts &&...values) {
        // Tuples aren't (easily) resizable so a list is needed for collection,
        // but the actual function call strictly requires a tuple.
        auto args_list = list();
        // Pre-C++17 pack expansion trick: process() each value in order.
        using expander = int[];
        (void) expander{0, (process(args_list, std::forward<Ts>(values)), 0)...};
        m_args = std::move(args_list);
    }
    const tuple &args() const & { return m_args; }
    const dict &kwargs() const & { return m_kwargs; }
    tuple args() && { return std::move(m_args); }
    dict kwargs() && { return std::move(m_kwargs); }
    /// Call a Python function and pass the collected arguments
    object call(PyObject *ptr) const {
        PyObject *result = PyObject_Call(ptr, m_args.ptr(), m_kwargs.ptr());
        if (!result) {
            throw error_already_set();
        }
        return reinterpret_steal<object>(result);
    }
private:
    // Plain positional value: convert via the type caster and append to the list.
    template <typename T>
    void process(list &args_list, T &&x) {
        auto o = reinterpret_steal<object>(
            detail::make_caster<T>::cast(std::forward<T>(x), policy, {}));
        if (!o) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
            throw cast_error_unable_to_convert_call_arg(std::to_string(args_list.size()));
#else
            throw cast_error_unable_to_convert_call_arg(std::to_string(args_list.size()),
                                                        type_id<T>());
#endif
        }
        args_list.append(std::move(o));
    }
    // `*args` unpacking: splice every element of the proxy into the positional list.
    void process(list &args_list, detail::args_proxy ap) {
        for (auto a : ap) {
            args_list.append(a);
        }
    }
    // Named argument with a value (py::arg("x") = v): goes into the kwargs dict.
    void process(list & /*args_list*/, arg_v a) {
        if (!a.name) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
            nameless_argument_error();
#else
            nameless_argument_error(a.type);
#endif
        }
        if (m_kwargs.contains(a.name)) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
            multiple_values_error();
#else
            multiple_values_error(a.name);
#endif
        }
        if (!a.value) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
            throw cast_error_unable_to_convert_call_arg(a.name);
#else
            throw cast_error_unable_to_convert_call_arg(a.name, a.type);
#endif
        }
        m_kwargs[a.name] = std::move(a.value);
    }
    // `**kwargs` unpacking: merge the dict, rejecting duplicate keys.
    void process(list & /*args_list*/, detail::kwargs_proxy kp) {
        if (!kp) {
            return;
        }
        for (auto k : reinterpret_borrow<dict>(kp)) {
            if (m_kwargs.contains(k.first)) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
                multiple_values_error();
#else
                multiple_values_error(str(k.first));
#endif
            }
            m_kwargs[k.first] = k.second;
        }
    }
    [[noreturn]] static void nameless_argument_error() {
        throw type_error(
            "Got kwargs without a name; only named arguments "
            "may be passed via py::arg() to a python function call. "
            "(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
    }
    [[noreturn]] static void nameless_argument_error(const std::string &type) {
        throw type_error("Got kwargs without a name of type '" + type
                         + "'; only named "
                           "arguments may be passed via py::arg() to a python function call. ");
    }
    [[noreturn]] static void multiple_values_error() {
        throw type_error(
            "Got multiple values for keyword argument "
            "(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
    }
    [[noreturn]] static void multiple_values_error(const std::string &name) {
        throw type_error("Got multiple values for keyword argument '" + name + "'");
    }
private:
    tuple m_args;
    dict m_kwargs;
};
// [workaround(intel)] Separate function required here
// We need to put this into a separate function because the Intel compiler
// fails to compile enable_if_t<!all_of<is_positional<Args>...>::value>
// (tested with ICC 2021.1 Beta 20200827).
template <typename... Args>
constexpr bool args_are_all_positional() {
    return all_of<is_positional<Args>...>::value;
}
/// Collect only positional arguments for a Python function call
template <return_value_policy policy,
          typename... Args,
          typename = enable_if_t<args_are_all_positional<Args...>()>>
simple_collector<policy> collect_arguments(Args &&...args) {
    return simple_collector<policy>(std::forward<Args>(args)...);
}
/// Collect all arguments, including keywords and unpacking (only instantiated when needed)
template <return_value_policy policy,
          typename... Args,
          typename = enable_if_t<!args_are_all_positional<Args...>()>>
unpacking_collector<policy> collect_arguments(Args &&...args) {
    // Following argument order rules for generalized unpacking according to PEP 448
    static_assert(constexpr_last<is_positional, Args...>()
                          < constexpr_first<is_keyword_or_ds, Args...>()
                      && constexpr_last<is_s_unpacking, Args...>()
                             < constexpr_first<is_ds_unpacking, Args...>(),
                  "Invalid function call: positional args must precede keywords and ** unpacking; "
                  "* unpacking must precede ** unpacking");
    return unpacking_collector<policy>(std::forward<Args>(args)...);
}
// Call operator for any Python object: collects the C++ arguments into a Python
// tuple/dict and invokes the underlying PyObject.
template <typename Derived>
template <return_value_policy policy, typename... Args>
object object_api<Derived>::operator()(Args &&...args) const {
#ifndef NDEBUG
    // Debug-only guard: calling into Python without holding the GIL is undefined.
    if (!PyGILState_Check()) {
        pybind11_fail("pybind11::object_api<>::operator() PyGILState_Check() failure.");
    }
#endif
    return detail::collect_arguments<policy>(std::forward<Args>(args)...).call(derived().ptr());
}
// Deprecated-style alias: forwards to operator().
template <typename Derived>
template <return_value_policy policy, typename... Args>
object object_api<Derived>::call(Args &&...args) const {
    return operator()<policy>(std::forward<Args>(args)...);
}
PYBIND11_NAMESPACE_END(detail)
// Look up the Python type object registered for the C++ type T; only valid for
// types that go through the generic type caster (i.e. pybind11-registered classes).
template <typename T>
handle type::handle_of() {
    static_assert(std::is_base_of<detail::type_caster_generic, detail::make_caster<T>>::value,
                  "py::type::of<T> only supports the case where T is a registered C++ types.");
    return detail::get_type_handle(typeid(T), true);
}
// Declares T as an "opaque" type: disables the default caster machinery and
// treats T as a regular registered class (see docs on opaque types).
#define PYBIND11_MAKE_OPAQUE(...)                                                                 \
    PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)                                                  \
    namespace detail {                                                                            \
    template <>                                                                                   \
    class type_caster<__VA_ARGS__> : public type_caster_base<__VA_ARGS__> {};                     \
    }                                                                                             \
    PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
/// Lets you pass a type containing a `,` through a macro parameter without needing a separate
/// typedef, e.g.:
/// `PYBIND11_OVERRIDE(PYBIND11_TYPE(ReturnType<A, B>), PYBIND11_TYPE(Parent<C, D>), f, arg)`
#define PYBIND11_TYPE(...) __VA_ARGS__
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/chrono.h | C/C++ Header | /*
pybind11/chrono.h: Transparent conversion between std::chrono and python's datetime
Copyright (c) 2016 Trent Houliston <trent@houliston.me> and
Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include <chrono>
#include <cmath>
#include <ctime>
#include <datetime.h>
#include <mutex>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Two-way caster between std::chrono durations/time_points and datetime.timedelta.
template <typename type>
class duration_caster {
public:
    using rep = typename type::rep;
    using period = typename type::period;
    // signed 25 bits required by the standard.
    using days = std::chrono::duration<int_least32_t, std::ratio<86400>>;
    // Python -> C++: accepts a datetime.timedelta or a float (interpreted as seconds).
    bool load(handle src, bool) {
        using namespace std::chrono;
        // Lazy initialise the PyDateTime import
        if (!PyDateTimeAPI) {
            PyDateTime_IMPORT;
        }
        if (!src) {
            return false;
        }
        // If invoked with datetime.delta object
        if (PyDelta_Check(src.ptr())) {
            value = type(duration_cast<duration<rep, period>>(
                days(PyDateTime_DELTA_GET_DAYS(src.ptr()))
                + seconds(PyDateTime_DELTA_GET_SECONDS(src.ptr()))
                + microseconds(PyDateTime_DELTA_GET_MICROSECONDS(src.ptr()))));
            return true;
        }
        // If invoked with a float we assume it is seconds and convert
        if (PyFloat_Check(src.ptr())) {
            value = type(duration_cast<duration<rep, period>>(
                duration<double>(PyFloat_AsDouble(src.ptr()))));
            return true;
        }
        return false;
    }
    // If this is a duration just return it back
    static const std::chrono::duration<rep, period> &
    get_duration(const std::chrono::duration<rep, period> &src) {
        return src;
    }
    // If this is a time_point get the time_since_epoch
    template <typename Clock>
    static std::chrono::duration<rep, period>
    get_duration(const std::chrono::time_point<Clock, std::chrono::duration<rep, period>> &src) {
        return src.time_since_epoch();
    }
    // C++ -> Python: split the duration into (days, seconds, microseconds) and
    // build a datetime.timedelta.
    static handle cast(const type &src, return_value_policy /* policy */, handle /* parent */) {
        using namespace std::chrono;
        // Use overloaded function to get our duration from our source
        // Works out if it is a duration or time_point and get the duration
        auto d = get_duration(src);
        // Lazy initialise the PyDateTime import
        if (!PyDateTimeAPI) {
            PyDateTime_IMPORT;
        }
        // Declare these special duration types so the conversions happen with the correct
        // primitive types (int)
        using dd_t = duration<int, std::ratio<86400>>;
        using ss_t = duration<int, std::ratio<1>>;
        using us_t = duration<int, std::micro>;
        auto dd = duration_cast<dd_t>(d);
        auto subd = d - dd;
        auto ss = duration_cast<ss_t>(subd);
        auto us = duration_cast<us_t>(subd - ss);
        return PyDelta_FromDSU(dd.count(), ss.count(), us.count());
    }
    PYBIND11_TYPE_CASTER(type, const_name("datetime.timedelta"));
};
// Thread-safe wrapper around localtime: uses localtime_s where available
// (Annex K / MSVC), otherwise serializes calls to std::localtime with a mutex
// and copies the result out of its internal static buffer.
inline std::tm *localtime_thread_safe(const std::time_t *time, std::tm *buf) {
#if (defined(__STDC_LIB_EXT1__) && defined(__STDC_WANT_LIB_EXT1__)) || defined(_MSC_VER)
    if (localtime_s(buf, time))
        return nullptr;
    return buf;
#else
    static std::mutex mtx;
    std::lock_guard<std::mutex> lock(mtx);
    std::tm *tm_ptr = std::localtime(time);
    if (tm_ptr != nullptr) {
        *buf = *tm_ptr;
    }
    // Returns nullptr on conversion failure, matching localtime's contract.
    return tm_ptr;
#endif
}
// This is for casting times on the system clock into datetime.datetime instances
template <typename Duration>
class type_caster<std::chrono::time_point<std::chrono::system_clock, Duration>> {
public:
    using type = std::chrono::time_point<std::chrono::system_clock, Duration>;
    // Python -> C++: accepts datetime.datetime, datetime.date, or datetime.time;
    // interprets the fields as local time (via mktime).
    bool load(handle src, bool) {
        using namespace std::chrono;
        // Lazy initialise the PyDateTime import
        if (!PyDateTimeAPI) {
            PyDateTime_IMPORT;
        }
        if (!src) {
            return false;
        }
        std::tm cal;
        microseconds msecs;
        if (PyDateTime_Check(src.ptr())) {
            cal.tm_sec = PyDateTime_DATE_GET_SECOND(src.ptr());
            cal.tm_min = PyDateTime_DATE_GET_MINUTE(src.ptr());
            cal.tm_hour = PyDateTime_DATE_GET_HOUR(src.ptr());
            cal.tm_mday = PyDateTime_GET_DAY(src.ptr());
            cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1;
            cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900;
            cal.tm_isdst = -1;
            msecs = microseconds(PyDateTime_DATE_GET_MICROSECOND(src.ptr()));
        } else if (PyDate_Check(src.ptr())) {
            // A bare date maps to midnight of that day.
            cal.tm_sec = 0;
            cal.tm_min = 0;
            cal.tm_hour = 0;
            cal.tm_mday = PyDateTime_GET_DAY(src.ptr());
            cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1;
            cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900;
            cal.tm_isdst = -1;
            msecs = microseconds(0);
        } else if (PyTime_Check(src.ptr())) {
            cal.tm_sec = PyDateTime_TIME_GET_SECOND(src.ptr());
            cal.tm_min = PyDateTime_TIME_GET_MINUTE(src.ptr());
            cal.tm_hour = PyDateTime_TIME_GET_HOUR(src.ptr());
            cal.tm_mday = 1;   // This date (day, month, year) = (1, 0, 70)
            cal.tm_mon = 0;    // represents 1-Jan-1970, which is the first
            cal.tm_year = 70;  // earliest available date for Python's datetime
            cal.tm_isdst = -1;
            msecs = microseconds(PyDateTime_TIME_GET_MICROSECOND(src.ptr()));
        } else {
            return false;
        }
        value = time_point_cast<Duration>(system_clock::from_time_t(std::mktime(&cal)) + msecs);
        return true;
    }
    // C++ -> Python: produce a naive datetime.datetime in local time.
    static handle cast(const std::chrono::time_point<std::chrono::system_clock, Duration> &src,
                       return_value_policy /* policy */,
                       handle /* parent */) {
        using namespace std::chrono;
        // Lazy initialise the PyDateTime import
        if (!PyDateTimeAPI) {
            PyDateTime_IMPORT;
        }
        // Get out microseconds, and make sure they are positive, to avoid bug in eastern
        // hemisphere time zones (cfr. https://github.com/pybind/pybind11/issues/2417)
        using us_t = duration<int, std::micro>;
        auto us = duration_cast<us_t>(src.time_since_epoch() % seconds(1));
        if (us.count() < 0) {
            us += seconds(1);
        }
        // Subtract microseconds BEFORE `system_clock::to_time_t`, because:
        // > If std::time_t has lower precision, it is implementation-defined whether the value is
        // rounded or truncated. (https://en.cppreference.com/w/cpp/chrono/system_clock/to_time_t)
        std::time_t tt
            = system_clock::to_time_t(time_point_cast<system_clock::duration>(src - us));
        std::tm localtime;
        std::tm *localtime_ptr = localtime_thread_safe(&tt, &localtime);
        if (!localtime_ptr) {
            throw cast_error("Unable to represent system_clock in local time");
        }
        return PyDateTime_FromDateAndTime(localtime.tm_year + 1900,
                                          localtime.tm_mon + 1,
                                          localtime.tm_mday,
                                          localtime.tm_hour,
                                          localtime.tm_min,
                                          localtime.tm_sec,
                                          us.count());
    }
    PYBIND11_TYPE_CASTER(type, const_name("datetime.datetime"));
};
// Other clocks that are not the system clock are not measured as datetime.datetime objects
// since they are not measured on calendar time. So instead we just make them timedeltas
// Or if they have passed us a time as a float we convert that
// Non-system-clock time_points reuse the timedelta-based duration_caster.
template <typename Clock, typename Duration>
class type_caster<std::chrono::time_point<Clock, Duration>>
    : public duration_caster<std::chrono::time_point<Clock, Duration>> {};
// Plain durations likewise convert to/from datetime.timedelta.
template <typename Rep, typename Period>
class type_caster<std::chrono::duration<Rep, Period>>
    : public duration_caster<std::chrono::duration<Rep, Period>> {};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/common.h | C/C++ Header | #include "detail/common.h"
#warning "Including 'common.h' is deprecated. It will be removed in v3.0. Use 'pybind11.h'."
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/complex.h | C/C++ Header | /*
pybind11/complex.h: Complex number support
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include <complex>
/// glibc defines I as a macro which breaks things, e.g., boost template names
#ifdef I
# undef I
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
// Buffer-protocol format descriptor for std::complex<T>: 'Z' prefix marks a
// complex pair of the underlying floating-point format character.
template <typename T>
struct format_descriptor<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
    static constexpr const char c = format_descriptor<T>::c;
    static constexpr const char value[3] = {'Z', c, '\0'};
    static std::string format() { return std::string(value); }
};
#ifndef PYBIND11_CPP17
// Pre-C++17: static constexpr members still need an out-of-class definition.
template <typename T>
constexpr const char
    format_descriptor<std::complex<T>,
                      detail::enable_if_t<std::is_floating_point<T>::value>>::value[3];
#endif
PYBIND11_NAMESPACE_BEGIN(detail)
// Ranks std::complex above its element type in the numeric-format ordering.
template <typename T>
struct is_fmt_numeric<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
    static constexpr bool value = true;
    static constexpr int index = is_fmt_numeric<T>::index + 3;
};
// Two-way caster between std::complex<T> and Python's complex.
template <typename T>
class type_caster<std::complex<T>> {
public:
    bool load(handle src, bool convert) {
        if (!src) {
            return false;
        }
        // Without implicit conversion, only accept actual complex objects.
        if (!convert && !PyComplex_Check(src.ptr())) {
            return false;
        }
        Py_complex result = PyComplex_AsCComplex(src.ptr());
        // -1.0 is PyComplex_AsCComplex's error sentinel; confirm via PyErr_Occurred.
        if (result.real == -1.0 && PyErr_Occurred()) {
            PyErr_Clear();
            return false;
        }
        value = std::complex<T>((T) result.real, (T) result.imag);
        return true;
    }
    static handle
    cast(const std::complex<T> &src, return_value_policy /* policy */, handle /* parent */) {
        return PyComplex_FromDoubles((double) src.real(), (double) src.imag());
    }
    PYBIND11_TYPE_CASTER(std::complex<T>, const_name("complex"));
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/detail/class.h | C/C++ Header | /*
pybind11/detail/class.h: Python C API implementation details for py::class_
Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "../attr.h"
#include "../options.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
#if !defined(PYPY_VERSION)
# define PYBIND11_BUILTIN_QUALNAME
# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj)
#else
// In PyPy, we still set __qualname__ so that we can produce reliable function type
// signatures; in CPython this macro expands to nothing:
# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) \
setattr((PyObject *) obj, "__qualname__", nameobj)
#endif
// Returns the type's fully qualified name; on PyPy tp_name lacks the module
// prefix, so it is reconstructed from __module__ (builtins excepted).
inline std::string get_fully_qualified_tp_name(PyTypeObject *type) {
#if !defined(PYPY_VERSION)
    return type->tp_name;
#else
    auto module_name = handle((PyObject *) type).attr("__module__").cast<std::string>();
    if (module_name == PYBIND11_BUILTINS_MODULE)
        return type->tp_name;
    else
        return std::move(module_name) + "." + type->tp_name;
#endif
}
// Increment the type's refcount and return it; convenience for ownership-taking
// assignments such as tp_base.
inline PyTypeObject *type_incref(PyTypeObject *type) {
    Py_INCREF(type);
    return type;
}
#if !defined(PYPY_VERSION)
/// `pybind11_static_property.__get__()`: Always pass the class instead of the instance.
extern "C" inline PyObject *pybind11_static_get(PyObject *self, PyObject * /*ob*/, PyObject *cls) {
    return PyProperty_Type.tp_descr_get(self, cls, cls);
}
/// `pybind11_static_property.__set__()`: Just like the above `__get__()`.
extern "C" inline int pybind11_static_set(PyObject *self, PyObject *obj, PyObject *value) {
    // `obj` may already be the class (when assigned on the type itself).
    PyObject *cls = PyType_Check(obj) ? obj : (PyObject *) Py_TYPE(obj);
    return PyProperty_Type.tp_descr_set(self, cls, value);
}
// Forward declaration to use in `make_static_property_type()`
inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type);
/** A `static_property` is the same as a `property` but the `__get__()` and `__set__()`
    methods are modified to always use the object type instead of a concrete instance.
    Return value: New reference. */
inline PyTypeObject *make_static_property_type() {
    constexpr auto *name = "pybind11_static_property";
    auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));
    /* Danger zone: from now (and until PyType_Ready), make sure to
       issue no Python C API calls which could potentially invoke the
       garbage collector (the GC will call type_traverse(), which will in
       turn find the newly constructed type in an invalid state) */
    auto *heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0);
    if (!heap_type) {
        pybind11_fail("make_static_property_type(): error allocating type!");
    }
    heap_type->ht_name = name_obj.inc_ref().ptr();
#    ifdef PYBIND11_BUILTIN_QUALNAME
    heap_type->ht_qualname = name_obj.inc_ref().ptr();
#    endif
    auto *type = &heap_type->ht_type;
    type->tp_name = name;
    // Subclass of `property` with the class-oriented descriptor slots installed.
    type->tp_base = type_incref(&PyProperty_Type);
    type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
    type->tp_descr_get = pybind11_static_get;
    type->tp_descr_set = pybind11_static_set;
    if (PyType_Ready(type) < 0) {
        pybind11_fail("make_static_property_type(): failure in PyType_Ready()!");
    }
#    if PY_VERSION_HEX >= 0x030C0000
    // PRE 3.12 FEATURE FREEZE. PLEASE REVIEW AFTER FREEZE.
    // Since Python-3.12 property-derived types are required to
    // have dynamic attributes (to set `__doc__`)
    enable_dynamic_attributes(heap_type);
#    endif
    setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
    PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
    return type;
}
#else // PYPY
/** PyPy has some issues with the above C API, so we evaluate Python code instead.
    This function will only be called once so performance isn't really a concern.
    Return value: New reference. */
inline PyTypeObject *make_static_property_type() {
    auto d = dict();
    PyObject *result = PyRun_String(R"(\
class pybind11_static_property(property):
    def __get__(self, obj, cls):
        return property.__get__(self, cls, cls)

    def __set__(self, obj, value):
        cls = obj if isinstance(obj, type) else type(obj)
        property.__set__(self, cls, value)
)",
                                    Py_file_input,
                                    d.ptr(),
                                    d.ptr());
    if (result == nullptr)
        throw error_already_set();
    Py_DECREF(result);
    return (PyTypeObject *) d["pybind11_static_property"].cast<object>().release().ptr();
}
#endif // PYPY
/** Types with static properties need to handle `Type.static_prop = x` in a specific way.
    By default, Python replaces the `static_property` itself, but for wrapped C++ types
    we need to call `static_property.__set__()` in order to propagate the new value to
    the underlying C++ data structure. */
extern "C" inline int pybind11_meta_setattro(PyObject *obj, PyObject *name, PyObject *value) {
    // Use `_PyType_Lookup()` instead of `PyObject_GetAttr()` in order to get the raw
    // descriptor (`property`) instead of calling `tp_descr_get` (`property.__get__()`).
    PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name);
    // The following assignment combinations are possible:
    //   1. `Type.static_prop = value`             --> descr_set: `Type.static_prop.__set__(value)`
    //   2. `Type.static_prop = other_static_prop` --> setattro:  replace existing `static_prop`
    //   3. `Type.regular_attribute = value`       --> setattro:  regular attribute assignment
    auto *const static_prop = (PyObject *) get_internals().static_property_type;
    const auto call_descr_set = (descr != nullptr) && (value != nullptr)
                                && (PyObject_IsInstance(descr, static_prop) != 0)
                                && (PyObject_IsInstance(value, static_prop) == 0);
    if (call_descr_set) {
        // Call `static_property.__set__()` instead of replacing the `static_property`.
#if !defined(PYPY_VERSION)
        return Py_TYPE(descr)->tp_descr_set(descr, obj, value);
#else
        // PyPy: invoke the descriptor protocol through Python-level attribute access.
        if (PyObject *result = PyObject_CallMethod(descr, "__set__", "OO", obj, value)) {
            Py_DECREF(result);
            return 0;
        } else {
            return -1;
        }
#endif
    } else {
        // Replace existing attribute.
        return PyType_Type.tp_setattro(obj, name, value);
    }
}
/**
 * Python 3's PyInstanceMethod_Type hides itself via its tp_descr_get, which prevents aliasing
 * methods via cls.attr("m2") = cls.attr("m1"): instead the tp_descr_get returns a plain function,
 * when called on a class, or a PyMethod, when called on an instance. Override that behaviour here
 * to do a special case bypass for PyInstanceMethod_Types.
 */
extern "C" inline PyObject *pybind11_meta_getattro(PyObject *obj, PyObject *name) {
    // Fetch the raw descriptor without triggering tp_descr_get.
    PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name);
    // Anything other than a PyInstanceMethod gets the default metaclass lookup.
    if (descr == nullptr || !PyInstanceMethod_Check(descr)) {
        return PyType_Type.tp_getattro(obj, name);
    }
    // Return the instance-method object itself (new reference), bypassing unwrapping.
    Py_INCREF(descr);
    return descr;
}
/// metaclass `__call__` function that is used to create all pybind11 objects.
extern "C" inline PyObject *pybind11_meta_call(PyObject *type, PyObject *args, PyObject *kwargs) {

    // use the default metaclass call to create/initialize the object
    PyObject *self = PyType_Type.tp_call(type, args, kwargs);
    if (self == nullptr) {
        return nullptr;
    }

    // This must be a pybind11 instance
    auto *instance = reinterpret_cast<detail::instance *>(self);

    // Ensure that the base __init__ function(s) were called: every value/holder slot
    // must have had its holder constructed by some __init__, otherwise the underlying
    // C++ object(s) would be left uninitialized.
    for (const auto &vh : values_and_holders(instance)) {
        if (!vh.holder_constructed()) {
            PyErr_Format(PyExc_TypeError,
                         "%.200s.__init__() must be called when overriding __init__",
                         get_fully_qualified_tp_name(vh.type->type).c_str());
            Py_DECREF(self);
            return nullptr;
        }
    }

    return self;
}
/// Cleanup the type-info for a pybind11-registered type.
extern "C" inline void pybind11_meta_dealloc(PyObject *obj) {
    auto *type = (PyTypeObject *) obj;
    auto &internals = get_internals();

    // A pybind11-registered type will:
    // 1) be found in internals.registered_types_py
    // 2) have exactly one associated `detail::type_info`
    auto found_type = internals.registered_types_py.find(type);
    if (found_type != internals.registered_types_py.end() && found_type->second.size() == 1
        && found_type->second[0]->type == type) {

        auto *tinfo = found_type->second[0];
        auto tindex = std::type_index(*tinfo->cpptype);
        internals.direct_conversions.erase(tindex);

        // Module-local types are tracked in the local internals; everything else in
        // the shared registry.
        if (tinfo->module_local) {
            get_local_internals().registered_types_cpp.erase(tindex);
        } else {
            internals.registered_types_cpp.erase(tindex);
        }
        internals.registered_types_py.erase(tinfo->type);

        // Actually just `std::erase_if`, but that's only available in C++20
        auto &cache = internals.inactive_override_cache;
        for (auto it = cache.begin(), last = cache.end(); it != last;) {
            if (it->first == (PyObject *) tinfo->type) {
                it = cache.erase(it);
            } else {
                ++it;
            }
        }

        delete tinfo;
    }

    // Delegate actual type-object teardown to the default implementation.
    PyType_Type.tp_dealloc(obj);
}
/** This metaclass is assigned by default to all pybind11 types and is required in order
    for static properties to function correctly. Users may override this using `py::metaclass`.
    Return value: New reference. */
inline PyTypeObject *make_default_metaclass() {
    constexpr auto *name = "pybind11_type";
    auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));

    /* Danger zone: from now (and until PyType_Ready), make sure to
       issue no Python C API calls which could potentially invoke the
       garbage collector (the GC will call type_traverse(), which will in
       turn find the newly constructed type in an invalid state) */
    auto *heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0);
    if (!heap_type) {
        pybind11_fail("make_default_metaclass(): error allocating metaclass!");
    }

    heap_type->ht_name = name_obj.inc_ref().ptr();
#ifdef PYBIND11_BUILTIN_QUALNAME
    heap_type->ht_qualname = name_obj.inc_ref().ptr();
#endif

    auto *type = &heap_type->ht_type;
    type->tp_name = name;
    // The metaclass derives from `type` itself.
    type->tp_base = type_incref(&PyType_Type);
    type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;

    // Custom slots: __init__ enforcement, static-property assignment, raw
    // instance-method lookup, and type-info cleanup (all defined above).
    type->tp_call = pybind11_meta_call;
    type->tp_setattro = pybind11_meta_setattro;
    type->tp_getattro = pybind11_meta_getattro;
    type->tp_dealloc = pybind11_meta_dealloc;

    if (PyType_Ready(type) < 0) {
        pybind11_fail("make_default_metaclass(): failure in PyType_Ready()!");
    }

    setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
    PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);

    return type;
}
/// For multiple inheritance types we need to recursively register/deregister base pointers for any
/// base classes with pointers that are difference from the instance value pointer so that we can
/// correctly recognize an offset base class pointer. This calls a function with any offset base
/// ptrs.
inline void traverse_offset_bases(void *valueptr,
                                  const detail::type_info *tinfo,
                                  instance *self,
                                  bool (*f)(void * /*parentptr*/, instance * /*self*/)) {
    for (handle base : reinterpret_borrow<tuple>(tinfo->type->tp_bases)) {
        auto *base_tinfo = get_type_info((PyTypeObject *) base.ptr());
        if (base_tinfo == nullptr) {
            continue; // not a pybind11-registered base
        }
        // Find the implicit cast from this type to the base and apply it; only the
        // first matching cast is followed (then recurse into that base's bases).
        for (auto &cast : base_tinfo->implicit_casts) {
            if (cast.first != tinfo->cpptype) {
                continue;
            }
            void *baseptr = cast.second(valueptr);
            // Only offset (non-identical) base pointers need the extra registration.
            if (baseptr != valueptr) {
                f(baseptr, self);
            }
            traverse_offset_bases(baseptr, base_tinfo, self, f);
            break;
        }
    }
}
inline bool register_instance_impl(void *ptr, instance *self) {
    // Record the (value pointer -> instance) association in the global multimap.
    auto &instances = get_internals().registered_instances;
    instances.emplace(ptr, self);
    return true; // unused, but gives the same signature as the deregister func
}
inline bool deregister_instance_impl(void *ptr, instance *self) {
    // Several instances may share one value pointer; remove only the entry that
    // maps to this particular instance.
    auto &instances = get_internals().registered_instances;
    const auto range = instances.equal_range(ptr);
    for (auto it = range.first; it != range.second; ++it) {
        if (it->second == self) {
            instances.erase(it);
            return true;
        }
    }
    return false; // no matching registration found
}
/// Register `self` under its value pointer and, for multiple-inheritance types
/// (non-simple ancestors), under any offset base-class pointers as well.
inline void register_instance(instance *self, void *valptr, const type_info *tinfo) {
    register_instance_impl(valptr, self);
    if (!tinfo->simple_ancestors) {
        traverse_offset_bases(valptr, tinfo, self, register_instance_impl);
    }
}
/// Remove `self` (and any offset base-pointer registrations) from the instance
/// registry. Returns whether the primary value pointer was found and removed.
inline bool deregister_instance(instance *self, void *valptr, const type_info *tinfo) {
    bool ret = deregister_instance_impl(valptr, self);
    if (!tinfo->simple_ancestors) {
        traverse_offset_bases(valptr, tinfo, self, deregister_instance_impl);
    }
    return ret;
}
/// Instance creation function for all pybind11 types. It allocates the internal instance layout
/// for holding C++ objects and holders. Allocation is done lazily (the first time the instance is
/// cast to a reference or pointer), and initialization is done by an `__init__` function.
inline PyObject *make_new_instance(PyTypeObject *type) {
#if defined(PYPY_VERSION)
    // PyPy gets tp_basicsize wrong (issue 2482) under multiple inheritance when the first
    // inherited object is a plain Python type (i.e. not derived from an extension type). Fix it.
    ssize_t instance_size = static_cast<ssize_t>(sizeof(instance));
    if (type->tp_basicsize < instance_size) {
        type->tp_basicsize = instance_size;
    }
#endif
    PyObject *self = type->tp_alloc(type, 0);
    auto *inst = reinterpret_cast<instance *>(self);
    // Allocate the value/holder internals:
    inst->allocate_layout();
    return self;
}
/// Instance creation function for all pybind11 types. It only allocates space for the
/// C++ object, but doesn't call the constructor -- an `__init__` function must do that.
extern "C" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *, PyObject *) {
    // tp_new slot: args/kwargs are intentionally ignored here; __init__ handles them.
    return make_new_instance(type);
}
/// An `__init__` function constructs the C++ object. Users should provide at least one
/// of these using `py::init` or directly with `.def(__init__, ...)`. Otherwise, the
/// following default function will be used which simply throws an exception.
extern "C" inline int pybind11_object_init(PyObject *self, PyObject *, PyObject *) {
    // Default __init__: report that the bound class exposes no constructor.
    const std::string msg
        = get_fully_qualified_tp_name(Py_TYPE(self)) + ": No constructor defined!";
    PyErr_SetString(PyExc_TypeError, msg.c_str());
    return -1;
}
/// Record `patient` so that it outlives `nurse` (keep_alive support); takes a
/// strong reference that is released in clear_patients().
inline void add_patient(PyObject *nurse, PyObject *patient) {
    auto *inst = reinterpret_cast<detail::instance *>(nurse);
    inst->has_patients = true;
    Py_INCREF(patient);
    get_internals().patients[nurse].push_back(patient);
}
/// Drop all "patient" references held on behalf of `self` (keep_alive support).
inline void clear_patients(PyObject *self) {
    auto *instance = reinterpret_cast<detail::instance *>(self);
    auto &internals = get_internals();
    auto pos = internals.patients.find(self);
    // Callers check instance->has_patients first, so an entry must exist.
    assert(pos != internals.patients.end());
    // Clearing the patients can cause more Python code to run, which
    // can invalidate the iterator. Extract the vector of patients
    // from the unordered_map first.
    auto patients = std::move(pos->second);
    internals.patients.erase(pos);
    instance->has_patients = false;
    for (PyObject *&patient : patients) {
        Py_CLEAR(patient);
    }
}
/// Clears all internal data from the instance and removes it from registered instances in
/// preparation for deallocation.
inline void clear_instance(PyObject *self) {
    auto *instance = reinterpret_cast<detail::instance *>(self);

    // Deallocate any values/holders, if present:
    for (auto &v_h : values_and_holders(instance)) {
        if (v_h) {

            // We have to deregister before we call dealloc because, for virtual MI types, we still
            // need to be able to get the parent pointers.
            if (v_h.instance_registered()
                && !deregister_instance(instance, v_h.value_ptr(), v_h.type)) {
                pybind11_fail(
                    "pybind11_object_dealloc(): Tried to deallocate unregistered instance!");
            }

            // Destroy the C++ object only if this instance owns it or a holder was
            // constructed that is responsible for it.
            if (instance->owned || v_h.holder_constructed()) {
                v_h.type->dealloc(v_h);
            }
        }
    }
    // Deallocate the value/holder layout internals:
    instance->deallocate_layout();

    if (instance->weakrefs) {
        PyObject_ClearWeakRefs(self);
    }

    // Clear the instance __dict__, if one exists (dynamic_attr types).
    PyObject **dict_ptr = _PyObject_GetDictPtr(self);
    if (dict_ptr) {
        Py_CLEAR(*dict_ptr);
    }

    if (instance->has_patients) {
        clear_patients(self);
    }
}
/// Instance destructor function for all pybind11 types. It calls `type_info.dealloc`
/// to destroy the C++ object itself, while the rest is Python bookkeeping.
extern "C" inline void pybind11_object_dealloc(PyObject *self) {
    auto *type = Py_TYPE(self);

    // If this is a GC tracked object, untrack it first
    // Note that the track call is implicitly done by the
    // default tp_alloc, which we never override.
    if (PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC) != 0) {
        PyObject_GC_UnTrack(self);
    }

    clear_instance(self);

    type->tp_free(self);

#if PY_VERSION_HEX < 0x03080000
    // `type->tp_dealloc != pybind11_object_dealloc` means that we're being called
    // as part of a derived type's dealloc, in which case we're not allowed to decref
    // the type here. For cross-module compatibility, we shouldn't compare directly
    // with `pybind11_object_dealloc`, but with the common one stashed in internals.
    auto pybind11_object_type = (PyTypeObject *) get_internals().instance_base;
    if (type->tp_dealloc == pybind11_object_type->tp_dealloc)
        Py_DECREF(type);
#else
    // This was not needed before Python 3.8 (Python issue 35810)
    // https://github.com/pybind/pybind11/issues/1946
    Py_DECREF(type);
#endif
}
std::string error_string();
/** Create the type which can be used as a common base for all classes. This is
    needed in order to satisfy Python's requirements for multiple inheritance.
    Return value: New reference. */
inline PyObject *make_object_base_type(PyTypeObject *metaclass) {
    constexpr auto *name = "pybind11_object";
    auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));

    /* Danger zone: from now (and until PyType_Ready), make sure to
       issue no Python C API calls which could potentially invoke the
       garbage collector (the GC will call type_traverse(), which will in
       turn find the newly constructed type in an invalid state) */
    auto *heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0);
    if (!heap_type) {
        pybind11_fail("make_object_base_type(): error allocating type!");
    }

    heap_type->ht_name = name_obj.inc_ref().ptr();
#ifdef PYBIND11_BUILTIN_QUALNAME
    heap_type->ht_qualname = name_obj.inc_ref().ptr();
#endif

    auto *type = &heap_type->ht_type;
    type->tp_name = name;
    type->tp_base = type_incref(&PyBaseObject_Type);
    // Reserve room for the pybind11 `instance` layout in every object.
    type->tp_basicsize = static_cast<ssize_t>(sizeof(instance));
    type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;

    type->tp_new = pybind11_object_new;
    type->tp_init = pybind11_object_init;
    type->tp_dealloc = pybind11_object_dealloc;

    /* Support weak references (needed for the keep_alive feature) */
    type->tp_weaklistoffset = offsetof(instance, weakrefs);

    if (PyType_Ready(type) < 0) {
        pybind11_fail("PyType_Ready failed in make_object_base_type(): " + error_string());
    }

    setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
    PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);

    // The common base must not itself be GC tracked.
    assert(!PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));
    return (PyObject *) heap_type;
}
/// dynamic_attr: Allow the garbage collector to traverse the internal instance `__dict__`.
extern "C" inline int pybind11_traverse(PyObject *self, visitproc visit, void *arg) {
    PyObject *&dict = *_PyObject_GetDictPtr(self);
    Py_VISIT(dict);
// https://docs.python.org/3/c-api/typeobj.html#c.PyTypeObject.tp_traverse
#if PY_VERSION_HEX >= 0x03090000
    // Since Python 3.9, heap types must also visit their own type object.
    Py_VISIT(Py_TYPE(self));
#endif
    return 0;
}
/// dynamic_attr: Allow the GC to clear the dictionary.
extern "C" inline int pybind11_clear(PyObject *self) {
    // Drop the instance __dict__ reference so the GC can break cycles through it.
    PyObject **dict_ptr = _PyObject_GetDictPtr(self);
    Py_CLEAR(*dict_ptr);
    return 0;
}
/// Give instances of this type a `__dict__` and opt into garbage collection.
inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type) {
    auto *type = &heap_type->ht_type;
    type->tp_flags |= Py_TPFLAGS_HAVE_GC;
#if PY_VERSION_HEX < 0x030B0000
    type->tp_dictoffset = type->tp_basicsize;           // place dict at the end
    type->tp_basicsize += (ssize_t) sizeof(PyObject *); // and allocate enough space for it
#else
    // Python 3.11+: let the interpreter manage the dict storage.
    type->tp_flags |= Py_TPFLAGS_MANAGED_DICT;
#endif
    type->tp_traverse = pybind11_traverse;
    type->tp_clear = pybind11_clear;

    // Expose `__dict__` via the generic getter/setter. The array is static because
    // tp_getset stores a pointer to it for the lifetime of the type.
    static PyGetSetDef getset[] = {{
#if PY_VERSION_HEX < 0x03070000
                                       // Pre-3.7 PyGetSetDef takes a non-const name.
                                       const_cast<char *>("__dict__"),
#else
                                       "__dict__",
#endif
                                       PyObject_GenericGetDict,
                                       PyObject_GenericSetDict,
                                       nullptr,
                                       nullptr},
                                   {nullptr, nullptr, nullptr, nullptr, nullptr}};
    type->tp_getset = getset;
}
/// buffer_protocol: Fill in the view as specified by flags.
extern "C" inline int pybind11_getbuffer(PyObject *obj, Py_buffer *view, int flags) {
    // Look for a `get_buffer` implementation in this type's info or any bases (following MRO).
    type_info *tinfo = nullptr;
    for (auto type : reinterpret_borrow<tuple>(Py_TYPE(obj)->tp_mro)) {
        tinfo = get_type_info((PyTypeObject *) type.ptr());
        if (tinfo && tinfo->get_buffer) {
            break;
        }
    }
    if (view == nullptr || !tinfo || !tinfo->get_buffer) {
        if (view) {
            view->obj = nullptr;
        }
        PyErr_SetString(PyExc_BufferError, "pybind11_getbuffer(): Internal error");
        return -1;
    }
    std::memset(view, 0, sizeof(Py_buffer));
    buffer_info *info = tinfo->get_buffer(obj, tinfo->get_buffer_data);
    // Reject writable requests on read-only storage.
    if ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE && info->readonly) {
        delete info;
        // view->obj = nullptr;  // Was just memset to 0, so not necessary
        PyErr_SetString(PyExc_BufferError, "Writable buffer requested for readonly storage");
        return -1;
    }
    // Populate the view. `info` stays alive via view->internal and is deleted in
    // pybind11_releasebuffer.
    view->obj = obj;
    view->ndim = 1;
    view->internal = info;
    view->buf = info->ptr;
    view->itemsize = info->itemsize;
    // Total length in bytes = itemsize * product(shape).
    view->len = view->itemsize;
    for (auto s : info->shape) {
        view->len *= s;
    }
    view->readonly = static_cast<int>(info->readonly);
    if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT) {
        view->format = const_cast<char *>(info->format.c_str());
    }
    if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) {
        view->ndim = (int) info->ndim;
        view->strides = info->strides.data();
        view->shape = info->shape.data();
    }
    // The exported view keeps a strong reference to the exporting object.
    Py_INCREF(view->obj);
    return 0;
}
/// buffer_protocol: Release the resources of the buffer.
extern "C" inline void pybind11_releasebuffer(PyObject *, Py_buffer *view) {
    // Free the buffer_info allocated by pybind11_getbuffer (stored in view->internal).
    delete static_cast<buffer_info *>(view->internal);
}
/// Give this type a buffer interface.
inline void enable_buffer_protocol(PyHeapTypeObject *heap_type) {
    // Fill in the heap type's embedded PyBufferProcs and point the type slot at it.
    auto &buffer_procs = heap_type->as_buffer;
    buffer_procs.bf_getbuffer = pybind11_getbuffer;
    buffer_procs.bf_releasebuffer = pybind11_releasebuffer;
    heap_type->ht_type.tp_as_buffer = &buffer_procs;
}
/** Create a brand new Python type according to the `type_record` specification.
    Return value: New reference. */
inline PyObject *make_new_python_type(const type_record &rec) {
    auto name = reinterpret_steal<object>(PYBIND11_FROM_STRING(rec.name));

    // Qualified name: prefix with the scope's __qualname__ for nested classes.
    auto qualname = name;
    if (rec.scope && !PyModule_Check(rec.scope.ptr()) && hasattr(rec.scope, "__qualname__")) {
        qualname = reinterpret_steal<object>(
            PyUnicode_FromFormat("%U.%U", rec.scope.attr("__qualname__").ptr(), name.ptr()));
    }

    // Resolve the module name from the enclosing scope (module or class).
    object module_;
    if (rec.scope) {
        if (hasattr(rec.scope, "__module__")) {
            module_ = rec.scope.attr("__module__");
        } else if (hasattr(rec.scope, "__name__")) {
            module_ = rec.scope.attr("__name__");
        }
    }

    // tp_name: "module.name" where available (plain name on PyPy).
    const auto *full_name = c_str(
#if !defined(PYPY_VERSION)
        module_ ? str(module_).cast<std::string>() + "." + rec.name :
#endif
                rec.name);

    char *tp_doc = nullptr;
    if (rec.doc && options::show_user_defined_docstrings()) {
        /* Allocate memory for docstring (using PyObject_MALLOC, since
           Python will free this later on) */
        size_t size = std::strlen(rec.doc) + 1;
        tp_doc = (char *) PyObject_MALLOC(size);
        std::memcpy((void *) tp_doc, rec.doc, size);
    }

    auto &internals = get_internals();
    auto bases = tuple(rec.bases);
    // Without explicit bases, derive from the common pybind11_object base.
    auto *base = (bases.empty()) ? internals.instance_base : bases[0].ptr();

    /* Danger zone: from now (and until PyType_Ready), make sure to
       issue no Python C API calls which could potentially invoke the
       garbage collector (the GC will call type_traverse(), which will in
       turn find the newly constructed type in an invalid state) */
    auto *metaclass
        = rec.metaclass.ptr() ? (PyTypeObject *) rec.metaclass.ptr() : internals.default_metaclass;

    auto *heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0);
    if (!heap_type) {
        pybind11_fail(std::string(rec.name) + ": Unable to create type object!");
    }

    heap_type->ht_name = name.release().ptr();
#ifdef PYBIND11_BUILTIN_QUALNAME
    heap_type->ht_qualname = qualname.inc_ref().ptr();
#endif

    auto *type = &heap_type->ht_type;
    type->tp_name = full_name;
    type->tp_doc = tp_doc;
    type->tp_base = type_incref((PyTypeObject *) base);
    type->tp_basicsize = static_cast<ssize_t>(sizeof(instance));
    if (!bases.empty()) {
        type->tp_bases = bases.release().ptr();
    }

    /* Don't inherit base __init__ */
    type->tp_init = pybind11_object_init;

    /* Supported protocols */
    type->tp_as_number = &heap_type->as_number;
    type->tp_as_sequence = &heap_type->as_sequence;
    type->tp_as_mapping = &heap_type->as_mapping;
    type->tp_as_async = &heap_type->as_async;

    /* Flags */
    type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE;
    if (!rec.is_final) {
        type->tp_flags |= Py_TPFLAGS_BASETYPE;
    }

    if (rec.dynamic_attr) {
        enable_dynamic_attributes(heap_type);
    }

    if (rec.buffer_protocol) {
        enable_buffer_protocol(heap_type);
    }

    // Last hook before PyType_Ready: user-provided slot customization.
    if (rec.custom_type_setup_callback) {
        rec.custom_type_setup_callback(heap_type);
    }

    if (PyType_Ready(type) < 0) {
        pybind11_fail(std::string(rec.name) + ": PyType_Ready failed: " + error_string());
    }

    assert(!rec.dynamic_attr || PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));

    /* Register type with the parent scope */
    if (rec.scope) {
        setattr(rec.scope, rec.name, (PyObject *) type);
    } else {
        Py_INCREF(type); // Keep it alive forever (reference leak)
    }

    if (module_) { // Needed by pydoc
        setattr((PyObject *) type, "__module__", module_);
    }

    PYBIND11_SET_OLDPY_QUALNAME(type, qualname);

    return (PyObject *) type;
}
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
/*
pybind11/detail/common.h -- Basic macros
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#define PYBIND11_VERSION_MAJOR 2
#define PYBIND11_VERSION_MINOR 11
#define PYBIND11_VERSION_PATCH 1
// Similar to Python's convention: https://docs.python.org/3/c-api/apiabiversion.html
// Additional convention: 0xD = dev
#define PYBIND11_VERSION_HEX 0x020B0100
// Define some generic pybind11 helper macros for warning management.
//
// Note that compiler-specific push/pop pairs are baked into the
// PYBIND11_NAMESPACE_BEGIN/PYBIND11_NAMESPACE_END pair of macros. Therefore manual
// PYBIND11_WARNING_PUSH/PYBIND11_WARNING_POP are usually only needed in `#include` sections.
//
// If you find you need to suppress a warning, please try to make the suppression as local as
// possible using these macros. Please also be sure to push/pop with the pybind11 macros. Please
// only use compiler specifics if you need to check specific versions, e.g. Apple Clang vs. vanilla
// Clang.
// Per-compiler pragma wrappers: detect the compiler and define matching
// PYBIND11_WARNING_PUSH / PYBIND11_WARNING_POP pairs (and PYBIND11_PRAGMA,
// the raw pragma-emission helper). Intel is checked before Clang/GCC because
// it also defines their macros.
#if defined(_MSC_VER)
#    define PYBIND11_COMPILER_MSVC
#    define PYBIND11_PRAGMA(...) __pragma(__VA_ARGS__)
#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(warning(push))
#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(warning(pop))
#elif defined(__INTEL_COMPILER)
#    define PYBIND11_COMPILER_INTEL
#    define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(warning push)
#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(warning pop)
#elif defined(__clang__)
#    define PYBIND11_COMPILER_CLANG
#    define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(clang diagnostic push)
// Fix: this previously expanded to `clang diagnostic push`, so every
// PYBIND11_WARNING_POP under Clang pushed a new diagnostic state instead of
// popping it, leaking warning suppressions past their intended scope.
#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(clang diagnostic pop)
#elif defined(__GNUC__)
#    define PYBIND11_COMPILER_GCC
#    define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(GCC diagnostic push)
#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(GCC diagnostic pop)
#endif
#ifdef PYBIND11_COMPILER_MSVC
# define PYBIND11_WARNING_DISABLE_MSVC(name) PYBIND11_PRAGMA(warning(disable : name))
#else
# define PYBIND11_WARNING_DISABLE_MSVC(name)
#endif
#ifdef PYBIND11_COMPILER_CLANG
# define PYBIND11_WARNING_DISABLE_CLANG(name) PYBIND11_PRAGMA(clang diagnostic ignored name)
#else
# define PYBIND11_WARNING_DISABLE_CLANG(name)
#endif
#ifdef PYBIND11_COMPILER_GCC
# define PYBIND11_WARNING_DISABLE_GCC(name) PYBIND11_PRAGMA(GCC diagnostic ignored name)
#else
# define PYBIND11_WARNING_DISABLE_GCC(name)
#endif
#ifdef PYBIND11_COMPILER_INTEL
# define PYBIND11_WARNING_DISABLE_INTEL(name) PYBIND11_PRAGMA(warning disable name)
#else
# define PYBIND11_WARNING_DISABLE_INTEL(name)
#endif
#define PYBIND11_NAMESPACE_BEGIN(name) \
namespace name { \
PYBIND11_WARNING_PUSH
#define PYBIND11_NAMESPACE_END(name) \
PYBIND11_WARNING_POP \
}
// Robust support for some features and loading modules compiled against different pybind versions
// requires forcing hidden visibility on pybind code, so we enforce this by setting the attribute
// on the main `pybind11` namespace.
#if !defined(PYBIND11_NAMESPACE)
# ifdef __GNUG__
# define PYBIND11_NAMESPACE pybind11 __attribute__((visibility("hidden")))
# else
# define PYBIND11_NAMESPACE pybind11
# endif
#endif
#if !(defined(_MSC_VER) && __cplusplus == 199711L)
# if __cplusplus >= 201402L
# define PYBIND11_CPP14
# if __cplusplus >= 201703L
# define PYBIND11_CPP17
# if __cplusplus >= 202002L
# define PYBIND11_CPP20
// Please update tests/pybind11_tests.cpp `cpp_std()` when adding a macro here.
# endif
# endif
# endif
#elif defined(_MSC_VER) && __cplusplus == 199711L
// MSVC sets _MSVC_LANG rather than __cplusplus (supposedly until the standard is fully
// implemented). Unless you use the /Zc:__cplusplus flag on Visual Studio 2017 15.7 Preview 3
// or newer.
# if _MSVC_LANG >= 201402L
# define PYBIND11_CPP14
# if _MSVC_LANG > 201402L
# define PYBIND11_CPP17
# if _MSVC_LANG >= 202002L
# define PYBIND11_CPP20
# endif
# endif
# endif
#endif
// Compiler version assertions
#if defined(__INTEL_COMPILER)
# if __INTEL_COMPILER < 1800
# error pybind11 requires Intel C++ compiler v18 or newer
# elif __INTEL_COMPILER < 1900 && defined(PYBIND11_CPP14)
# error pybind11 supports only C++11 with Intel C++ compiler v18. Use v19 or newer for C++14.
# endif
/* The following pragma cannot be pop'ed:
https://community.intel.com/t5/Intel-C-Compiler/Inline-and-no-inline-warning/td-p/1216764 */
# pragma warning disable 2196 // warning #2196: routine is both "inline" and "noinline"
#elif defined(__clang__) && !defined(__apple_build_version__)
# if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 3)
# error pybind11 requires clang 3.3 or newer
# endif
#elif defined(__clang__)
// Apple changes clang version macros to its Xcode version; the first Xcode release based on
// (upstream) clang 3.3 was Xcode 5:
# if __clang_major__ < 5
# error pybind11 requires Xcode/clang 5.0 or newer
# endif
#elif defined(__GNUG__)
# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
# error pybind11 requires gcc 4.8 or newer
# endif
#elif defined(_MSC_VER)
# if _MSC_VER < 1910
# error pybind11 2.10+ requires MSVC 2017 or newer
# endif
#endif
#if !defined(PYBIND11_EXPORT)
# if defined(WIN32) || defined(_WIN32)
# define PYBIND11_EXPORT __declspec(dllexport)
# else
# define PYBIND11_EXPORT __attribute__((visibility("default")))
# endif
#endif
#if !defined(PYBIND11_EXPORT_EXCEPTION)
# if defined(__apple_build_version__)
# define PYBIND11_EXPORT_EXCEPTION PYBIND11_EXPORT
# else
# define PYBIND11_EXPORT_EXCEPTION
# endif
#endif
// For CUDA, GCC7, GCC8:
// PYBIND11_NOINLINE_FORCED is incompatible with `-Wattributes -Werror`.
// When defining PYBIND11_NOINLINE_FORCED, it is best to also use `-Wno-attributes`.
// However, the measured shared-library size saving when using noinline are only
// 1.7% for CUDA, -0.2% for GCC7, and 0.0% for GCC8 (using -DCMAKE_BUILD_TYPE=MinSizeRel,
// the default under pybind11/tests).
#if !defined(PYBIND11_NOINLINE_FORCED) \
&& (defined(__CUDACC__) || (defined(__GNUC__) && (__GNUC__ == 7 || __GNUC__ == 8)))
# define PYBIND11_NOINLINE_DISABLED
#endif
// The PYBIND11_NOINLINE macro is for function DEFINITIONS.
// In contrast, FORWARD DECLARATIONS should never use this macro:
// https://stackoverflow.com/questions/9317473/forward-declaration-of-inline-functions
#if defined(PYBIND11_NOINLINE_DISABLED) // Option for maximum portability and experimentation.
# define PYBIND11_NOINLINE inline
#elif defined(_MSC_VER)
# define PYBIND11_NOINLINE __declspec(noinline) inline
#else
# define PYBIND11_NOINLINE __attribute__((noinline)) inline
#endif
#if defined(__MINGW32__)
// For unknown reasons all PYBIND11_DEPRECATED member trigger a warning when declared
// whether it is used or not
# define PYBIND11_DEPRECATED(reason)
#elif defined(PYBIND11_CPP14)
# define PYBIND11_DEPRECATED(reason) [[deprecated(reason)]]
#else
# define PYBIND11_DEPRECATED(reason) __attribute__((deprecated(reason)))
#endif
#if defined(PYBIND11_CPP17)
# define PYBIND11_MAYBE_UNUSED [[maybe_unused]]
#elif defined(_MSC_VER) && !defined(__clang__)
# define PYBIND11_MAYBE_UNUSED
#else
# define PYBIND11_MAYBE_UNUSED __attribute__((__unused__))
#endif
/* Don't let Python.h #define (v)snprintf as macro because they are implemented
properly in Visual Studio since 2015. */
#if defined(_MSC_VER)
# define HAVE_SNPRINTF 1
#endif
/// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode
#if defined(_MSC_VER)
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(4505)
// C4505: 'PySlice_GetIndicesEx': unreferenced local function has been removed (PyPy only)
# if defined(_DEBUG) && !defined(Py_DEBUG)
// Workaround for a VS 2022 issue.
// NOTE: This workaround knowingly violates the Python.h include order requirement:
// https://docs.python.org/3/c-api/intro.html#include-files
// See https://github.com/pybind/pybind11/pull/3497 for full context.
# include <yvals.h>
# if _MSVC_STL_VERSION >= 143
# include <crtdefs.h>
# endif
# define PYBIND11_DEBUG_MARKER
# undef _DEBUG
# endif
#endif
// https://en.cppreference.com/w/c/chrono/localtime
#if defined(__STDC_LIB_EXT1__) && !defined(__STDC_WANT_LIB_EXT1__)
# define __STDC_WANT_LIB_EXT1__
#endif
#ifdef __has_include
// std::optional (but including it in c++14 mode isn't allowed)
# if defined(PYBIND11_CPP17) && __has_include(<optional>)
# define PYBIND11_HAS_OPTIONAL 1
# endif
// std::experimental::optional (but not allowed in c++11 mode)
# if defined(PYBIND11_CPP14) && (__has_include(<experimental/optional>) && \
!__has_include(<optional>))
# define PYBIND11_HAS_EXP_OPTIONAL 1
# endif
// std::variant
# if defined(PYBIND11_CPP17) && __has_include(<variant>)
# define PYBIND11_HAS_VARIANT 1
# endif
#elif defined(_MSC_VER) && defined(PYBIND11_CPP17)
# define PYBIND11_HAS_OPTIONAL 1
# define PYBIND11_HAS_VARIANT 1
#endif
#if defined(PYBIND11_CPP17)
# if defined(__has_include)
# if __has_include(<string_view>)
# define PYBIND11_HAS_STRING_VIEW
# endif
# elif defined(_MSC_VER)
# define PYBIND11_HAS_STRING_VIEW
# endif
#endif
#include <Python.h>
// Reminder: WITH_THREAD is always defined if PY_VERSION_HEX >= 0x03070000
#if PY_VERSION_HEX < 0x03060000
# error "PYTHON < 3.6 IS UNSUPPORTED. pybind11 v2.9 was the last to support Python 2 and 3.5."
#endif
#include <frameobject.h>
#include <pythread.h>
/* Python #defines overrides on all sorts of core functions, which
   tends to wreak havoc in C++ codebases that expect these to work
   like regular functions (potentially with several overloads) */
#if defined(isalnum)
# undef isalnum
# undef isalpha
# undef islower
# undef isspace
# undef isupper
# undef tolower
# undef toupper
#endif
#if defined(copysign)
# undef copysign
#endif
#if defined(PYPY_VERSION) && !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
# define PYBIND11_SIMPLE_GIL_MANAGEMENT
#endif
#if defined(_MSC_VER)
# if defined(PYBIND11_DEBUG_MARKER)
# define _DEBUG
# undef PYBIND11_DEBUG_MARKER
# endif
PYBIND11_WARNING_POP
#endif
#include <cstddef>
#include <cstring>
#include <exception>
#include <forward_list>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <typeindex>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#if defined(__has_include)
# if __has_include(<version>)
# include <version>
# endif
#endif
// Must be after including <version> or one of the other headers specified by the standard
#if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L
# define PYBIND11_HAS_U8STRING
#endif
// See description of PR #4246:
#if !defined(PYBIND11_NO_ASSERT_GIL_HELD_INCREF_DECREF) && !defined(NDEBUG) \
&& !defined(PYPY_VERSION) && !defined(PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF)
# define PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF
#endif
// #define PYBIND11_STR_LEGACY_PERMISSIVE
// If DEFINED, pybind11::str can hold PyUnicodeObject or PyBytesObject
// (probably surprising and never documented, but this was the
// legacy behavior until and including v2.6.x). As a side-effect,
// pybind11::isinstance<str>() is true for both pybind11::str and
// pybind11::bytes.
// If UNDEFINED, pybind11::str can only hold PyUnicodeObject, and
// pybind11::isinstance<str>() is true only for pybind11::str.
// However, for Python 2 only (!), the pybind11::str caster
// implicitly decoded bytes to PyUnicodeObject. This was to ease
// the transition from the legacy behavior to the non-permissive
// behavior.
/// Compatibility macros for Python 2 / Python 3 versions TODO: remove
// Instance-method helpers (thin wrappers over the CPython PyInstanceMethod_* API).
#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyInstanceMethod_New(ptr)
#define PYBIND11_INSTANCE_METHOD_CHECK PyInstanceMethod_Check
#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyInstanceMethod_GET_FUNCTION
// Bytes-object helpers (map directly onto the PyBytes_* API).
#define PYBIND11_BYTES_CHECK PyBytes_Check
#define PYBIND11_BYTES_FROM_STRING PyBytes_FromString
#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyBytes_FromStringAndSize
#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyBytes_AsStringAndSize
#define PYBIND11_BYTES_AS_STRING PyBytes_AsString
#define PYBIND11_BYTES_SIZE PyBytes_Size
// Integer helpers (map onto the PyLong_* API).
#define PYBIND11_LONG_CHECK(o) PyLong_Check(o)
#define PYBIND11_LONG_AS_LONGLONG(o) PyLong_AsLongLong(o)
#define PYBIND11_LONG_FROM_SIGNED(o) PyLong_FromSsize_t((ssize_t) (o))
#define PYBIND11_LONG_FROM_UNSIGNED(o) PyLong_FromSize_t((size_t) (o))
// Type-name / module-name constants and misc Python-3 spellings.
#define PYBIND11_BYTES_NAME "bytes"
#define PYBIND11_STRING_NAME "str"
#define PYBIND11_SLICE_OBJECT PyObject
#define PYBIND11_FROM_STRING PyUnicode_FromString
#define PYBIND11_STR_TYPE ::pybind11::str
#define PYBIND11_BOOL_ATTR "__bool__"
#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool)
#define PYBIND11_BUILTINS_MODULE "builtins"
// Providing a separate declaration to make Clang's -Wmissing-prototypes happy.
// See comment for PYBIND11_MODULE below for why this is marked "maybe unused".
#define PYBIND11_PLUGIN_IMPL(name)                                                                \
    extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT PyObject *PyInit_##name();                   \
    extern "C" PYBIND11_EXPORT PyObject *PyInit_##name()
// Non-null sentinel returned by dispatch code to signal "try the next overload".
#define PYBIND11_TRY_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code
// Standard two-step stringification / token-pasting helpers.
#define PYBIND11_STRINGIFY(x) #x
#define PYBIND11_TOSTRING(x) PYBIND11_STRINGIFY(x)
#define PYBIND11_CONCAT(first, second) first##second
// Force creation of pybind11's internal data structures before module init proceeds.
#define PYBIND11_ENSURE_INTERNALS_READY pybind11::detail::get_internals();
// Compares the compiled-against Python "major.minor" version with the running
// interpreter's version string; on mismatch, sets ImportError and returns nullptr
// from the enclosing PyInit_* function.
#define PYBIND11_CHECK_PYTHON_VERSION                                                             \
    {                                                                                             \
        const char *compiled_ver                                                                  \
            = PYBIND11_TOSTRING(PY_MAJOR_VERSION) "." PYBIND11_TOSTRING(PY_MINOR_VERSION);        \
        const char *runtime_ver = Py_GetVersion();                                                \
        size_t len = std::strlen(compiled_ver);                                                   \
        if (std::strncmp(runtime_ver, compiled_ver, len) != 0                                     \
            || (runtime_ver[len] >= '0' && runtime_ver[len] <= '9')) {                            \
            PyErr_Format(PyExc_ImportError,                                                       \
                         "Python version mismatch: module was compiled for Python %s, "           \
                         "but the interpreter version is incompatible: %s.",                      \
                         compiled_ver,                                                            \
                         runtime_ver);                                                            \
            return nullptr;                                                                       \
        }                                                                                         \
    }
// Translates any exception escaping module initialization into a Python ImportError.
#define PYBIND11_CATCH_INIT_EXCEPTIONS                                                            \
    catch (pybind11::error_already_set & e) {                                                     \
        pybind11::raise_from(e, PyExc_ImportError, "initialization failed");                      \
        return nullptr;                                                                           \
    }                                                                                             \
    catch (const std::exception &e) {                                                             \
        PyErr_SetString(PyExc_ImportError, e.what());                                             \
        return nullptr;                                                                           \
    }
/** \rst
***Deprecated in favor of PYBIND11_MODULE***
This macro creates the entry point that will be invoked when the Python interpreter
imports a plugin library. Please create a `module_` in the function body and return
the pointer to its underlying Python object at the end.
.. code-block:: cpp
PYBIND11_PLUGIN(example) {
pybind11::module_ m("example", "pybind11 example plugin");
/// Set up bindings here
return m.ptr();
}
\endrst */
// Legacy entry point: declares a user-supplied `pybind11_init()` returning the raw
// module PyObject*, and wraps it with version checking and exception translation.
#define PYBIND11_PLUGIN(name)                                                                     \
    PYBIND11_DEPRECATED("PYBIND11_PLUGIN is deprecated, use PYBIND11_MODULE")                     \
    static PyObject *pybind11_init();                                                             \
    PYBIND11_PLUGIN_IMPL(name) {                                                                  \
        PYBIND11_CHECK_PYTHON_VERSION                                                             \
        PYBIND11_ENSURE_INTERNALS_READY                                                           \
        try {                                                                                     \
            return pybind11_init();                                                               \
        }                                                                                         \
        PYBIND11_CATCH_INIT_EXCEPTIONS                                                            \
    }                                                                                             \
    PyObject *pybind11_init()
/** \rst
This macro creates the entry point that will be invoked when the Python interpreter
imports an extension module. The module name is given as the first argument and it
should not be in quotes. The second macro argument defines a variable of type
`py::module_` which can be used to initialize the module.
The entry point is marked as "maybe unused" to aid dead-code detection analysis:
since the entry point is typically only looked up at runtime and not referenced
during translation, it would otherwise appear as unused ("dead") code.
.. code-block:: cpp
PYBIND11_MODULE(example, m) {
m.doc() = "pybind11 example module";
// Add bindings here
m.def("foo", []() {
return "Hello, World!";
});
}
\endrst */
// Defines the PyInit_<name> entry point, creates the extension module object, and
// forwards it to a user-written `pybind11_init_<name>(module_ &)` function body.
#define PYBIND11_MODULE(name, variable)                                                           \
    static ::pybind11::module_::module_def PYBIND11_CONCAT(pybind11_module_def_, name)            \
        PYBIND11_MAYBE_UNUSED;                                                                    \
    PYBIND11_MAYBE_UNUSED                                                                         \
    static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &);                     \
    PYBIND11_PLUGIN_IMPL(name) {                                                                  \
        PYBIND11_CHECK_PYTHON_VERSION                                                             \
        PYBIND11_ENSURE_INTERNALS_READY                                                           \
        auto m = ::pybind11::module_::create_extension_module(                                    \
            PYBIND11_TOSTRING(name), nullptr, &PYBIND11_CONCAT(pybind11_module_def_, name));      \
        try {                                                                                     \
            PYBIND11_CONCAT(pybind11_init_, name)(m);                                             \
            return m.ptr();                                                                       \
        }                                                                                         \
        PYBIND11_CATCH_INIT_EXCEPTIONS                                                            \
    }                                                                                             \
    void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ & (variable))
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
// pybind11-local aliases: `ssize_t` is CPython's signed size type.
using ssize_t = Py_ssize_t;
using size_t = std::size_t;
/// Cast an integral value to ssize_t, statically rejecting source types wider
/// than ssize_t (which could silently narrow).
template <typename IntType>
inline ssize_t ssize_t_cast(const IntType &val) {
    static_assert(sizeof(IntType) <= sizeof(ssize_t), "Implicit narrowing is not permitted.");
    return static_cast<ssize_t>(val);
}
/// Approach used to cast a previously unknown C++ instance into a Python object
enum class return_value_policy : uint8_t {
    /** This is the default return value policy, which falls back to the policy
        return_value_policy::take_ownership when the return value is a pointer.
        Otherwise, it uses return_value_policy::move or return_value_policy::copy
        for rvalue and lvalue references, respectively. See below for a description
        of what all of these different policies do. */
    automatic = 0,
    /** As above, but use policy return_value_policy::reference when the return
        value is a pointer. This is the default conversion policy for function
        arguments when calling Python functions manually from C++ code (i.e. via
        handle::operator()). You probably won't need to use this. */
    automatic_reference,
    /** Reference an existing object (i.e. do not create a new copy) and take
        ownership. Python will call the destructor and delete operator when the
        object's reference count reaches zero. Undefined behavior ensues when
        the C++ side does the same. */
    take_ownership,
    /** Create a new copy of the returned object, which will be owned by
        Python. This policy is comparably safe because the lifetimes of the two
        instances are decoupled. */
    copy,
    /** Use std::move to move the return value contents into a new instance
        that will be owned by Python. This policy is comparably safe because the
        lifetimes of the two instances (move source and destination) are
        decoupled. */
    move,
    /** Reference an existing object, but do not take ownership. The C++ side
        is responsible for managing the object's lifetime and deallocating it
        when it is no longer used. Warning: undefined behavior will ensue when
        the C++ side deletes an object that is still referenced and used by
        Python. */
    reference,
    /** This policy only applies to methods and properties. It references the
        object without taking ownership similar to the above
        return_value_policy::reference policy. In contrast to that policy, the
        function or property's implicit this argument (called the parent) is
        considered to be the owner of the return value (the child).
        pybind11 then couples the lifetime of the parent to the child via a
        reference relationship that ensures that the parent cannot be garbage
        collected while Python is still using the child. More advanced
        variations of this scheme are also possible using combinations of
        return_value_policy::reference and the keep_alive call policy */
    reference_internal
};
PYBIND11_NAMESPACE_BEGIN(detail)
/// Compile-time floor(log2(n)): halves `n` until it reaches 1 (or 0), counting
/// the halvings in `k`. Written as a single-return recursion for C++11 constexpr.
inline static constexpr int log2(size_t n, int k = 0) {
    return (n > 1) ? log2(n / 2, k + 1) : k;
}
// Number of pointer-sized slots needed to hold `s` bytes: `s / sizeof(void *)`,
// rounded up (the division is done as a shift by log2(sizeof(void *))).
inline static constexpr size_t size_in_ptrs(size_t s) {
    return ((s - 1) >> log2(sizeof(void *))) + 1;
}
/**
 * The space to allocate for simple layout instance holders (see below) in multiples of the size
 * of a pointer (e.g. 2 means 16 bytes on 64-bit architectures). The default is the minimum
 * required to hold either a std::unique_ptr or std::shared_ptr (which is almost always
 * sizeof(std::shared_ptr<T>)).
 */
constexpr size_t instance_simple_holder_in_ptrs() {
    // shared_ptr is the larger of the two default holder types, so sizing the slot
    // for it guarantees a unique_ptr holder fits as well.
    static_assert(sizeof(std::shared_ptr<int>) >= sizeof(std::unique_ptr<int>),
                  "pybind assumes std::shared_ptrs are at least as big as std::unique_ptrs");
    return size_in_ptrs(sizeof(std::shared_ptr<int>));
}
// Forward declarations
struct type_info;
struct value_and_holder;
// Out-of-line storage used by `instance` when the simple layout does not apply;
// see the layout description inside `instance` below.
struct nonsimple_values_and_holders {
    void **values_and_holders; // heap block: [val1*][holder1][val2*][holder2]...[status bytes]
    uint8_t *status;           // points into the same allocation, at the status-byte section
};
/// The 'instance' type which needs to be standard layout (need to be able to use 'offsetof')
struct instance {
    PyObject_HEAD
    /// Storage for pointers and holder; see simple_layout, below, for a description
    union {
        // One slot for the value pointer plus enough slots for the default holder.
        void *simple_value_holder[1 + instance_simple_holder_in_ptrs()];
        nonsimple_values_and_holders nonsimple;
    };
    /// Weak references
    PyObject *weakrefs;
    /// If true, the pointer is owned which means we're free to manage it with a holder.
    bool owned : 1;
    /**
     * An instance has two possible value/holder layouts.
     *
     * Simple layout (when this flag is true), means the `simple_value_holder` is set with a
     * pointer and the holder object governing that pointer, i.e. [val1*][holder]. This layout is
     * applied whenever there is no python-side multiple inheritance of bound C++ types *and* the
     * type's holder will fit in the default space (which is large enough to hold either a
     * std::unique_ptr or std::shared_ptr).
     *
     * Non-simple layout applies when using custom holders that require more space than
     * `shared_ptr` (which is typically the size of two pointers), or when multiple inheritance is
     * used on the python side. Non-simple layout allocates the required amount of memory to have
     * multiple bound C++ classes as parents. Under this layout, `nonsimple.values_and_holders` is
     * set to a pointer to allocated space of the required space to hold a sequence of value
     * pointers and holders followed `status`, a set of bit flags (1 byte each), i.e.
     * [val1*][holder1][val2*][holder2]...[bb...] where each [block] is rounded up to a multiple
     * of `sizeof(void *)`. `nonsimple.status` is, for convenience, a pointer to the beginning of
     * the [bb...] block (but not independently allocated).
     *
     * Status bits indicate whether the associated holder is constructed (&
     * status_holder_constructed) and whether the value pointer is registered (&
     * status_instance_registered) in `registered_instances`.
     */
    bool simple_layout : 1;
    /// For simple layout, tracks whether the holder has been constructed
    bool simple_holder_constructed : 1;
    /// For simple layout, tracks whether the instance is registered in `registered_instances`
    bool simple_instance_registered : 1;
    /// If true, get_internals().patients has an entry for this object
    bool has_patients : 1;
    /// Initializes all of the above type/values/holders data (but not the instance values
    /// themselves)
    void allocate_layout();
    /// Destroys/deallocates all of the above
    void deallocate_layout();
    /// Returns the value_and_holder wrapper for the given type (or the first, if `find_type`
    /// omitted). Returns a default-constructed (with `.inst = nullptr`) object on failure if
    /// `throw_if_missing` is false.
    value_and_holder get_value_and_holder(const type_info *find_type = nullptr,
                                          bool throw_if_missing = true);
    /// Bit values for the non-simple status flags (one status byte per value/holder pair)
    static constexpr uint8_t status_holder_constructed = 1;
    static constexpr uint8_t status_instance_registered = 2;
};
// `offsetof`-based access elsewhere requires standard layout; verify it here.
static_assert(std::is_standard_layout<instance>::value,
              "Internal error: `pybind11::detail::instance` is not standard layout!");
/// from __cpp_future__ import (convenient aliases from C++14/17)
#if defined(PYBIND11_CPP14)
using std::conditional_t;
using std::enable_if_t;
using std::remove_cv_t;
using std::remove_reference_t;
#else
// C++11 fallbacks: hand-rolled `_t` alias templates equivalent to the C++14 ones.
template <bool B, typename T = void>
using enable_if_t = typename std::enable_if<B, T>::type;
template <bool B, typename T, typename F>
using conditional_t = typename std::conditional<B, T, F>::type;
template <typename T>
using remove_cv_t = typename std::remove_cv<T>::type;
template <typename T>
using remove_reference_t = typename std::remove_reference<T>::type;
#endif
#if defined(PYBIND11_CPP20)
using std::remove_cvref;
using std::remove_cvref_t;
#else
// Pre-C++20 backport of std::remove_cvref: strip reference first, then cv-qualifiers.
template <class T>
struct remove_cvref {
    using type = remove_cv_t<remove_reference_t<T>>;
};
template <class T>
using remove_cvref_t = typename remove_cvref<T>::type;
#endif
/// Example usage: is_same_ignoring_cvref<T, PyObject *>::value
template <typename T, typename U>
using is_same_ignoring_cvref = std::is_same<detail::remove_cvref_t<T>, U>;
/// Index sequences
#if defined(PYBIND11_CPP14)
using std::index_sequence;
using std::make_index_sequence;
#else
// C++11 backport: build index_sequence<0..N-1> by peeling N down to 0 recursively,
// prepending N-1 to the accumulated pack at each step.
template <size_t...>
struct index_sequence {};
template <size_t N, size_t... S>
struct make_index_sequence_impl : make_index_sequence_impl<N - 1, N - 1, S...> {};
template <size_t... S>
struct make_index_sequence_impl<0, S...> {
    using type = index_sequence<S...>;
};
template <size_t N>
using make_index_sequence = typename make_index_sequence_impl<N>::type;
#endif
/// Make an index sequence of the indices of true arguments
// Base case: no flags left; the accumulated sequence ISeq is the result.
template <typename ISeq, size_t, bool...>
struct select_indices_impl {
    using type = ISeq;
};
// Recursive case: append the running index I to the sequence only when flag B is
// true, then advance to index I + 1 and the remaining flags Bs...
template <size_t... IPrev, size_t I, bool B, bool... Bs>
struct select_indices_impl<index_sequence<IPrev...>, I, B, Bs...>
    : select_indices_impl<conditional_t<B, index_sequence<IPrev..., I>, index_sequence<IPrev...>>,
                          I + 1,
                          Bs...> {};
template <bool... Bs>
using select_indices = typename select_indices_impl<index_sequence<>, 0, Bs...>::type;
/// Backports of std::bool_constant and std::negation to accommodate older compilers
template <bool B>
using bool_constant = std::integral_constant<bool, B>;
template <typename T>
struct negation : bool_constant<!T::value> {};
// PGI/Intel cannot detect operator delete with the "compatible" void_t impl, so
// using the new one (C++14 defect, so generally works on newer compilers, even
// if not in C++17 mode)
#if defined(__PGIC__) || defined(__INTEL_COMPILER)
template <typename...>
using void_t = void;
#else
template <typename...>
struct void_t_impl {
    using type = void;
};
template <typename... Ts>
using void_t = typename void_t_impl<Ts...>::type;
#endif
/// Compile-time all/any/none of that check the boolean value of all template types
#if defined(__cpp_fold_expressions) && !(defined(_MSC_VER) && (_MSC_VER < 1916))
template <class... Ts>
using all_of = bool_constant<(Ts::value && ...)>;
template <class... Ts>
using any_of = bool_constant<(Ts::value || ...)>;
#elif !defined(_MSC_VER)
// Pre-fold-expression trick: the two bools<...> instantiations name the same type
// iff every Ts::value is true (the extra `true` is shifted through the pack).
template <bool...>
struct bools {};
template <class... Ts>
using all_of = std::is_same<bools<Ts::value..., true>, bools<true, Ts::value...>>;
template <class... Ts>
using any_of = negation<all_of<negation<Ts>...>>;
#else
// MSVC has trouble with the above, but supports std::conjunction, which we can use instead (albeit
// at a slight loss of compilation efficiency).
template <class... Ts>
using all_of = std::conjunction<Ts...>;
template <class... Ts>
using any_of = std::disjunction<Ts...>;
#endif
template <class... Ts>
using none_of = negation<any_of<Ts...>>;
// Convenience forms that apply a list of unary predicates to a single type T.
template <class T, template <class> class... Predicates>
using satisfies_all_of = all_of<Predicates<T>...>;
template <class T, template <class> class... Predicates>
using satisfies_any_of = any_of<Predicates<T>...>;
template <class T, template <class> class... Predicates>
using satisfies_none_of = none_of<Predicates<T>...>;
/// Strip the class from a method type
// e.g. remove_class<int (Foo::*)(double) const>::type is int(double).
template <typename T>
struct remove_class {};
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...)> {
    using type = R(A...);
};
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) const> {
    using type = R(A...);
};
#ifdef __cpp_noexcept_function_type
// C++17 made noexcept part of the function type, so those need their own specializations.
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) noexcept> {
    using type = R(A...);
};
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) const noexcept> {
    using type = R(A...);
};
#endif
/// Helper template to strip away type modifiers
// Each specialization removes one layer (const, pointer, reference, array) and recurses.
template <typename T>
struct intrinsic_type {
    using type = T;
};
template <typename T>
struct intrinsic_type<const T> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T>
struct intrinsic_type<T *> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T>
struct intrinsic_type<T &> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T>
struct intrinsic_type<T &&> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T, size_t N>
struct intrinsic_type<const T[N]> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T, size_t N>
struct intrinsic_type<T[N]> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T>
using intrinsic_t = typename intrinsic_type<T>::type;
/// Helper type to replace 'void' in some expressions
struct void_type {};
/// Helper template which holds a list of types
template <typename...>
struct type_list {};
/// Compile-time integer sum
#ifdef __cpp_fold_expressions
template <typename... Ts>
constexpr size_t constexpr_sum(Ts... ns) {
    return (0 + ... + size_t{ns});
}
#else
// C++11/14 fallback: recursive pairwise accumulation instead of a fold expression.
constexpr size_t constexpr_sum() { return 0; }
template <typename T, typename... Ts>
constexpr size_t constexpr_sum(T n, Ts... ns) {
    return size_t{n} + constexpr_sum(ns...);
}
#endif
PYBIND11_NAMESPACE_BEGIN(constexpr_impl)
/// Implementation details for constexpr functions
// Base case: no flags left; `i` is one past the last index, i.e. sizeof...(vs).
constexpr int first(int i) { return i; }
template <typename T, typename... Ts>
constexpr int first(int i, T v, Ts... vs) {
    return v ? i : first(i + 1, vs...);
}
// `result` carries the index of the last truthy flag seen so far (-1 initially).
constexpr int last(int /*i*/, int result) { return result; }
template <typename T, typename... Ts>
constexpr int last(int i, int result, T v, Ts... vs) {
    return last(i + 1, v ? i : result, vs...);
}
PYBIND11_NAMESPACE_END(constexpr_impl)
/// Return the index of the first type in Ts which satisfies Predicate<T>.
/// Returns sizeof...(Ts) if none match.
template <template <typename> class Predicate, typename... Ts>
constexpr int constexpr_first() {
    return constexpr_impl::first(0, Predicate<Ts>::value...);
}
/// Return the index of the last type in Ts which satisfies Predicate<T>, or -1 if none match.
template <template <typename> class Predicate, typename... Ts>
constexpr int constexpr_last() {
    return constexpr_impl::last(0, -1, Predicate<Ts>::value...);
}
/// Return the Nth element from the parameter pack
template <size_t N, typename T, typename... Ts>
struct pack_element {
    using type = typename pack_element<N - 1, Ts...>::type;
};
template <typename T, typename... Ts>
struct pack_element<0, T, Ts...> {
    using type = T;
};
/// Return the one and only type which matches the predicate, or Default if none match.
/// If more than one type matches the predicate, fail at compile-time.
template <template <typename> class Predicate, typename Default, typename... Ts>
struct exactly_one {
    // `found` counts the matches; the static_assert rejects ambiguous (>1) matches.
    static constexpr auto found = constexpr_sum(Predicate<Ts>::value...);
    static_assert(found <= 1, "Found more than one type matching the predicate");
    static constexpr auto index = found ? constexpr_first<Predicate, Ts...>() : 0;
    using type = conditional_t<found, typename pack_element<index, Ts...>::type, Default>;
};
// Empty-pack specialization: nothing to search, so the result is Default.
template <template <typename> class P, typename Default>
struct exactly_one<P, Default> {
    using type = Default;
};
template <template <typename> class Predicate, typename Default, typename... Ts>
using exactly_one_t = typename exactly_one<Predicate, Default, Ts...>::type;
/// Defer the evaluation of type T until types Us are instantiated
template <typename T, typename... /*Us*/>
struct deferred_type {
    using type = T;
};
template <typename T, typename... Us>
using deferred_t = typename deferred_type<T, Us...>::type;
/// Like is_base_of, but requires a strict base (i.e. `is_strict_base_of<T, T>::value == false`,
/// unlike `std::is_base_of`)
template <typename Base, typename Derived>
using is_strict_base_of
    = bool_constant<std::is_base_of<Base, Derived>::value && !std::is_same<Base, Derived>::value>;
/// Like is_base_of, but also requires that the base type is accessible (i.e. that a Derived
/// pointer can be converted to a Base pointer) For unions, `is_base_of<T, T>::value` is False, so
/// we need to check `is_same` as well.
template <typename Base, typename Derived>
using is_accessible_base_of
    = bool_constant<(std::is_same<Base, Derived>::value || std::is_base_of<Base, Derived>::value)
                    && std::is_convertible<Derived *, Base *>::value>;
// Overload resolution picks the Base<Us...>* overload (true_type) when the argument
// pointer converts to some instantiation of Base; otherwise the `...` overload wins.
template <template <typename...> class Base>
struct is_template_base_of_impl {
    template <typename... Us>
    static std::true_type check(Base<Us...> *);
    static std::false_type check(...);
};
/// Check if a template is the base of a type. For example:
/// `is_template_base_of<Base, T>` is true if `struct T : Base<U> {}` where U can be anything
template <template <typename...> class Base, typename T>
// Sadly, all MSVC versions incl. 2022 need the workaround, even in C++20 mode.
// See also: https://github.com/pybind/pybind11/pull/3741
#if !defined(_MSC_VER)
using is_template_base_of
    = decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T> *) nullptr));
#else
struct is_template_base_of
    : decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T> *) nullptr)) {
};
#endif
/// Check if T is an instantiation of the template `Class`. For example:
/// `is_instantiation<shared_ptr, T>` is true if `T == shared_ptr<U>` where U can be anything.
template <template <typename...> class Class, typename T>
struct is_instantiation : std::false_type {};
template <template <typename...> class Class, typename... Us>
struct is_instantiation<Class, Class<Us...>> : std::true_type {};
/// Check if T is std::shared_ptr<U> where U can be anything
template <typename T>
using is_shared_ptr = is_instantiation<std::shared_ptr, T>;
/// Check if T looks like an input iterator
// SFINAE probe: T qualifies if both unary `*` and pre-increment are well-formed.
template <typename T, typename = void>
struct is_input_iterator : std::false_type {};
template <typename T>
struct is_input_iterator<T,
                         void_t<decltype(*std::declval<T &>()), decltype(++std::declval<T &>())>>
    : std::true_type {};
template <typename T>
using is_function_pointer
    = bool_constant<std::is_pointer<T>::value
                    && std::is_function<typename std::remove_pointer<T>::type>::value>;
// For a function object (e.g. a lambda), derive the plain R(A...) signature from
// the type of its operator().
template <typename F>
struct strip_function_object {
    // If you are encountering an
    // 'error: name followed by "::" must be a class or namespace name'
    // with the Intel compiler and a noexcept function here,
    // try to use noexcept(true) instead of plain noexcept.
    using type = typename remove_class<decltype(&F::operator())>::type;
};
// Extracts the function signature from a function, function pointer or lambda.
template <typename Function, typename F = remove_reference_t<Function>>
using function_signature_t = conditional_t<
    std::is_function<F>::value,
    F,
    typename conditional_t<std::is_pointer<F>::value || std::is_member_pointer<F>::value,
                           std::remove_pointer<F>,
                           strip_function_object<F>>::type>;
/// Returns true if the type looks like a lambda: that is, isn't a function, pointer or member
/// pointer. Note that this can catch all sorts of other things, too; this is intended to be used
/// in a place where passing a lambda makes sense.
template <typename T>
using is_lambda = satisfies_none_of<remove_reference_t<T>,
                                    std::is_function,
                                    std::is_pointer,
                                    std::is_member_pointer>;
// [workaround(intel)] Internal error on fold expression
/// Apply a function over each element of a parameter pack
#if defined(__cpp_fold_expressions) && !defined(__INTEL_COMPILER)
// Intel compiler produces an internal error on this fold expression (tested with ICC 19.0.2)
#    define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (((PATTERN), void()), ...)
#else
// Fallback: evaluate PATTERN once per pack element via a braced array initializer.
using expand_side_effects = bool[];
#    define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN)                                                 \
        (void) pybind11::detail::expand_side_effects { ((PATTERN), void(), false)..., false }
#endif
PYBIND11_NAMESPACE_END(detail)
/// C++ bindings of builtin Python exceptions
// Abstract base: each concrete subclass (see PYBIND11_RUNTIME_EXCEPTION below)
// maps itself onto a specific builtin Python exception type.
class PYBIND11_EXPORT_EXCEPTION builtin_exception : public std::runtime_error {
public:
    using std::runtime_error::runtime_error;
    /// Set the error using the Python C API
    virtual void set_error() const = 0;
};
// Declares a builtin_exception subclass `name` whose set_error() raises the given
// Python exception `type` with the C++ what() message.
#define PYBIND11_RUNTIME_EXCEPTION(name, type)                                                    \
    class PYBIND11_EXPORT_EXCEPTION name : public builtin_exception {                             \
    public:                                                                                       \
        using builtin_exception::builtin_exception;                                               \
        name() : name("") {}                                                                      \
        void set_error() const override { PyErr_SetString(type, what()); }                        \
    };
PYBIND11_RUNTIME_EXCEPTION(stop_iteration, PyExc_StopIteration)
PYBIND11_RUNTIME_EXCEPTION(index_error, PyExc_IndexError)
PYBIND11_RUNTIME_EXCEPTION(key_error, PyExc_KeyError)
PYBIND11_RUNTIME_EXCEPTION(value_error, PyExc_ValueError)
PYBIND11_RUNTIME_EXCEPTION(type_error, PyExc_TypeError)
PYBIND11_RUNTIME_EXCEPTION(buffer_error, PyExc_BufferError)
PYBIND11_RUNTIME_EXCEPTION(import_error, PyExc_ImportError)
PYBIND11_RUNTIME_EXCEPTION(attribute_error, PyExc_AttributeError)
PYBIND11_RUNTIME_EXCEPTION(cast_error, PyExc_RuntimeError) /// Thrown when pybind11::cast or
                                                           /// handle::call fail due to a type
                                                           /// casting error
PYBIND11_RUNTIME_EXCEPTION(reference_cast_error, PyExc_RuntimeError) /// Used internally
/// Raise a std::runtime_error carrying `reason`; asserts (debug builds only) that
/// no Python error is already pending when called.
[[noreturn]] PYBIND11_NOINLINE void pybind11_fail(const char *reason) {
    assert(!PyErr_Occurred());
    throw std::runtime_error(reason);
}
/// std::string overload; identical behavior to the const char * version above.
[[noreturn]] PYBIND11_NOINLINE void pybind11_fail(const std::string &reason) {
    assert(!PyErr_Occurred());
    throw std::runtime_error(reason);
}
// Maps a C++ type to its buffer-protocol format string. The primary template is
// intentionally empty; specializations (here and elsewhere) provide `value`/`format()`.
template <typename T, typename SFINAE = void>
struct format_descriptor {};
// Specialization for raw PyObject * values: buffer format character 'O'.
template <typename T>
struct format_descriptor<
    T,
    detail::enable_if_t<detail::is_same_ignoring_cvref<T, PyObject *>::value>> {
    static constexpr const char c = 'O';
    static constexpr const char value[2] = {c, '\0'};
    static std::string format() { return std::string(1, c); }
};
PYBIND11_NAMESPACE_BEGIN(detail)
// Returns the index of the given type in the type char array below, and in the list in numpy.h
// The order here is: bool; 8 ints ((signed,unsigned)x(8,16,32,64)bits); float,double,long double;
// complex float,double,long double. Note that the long double types only participate when long
// double is actually longer than double (it isn't under MSVC).
// NB: not only the string below but also complex.h and numpy.h rely on this order.
template <typename T, typename SFINAE = void>
struct is_fmt_numeric {
    static constexpr bool value = false;
};
template <typename T>
struct is_fmt_numeric<T, enable_if_t<std::is_arithmetic<T>::value>> {
    static constexpr bool value = true;
    // Index layout: bool -> 0; integers -> 1..8 ordered by size (log2(sizeof)*2)
    // with +1 for unsigned; floating point -> 9 (float), 10 (double), 11 (long double).
    static constexpr int index
        = std::is_same<T, bool>::value
              ? 0
              : 1
                    + (std::is_integral<T>::value
                           ? detail::log2(sizeof(T)) * 2 + std::is_unsigned<T>::value
                           : 8
                                 + (std::is_same<T, double>::value        ? 1
                                    : std::is_same<T, long double>::value ? 2
                                                                          : 0));
};
PYBIND11_NAMESPACE_END(detail)
// Arithmetic types: the format character is looked up by is_fmt_numeric::index in
// the table "?bBhHiIqQfdg" (bool, signed/unsigned ints by size, float, double, long double).
template <typename T>
struct format_descriptor<T, detail::enable_if_t<std::is_arithmetic<T>::value>> {
    static constexpr const char c = "?bBhHiIqQfdg"[detail::is_fmt_numeric<T>::index];
    static constexpr const char value[2] = {c, '\0'};
    static std::string format() { return std::string(1, c); }
};
#if !defined(PYBIND11_CPP17)
// Pre-C++17, static constexpr data members still need an out-of-line definition.
template <typename T>
constexpr const char
    format_descriptor<T, detail::enable_if_t<std::is_arithmetic<T>::value>>::value[2];
#endif
/// RAII wrapper that temporarily clears any Python error state
// PyErr_Fetch in the constructor saves-and-clears the pending error (if any);
// PyErr_Restore in the destructor reinstates it, steals the saved references.
struct error_scope {
    PyObject *type, *value, *trace;
    error_scope() { PyErr_Fetch(&type, &value, &trace); }
    error_scope(const error_scope &) = delete;
    error_scope &operator=(const error_scope &) = delete;
    ~error_scope() { PyErr_Restore(type, value, trace); }
};
/// Dummy destructor wrapper that can be used to expose classes with a private destructor
// Usable as a deleter (e.g. std::unique_ptr<T, nodelete>): its operator() is a no-op.
struct nodelete {
    template <typename T>
    void operator()(T *) {}
};
PYBIND11_NAMESPACE_BEGIN(detail)
// Function object backing pybind11::overload_cast: selects, among a set of
// overloads, the one whose parameter list is exactly Args...
template <typename... Args>
struct overload_cast_impl {
    // Free function overload.
    template <typename Return>
    constexpr auto operator()(Return (*pf)(Args...)) const noexcept -> decltype(pf) {
        return pf;
    }
    // Non-const member function overload (the default when no tag is passed).
    template <typename Return, typename Class>
    constexpr auto operator()(Return (Class::*pmf)(Args...), std::false_type = {}) const noexcept
        -> decltype(pmf) {
        return pmf;
    }
    // Const member function overload, selected by passing the `const_` tag (std::true_type).
    template <typename Return, typename Class>
    constexpr auto operator()(Return (Class::*pmf)(Args...) const, std::true_type) const noexcept
        -> decltype(pmf) {
        return pmf;
    }
};
PYBIND11_NAMESPACE_END(detail)
// overload_cast requires variable templates: C++14
#if defined(PYBIND11_CPP14)
#    define PYBIND11_OVERLOAD_CAST 1
/// Syntax sugar for resolving overloaded function pointers:
///  - regular: static_cast<Return (Class::*)(Arg0, Arg1, Arg2)>(&Class::func)
///  - sweet:   overload_cast<Arg0, Arg1, Arg2>(&Class::func)
template <typename... Args>
static constexpr detail::overload_cast_impl<Args...> overload_cast{};
#endif
/// Const member function selector for overload_cast
///  - regular: static_cast<Return (Class::*)(Arg) const>(&Class::func)
///  - sweet:   overload_cast<Arg>(&Class::func, const_)
static constexpr auto const_ = std::true_type{};
#if !defined(PYBIND11_CPP14) // no overload_cast: providing something that static_assert-fails:
// The deferred_t indirection delays evaluation so the assert only fires on actual use.
template <typename... Args>
struct overload_cast {
    static_assert(detail::deferred_t<std::false_type, Args...>::value,
                  "pybind11::overload_cast<...> requires compiling in C++14 mode");
};
#endif // overload_cast
PYBIND11_NAMESPACE_BEGIN(detail)
// Adaptor for converting arbitrary container arguments into a vector; implicitly convertible from
// any standard container (or C-style array) supporting std::begin/std::end, any singleton
// arithmetic type (if T is arithmetic), or explicitly constructible from an iterator pair.
template <typename T>
class any_container {
    std::vector<T> v; // the collected elements; all constructors funnel into this
public:
    any_container() = default;
    // Can construct from a pair of iterators
    template <typename It, typename = enable_if_t<is_input_iterator<It>::value>>
    any_container(It first, It last) : v(first, last) {}
    // Implicit conversion constructor from any arbitrary container type
    // with values convertible to T
    template <typename Container,
              typename = enable_if_t<
                  std::is_convertible<decltype(*std::begin(std::declval<const Container &>())),
                                      T>::value>>
    // NOLINTNEXTLINE(google-explicit-constructor)
    any_container(const Container &c) : any_container(std::begin(c), std::end(c)) {}
    // initializer_list's aren't deducible, so don't get matched by the above template;
    // we need this to explicitly allow implicit conversion from one:
    template <typename TIn, typename = enable_if_t<std::is_convertible<TIn, T>::value>>
    any_container(const std::initializer_list<TIn> &c) : any_container(c.begin(), c.end()) {}
    // Avoid copying if given an rvalue vector of the correct type.
    // NOLINTNEXTLINE(google-explicit-constructor)
    any_container(std::vector<T> &&v) : v(std::move(v)) {}
    // Moves the vector out of an rvalue any_container
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator std::vector<T> &&() && { return std::move(v); }
    // Dereferencing obtains a reference to the underlying vector
    std::vector<T> &operator*() { return v; }
    const std::vector<T> &operator*() const { return v; }
    // -> lets you call methods on the underlying vector
    std::vector<T> *operator->() { return &v; }
    const std::vector<T> *operator->() const { return &v; }
};
// Forward-declaration; see detail/class.h
std::string get_fully_qualified_tp_name(PyTypeObject *);
/// Returns the existing shared_ptr managing *holder_value_ptr, or nullptr if the
/// object is not currently owned by any shared_ptr.
template <typename T>
inline static std::shared_ptr<T>
try_get_shared_from_this(std::enable_shared_from_this<T> *holder_value_ptr) {
// Pre C++17, this code path exploits undefined behavior, but is known to work on many platforms.
// Use at your own risk!
// See also https://en.cppreference.com/w/cpp/memory/enable_shared_from_this, and in particular
// the `std::shared_ptr<Good> gp1 = not_so_good.getptr();` and `try`-`catch` parts of the example.
#if defined(__cpp_lib_enable_shared_from_this) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
    // C++17 path: weak_from_this() is well-defined even when no shared_ptr owns the object.
    return holder_value_ptr->weak_from_this().lock();
#else
    // Fallback: shared_from_this() throws bad_weak_ptr when there is no owner.
    try {
        return holder_value_ptr->shared_from_this();
    } catch (const std::bad_weak_ptr &) {
        return nullptr;
    }
#endif
}
// For silencing "unused" compiler warnings in special situations.
// Variadic no-op: mentioning the arguments here counts as a "use" to the compiler.
template <typename... Args>
#if defined(_MSC_VER) && _MSC_VER < 1920 // MSVC 2017
constexpr
#endif
    inline void
    silence_unused_warnings(Args &&...) {
}
// MSVC warning C4100: Unreferenced formal parameter
#if defined(_MSC_VER) && _MSC_VER <= 1916
#    define PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(...)                                         \
        detail::silence_unused_warnings(__VA_ARGS__)
#else
#    define PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(...)
#endif
// GCC -Wunused-but-set-parameter All GCC versions (as of July 2021).
#if defined(__GNUG__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#    define PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(...)                       \
        detail::silence_unused_warnings(__VA_ARGS__)
#else
#    define PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(...)
#endif
#if defined(__clang__)                                                                            \
    && (defined(__apple_build_version__) /* AppleClang 13.0.0.13000029 was the only data point \
                                            available. */                                        \
        || (__clang_major__ >= 7                                                                  \
            && __clang_major__ <= 12) /* Clang 3, 5, 13, 14, 15 do not generate the warning. */   \
    )
#    define PYBIND11_DETECTED_CLANG_WITH_MISLEADING_CALL_STD_MOVE_EXPLICITLY_WARNING
// Example:
// tests/test_kwargs_and_defaults.cpp:46:68: error: local variable 'args' will be copied despite
// being returned by name [-Werror,-Wreturn-std-move]
//     m.def("args_function", [](py::args args) -> py::tuple { return args; });
//                                                                    ^~~~
// test_kwargs_and_defaults.cpp:46:68: note: call 'std::move' explicitly to avoid copying
//     m.def("args_function", [](py::args args) -> py::tuple { return args; });
//                                                                    ^~~~
//                                                                    std::move(args)
#endif
// Pybind offers detailed error messages by default for all builds that are debug (through the
// negation of NDEBUG). This can also be manually enabled by users, for any builds, through
// defining PYBIND11_DETAILED_ERROR_MESSAGES. This information is primarily useful for those
// who are writing (as opposed to merely using) libraries that use pybind11.
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES) && !defined(NDEBUG)
#    define PYBIND11_DETAILED_ERROR_MESSAGES
#endif
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/detail/descr.h | C/C++ Header | /*
pybind11/detail/descr.h: Helper type for concatenating type signatures at compile time
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "common.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
#if !defined(_MSC_VER)
# define PYBIND11_DESCR_CONSTEXPR static constexpr
#else
# define PYBIND11_DESCR_CONSTEXPR const
#endif
/* Concatenate type signatures at compile time */
// A constexpr string of N characters (text stores N chars plus '\0');
// Ts... records the C++ types referenced by placeholders within the text.
template <size_t N, typename... Ts>
struct descr {
    char text[N + 1]{'\0'};
    constexpr descr() = default;
    // Construct from an N-character string literal (delegates to the
    // index_sequence constructor below).
    // NOLINTNEXTLINE(google-explicit-constructor)
    constexpr descr(char const (&s)[N + 1]) : descr(s, make_index_sequence<N>()) {}
    // Element-wise copy of the literal into text, re-appending the terminator.
    template <size_t... Is>
    constexpr descr(char const (&s)[N + 1], index_sequence<Is...>) : text{s[Is]..., '\0'} {}
    // Construct from individual characters.
    template <typename... Chars>
    // NOLINTNEXTLINE(google-explicit-constructor)
    constexpr descr(char c, Chars... cs) : text{c, static_cast<char>(cs)..., '\0'} {}
    // Runtime access to the recorded types; the array is nullptr-terminated.
    static constexpr std::array<const std::type_info *, sizeof...(Ts) + 1> types() {
        return {{&typeid(Ts)..., nullptr}};
    }
};
// Concatenation: copies a's N1 characters then b's N2 characters (dropping a's
// terminator) via two index packs, merging the recorded type lists.
template <size_t N1, size_t N2, typename... Ts1, typename... Ts2, size_t... Is1, size_t... Is2>
constexpr descr<N1 + N2, Ts1..., Ts2...> plus_impl(const descr<N1, Ts1...> &a,
                                                   const descr<N2, Ts2...> &b,
                                                   index_sequence<Is1...>,
                                                   index_sequence<Is2...>) {
    PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(b);
    return {a.text[Is1]..., b.text[Is2]...};
}
template <size_t N1, size_t N2, typename... Ts1, typename... Ts2>
constexpr descr<N1 + N2, Ts1..., Ts2...> operator+(const descr<N1, Ts1...> &a,
                                                   const descr<N2, Ts2...> &b) {
    return plus_impl(a, b, make_index_sequence<N1>(), make_index_sequence<N2>());
}
// Wrap a string literal (N includes the '\0') into a descr of N-1 characters.
template <size_t N>
constexpr descr<N - 1> const_name(char const (&text)[N]) {
    return descr<N - 1>(text);
}
// Dedicated overload for the empty string literal.
constexpr descr<0> const_name(char const (&)[1]) { return {}; }
// Converts an integer to its decimal digits, one digit per recursion step.
template <size_t Rem, size_t... Digits>
struct int_to_str : int_to_str<Rem / 10, Rem % 10, Digits...> {};
// Base case: all digits peeled off; materialize them as a descr.
template <size_t... Digits>
struct int_to_str<0, Digits...> {
    // WARNING: This only works with C++17 or higher.
    static constexpr auto digits = descr<sizeof...(Digits)>(('0' + Digits)...);
};
// Ternary description (like std::conditional)
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<B, descr<N1 - 1>> const_name(char const (&text1)[N1], char const (&)[N2]) {
    return const_name(text1);
}
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<!B, descr<N2 - 1>> const_name(char const (&)[N1], char const (&text2)[N2]) {
    return const_name(text2);
}
template <bool B, typename T1, typename T2>
constexpr enable_if_t<B, T1> const_name(const T1 &d, const T2 &) {
    return d;
}
template <bool B, typename T1, typename T2>
constexpr enable_if_t<!B, T2> const_name(const T1 &, const T2 &d) {
    return d;
}
// Description holding the decimal representation of Size (two digits handled
// by the int_to_str recursion; supports values up to 99 directly here).
template <size_t Size>
auto constexpr const_name() -> remove_cv_t<decltype(int_to_str<Size / 10, Size % 10>::digits)> {
    return int_to_str<Size / 10, Size % 10>::digits;
}
// A '%' placeholder carrying Type in the descr's type list.
template <typename Type>
constexpr descr<1, Type> const_name() {
    return {'%'};
}
// If "_" is defined as a macro, py::detail::_ cannot be provided.
// It is therefore best to use py::detail::const_name universally.
// This block is for backward compatibility only.
// (The const_name code is repeated to avoid introducing a "_" #define ourselves.)
#ifndef _
#    define PYBIND11_DETAIL_UNDERSCORE_BACKWARD_COMPATIBILITY
// Each `_` overload below simply forwards to the matching `const_name`
// overload above; see those for documentation.
template <size_t N>
constexpr descr<N - 1> _(char const (&text)[N]) {
    return const_name<N>(text);
}
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<B, descr<N1 - 1>> _(char const (&text1)[N1], char const (&text2)[N2]) {
    return const_name<B, N1, N2>(text1, text2);
}
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<!B, descr<N2 - 1>> _(char const (&text1)[N1], char const (&text2)[N2]) {
    return const_name<B, N1, N2>(text1, text2);
}
template <bool B, typename T1, typename T2>
constexpr enable_if_t<B, T1> _(const T1 &d1, const T2 &d2) {
    return const_name<B, T1, T2>(d1, d2);
}
template <bool B, typename T1, typename T2>
constexpr enable_if_t<!B, T2> _(const T1 &d1, const T2 &d2) {
    return const_name<B, T1, T2>(d1, d2);
}
template <size_t Size>
auto constexpr _() -> remove_cv_t<decltype(int_to_str<Size / 10, Size % 10>::digits)> {
    return const_name<Size>();
}
template <typename Type>
constexpr descr<1, Type> _() {
    return const_name<Type>();
}
#endif // #ifndef _
// Join descriptions with ", " between them; zero- and one-argument base cases.
constexpr descr<0> concat() { return {}; }
template <size_t N, typename... Ts>
constexpr descr<N, Ts...> concat(const descr<N, Ts...> &descr) {
    return descr;
}
#ifdef __cpp_fold_expressions
// C++17 path: comma operator inserts ", " so concat can be a single fold.
template <size_t N1, size_t N2, typename... Ts1, typename... Ts2>
constexpr descr<N1 + N2 + 2, Ts1..., Ts2...> operator,(const descr<N1, Ts1...> &a,
                                                       const descr<N2, Ts2...> &b) {
    return a + const_name(", ") + b;
}
template <size_t N, typename... Ts, typename... Args>
constexpr auto concat(const descr<N, Ts...> &d, const Args &...args) {
    return (d, ..., args);
}
#else
// Pre-C++17 path: recursive concatenation with explicit return-type deduction.
template <size_t N, typename... Ts, typename... Args>
constexpr auto concat(const descr<N, Ts...> &d, const Args &...args)
    -> decltype(std::declval<descr<N + 2, Ts...>>() + concat(args...)) {
    return d + const_name(", ") + concat(args...);
}
#endif
// Wrap a description in braces, e.g. for a tuple/dict signature element.
template <size_t N, typename... Ts>
constexpr descr<N + 2, Ts...> type_descr(const descr<N, Ts...> &descr) {
    return const_name("{") + descr + const_name("}");
}
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/detail/init.h | C/C++ Header | /*
pybind11/detail/init.h: init factory function implementation and support code.
Copyright (c) 2017 Jason Rhinelander <jason@imaginary.ca>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "class.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
PYBIND11_NAMESPACE_BEGIN(detail)
// Pass-through caster that lets the `__init__` lambdas below receive the
// internal value_and_holder smuggled through a Python handle.
template <>
class type_caster<value_and_holder> {
public:
    bool load(handle h, bool) {
        // The handle's pointer is actually a value_and_holder*, not a PyObject*.
        value = reinterpret_cast<value_and_holder *>(h.ptr());
        return true;
    }
    template <typename>
    using cast_op_type = value_and_holder &;
    explicit operator value_and_holder &() { return *value; }
    static constexpr auto name = const_name<value_and_holder>();

private:
    value_and_holder *value = nullptr;
};
PYBIND11_NAMESPACE_BEGIN(initimpl)
// Reject a null pointer coming back from a user-supplied factory function.
inline void no_nullptr(void *ptr) {
    if (ptr == nullptr) {
        throw type_error("pybind11::init(): factory function returned nullptr");
    }
}
// Implementing functions for all forms of py::init<...> and py::init(...)
// Convenience aliases extracting the wrapped C++ type, its alias (trampoline)
// type, and its holder type from a py::class_-like `Class` template argument.
template <typename Class>
using Cpp = typename Class::type;
template <typename Class>
using Alias = typename Class::type_alias;
template <typename Class>
using Holder = typename Class::holder_type;
// True when the alias can be move-constructed from the base C++ type.
template <typename Class>
using is_alias_constructible = std::is_constructible<Alias<Class>, Cpp<Class> &&>;
// Takes a Cpp pointer and returns true if it actually is a polymorphic Alias instance.
template <typename Class, enable_if_t<Class::has_alias, int> = 0>
bool is_alias(Cpp<Class> *ptr) {
    return dynamic_cast<Alias<Class> *>(ptr) != nullptr;
}
// Failing fallback version of the above for a no-alias class (always returns false)
template <typename /*Class*/>
constexpr bool is_alias(void *) {
    return false;
}
// Constructs and returns a new object; if the given arguments don't map to a constructor, we fall
// back to brace aggregate initialization so that aggregate initialization can be used with
// py::init, e.g. `py::init<int, int>` to initialize a `struct T { int a; int b; }`. For
// non-aggregate types, we need to use an ordinary T(...) constructor (invoking as `T{...}` usually
// works, but will not do the expected thing when `T` has an `initializer_list<T>` constructor).
// Overload chosen when a matching constructor exists: use T(...).
template <typename Class,
          typename... Args,
          detail::enable_if_t<std::is_constructible<Class, Args...>::value, int> = 0>
inline Class *construct_or_initialize(Args &&...args) {
    return new Class(std::forward<Args>(args)...);
}
// Overload chosen otherwise: use brace (aggregate) initialization T{...}.
template <typename Class,
          typename... Args,
          detail::enable_if_t<!std::is_constructible<Class, Args...>::value, int> = 0>
inline Class *construct_or_initialize(Args &&...args) {
    return new Class{std::forward<Args>(args)...};
}
// Attempts to constructs an alias using a `Alias(Cpp &&)` constructor. This allows types with
// an alias to provide only a single Cpp factory function as long as the Alias can be
// constructed from an rvalue reference of the base Cpp type. This means that Alias classes
// can, when appropriate, simply define a `Alias(Cpp &&)` constructor rather than needing to
// inherit all the base class constructors.
// Tag-dispatched on is_alias_constructible: this overload does the move.
template <typename Class>
void construct_alias_from_cpp(std::true_type /*is_alias_constructible*/,
                              value_and_holder &v_h,
                              Cpp<Class> &&base) {
    v_h.value_ptr() = new Alias<Class>(std::move(base));
}
// No `Alias(Cpp &&)` constructor available: constructing the alias is impossible.
template <typename Class>
[[noreturn]] void construct_alias_from_cpp(std::false_type /*!is_alias_constructible*/,
                                           value_and_holder &,
                                           Cpp<Class> &&) {
    throw type_error("pybind11::init(): unable to convert returned instance to required "
                     "alias class: no `Alias<Class>(Class &&)` constructor available");
}
// Error-generating fallback for factories that don't match one of the below construction
// mechanisms.
template <typename Class>
void construct(...) {
    static_assert(!std::is_same<Class, Class>::value /* always false */,
                  "pybind11::init(): init function must return a compatible pointer, "
                  "holder, or value");
}
// Pointer return v1: the factory function returns a class pointer for a registered class.
// If we don't need an alias (because this class doesn't have one, or because the final type is
// inherited on the Python side) we can simply take over ownership. Otherwise we need to try to
// construct an Alias from the returned base instance.
template <typename Class>
void construct(value_and_holder &v_h, Cpp<Class> *ptr, bool need_alias) {
    PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(need_alias);
    no_nullptr(ptr);
    if (Class::has_alias && need_alias && !is_alias<Class>(ptr)) {
        // We're going to try to construct an alias by moving the cpp type.  Whether or not
        // that succeeds, we still need to destroy the original cpp pointer (either the
        // moved away leftover, if the alias construction works, or the value itself if we
        // throw an error), but we can't just call `delete ptr`: it might have a special
        // deleter, or might be shared_from_this.  So we construct a holder around it as if
        // it was a normal instance, then steal the holder away into a local variable; thus
        // the holder and destruction happens when we leave the C++ scope, and the holder
        // class gets to handle the destruction however it likes.
        v_h.value_ptr() = ptr;
        v_h.set_instance_registered(true); // To prevent init_instance from registering it
        v_h.type->init_instance(v_h.inst, nullptr); // Set up the holder
        Holder<Class> temp_holder(std::move(v_h.holder<Holder<Class>>())); // Steal the holder
        v_h.type->dealloc(v_h); // Destroys the moved-out holder remains, resets value ptr to null
        v_h.set_instance_registered(false);
        construct_alias_from_cpp<Class>(is_alias_constructible<Class>{}, v_h, std::move(*ptr));
    } else {
        // Otherwise the type isn't inherited, so we don't need an Alias
        v_h.value_ptr() = ptr;
    }
}
// Pointer return v2: a factory that always returns an alias instance ptr.  We simply take over
// ownership of the pointer.
template <typename Class, enable_if_t<Class::has_alias, int> = 0>
void construct(value_and_holder &v_h, Alias<Class> *alias_ptr, bool) {
    no_nullptr(alias_ptr);
    v_h.value_ptr() = static_cast<Cpp<Class> *>(alias_ptr);
}
// Holder return: copy its pointer, and move or copy the returned holder into the new instance's
// holder.  This also handles types like std::shared_ptr<T> and std::unique_ptr<T> where T is a
// derived type (through those holder's implicit conversion from derived class holder
// constructors).
template <typename Class>
void construct(value_and_holder &v_h, Holder<Class> holder, bool need_alias) {
    PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(need_alias);
    auto *ptr = holder_helper<Holder<Class>>::get(holder);
    no_nullptr(ptr);
    // If we need an alias, check that the held pointer is actually an alias instance
    if (Class::has_alias && need_alias && !is_alias<Class>(ptr)) {
        throw type_error("pybind11::init(): construction failed: returned holder-wrapped instance "
                         "is not an alias instance");
    }
    v_h.value_ptr() = ptr;
    v_h.type->init_instance(v_h.inst, &holder);
}
// return-by-value version 1: returning a cpp class by value.  If the class has an alias and an
// alias is required the alias must have an `Alias(Cpp &&)` constructor so that we can construct
// the alias from the base when needed (i.e. because of Python-side inheritance).  When we don't
// need it, we simply move-construct the cpp value into a new instance.
template <typename Class>
void construct(value_and_holder &v_h, Cpp<Class> &&result, bool need_alias) {
    PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(need_alias);
    static_assert(is_move_constructible<Cpp<Class>>::value,
                  "pybind11::init() return-by-value factory function requires a movable class");
    if (Class::has_alias && need_alias) {
        construct_alias_from_cpp<Class>(is_alias_constructible<Class>{}, v_h, std::move(result));
    } else {
        v_h.value_ptr() = new Cpp<Class>(std::move(result));
    }
}
// return-by-value version 2: returning a value of the alias type itself.  We move-construct an
// Alias instance (even if no Python-side inheritance is involved).  This is intended for
// cases where Alias initialization is always desired.
template <typename Class>
void construct(value_and_holder &v_h, Alias<Class> &&result, bool) {
    static_assert(
        is_move_constructible<Alias<Class>>::value,
        "pybind11::init() return-by-alias-value factory function requires a movable alias class");
    v_h.value_ptr() = new Alias<Class>(std::move(result));
}
// Implementing class for py::init<...>()
template <typename... Args>
struct constructor {
    // Case 1: no alias type registered -- always construct the C++ type itself.
    template <typename Class, typename... Extra, enable_if_t<!Class::has_alias, int> = 0>
    static void execute(Class &cl, const Extra &...extra) {
        cl.def(
            "__init__",
            [](value_and_holder &v_h, Args... args) {
                v_h.value_ptr() = construct_or_initialize<Cpp<Class>>(std::forward<Args>(args)...);
            },
            is_new_style_constructor(),
            extra...);
    }
    // Case 2: alias exists and the C++ type is constructible from Args --
    // construct Cpp for direct instances, Alias for Python-side subclasses.
    template <
        typename Class,
        typename... Extra,
        enable_if_t<Class::has_alias && std::is_constructible<Cpp<Class>, Args...>::value, int>
        = 0>
    static void execute(Class &cl, const Extra &...extra) {
        cl.def(
            "__init__",
            [](value_and_holder &v_h, Args... args) {
                if (Py_TYPE(v_h.inst) == v_h.type->type) {
                    v_h.value_ptr()
                        = construct_or_initialize<Cpp<Class>>(std::forward<Args>(args)...);
                } else {
                    v_h.value_ptr()
                        = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
                }
            },
            is_new_style_constructor(),
            extra...);
    }
    // Case 3: alias exists but the C++ type is not constructible from Args --
    // always construct the alias.
    template <
        typename Class,
        typename... Extra,
        enable_if_t<Class::has_alias && !std::is_constructible<Cpp<Class>, Args...>::value, int>
        = 0>
    static void execute(Class &cl, const Extra &...extra) {
        cl.def(
            "__init__",
            [](value_and_holder &v_h, Args... args) {
                v_h.value_ptr()
                    = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
            },
            is_new_style_constructor(),
            extra...);
    }
};
// Implementing class for py::init_alias<...>()
// Unlike `constructor`, this unconditionally constructs the alias type.
template <typename... Args>
struct alias_constructor {
    template <
        typename Class,
        typename... Extra,
        enable_if_t<Class::has_alias && std::is_constructible<Alias<Class>, Args...>::value, int>
        = 0>
    static void execute(Class &cl, const Extra &...extra) {
        cl.def(
            "__init__",
            [](value_and_holder &v_h, Args... args) {
                v_h.value_ptr()
                    = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
            },
            is_new_style_constructor(),
            extra...);
    }
};
// Implementation class for py::init(Func) and py::init(Func, AliasFunc)
template <typename CFunc,
          typename AFunc = void_type (*)(),
          typename = function_signature_t<CFunc>,
          typename = function_signature_t<AFunc>>
struct factory;
// Specialization for py::init(Func)
template <typename Func, typename Return, typename... Args>
struct factory<Func, void_type (*)(), Return(Args...)> {
    remove_reference_t<Func> class_factory;
    // NOLINTNEXTLINE(google-explicit-constructor)
    factory(Func &&f) : class_factory(std::forward<Func>(f)) {}
    // The given class either has no alias or has no separate alias factory;
    // this always constructs the class itself.  If the class is registered with an alias
    // type and an alias instance is needed (i.e. because the final type is a Python class
    // inheriting from the C++ type) the returned value needs to either already be an alias
    // instance, or the alias needs to be constructible from a `Class &&` argument.
    template <typename Class, typename... Extra>
    void execute(Class &cl, const Extra &...extra) && {
#if defined(PYBIND11_CPP14)
        // C++14: move the factory into the lambda's init-capture.
        cl.def(
            "__init__",
            [func = std::move(class_factory)]
#else
        // C++11: no init-capture; capture a copy by value instead.
        auto &func = class_factory;
        cl.def(
            "__init__",
            [func]
#endif
            (value_and_holder &v_h, Args... args) {
                construct<Class>(
                    v_h, func(std::forward<Args>(args)...), Py_TYPE(v_h.inst) != v_h.type->type);
            },
            is_new_style_constructor(),
            extra...);
    }
};
// Specialization for py::init(Func, AliasFunc)
template <typename CFunc,
          typename AFunc,
          typename CReturn,
          typename... CArgs,
          typename AReturn,
          typename... AArgs>
struct factory<CFunc, AFunc, CReturn(CArgs...), AReturn(AArgs...)> {
    static_assert(sizeof...(CArgs) == sizeof...(AArgs),
                  "pybind11::init(class_factory, alias_factory): class and alias factories "
                  "must have identical argument signatures");
    static_assert(all_of<std::is_same<CArgs, AArgs>...>::value,
                  "pybind11::init(class_factory, alias_factory): class and alias factories "
                  "must have identical argument signatures");
    remove_reference_t<CFunc> class_factory;
    remove_reference_t<AFunc> alias_factory;
    factory(CFunc &&c, AFunc &&a)
        : class_factory(std::forward<CFunc>(c)), alias_factory(std::forward<AFunc>(a)) {}
    // The class factory is called when the `self` type passed to `__init__` is the direct
    // class (i.e. not inherited), the alias factory when `self` is a Python-side subtype.
    template <typename Class, typename... Extra>
    void execute(Class &cl, const Extra &...extra) && {
        static_assert(Class::has_alias,
                      "The two-argument version of `py::init()` can "
                      "only be used if the class has an alias");
#if defined(PYBIND11_CPP14)
        cl.def(
            "__init__",
            [class_func = std::move(class_factory), alias_func = std::move(alias_factory)]
#else
        auto &class_func = class_factory;
        auto &alias_func = alias_factory;
        cl.def(
            "__init__",
            [class_func, alias_func]
#endif
            (value_and_holder &v_h, CArgs... args) {
                if (Py_TYPE(v_h.inst) == v_h.type->type) {
                    // If the instance type equals the registered type we don't have inheritance,
                    // so don't need the alias and can construct using the class function:
                    construct<Class>(v_h, class_func(std::forward<CArgs>(args)...), false);
                } else {
                    construct<Class>(v_h, alias_func(std::forward<CArgs>(args)...), true);
                }
            },
            is_new_style_constructor(),
            extra...);
    }
};
/// Set just the C++ state. Same as `__init__`.
template <typename Class, typename T>
void setstate(value_and_holder &v_h, T &&result, bool need_alias) {
    construct<Class>(v_h, std::forward<T>(result), need_alias);
}
/// Set both the C++ and Python states
/// (the factory returned a pair: C++ state plus a `__dict__`-convertible object).
template <typename Class,
          typename T,
          typename O,
          enable_if_t<std::is_convertible<O, handle>::value, int> = 0>
void setstate(value_and_holder &v_h, std::pair<T, O> &&result, bool need_alias) {
    construct<Class>(v_h, std::move(result.first), need_alias);
    auto d = handle(result.second);
    if (PyDict_Check(d.ptr()) && PyDict_Size(d.ptr()) == 0) {
        // Skipping setattr below, to not force use of py::dynamic_attr() for Class unnecessarily.
        // See PR #2972 for details.
        return;
    }
    setattr((PyObject *) v_h.inst, "__dict__", d);
}
/// Implementation for py::pickle(GetState, SetState)
template <typename Get,
          typename Set,
          typename = function_signature_t<Get>,
          typename = function_signature_t<Set>>
struct pickle_factory;
template <typename Get,
          typename Set,
          typename RetState,
          typename Self,
          typename NewInstance,
          typename ArgState>
struct pickle_factory<Get, Set, RetState(Self), NewInstance(ArgState)> {
    static_assert(std::is_same<intrinsic_t<RetState>, intrinsic_t<ArgState>>::value,
                  "The type returned by `__getstate__` must be the same "
                  "as the argument accepted by `__setstate__`");
    remove_reference_t<Get> get;
    remove_reference_t<Set> set;
    pickle_factory(Get get, Set set) : get(std::forward<Get>(get)), set(std::forward<Set>(set)) {}
    // Registers `__getstate__` directly and wraps `set` in a `__setstate__`
    // that constructs the C++ (and possibly Python) state via setstate() above.
    template <typename Class, typename... Extra>
    void execute(Class &cl, const Extra &...extra) && {
        cl.def("__getstate__", std::move(get));
#if defined(PYBIND11_CPP14)
        cl.def(
            "__setstate__",
            [func = std::move(set)]
#else
        auto &func = set;
        cl.def(
            "__setstate__",
            [func]
#endif
            (value_and_holder &v_h, ArgState state) {
                setstate<Class>(
                    v_h, func(std::forward<ArgState>(state)), Py_TYPE(v_h.inst) != v_h.type->type);
            },
            is_new_style_constructor(),
            extra...);
    }
};
PYBIND11_NAMESPACE_END(initimpl)
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/detail/internals.h | C/C++ Header | /*
pybind11/detail/internals.h: Internal data structure and related functions
Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "common.h"
#if defined(WITH_THREAD) && defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
# include "../gil.h"
#endif
#include "../pytypes.h"
#include <exception>
/// Tracks the `internals` and `type_info` ABI version independent of the main library version.
///
/// Some portions of the code use an ABI that is conditional depending on this
/// version number. That allows ABI-breaking changes to be "pre-implemented".
/// Once the default version number is incremented, the conditional logic that
/// no longer applies can be removed. Additionally, users that need not
/// maintain ABI compatibility can increase the version number in order to take
/// advantage of any functionality/efficiency improvements that depend on the
/// newer ABI.
///
/// WARNING: If you choose to manually increase the ABI version, note that
/// pybind11 may not be tested as thoroughly with a non-default ABI version, and
/// further ABI-incompatible changes may be made before the ABI is officially
/// changed to the new version.
#ifndef PYBIND11_INTERNALS_VERSION
# if PY_VERSION_HEX >= 0x030C0000
// Version bump for Python 3.12+, before first 3.12 beta release.
# define PYBIND11_INTERNALS_VERSION 5
# else
# define PYBIND11_INTERNALS_VERSION 4
# endif
#endif
// This requirement is mainly to reduce the support burden (see PR #4570).
static_assert(PY_VERSION_HEX < 0x030C0000 || PYBIND11_INTERNALS_VERSION >= 5,
"pybind11 ABI version 5 is the minimum for Python 3.12+");
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
using ExceptionTranslator = void (*)(std::exception_ptr);
PYBIND11_NAMESPACE_BEGIN(detail)
constexpr const char *internals_function_record_capsule_name = "pybind11_function_record_capsule";
// Forward declarations
inline PyTypeObject *make_static_property_type();
inline PyTypeObject *make_default_metaclass();
inline PyObject *make_object_base_type(PyTypeObject *metaclass);
// The old Python Thread Local Storage (TLS) API is deprecated in Python 3.7 in favor of the new
// Thread Specific Storage (TSS) API.
#if PY_VERSION_HEX >= 0x03070000
// Avoid unnecessary allocation of `Py_tss_t`, since we cannot use
// `Py_LIMITED_API` anyway.
# if PYBIND11_INTERNALS_VERSION > 4
# define PYBIND11_TLS_KEY_REF Py_tss_t &
# if defined(__GNUC__) && !defined(__INTEL_COMPILER)
// Clang on macOS warns due to `Py_tss_NEEDS_INIT` not specifying an initializer
// for every field.
# define PYBIND11_TLS_KEY_INIT(var) \
_Pragma("GCC diagnostic push") /**/ \
_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"") /**/ \
Py_tss_t var \
= Py_tss_NEEDS_INIT; \
_Pragma("GCC diagnostic pop")
# else
# define PYBIND11_TLS_KEY_INIT(var) Py_tss_t var = Py_tss_NEEDS_INIT;
# endif
# define PYBIND11_TLS_KEY_CREATE(var) (PyThread_tss_create(&(var)) == 0)
# define PYBIND11_TLS_GET_VALUE(key) PyThread_tss_get(&(key))
# define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_tss_set(&(key), (value))
# define PYBIND11_TLS_DELETE_VALUE(key) PyThread_tss_set(&(key), nullptr)
# define PYBIND11_TLS_FREE(key) PyThread_tss_delete(&(key))
# else
# define PYBIND11_TLS_KEY_REF Py_tss_t *
# define PYBIND11_TLS_KEY_INIT(var) Py_tss_t *var = nullptr;
# define PYBIND11_TLS_KEY_CREATE(var) \
(((var) = PyThread_tss_alloc()) != nullptr && (PyThread_tss_create((var)) == 0))
# define PYBIND11_TLS_GET_VALUE(key) PyThread_tss_get((key))
# define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_tss_set((key), (value))
# define PYBIND11_TLS_DELETE_VALUE(key) PyThread_tss_set((key), nullptr)
# define PYBIND11_TLS_FREE(key) PyThread_tss_free(key)
# endif
#else
// Usually an int but a long on Cygwin64 with Python 3.x
# define PYBIND11_TLS_KEY_REF decltype(PyThread_create_key())
# define PYBIND11_TLS_KEY_INIT(var) PYBIND11_TLS_KEY_REF var = 0;
# define PYBIND11_TLS_KEY_CREATE(var) (((var) = PyThread_create_key()) != -1)
# define PYBIND11_TLS_GET_VALUE(key) PyThread_get_key_value((key))
# if defined(PYPY_VERSION)
// On CPython < 3.4 and on PyPy, `PyThread_set_key_value` strangely does not set
// the value if it has already been set. Instead, it must first be deleted and
// then set again.
inline void tls_replace_value(PYBIND11_TLS_KEY_REF key, void *value) {
PyThread_delete_key_value(key);
PyThread_set_key_value(key, value);
}
# define PYBIND11_TLS_DELETE_VALUE(key) PyThread_delete_key_value(key)
# define PYBIND11_TLS_REPLACE_VALUE(key, value) \
::pybind11::detail::tls_replace_value((key), (value))
# else
# define PYBIND11_TLS_DELETE_VALUE(key) PyThread_set_key_value((key), nullptr)
# define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_set_key_value((key), (value))
# endif
# define PYBIND11_TLS_FREE(key) (void) key
#endif
// Python loads modules by default with dlopen with the RTLD_LOCAL flag; under libc++ and possibly
// other STLs, this means `typeid(A)` from one module won't equal `typeid(A)` from another module
// even when `A` is the same, non-hidden-visibility type (e.g. from a common include). Under
// libstdc++, this doesn't happen: equality and the type_index hash are based on the type name,
// which works. If not under a known-good stl, provide our own name-based hash and equality
// functions that use the type name.
#if (PYBIND11_INTERNALS_VERSION <= 4 && defined(__GLIBCXX__)) \
    || (PYBIND11_INTERNALS_VERSION >= 5 && !defined(_LIBCPP_VERSION))
// Known-good STL: native type_info equality and hashing work across modules.
inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) { return lhs == rhs; }
using type_hash = std::hash<std::type_index>;
using type_equal_to = std::equal_to<std::type_index>;
#else
// Name-based comparison: try pointer equality of the name strings first
// (cheap fast path), then fall back to a full strcmp.
inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) {
    return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0;
}
// djb2-style string hash of the mangled type name: hash = (hash * 33) ^ c.
struct type_hash {
    size_t operator()(const std::type_index &t) const {
        size_t hash = 5381;
        const char *ptr = t.name();
        while (auto c = static_cast<unsigned char>(*ptr++)) {
            hash = (hash * 33) ^ c;
        }
        return hash;
    }
};
struct type_equal_to {
    bool operator()(const std::type_index &lhs, const std::type_index &rhs) const {
        return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0;
    }
};
#endif
// Map keyed on std::type_index using the (possibly name-based) functors above.
template <typename value_type>
using type_map = std::unordered_map<std::type_index, value_type, type_hash, type_equal_to>;
// Hash for the (PyObject*, method-name char*) pairs of the override cache;
// both pointers are hashed by address and combined boost::hash_combine-style
// (0x9e3779b9 golden-ratio constant).
struct override_hash {
    inline size_t operator()(const std::pair<const PyObject *, const char *> &v) const {
        size_t value = std::hash<const void *>()(v.first);
        value ^= std::hash<const void *>()(v.second) + 0x9e3779b9 + (value << 6) + (value >> 2);
        return value;
    }
};
/// Internal data structure used to track registered instances and types.
/// Whenever binary incompatible changes are made to this structure,
/// `PYBIND11_INTERNALS_VERSION` must be incremented.
struct internals {
    // std::type_index -> pybind11's type information
    type_map<type_info *> registered_types_cpp;
    // PyTypeObject* -> base type_info(s)
    std::unordered_map<PyTypeObject *, std::vector<type_info *>> registered_types_py;
    std::unordered_multimap<const void *, instance *> registered_instances; // void * -> instance*
    std::unordered_set<std::pair<const PyObject *, const char *>, override_hash>
        inactive_override_cache;
    type_map<std::vector<bool (*)(PyObject *, void *&)>> direct_conversions;
    std::unordered_map<const PyObject *, std::vector<PyObject *>> patients;
    std::forward_list<ExceptionTranslator> registered_exception_translators;
    std::unordered_map<std::string, void *> shared_data; // Custom data to be shared across
                                                         // extensions
#if PYBIND11_INTERNALS_VERSION == 4
    // Kept only for ABI-layout compatibility with internals version 4.
    std::vector<PyObject *> unused_loader_patient_stack_remove_at_v5;
#endif
    std::forward_list<std::string> static_strings; // Stores the std::strings backing
                                                   // detail::c_str()
    // NOTE(review): presumably populated with the objects created by the
    // make_static_property_type/make_default_metaclass/make_object_base_type
    // helpers forward-declared above -- confirm at the initialization site.
    PyTypeObject *static_property_type;
    PyTypeObject *default_metaclass;
    PyObject *instance_base;
#if defined(WITH_THREAD)
    // Unused if PYBIND11_SIMPLE_GIL_MANAGEMENT is defined:
    PYBIND11_TLS_KEY_INIT(tstate)
#    if PYBIND11_INTERNALS_VERSION > 4
    PYBIND11_TLS_KEY_INIT(loader_life_support_tls_key)
#    endif // PYBIND11_INTERNALS_VERSION > 4
    // Unused if PYBIND11_SIMPLE_GIL_MANAGEMENT is defined:
    PyInterpreterState *istate = nullptr;
#    if PYBIND11_INTERNALS_VERSION > 4
    // Note that we have to use a std::string to allocate memory to ensure a unique address
    // We want unique addresses since we use pointer equality to compare function records
    std::string function_record_capsule_name = internals_function_record_capsule_name;
#    endif
    internals() = default;
    internals(const internals &other) = delete;
    internals &operator=(const internals &other) = delete;
    ~internals() {
#    if PYBIND11_INTERNALS_VERSION > 4
        PYBIND11_TLS_FREE(loader_life_support_tls_key);
#    endif // PYBIND11_INTERNALS_VERSION > 4
        // This destructor is called *after* Py_Finalize() in finalize_interpreter().
        // That *SHOULD BE* fine. The following details what happens when PyThread_tss_free is
        // called. PYBIND11_TLS_FREE is PyThread_tss_free on python 3.7+. On older python, it does
        // nothing. PyThread_tss_free calls PyThread_tss_delete and PyMem_RawFree.
        // PyThread_tss_delete just calls TlsFree (on Windows) or pthread_key_delete (on *NIX).
        // Neither of those have anything to do with CPython internals. PyMem_RawFree *requires*
        // that the `tstate` be allocated with the CPython allocator.
        PYBIND11_TLS_FREE(tstate);
    }
#endif
};
/// Additional type information which does not fit into the PyTypeObject.
/// Changes to this struct also require bumping `PYBIND11_INTERNALS_VERSION`.
struct type_info {
    // The Python type object created for this bound C++ type.
    PyTypeObject *type;
    // C++ RTTI of the bound type.
    const std::type_info *cpptype;
    // Size/alignment of the C++ value, and the holder's size in pointer units
    // (used to lay out the value/holder slots inside `instance`).
    size_t type_size, type_align, holder_size_in_ptrs;
    // Allocator used when pybind11 constructs the value itself.
    void *(*operator_new)(size_t);
    // Hooks that set up / tear down an instance's value-and-holder storage.
    void (*init_instance)(instance *, const void *);
    void (*dealloc)(value_and_holder &v_h);
    // Python -> Python implicit conversion candidates (tried in order).
    std::vector<PyObject *(*) (PyObject *, PyTypeObject *)> implicit_conversions;
    // C++-side implicit casts: (source type, pointer-adjusting cast function).
    std::vector<std::pair<const std::type_info *, void *(*) (void *)>> implicit_casts;
    // Custom Python -> C++ converters for this type; presumably points at the
    // matching vector in internals::direct_conversions (same element type) --
    // TODO confirm at the registration site.
    std::vector<bool (*)(PyObject *, void *&)> *direct_conversions;
    // Buffer-protocol hooks; left null for types without a buffer interface.
    buffer_info *(*get_buffer)(PyObject *, void *) = nullptr;
    void *get_buffer_data = nullptr;
    // Loader used for py::module_local types; nullptr otherwise.
    void *(*module_local_load)(PyObject *, const type_info *) = nullptr;
    /* A simple type never occurs as a (direct or indirect) parent
     * of a class that makes use of multiple inheritance.
     * A type can be simple even if it has non-simple ancestors as long as it has no descendants.
     */
    bool simple_type : 1;
    /* True if there is no multiple inheritance in this type's inheritance tree */
    bool simple_ancestors : 1;
    /* for base vs derived holder_type checks */
    bool default_holder : 1;
    /* true if this is a type registered with py::module_local */
    bool module_local : 1;
};
// The macros below build ABI "tag" strings; they are concatenated into
// PYBIND11_INTERNALS_ID / PYBIND11_MODULE_LOCAL_ID so that only ABI-compatible
// modules share the same internals capsule.
/// On MSVC, debug and release builds are not ABI-compatible!
#if defined(_MSC_VER) && defined(_DEBUG)
# define PYBIND11_BUILD_TYPE "_debug"
#else
# define PYBIND11_BUILD_TYPE ""
#endif
/// Let's assume that different compilers are ABI-incompatible.
/// A user can manually set this string if they know their
/// compiler is compatible.
#ifndef PYBIND11_COMPILER_TYPE
# if defined(_MSC_VER)
# define PYBIND11_COMPILER_TYPE "_msvc"
# elif defined(__INTEL_COMPILER)
# define PYBIND11_COMPILER_TYPE "_icc"
# elif defined(__clang__)
# define PYBIND11_COMPILER_TYPE "_clang"
# elif defined(__PGI)
# define PYBIND11_COMPILER_TYPE "_pgi"
# elif defined(__MINGW32__)
# define PYBIND11_COMPILER_TYPE "_mingw"
# elif defined(__CYGWIN__)
# define PYBIND11_COMPILER_TYPE "_gcc_cygwin"
# elif defined(__GNUC__)
# define PYBIND11_COMPILER_TYPE "_gcc"
# else
# define PYBIND11_COMPILER_TYPE "_unknown"
# endif
#endif
/// Also standard libs
#ifndef PYBIND11_STDLIB
# if defined(_LIBCPP_VERSION)
# define PYBIND11_STDLIB "_libcpp"
# elif defined(__GLIBCXX__) || defined(__GLIBCPP__)
# define PYBIND11_STDLIB "_libstdcpp"
# else
# define PYBIND11_STDLIB ""
# endif
#endif
/// On Linux/OSX, changes in __GXX_ABI_VERSION__ indicate ABI incompatibility.
#ifndef PYBIND11_BUILD_ABI
# if defined(__GXX_ABI_VERSION)
# define PYBIND11_BUILD_ABI "_cxxabi" PYBIND11_TOSTRING(__GXX_ABI_VERSION)
# else
# define PYBIND11_BUILD_ABI ""
# endif
#endif
// Threaded and non-threaded internals layouts differ, so they must not mix.
#ifndef PYBIND11_INTERNALS_KIND
# if defined(WITH_THREAD)
# define PYBIND11_INTERNALS_KIND ""
# else
# define PYBIND11_INTERNALS_KIND "_without_thread"
# endif
#endif
// Key under which the shared `internals` capsule is stored (see get_internals()).
#define PYBIND11_INTERNALS_ID \
    "__pybind11_internals_v" PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) \
        PYBIND11_INTERNALS_KIND PYBIND11_COMPILER_TYPE PYBIND11_STDLIB PYBIND11_BUILD_ABI \
            PYBIND11_BUILD_TYPE "__"
// Key prefix used for py::module_local type registrations.
#define PYBIND11_MODULE_LOCAL_ID \
    "__pybind11_module_local_v" PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) \
        PYBIND11_INTERNALS_KIND PYBIND11_COMPILER_TYPE PYBIND11_STDLIB PYBIND11_BUILD_ABI \
            PYBIND11_BUILD_TYPE "__"
/// Each module locally stores a pointer to the `internals` data. The data
/// itself is shared among modules with the same `PYBIND11_INTERNALS_ID`.
inline internals **&get_internals_pp() {
    // One slot per extension module; the pointee is shared across modules via
    // the capsule published by get_internals().
    static internals **internals_pp = nullptr;
    return internals_pp;
}
// forward decl
inline void translate_exception(std::exception_ptr);
// Overload for exceptions that are statically known to be std::nested_exception:
// translate the wrapped exception (if any) so Python sees the full chain.
template <class T,
          enable_if_t<std::is_same<std::nested_exception, remove_cvref_t<T>>::value, int> = 0>
bool handle_nested_exception(const T &exc, const std::exception_ptr &p) {
    const std::exception_ptr inner = exc.nested_ptr();
    // Skip when nothing is nested, or when the nested pointer is the very
    // exception currently being translated (would recurse forever).
    if (inner == nullptr || inner == p) {
        return false;
    }
    translate_exception(inner);
    return true;
}
// Overload for all other exception types: they may still *derive* from
// std::nested_exception (std::throw_with_nested), so probe dynamically.
template <class T,
          enable_if_t<!std::is_same<std::nested_exception, remove_cvref_t<T>>::value, int> = 0>
bool handle_nested_exception(const T &exc, const std::exception_ptr &p) {
    const auto *as_nested = dynamic_cast<const std::nested_exception *>(std::addressof(exc));
    return as_nested != nullptr && handle_nested_exception(*as_nested, p);
}
// Report `msg` as a Python exception of `exc_type`. If an error is already
// pending, chain the new exception onto it (raise_from); otherwise just set it.
// Returns whether an error was already pending.
inline bool raise_err(PyObject *exc_type, const char *msg) {
    const bool had_pending = (PyErr_Occurred() != nullptr);
    if (had_pending) {
        raise_from(exc_type, msg);
    } else {
        PyErr_SetString(exc_type, msg);
    }
    return had_pending;
}
// Default exception translator: rethrows `p` and maps each recognized C++
// exception type to the corresponding Python exception. Catch-clause ORDER is
// significant: more derived types must appear before their bases (e.g.
// out_of_range before range_error before exception). Each branch first
// translates any nested exception so Python sees the full cause chain.
inline void translate_exception(std::exception_ptr p) {
    if (!p) {
        return;
    }
    try {
        std::rethrow_exception(p);
    } catch (error_already_set &e) {
        // A Python error captured earlier: just restore it.
        handle_nested_exception(e, p);
        e.restore();
        return;
    } catch (const builtin_exception &e) {
        // Could not use template since it's an abstract class.
        if (const auto *nep = dynamic_cast<const std::nested_exception *>(std::addressof(e))) {
            handle_nested_exception(*nep, p);
        }
        e.set_error();
        return;
    } catch (const std::bad_alloc &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_MemoryError, e.what());
        return;
    } catch (const std::domain_error &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_ValueError, e.what());
        return;
    } catch (const std::invalid_argument &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_ValueError, e.what());
        return;
    } catch (const std::length_error &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_ValueError, e.what());
        return;
    } catch (const std::out_of_range &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_IndexError, e.what());
        return;
    } catch (const std::range_error &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_ValueError, e.what());
        return;
    } catch (const std::overflow_error &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_OverflowError, e.what());
        return;
    } catch (const std::exception &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_RuntimeError, e.what());
        return;
    } catch (const std::nested_exception &e) {
        // A nested_exception that is not also a std::exception.
        handle_nested_exception(e, p);
        raise_err(PyExc_RuntimeError, "Caught an unknown nested exception!");
        return;
    } catch (...) {
        raise_err(PyExc_RuntimeError, "Caught an unknown exception!");
        return;
    }
}
#if !defined(__GLIBCXX__)
// Minimal translator registered by modules that *load* internals created by a
// different module (see get_internals()): it handles only this module's own
// error_already_set / builtin_exception classes, whose typeids may differ from
// the creating module's. Anything else falls through to the shared translator.
inline void translate_local_exception(std::exception_ptr p) {
    try {
        if (p) {
            std::rethrow_exception(p);
        }
    } catch (error_already_set &e) {
        e.restore();
        return;
    } catch (const builtin_exception &e) {
        e.set_error();
        return;
    }
}
#endif
// Return the per-interpreter dict in which the internals capsule is stored.
// Older internals versions / Pythons (and PyPy) used the builtins dict; newer
// ones use the interpreter state dict, which is properly per-interpreter.
// Sets a SystemError (via raise_from) if no dict can be obtained.
inline object get_python_state_dict() {
    object state_dict;
#if PYBIND11_INTERNALS_VERSION <= 4 || PY_VERSION_HEX < 0x03080000 || defined(PYPY_VERSION)
    state_dict = reinterpret_borrow<object>(PyEval_GetBuiltins());
#else
# if PY_VERSION_HEX < 0x03090000
    // PyInterpreterState_Get() is only public from 3.9 on.
    PyInterpreterState *istate = _PyInterpreterState_Get();
# else
    PyInterpreterState *istate = PyInterpreterState_Get();
# endif
    if (istate) {
        state_dict = reinterpret_borrow<object>(PyInterpreterState_GetDict(istate));
    }
#endif
    if (!state_dict) {
        raise_from(PyExc_SystemError, "pybind11::detail::get_python_state_dict() FAILED");
    }
    return state_dict;
}
// Fetch the internals capsule from the state dict; returns a null object when
// no entry exists under PYBIND11_INTERNALS_ID.
inline object get_internals_obj_from_state_dict(handle state_dict) {
    return reinterpret_borrow<object>(dict_getitemstring(state_dict.ptr(), PYBIND11_INTERNALS_ID));
}
// Unwrap the `internals **` stored in the (unnamed) capsule published by
// get_internals(). On failure a Python SystemError is reported via raise_from
// and the nullptr is propagated to the caller through the cast below.
inline internals **get_internals_pp_from_capsule(handle obj) {
    void *raw_ptr = PyCapsule_GetPointer(obj.ptr(), /*name=*/nullptr);
    if (raw_ptr == nullptr) {
        raise_from(PyExc_SystemError, "pybind11::detail::get_internals_pp_from_capsule() FAILED");
    }
    return static_cast<internals **>(raw_ptr);
}
/// Return a reference to the current `internals` data, creating it (and
/// publishing it in the interpreter state dict under PYBIND11_INTERNALS_ID)
/// if no ABI-compatible module has done so yet.
PYBIND11_NOINLINE internals &get_internals() {
    auto **&internals_pp = get_internals_pp();
    // Fast path: this module has already located (or created) the internals.
    if (internals_pp && *internals_pp) {
        return **internals_pp;
    }
#if defined(WITH_THREAD)
# if defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
    gil_scoped_acquire gil;
# else
    // Ensure that the GIL is held since we will need to make Python calls.
    // Cannot use py::gil_scoped_acquire here since that constructor calls get_internals.
    struct gil_scoped_acquire_local {
        gil_scoped_acquire_local() : state(PyGILState_Ensure()) {}
        gil_scoped_acquire_local(const gil_scoped_acquire_local &) = delete;
        gil_scoped_acquire_local &operator=(const gil_scoped_acquire_local &) = delete;
        ~gil_scoped_acquire_local() { PyGILState_Release(state); }
        const PyGILState_STATE state;
    } gil;
# endif
#endif
    // Preserve any pending Python error across the lookups below.
    error_scope err_scope;
    dict state_dict = get_python_state_dict();
    // Another ABI-compatible module may already have published the capsule.
    if (object internals_obj = get_internals_obj_from_state_dict(state_dict)) {
        internals_pp = get_internals_pp_from_capsule(internals_obj);
    }
    if (internals_pp && *internals_pp) {
        // We loaded the internals through `state_dict`, which means that our `error_already_set`
        // and `builtin_exception` may be different local classes than the ones set up in the
        // initial exception translator, below, so add another for our local exception classes.
        //
        // libstdc++ doesn't require this (types there are identified only by name)
        // libc++ with CPython doesn't require this (types are explicitly exported)
        // libc++ with PyPy still need it, awaiting further investigation
#if !defined(__GLIBCXX__)
        (*internals_pp)->registered_exception_translators.push_front(&translate_local_exception);
#endif
    } else {
        // First module in this interpreter: create and publish the internals.
        if (!internals_pp) {
            internals_pp = new internals *();
        }
        auto *&internals_ptr = *internals_pp;
        internals_ptr = new internals();
#if defined(WITH_THREAD)
        PyThreadState *tstate = PyThreadState_Get();
        // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
        if (!PYBIND11_TLS_KEY_CREATE(internals_ptr->tstate)) {
            pybind11_fail("get_internals: could not successfully initialize the tstate TSS key!");
        }
        PYBIND11_TLS_REPLACE_VALUE(internals_ptr->tstate, tstate);
# if PYBIND11_INTERNALS_VERSION > 4
        // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
        if (!PYBIND11_TLS_KEY_CREATE(internals_ptr->loader_life_support_tls_key)) {
            pybind11_fail("get_internals: could not successfully initialize the "
                          "loader_life_support TSS key!");
        }
# endif
        internals_ptr->istate = tstate->interp;
#endif
        // Publish the double pointer so later modules can adopt it.
        state_dict[PYBIND11_INTERNALS_ID] = capsule(internals_pp);
        internals_ptr->registered_exception_translators.push_front(&translate_exception);
        internals_ptr->static_property_type = make_static_property_type();
        internals_ptr->default_metaclass = make_default_metaclass();
        internals_ptr->instance_base = make_object_base_type(internals_ptr->default_metaclass);
    }
    return **internals_pp;
}
// the internals struct (above) is shared between all the modules. local_internals are only
// for a single module. Any changes made to internals may require an update to
// PYBIND11_INTERNALS_VERSION, breaking backwards compatibility. local_internals is, by design,
// restricted to a single module. Whether a module has local internals or not should not
// impact any other modules, because the only things accessing the local internals is the
// module that contains them.
struct local_internals {
    // Types registered with py::module_local in *this* module only.
    type_map<type_info *> registered_types_cpp;
    std::forward_list<ExceptionTranslator> registered_exception_translators;
#if defined(WITH_THREAD) && PYBIND11_INTERNALS_VERSION == 4
    // For ABI compatibility, we can't store the loader_life_support TLS key in
    // the `internals` struct directly. Instead, we store it in `shared_data` and
    // cache a copy in `local_internals`. If we allocated a separate TLS key for
    // each instance of `local_internals`, we could end up allocating hundreds of
    // TLS keys if hundreds of different pybind11 modules are loaded (which is a
    // plausible number).
    PYBIND11_TLS_KEY_INIT(loader_life_support_tls_key)
    // Holds the shared TLS key for the loader_life_support stack.
    struct shared_loader_life_support_data {
        PYBIND11_TLS_KEY_INIT(loader_life_support_tls_key)
        shared_loader_life_support_data() {
            // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
            if (!PYBIND11_TLS_KEY_CREATE(loader_life_support_tls_key)) {
                pybind11_fail("local_internals: could not successfully initialize the "
                              "loader_life_support TLS key!");
            }
        }
        // We can't help but leak the TLS key, because Python never unloads extension modules.
    };
    local_internals() {
        auto &internals = get_internals();
        // Get or create the `loader_life_support_stack_key`.
        auto &ptr = internals.shared_data["_life_support"];
        if (!ptr) {
            ptr = new shared_loader_life_support_data;
        }
        // Cache the shared key locally to avoid the shared_data lookup later.
        loader_life_support_tls_key
            = static_cast<shared_loader_life_support_data *>(ptr)->loader_life_support_tls_key;
    }
#endif //  defined(WITH_THREAD) && PYBIND11_INTERNALS_VERSION == 4
};
/// Works like `get_internals`, but for things which are locally registered.
inline local_internals &get_local_internals() {
    // Current static can be created in the interpreter finalization routine. If the later will be
    // destroyed in another static variable destructor, creation of this static there will cause
    // static deinitialization fiasco. In order to avoid it we avoid destruction of the
    // local_internals static. One can read more about the problem and current solution here:
    // https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables
    static auto *locals = new local_internals();  // intentionally leaked, see above
    return *locals;
}
/// Constructs a std::string from `args`, parks it in the shared `internals`
/// string pool, and returns its `c_str()`. Pooled strings live until the
/// program exits (or the embedded interpreter shuts down), which makes the
/// returned pointer safe for long-lived CPython fields such as
/// PyTypeObject::tp_name.
template <typename... Args>
const char *c_str(Args &&...args) {
    auto &pool = get_internals().static_strings;
    // forward_list never reallocates, so the front element's buffer is stable.
    pool.emplace_front(std::forward<Args>(args)...);
    return pool.front().c_str();
}
// Name used for capsules wrapping pybind11 function records. The std::string
// in `internals` guarantees a unique address, enabling the pointer-equality
// check in is_function_record_capsule(). Returns nullptr for internals
// version <= 4, where this mechanism is unavailable.
inline const char *get_function_record_capsule_name() {
#if PYBIND11_INTERNALS_VERSION > 4
    return get_internals().function_record_capsule_name.c_str();
#else
    return nullptr;
#endif
}
// Determine whether or not the following capsule contains a pybind11 function record.
// Note that we use `internals` to make sure that only ABI compatible records are touched.
//
// This check is currently used in two places:
// - An important optimization in functional.h to avoid overhead in C++ -> Python -> C++
// - The sibling feature of cpp_function to allow overloads
inline bool is_function_record_capsule(const capsule &cap) {
    // Pointer equality as we rely on internals() to ensure unique pointers
    // (deliberately NOT strcmp: different modules may coin equal-text names).
    return cap.name() == get_function_record_capsule_name();
}
PYBIND11_NAMESPACE_END(detail)
/// Returns a named pointer that is shared among all extension modules (using the same
/// pybind11 version) running in the current interpreter. Names starting with underscores
/// are reserved for internal usage. Returns `nullptr` if no matching entry was found.
PYBIND11_NOINLINE void *get_shared_data(const std::string &name) {
    auto &shared = detail::get_internals().shared_data;
    const auto entry = shared.find(name);
    if (entry == shared.end()) {
        return nullptr;
    }
    return entry->second;
}
/// Set the shared data that can be later recovered by `get_shared_data()`.
/// Overwrites any existing entry under `name`; returns `data` for chaining.
PYBIND11_NOINLINE void *set_shared_data(const std::string &name, void *data) {
    detail::get_internals().shared_data[name] = data;
    return data;
}
/// Returns a typed reference to a shared data entry (by using `get_shared_data()`) if
/// such entry exists. Otherwise, a new object of default-constructible type `T` is
/// added to the shared data under the given name and a reference to it is returned.
template <typename T>
T &get_or_create_shared_data(const std::string &name) {
    auto &shared = detail::get_internals().shared_data;
    auto it = shared.find(name);
    // Note: an existing entry whose stored pointer is null is also replaced,
    // matching a plain null-check on the looked-up value.
    T *value = (it == shared.end()) ? nullptr : static_cast<T *>(it->second);
    if (value == nullptr) {
        value = new T();
        shared[name] = value;
    }
    return *value;
}
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/detail/type_caster_base.h | C/C++ Header | /*
pybind11/detail/type_caster_base.h (originally first part of pybind11/cast.h)
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "../pytypes.h"
#include "common.h"
#include "descr.h"
#include "internals.h"
#include "typeid.h"
#include <cstdint>
#include <iterator>
#include <new>
#include <string>
#include <type_traits>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include <utility>
#include <vector>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/// A life support system for temporary objects created by `type_caster::load()`.
/// Adding a patient will keep it alive up until the enclosing function returns.
class loader_life_support {
private:
    // Enclosing frame on the per-thread stack (nullptr for the outermost frame).
    loader_life_support *parent = nullptr;
    // Objects kept alive by this frame; each entry holds one strong reference
    // that is dropped in the destructor.
    std::unordered_set<PyObject *> keep_alive;

#if defined(WITH_THREAD)
    // Store stack pointer in thread-local storage.
    static PYBIND11_TLS_KEY_REF get_stack_tls_key() {
# if PYBIND11_INTERNALS_VERSION == 4
        return get_local_internals().loader_life_support_tls_key;
# else
        return get_internals().loader_life_support_tls_key;
# endif
    }
    static loader_life_support *get_stack_top() {
        return static_cast<loader_life_support *>(PYBIND11_TLS_GET_VALUE(get_stack_tls_key()));
    }
    static void set_stack_top(loader_life_support *value) {
        PYBIND11_TLS_REPLACE_VALUE(get_stack_tls_key(), value);
    }
#else
    // Use single global variable for stack.
    static loader_life_support **get_stack_pp() {
        static loader_life_support *global_stack = nullptr;
        // FIX: must return the *address* of the static so callers can both read
        // and update the stack head. Returning `global_stack` itself (as the
        // previous code did) is a loader_life_support* where a
        // loader_life_support** is required and does not compile.
        return &global_stack;
    }
    static loader_life_support *get_stack_top() { return *get_stack_pp(); }
    static void set_stack_top(loader_life_support *value) { *get_stack_pp() = value; }
#endif

public:
    /// A new patient frame is created when a function is entered
    loader_life_support() : parent{get_stack_top()} { set_stack_top(this); }

    /// ... and destroyed after it returns
    ~loader_life_support() {
        // Frames must be destroyed strictly LIFO; anything else is a bug.
        if (get_stack_top() != this) {
            pybind11_fail("loader_life_support: internal error");
        }
        set_stack_top(parent);
        // Release the strong references taken in add_patient().
        for (auto *item : keep_alive) {
            Py_DECREF(item);
        }
    }

    /// This can only be used inside a pybind11-bound function, either by `argument_loader`
    /// at argument preparation time or by `py::cast()` at execution time.
    PYBIND11_NOINLINE static void add_patient(handle h) {
        loader_life_support *frame = get_stack_top();
        if (!frame) {
            // NOTE: It would be nice to include the stack frames here, as this indicates
            // use of pybind11::cast<> outside the normal call framework, finding such
            // a location is challenging. Developers could consider printing out
            // stack frame addresses here using something like __builtin_frame_address(0)
            throw cast_error("When called outside a bound function, py::cast() cannot "
                             "do Python -> C++ conversions which require the creation "
                             "of temporary values");
        }
        // Incref only on first insertion so each patient holds exactly one reference.
        if (frame->keep_alive.insert(h.ptr()).second) {
            Py_INCREF(h.ptr());
        }
    }
};
// Gets the cache entry for the given type, creating it if necessary. The return value is the pair
// returned by emplace, i.e. an iterator for the entry and a bool set to `true` if the entry was
// just created.
inline std::pair<decltype(internals::registered_types_py)::iterator, bool>
all_type_info_get_cache(PyTypeObject *type);
// Populates a just-created cache entry: breadth-first walk over `t`'s Python
// base classes, collecting (deduplicated, in discovery order) every
// pybind11-registered root type into `bases`.
PYBIND11_NOINLINE void all_type_info_populate(PyTypeObject *t, std::vector<type_info *> &bases) {
    // Worklist of Python types still to examine; seeded with t's direct bases.
    std::vector<PyTypeObject *> check;
    for (handle parent : reinterpret_borrow<tuple>(t->tp_bases)) {
        check.push_back((PyTypeObject *) parent.ptr());
    }
    auto const &type_dict = get_internals().registered_types_py;
    // Indexed loop (not iterators): check grows while we walk it.
    for (size_t i = 0; i < check.size(); i++) {
        auto *type = check[i];
        // Ignore Python2 old-style class super type:
        if (!PyType_Check((PyObject *) type)) {
            continue;
        }
        // Check `type` in the current set of registered python types:
        auto it = type_dict.find(type);
        if (it != type_dict.end()) {
            // We found a cache entry for it, so it's either pybind-registered or has pre-computed
            // pybind bases, but we have to make sure we haven't already seen the type(s) before:
            // we want to follow Python/virtual C++ rules that there should only be one instance of
            // a common base.
            for (auto *tinfo : it->second) {
                // NB: Could use a second set here, rather than doing a linear search, but since
                // having a large number of immediate pybind11-registered types seems fairly
                // unlikely, that probably isn't worthwhile.
                bool found = false;
                for (auto *known : bases) {
                    if (known == tinfo) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    bases.push_back(tinfo);
                }
            }
        } else if (type->tp_bases) {
            // It's some python type, so keep follow its bases classes to look for one or more
            // registered types
            if (i + 1 == check.size()) {
                // When we're at the end, we can pop off the current element to avoid growing
                // `check` when adding just one base (which is typical--i.e. when there is no
                // multiple inheritance)
                check.pop_back();
                i--;
            }
            for (handle parent : reinterpret_borrow<tuple>(type->tp_bases)) {
                check.push_back((PyTypeObject *) parent.ptr());
            }
        }
    }
}
/**
 * Extracts vector of type_info pointers of pybind-registered roots of the given Python type. Will
 * be just 1 pybind type for the Python type of a pybind-registered class, or for any Python-side
 * derived class that uses single inheritance. Will contain as many types as required for a Python
 * class that uses multiple inheritance to inherit (directly or indirectly) from multiple
 * pybind-registered classes. Will be empty if neither the type nor any base classes are
 * pybind-registered.
 *
 * The value is cached for the lifetime of the Python type.
 */
inline const std::vector<detail::type_info *> &all_type_info(PyTypeObject *type) {
    const auto cached = all_type_info_get_cache(type);
    const bool freshly_inserted = cached.second;
    if (freshly_inserted) {
        // First lookup for this type: fill in the entry by walking its MRO.
        all_type_info_populate(type, cached.first->second);
    }
    return cached.first->second;
}
/**
 * Gets a single pybind11 type info for a python type. Returns nullptr if neither the type nor any
 * ancestors are pybind11-registered. Throws an exception if there are multiple bases--use
 * `all_type_info` instead if you want to support multiple bases.
 */
PYBIND11_NOINLINE detail::type_info *get_type_info(PyTypeObject *type) {
    const auto &roots = all_type_info(type);
    if (roots.size() > 1) {
        pybind11_fail(
            "pybind11::detail::get_type_info: type has multiple pybind11-registered bases");
    }
    // Zero roots -> not pybind11-registered; exactly one -> return it.
    return roots.empty() ? nullptr : roots.front();
}
// Look up `tp` among this module's py::module_local registrations;
// nullptr when absent.
inline detail::type_info *get_local_type_info(const std::type_index &tp) {
    auto &local_types = get_local_internals().registered_types_cpp;
    const auto found = local_types.find(tp);
    return (found == local_types.end()) ? nullptr : found->second;
}
// Look up `tp` among the globally shared (cross-module) registrations;
// nullptr when absent.
inline detail::type_info *get_global_type_info(const std::type_index &tp) {
    auto &global_types = get_internals().registered_types_cpp;
    const auto found = global_types.find(tp);
    return (found == global_types.end()) ? nullptr : found->second;
}
/// Return the type info for a given C++ type; on lookup failure can either throw or return
/// nullptr. Module-local registrations shadow global ones.
PYBIND11_NOINLINE detail::type_info *get_type_info(const std::type_index &tp,
                                                   bool throw_if_missing = false) {
    detail::type_info *result = get_local_type_info(tp);
    if (result == nullptr) {
        result = get_global_type_info(tp);
    }
    if (result == nullptr && throw_if_missing) {
        std::string tname = tp.name();
        detail::clean_type_id(tname);  // demangle for a readable message
        pybind11_fail("pybind11::detail::get_type_info: unable to find type info for \""
                      + std::move(tname) + '"');
    }
    return result;
}
// Resolve the Python type object registered for C++ type `tp`; a default
// (null) handle when unregistered and `throw_if_missing` is false.
PYBIND11_NOINLINE handle get_type_handle(const std::type_info &tp, bool throw_if_missing) {
    if (detail::type_info *info = get_type_info(tp, throw_if_missing)) {
        return handle((PyObject *) info->type);
    }
    return handle(nullptr);
}
// Searches the inheritance graph for a registered Python instance, using all_type_info().
// Returns a new (increfed) reference on a match, or a default handle otherwise.
PYBIND11_NOINLINE handle find_registered_python_instance(void *src,
                                                         const detail::type_info *tinfo) {
    const auto candidates = get_internals().registered_instances.equal_range(src);
    for (auto entry = candidates.first; entry != candidates.second; ++entry) {
        instance *inst = entry->second;
        for (auto *registered : detail::all_type_info(Py_TYPE(inst))) {
            if (registered && same_type(*registered->cpptype, *tinfo->cpptype)) {
                return handle((PyObject *) inst).inc_ref();
            }
        }
    }
    return handle();
}
// A lightweight view onto one (value pointer, holder) pair inside an
// `instance`. Also doubles as the element/iterator payload of
// values_and_holders below. Must stay short-lived: it points into the
// instance's storage.
struct value_and_holder {
    instance *inst = nullptr;
    size_t index = 0u;
    const detail::type_info *type = nullptr;
    // Points at the value slot; vh[0] is the value pointer, vh[1..] the holder.
    void **vh = nullptr;

    // Main constructor for a found value/holder:
    value_and_holder(instance *i, const detail::type_info *type, size_t vpos, size_t index)
        : inst{i}, index{index}, type{type},
          vh{inst->simple_layout ? inst->simple_value_holder
                                 : &inst->nonsimple.values_and_holders[vpos]} {}

    // Default constructor (used to signal a value-and-holder not found by get_value_and_holder())
    value_and_holder() = default;

    // Used for past-the-end iterator
    explicit value_and_holder(size_t index) : index{index} {}

    template <typename V = void>
    V *&value_ptr() const {
        return reinterpret_cast<V *&>(vh[0]);
    }
    // True if this `value_and_holder` has a non-null value pointer
    explicit operator bool() const { return value_ptr() != nullptr; }

    template <typename H>
    H &holder() const {
        return reinterpret_cast<H &>(vh[1]);
    }
    // Whether the holder for this slot has been constructed; tracked either in
    // the simple flag or in the per-slot status byte (non-simple layout).
    bool holder_constructed() const {
        return inst->simple_layout
                   ? inst->simple_holder_constructed
                   : (inst->nonsimple.status[index] & instance::status_holder_constructed) != 0u;
    }
    // NOLINTNEXTLINE(readability-make-member-function-const)
    void set_holder_constructed(bool v = true) {
        if (inst->simple_layout) {
            inst->simple_holder_constructed = v;
        } else if (v) {
            inst->nonsimple.status[index] |= instance::status_holder_constructed;
        } else {
            inst->nonsimple.status[index] &= (std::uint8_t) ~instance::status_holder_constructed;
        }
    }
    // Whether this slot's value has been entered into registered_instances.
    bool instance_registered() const {
        return inst->simple_layout
                   ? inst->simple_instance_registered
                   : ((inst->nonsimple.status[index] & instance::status_instance_registered) != 0);
    }
    // NOLINTNEXTLINE(readability-make-member-function-const)
    void set_instance_registered(bool v = true) {
        if (inst->simple_layout) {
            inst->simple_instance_registered = v;
        } else if (v) {
            inst->nonsimple.status[index] |= instance::status_instance_registered;
        } else {
            inst->nonsimple.status[index] &= (std::uint8_t) ~instance::status_instance_registered;
        }
    }
};
// Container for accessing and iterating over an instance's values/holders
// (one per pybind11-registered base of the instance's Python type).
struct values_and_holders {
private:
    instance *inst;
    using type_vec = std::vector<detail::type_info *>;
    const type_vec &tinfo;

public:
    explicit values_and_holders(instance *inst)
        : inst{inst}, tinfo(all_type_info(Py_TYPE(inst))) {}

    // Forward iterator yielding value_and_holder views. Equality compares only
    // `curr.index`, so the past-the-end iterator needs just an index.
    struct iterator {
    private:
        instance *inst = nullptr;
        const type_vec *types = nullptr;
        value_and_holder curr;
        friend struct values_and_holders;
        iterator(instance *inst, const type_vec *tinfo)
            : inst{inst}, types{tinfo},
              curr(inst /* instance */,
                   types->empty() ? nullptr : (*types)[0] /* type info */,
                   0, /* vpos: (non-simple types only): the first vptr comes first */
                   0 /* index */) {}
        // Past-the-end iterator:
        explicit iterator(size_t end) : curr(end) {}

    public:
        bool operator==(const iterator &other) const { return curr.index == other.curr.index; }
        bool operator!=(const iterator &other) const { return curr.index != other.curr.index; }
        iterator &operator++() {
            if (!inst->simple_layout) {
                // Advance past this slot's value pointer plus its holder block.
                curr.vh += 1 + (*types)[curr.index]->holder_size_in_ptrs;
            }
            ++curr.index;
            curr.type = curr.index < types->size() ? (*types)[curr.index] : nullptr;
            return *this;
        }
        value_and_holder &operator*() { return curr; }
        value_and_holder *operator->() { return &curr; }
    };

    iterator begin() { return iterator(inst, &tinfo); }
    iterator end() { return iterator(tinfo.size()); }

    // Linear scan for the slot belonging to `find_type`; returns end() if absent.
    iterator find(const type_info *find_type) {
        auto it = begin(), endit = end();
        while (it != endit && it->type != find_type) {
            ++it;
        }
        return it;
    }

    size_t size() { return tinfo.size(); }
};
/**
 * Extracts C++ value and holder pointer references from an instance (which may contain multiple
 * values/holders for python-side multiple inheritance) that match the given type. Throws an error
 * if the given type (or ValueType, if omitted) is not a pybind11 base of the given instance. If
 * `find_type` is omitted (or explicitly specified as nullptr) the first value/holder are returned,
 * regardless of type (and the resulting .type will be nullptr).
 *
 * The returned object should be short-lived: in particular, it must not outlive the called-upon
 * instance.
 */
PYBIND11_NOINLINE value_and_holder
instance::get_value_and_holder(const type_info *find_type /*= nullptr default in common.h*/,
                               bool throw_if_missing /*= true in common.h*/) {
    // Optimize common case: no type given, or the instance's own Python type
    // matches -- the first slot is the answer either way.
    if (!find_type || Py_TYPE(this) == find_type->type) {
        return value_and_holder(this, find_type, 0, 0);
    }

    // Slow path: scan all slots for the requested base type.
    detail::values_and_holders vhs(this);
    auto it = vhs.find(find_type);
    if (it != vhs.end()) {
        return *it;
    }

    if (!throw_if_missing) {
        // Default-constructed v&h signals "not found" to the caller.
        return value_and_holder();
    }

#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
    pybind11_fail("pybind11::detail::instance::get_value_and_holder: `"
                  + get_fully_qualified_tp_name(find_type->type)
                  + "' is not a pybind11 base of the given `"
                  + get_fully_qualified_tp_name(Py_TYPE(this)) + "' instance");
#else
    pybind11_fail(
        "pybind11::detail::instance::get_value_and_holder: "
        "type is not a pybind11 base of the given instance "
        "(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for type details)");
#endif
}
// Choose and allocate this instance's value/holder storage layout. Simple
// layout (inline storage) is used for a single registered base with a
// small-enough holder; otherwise a dynamic block is allocated.
PYBIND11_NOINLINE void instance::allocate_layout() {
    const auto &tinfo = all_type_info(Py_TYPE(this));

    const size_t n_types = tinfo.size();

    if (n_types == 0) {
        pybind11_fail(
            "instance allocation failed: new instance has no pybind11-registered base types");
    }

    simple_layout
        = n_types == 1 && tinfo.front()->holder_size_in_ptrs <= instance_simple_holder_in_ptrs();

    // Simple path: no python-side multiple inheritance, and a small-enough holder
    if (simple_layout) {
        simple_value_holder[0] = nullptr;
        simple_holder_constructed = false;
        simple_instance_registered = false;
    } else { // multiple base types or a too-large holder
        // Allocate space to hold: [v1*][h1][v2*][h2]...[bb...] where [vN*] is a value pointer,
        // [hN] is the (uninitialized) holder instance for value N, and [bb...] is a set of bool
        // values that tracks whether each associated holder has been initialized. Each [block] is
        // padded, if necessary, to an integer multiple of sizeof(void *).
        size_t space = 0;
        for (auto *t : tinfo) {
            space += 1;                      // value pointer
            space += t->holder_size_in_ptrs; // holder instance
        }
        size_t flags_at = space;
        space += size_in_ptrs(n_types); // status bytes (holder_constructed and
                                        // instance_registered)

        // Allocate space for flags, values, and holders, and initialize it to 0 (flags and values,
        // in particular, need to be 0). Use Python's memory allocation
        // functions: Python is using pymalloc, which is designed to be
        // efficient for small allocations like the one we're doing here;
        // for larger allocations they are just wrappers around malloc.
        // TODO: is this still true for pure Python 3.6?
        nonsimple.values_and_holders = (void **) PyMem_Calloc(space, sizeof(void *));
        if (!nonsimple.values_and_holders) {
            throw std::bad_alloc();
        }
        // Status bytes live at the tail of the same allocation.
        nonsimple.status
            = reinterpret_cast<std::uint8_t *>(&nonsimple.values_and_holders[flags_at]);
    }
    owned = true;
}
// NOLINTNEXTLINE(readability-make-member-function-const)
// Release the dynamic storage from allocate_layout(); simple layout stores
// everything inline, so there is nothing to free in that case.
PYBIND11_NOINLINE void instance::deallocate_layout() {
    if (!simple_layout) {
        PyMem_Free(nonsimple.values_and_holders);
    }
}
// Python-level isinstance() test against the Python type registered for C++
// type `tp`; false when `tp` is not pybind11-registered at all.
PYBIND11_NOINLINE bool isinstance_generic(handle obj, const std::type_info &tp) {
    const handle registered_type = detail::get_type_handle(tp, false);
    return registered_type && isinstance(obj, registered_type);
}
// Find the registered Python object holding C++ pointer `ptr` under exactly the
// slot type `type` (pointer comparison on type_info, unlike
// find_registered_python_instance which compares C++ typeids). Borrowed handle;
// default handle when not found.
PYBIND11_NOINLINE handle get_object_handle(const void *ptr, const detail::type_info *type) {
    const auto candidates = get_internals().registered_instances.equal_range(ptr);
    for (auto entry = candidates.first; entry != candidates.second; ++entry) {
        for (const auto &slot : values_and_holders(entry->second)) {
            if (slot.type == type) {
                return handle((PyObject *) entry->second);
            }
        }
    }
    return handle();
}
/// Fetch the current Python thread state without the "must be non-null" check.
inline PyThreadState *get_thread_state_unchecked() {
#if defined(PYPY_VERSION)
    // PyPy does not expose the CPython-private accessor used below.
    return PyThreadState_GET();
#else
    // CPython-private "unchecked" accessor; per its name it may return nullptr.
    return _PyThreadState_UncheckedGet();
#endif
}
// Forward declarations
void keep_alive_impl(handle nurse, handle patient);
inline PyObject *make_new_instance(PyTypeObject *type);
/// Generic type caster for registered C++ types: loads Python objects holding
/// pybind11 instances into C++ pointers, and wraps C++ pointers in (new or
/// already-registered) Python instances according to a return_value_policy.
class type_caster_generic {
public:
    PYBIND11_NOINLINE explicit type_caster_generic(const std::type_info &type_info)
        : typeinfo(get_type_info(type_info)), cpptype(&type_info) {}

    explicit type_caster_generic(const type_info *typeinfo)
        : typeinfo(typeinfo), cpptype(typeinfo ? typeinfo->cpptype : nullptr) {}

    bool load(handle src, bool convert) { return load_impl<type_caster_generic>(src, convert); }

    // Wrap the C++ pointer `_src` in a Python instance according to `policy`.
    // Returns an already-registered instance when one exists for this pointer;
    // a null `_src` becomes None. `existing_holder` (if given) is forwarded to
    // the type's init_instance so a holder can be reused.
    PYBIND11_NOINLINE static handle cast(const void *_src,
                                         return_value_policy policy,
                                         handle parent,
                                         const detail::type_info *tinfo,
                                         void *(*copy_constructor)(const void *),
                                         void *(*move_constructor)(const void *),
                                         const void *existing_holder = nullptr) {
        if (!tinfo) { // no type info: error will be set already
            return handle();
        }

        void *src = const_cast<void *>(_src);
        if (src == nullptr) {
            return none().release();
        }

        if (handle registered_inst = find_registered_python_instance(src, tinfo)) {
            return registered_inst;
        }

        auto inst = reinterpret_steal<object>(make_new_instance(tinfo->type));
        auto *wrapper = reinterpret_cast<instance *>(inst.ptr());
        wrapper->owned = false;
        void *&valueptr = values_and_holders(wrapper).begin()->value_ptr();

        switch (policy) {
            case return_value_policy::automatic:
            case return_value_policy::take_ownership:
                valueptr = src;
                wrapper->owned = true;
                break;

            case return_value_policy::automatic_reference:
            case return_value_policy::reference:
                valueptr = src;
                wrapper->owned = false;
                break;

            case return_value_policy::copy:
                if (copy_constructor) {
                    valueptr = copy_constructor(src);
                } else {
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
                    std::string type_name(tinfo->cpptype->name());
                    detail::clean_type_id(type_name);
                    throw cast_error("return_value_policy = copy, but type " + type_name
                                     + " is non-copyable!");
#else
                    throw cast_error("return_value_policy = copy, but type is "
                                     "non-copyable! (#define PYBIND11_DETAILED_ERROR_MESSAGES or "
                                     "compile in debug mode for details)");
#endif
                }
                wrapper->owned = true;
                break;

            case return_value_policy::move:
                // Prefer the move constructor, fall back to copy when only that exists.
                if (move_constructor) {
                    valueptr = move_constructor(src);
                } else if (copy_constructor) {
                    valueptr = copy_constructor(src);
                } else {
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
                    std::string type_name(tinfo->cpptype->name());
                    detail::clean_type_id(type_name);
                    throw cast_error("return_value_policy = move, but type " + type_name
                                     + " is neither movable nor copyable!");
#else
                    throw cast_error("return_value_policy = move, but type is neither "
                                     "movable nor copyable! "
                                     "(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in "
                                     "debug mode for details)");
#endif
                }
                wrapper->owned = true;
                break;

            case return_value_policy::reference_internal:
                // Like reference, but additionally ties the new instance's
                // lifetime to `parent`.
                valueptr = src;
                wrapper->owned = false;
                keep_alive_impl(inst, parent);
                break;

            default:
                throw cast_error("unhandled return_value_policy: should not happen!");
        }

        tinfo->init_instance(wrapper, existing_holder);
        return inst.release();
    }

    // Base methods for generic caster; these are overridden in copyable_holder_caster
    void load_value(value_and_holder &&v_h) {
        auto *&vptr = v_h.value_ptr();
        // Lazy allocation for unallocated values:
        if (vptr == nullptr) {
            const auto *type = v_h.type ? v_h.type : typeinfo;
            if (type->operator_new) {
                vptr = type->operator_new(type->type_size);
            } else {
#if defined(__cpp_aligned_new) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
                // Over-aligned types need the aligned operator new overload.
                if (type->type_align > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
                    vptr = ::operator new(type->type_size, std::align_val_t(type->type_align));
                } else {
                    vptr = ::operator new(type->type_size);
                }
#else
                vptr = ::operator new(type->type_size);
#endif
            }
        }
        value = vptr;
    }

    // Try each registered implicit C++ cast (e.g. derived -> base with MI
    // pointer adjustment) by loading `src` as the source type of the cast.
    bool try_implicit_casts(handle src, bool convert) {
        for (const auto &cast : typeinfo->implicit_casts) {
            type_caster_generic sub_caster(*cast.first);
            if (sub_caster.load(src, convert)) {
                value = cast.second(sub_caster.value);
                return true;
            }
        }
        return false;
    }

    // Try user-registered direct converters (py::detail::direct_conversions).
    bool try_direct_conversions(handle src) {
        for (auto &converter : *typeinfo->direct_conversions) {
            if (converter(src.ptr(), value)) {
                return true;
            }
        }
        return false;
    }

    // No-op here; copyable_holder_caster overrides this to validate holders.
    void check_holder_compat() {}

    // Loader callback stored in type_info::module_local_load for module-local types.
    PYBIND11_NOINLINE static void *local_load(PyObject *src, const type_info *ti) {
        auto caster = type_caster_generic(ti);
        if (caster.load(src, false)) {
            return caster.value;
        }
        return nullptr;
    }

    /// Try to load with foreign typeinfo, if available. Used when there is no
    /// native typeinfo, or when the native one wasn't able to produce a value.
    PYBIND11_NOINLINE bool try_load_foreign_module_local(handle src) {
        constexpr auto *local_key = PYBIND11_MODULE_LOCAL_ID;
        const auto pytype = type::handle_of(src);
        if (!hasattr(pytype, local_key)) {
            return false;
        }

        type_info *foreign_typeinfo = reinterpret_borrow<capsule>(getattr(pytype, local_key));
        // Only consider this foreign loader if actually foreign and is a loader of the correct cpp
        // type
        if (foreign_typeinfo->module_local_load == &local_load
            || (cpptype && !same_type(*cpptype, *foreign_typeinfo->cpptype))) {
            return false;
        }

        if (auto *result = foreign_typeinfo->module_local_load(src.ptr(), foreign_typeinfo)) {
            value = result;
            return true;
        }
        return false;
    }

    // Implementation of `load`; this takes the type of `this` so that it can dispatch the relevant
    // bits of code between here and copyable_holder_caster where the two classes need different
    // logic (without having to resort to virtual inheritance).
    template <typename ThisT>
    PYBIND11_NOINLINE bool load_impl(handle src, bool convert) {
        if (!src) {
            return false;
        }
        if (!typeinfo) {
            return try_load_foreign_module_local(src);
        }

        auto &this_ = static_cast<ThisT &>(*this);
        this_.check_holder_compat();

        PyTypeObject *srctype = Py_TYPE(src.ptr());

        // Case 1: If src is an exact type match for the target type then we can reinterpret_cast
        // the instance's value pointer to the target type:
        if (srctype == typeinfo->type) {
            this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder());
            return true;
        }
        // Case 2: We have a derived class
        if (PyType_IsSubtype(srctype, typeinfo->type)) {
            const auto &bases = all_type_info(srctype);
            bool no_cpp_mi = typeinfo->simple_type;

            // Case 2a: the python type is a Python-inherited derived class that inherits from just
            // one simple (no MI) pybind11 class, or is an exact match, so the C++ instance is of
            // the right type and we can use reinterpret_cast.
            // (This is essentially the same as case 2b, but because not using multiple inheritance
            // is extremely common, we handle it specially to avoid the loop iterator and type
            // pointer lookup overhead)
            if (bases.size() == 1 && (no_cpp_mi || bases.front()->type == typeinfo->type)) {
                this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder());
                return true;
            }

            // Case 2b: the python type inherits from multiple C++ bases. Check the bases to see
            // if we can find an exact match (or, for a simple C++ type, an inherited match); if
            // so, we can safely reinterpret_cast to the relevant pointer.
            if (bases.size() > 1) {
                for (auto *base : bases) {
                    if (no_cpp_mi ? PyType_IsSubtype(base->type, typeinfo->type)
                                  : base->type == typeinfo->type) {
                        this_.load_value(
                            reinterpret_cast<instance *>(src.ptr())->get_value_and_holder(base));
                        return true;
                    }
                }
            }

            // Case 2c: C++ multiple inheritance is involved and we couldn't find an exact type
            // match in the registered bases, above, so try implicit casting (needed for proper C++
            // casting when MI is involved).
            if (this_.try_implicit_casts(src, convert)) {
                return true;
            }
        }

        // Perform an implicit conversion
        if (convert) {
            for (const auto &converter : typeinfo->implicit_conversions) {
                auto temp = reinterpret_steal<object>(converter(src.ptr(), typeinfo->type));
                if (load_impl<ThisT>(temp, false)) {
                    // The converted temporary must outlive the call it feeds.
                    loader_life_support::add_patient(temp);
                    return true;
                }
            }
            if (this_.try_direct_conversions(src)) {
                return true;
            }
        }

        // Failed to match local typeinfo. Try again with global.
        if (typeinfo->module_local) {
            if (auto *gtype = get_global_type_info(*typeinfo->cpptype)) {
                typeinfo = gtype;
                return load(src, false);
            }
        }

        // Global typeinfo has precedence over foreign module_local
        if (try_load_foreign_module_local(src)) {
            return true;
        }

        // Custom converters didn't take None, now we convert None to nullptr.
        if (src.is_none()) {
            // Defer accepting None to other overloads (if we aren't in convert mode):
            if (!convert) {
                return false;
            }
            value = nullptr;
            return true;
        }

        return false;
    }

    // Called to do type lookup and wrap the pointer and type in a pair when a dynamic_cast
    // isn't needed or can't be used. If the type is unknown, sets the error and returns a pair
    // with .second = nullptr. (p.first = nullptr is not an error: it becomes None).
    PYBIND11_NOINLINE static std::pair<const void *, const type_info *>
    src_and_type(const void *src,
                 const std::type_info &cast_type,
                 const std::type_info *rtti_type = nullptr) {
        if (auto *tpi = get_type_info(cast_type)) {
            return {src, const_cast<const type_info *>(tpi)};
        }

        // Not found, set error:
        std::string tname = rtti_type ? rtti_type->name() : cast_type.name();
        detail::clean_type_id(tname);
        std::string msg = "Unregistered type : " + tname;
        PyErr_SetString(PyExc_TypeError, msg.c_str());
        return {nullptr, nullptr};
    }

    const type_info *typeinfo = nullptr;     // pybind11's registered type record (may be null)
    const std::type_info *cpptype = nullptr; // C++ RTTI of the target type (may be null)
    void *value = nullptr;                   // resulting C++ pointer after a successful load()
};
/**
 * Determine suitable casting operator for pointer-or-lvalue-casting type casters. The type caster
 * needs to provide `operator T*()` and `operator T&()` operators.
 *
 * If the type supports moving the value away via an `operator T&&() &&` method, it should use
 * `movable_cast_op_type` instead.
 */
template <typename T>
using cast_op_type = conditional_t<std::is_pointer<remove_reference_t<T>>::value,
                                   typename std::add_pointer<intrinsic_t<T>>::type,
                                   typename std::add_lvalue_reference<intrinsic_t<T>>::type>;

/**
 * Determine suitable casting operator for a type caster with a movable value. Such a type caster
 * needs to provide `operator T*()`, `operator T&()`, and `operator T&&() &&`. The latter will be
 * called in appropriate contexts where the value can be moved rather than copied.
 *
 * These operators are automatically provided when using the PYBIND11_TYPE_CASTER macro.
 */
template <typename T>
using movable_cast_op_type
    = conditional_t<std::is_pointer<typename std::remove_reference<T>::type>::value,
                    typename std::add_pointer<intrinsic_t<T>>::type,
                    conditional_t<std::is_rvalue_reference<T>::value,
                                  typename std::add_rvalue_reference<intrinsic_t<T>>::type,
                                  typename std::add_lvalue_reference<intrinsic_t<T>>::type>>;
// Does the container have a mapped type and is it recursive?
// Implemented by specializations below.
template <typename Container, typename SFINAE = void>
struct container_mapped_type_traits {
    // Primary template: no `mapped_type` member detected.
    static constexpr bool has_mapped_type = false;
    static constexpr bool has_recursive_mapped_type = false;
};

// `Container::mapped_type` exists and is `Container` itself: a recursive map.
template <typename Container>
struct container_mapped_type_traits<
    Container,
    typename std::enable_if<
        std::is_same<typename Container::mapped_type, Container>::value>::type> {
    static constexpr bool has_mapped_type = true;
    static constexpr bool has_recursive_mapped_type = true;
};

// `Container::mapped_type` exists but differs from `Container`: non-recursive.
template <typename Container>
struct container_mapped_type_traits<
    Container,
    typename std::enable_if<
        negation<std::is_same<typename Container::mapped_type, Container>>::value>::type> {
    static constexpr bool has_mapped_type = true;
    static constexpr bool has_recursive_mapped_type = false;
};
// Does the container have a value type and is it recursive?
// Implemented by specializations below.
template <typename Container, typename SFINAE = void>
struct container_value_type_traits : std::false_type {
    // Primary template: no `value_type` member detected.
    static constexpr bool has_value_type = false;
    static constexpr bool has_recursive_value_type = false;
};

// `Container::value_type` exists and is `Container` itself: a recursive container.
template <typename Container>
struct container_value_type_traits<
    Container,
    typename std::enable_if<
        std::is_same<typename Container::value_type, Container>::value>::type> {
    static constexpr bool has_value_type = true;
    static constexpr bool has_recursive_value_type = true;
};

// `Container::value_type` exists but differs from `Container`: non-recursive.
template <typename Container>
struct container_value_type_traits<
    Container,
    typename std::enable_if<
        negation<std::is_same<typename Container::value_type, Container>>::value>::type> {
    static constexpr bool has_value_type = true;
    static constexpr bool has_recursive_value_type = false;
};
/*
 * Tag to be used for representing the bottom of recursively defined types.
 * Define this tag so we don't have to use void.
 */
struct recursive_bottom {};

/*
 * Implementation detail of `recursive_container_traits` below.
 * `T` is the `value_type` of the container, which might need to be modified to
 * avoid recursive types and const types.
 */
template <typename T, bool is_this_a_map>
struct impl_type_to_check_recursively {
    /*
     * If the container is recursive, then no further recursion should be done.
     */
    using if_recursive = recursive_bottom;
    /*
     * Otherwise yield `T` unchanged.
     */
    using if_not_recursive = T;
};

/*
 * For pairs - only as value type of a map -, the first type should remove the `const`.
 * Also, if the map is recursive, then the recursive checking should consider
 * the first type only.
 */
template <typename A, typename B>
struct impl_type_to_check_recursively<std::pair<A, B>, /* is_this_a_map = */ true> {
    using if_recursive = typename std::remove_const<A>::type;
    using if_not_recursive = std::pair<typename std::remove_const<A>::type, B>;
};

/*
 * Implementation of `recursive_container_traits` below.
 */
template <typename Container, typename SFINAE = void>
struct impl_recursive_container_traits {
    // Primary template: no `value_type` member, so there is nothing further to recurse into.
    using type_to_check_recursively = recursive_bottom;
};

template <typename Container>
struct impl_recursive_container_traits<
    Container,
    typename std::enable_if<container_value_type_traits<Container>::has_value_type>::type> {
    // Recursive either through the value type or through a map's mapped type.
    static constexpr bool is_recursive
        = container_mapped_type_traits<Container>::has_recursive_mapped_type
          || container_value_type_traits<Container>::has_recursive_value_type;
    /*
     * This member dictates which type Pybind11 should check recursively in traits
     * such as `is_move_constructible`, `is_copy_constructible`, `is_move_assignable`, ...
     * Direct access to `value_type` should be avoided:
     * 1. `value_type` might recursively contain the type again
     * 2. `value_type` of STL map types is `std::pair<A const, B>`, the `const`
     *    should be removed.
     */
    using type_to_check_recursively = typename std::conditional<
        is_recursive,
        typename impl_type_to_check_recursively<
            typename Container::value_type,
            container_mapped_type_traits<Container>::has_mapped_type>::if_recursive,
        typename impl_type_to_check_recursively<
            typename Container::value_type,
            container_mapped_type_traits<Container>::has_mapped_type>::if_not_recursive>::type;
};
/*
 * This trait defines the `type_to_check_recursively` which is needed to properly
 * handle recursively defined traits such as `is_move_constructible` without going
 * into an infinite recursion.
 * Should be used instead of directly accessing the `value_type`.
 * It cancels the recursion by returning the `recursive_bottom` tag.
 *
 * The default definition of `type_to_check_recursively` is as follows:
 *
 * 1. By default, it is `recursive_bottom`, so that the recursion is canceled.
 * 2. If the type is non-recursive and defines a `value_type`, then the `value_type` is used.
 *    If the `value_type` is a pair and a `mapped_type` is defined,
 *    then the `const` is removed from the first type.
 * 3. If the type is recursive and `value_type` is not a pair, then `recursive_bottom` is returned.
 * 4. If the type is recursive and `value_type` is a pair and a `mapped_type` is defined,
 *    then `const` is removed from the first type and the first type is returned.
 *
 * This behavior can be extended by the user as seen in test_stl_binders.cpp.
 *
 * This struct is exactly the same as impl_recursive_container_traits.
 * The duplication achieves that user-defined specializations don't compete
 * with internal specializations, but take precedence.
 */
template <typename Container, typename SFINAE = void>
struct recursive_container_traits : impl_recursive_container_traits<Container> {};

// Recursive move-constructibility check: T and its (recursion-safe) element type.
template <typename T>
struct is_move_constructible
    : all_of<std::is_move_constructible<T>,
             is_move_constructible<
                 typename recursive_container_traits<T>::type_to_check_recursively>> {};

// Base case: the recursion terminator is always considered move constructible.
template <>
struct is_move_constructible<recursive_bottom> : std::true_type {};

// Likewise for std::pair
// (after C++17 it is mandatory that the move constructor not exist when the two types aren't
// themselves move constructible, but this can not be relied upon when T1 or T2 are themselves
// containers).
template <typename T1, typename T2>
struct is_move_constructible<std::pair<T1, T2>>
    : all_of<is_move_constructible<T1>, is_move_constructible<T2>> {};

// std::is_copy_constructible isn't quite enough: it lets std::vector<T> (and similar) through when
// T is non-copyable, but code containing such a copy constructor fails to actually compile.
template <typename T>
struct is_copy_constructible
    : all_of<std::is_copy_constructible<T>,
             is_copy_constructible<
                 typename recursive_container_traits<T>::type_to_check_recursively>> {};

// Base case: the recursion terminator is always considered copy constructible.
template <>
struct is_copy_constructible<recursive_bottom> : std::true_type {};

// Likewise for std::pair
// (after C++17 it is mandatory that the copy constructor not exist when the two types aren't
// themselves copy constructible, but this can not be relied upon when T1 or T2 are themselves
// containers).
template <typename T1, typename T2>
struct is_copy_constructible<std::pair<T1, T2>>
    : all_of<is_copy_constructible<T1>, is_copy_constructible<T2>> {};

// The same problems arise with std::is_copy_assignable, so we use the same workaround.
template <typename T>
struct is_copy_assignable
    : all_of<
          std::is_copy_assignable<T>,
          is_copy_assignable<typename recursive_container_traits<T>::type_to_check_recursively>> {
};

// Base case: the recursion terminator is always considered copy assignable.
template <>
struct is_copy_assignable<recursive_bottom> : std::true_type {};

template <typename T1, typename T2>
struct is_copy_assignable<std::pair<T1, T2>>
    : all_of<is_copy_assignable<T1>, is_copy_assignable<T2>> {};
PYBIND11_NAMESPACE_END(detail)
// polymorphic_type_hook<itype>::get(src, tinfo) determines whether the object pointed
// to by `src` actually is an instance of some class derived from `itype`.
// If so, it sets `tinfo` to point to the std::type_info representing that derived
// type, and returns a pointer to the start of the most-derived object of that type
// (in which `src` is a subobject; this will be the same address as `src` in most
// single inheritance cases). If not, or if `src` is nullptr, it simply returns `src`
// and leaves `tinfo` at its default value of nullptr.
//
// The default polymorphic_type_hook just returns src. A specialization for polymorphic
// types determines the runtime type of the passed object and adjusts the this-pointer
// appropriately via dynamic_cast<void*>. This is what enables a C++ Animal* to appear
// to Python as a Dog (if Dog inherits from Animal, Animal is polymorphic, Dog is
// registered with pybind11, and this Animal is in fact a Dog).
//
// You may specialize polymorphic_type_hook yourself for types that want to appear
// polymorphic to Python but do not use C++ RTTI. (This is a not uncommon pattern
// in performance-sensitive applications, used most notably in LLVM.)
//
// polymorphic_type_hook_base allows users to specialize polymorphic_type_hook with
// std::enable_if. User provided specializations will always have higher priority than
// the default implementation and specialization provided in polymorphic_type_hook_base.
template <typename itype, typename SFINAE = void>
struct polymorphic_type_hook_base {
    // Non-polymorphic default: no runtime type information, no pointer adjustment.
    static const void *get(const itype *src, const std::type_info *&) { return src; }
};
template <typename itype>
struct polymorphic_type_hook_base<itype, detail::enable_if_t<std::is_polymorphic<itype>::value>> {
    static const void *get(const itype *src, const std::type_info *&type) {
        // typeid(*src) yields the dynamic (most-derived) type; dynamic_cast<const void*>
        // adjusts the pointer to the start of that most-derived object.
        type = src ? &typeid(*src) : nullptr;
        return dynamic_cast<const void *>(src);
    }
};
template <typename itype, typename SFINAE = void>
struct polymorphic_type_hook : public polymorphic_type_hook_base<itype> {};
PYBIND11_NAMESPACE_BEGIN(detail)
/// Generic type caster for objects stored on the heap
template <typename type>
class type_caster_base : public type_caster_generic {
    using itype = intrinsic_t<type>;

public:
    static constexpr auto name = const_name<type>();

    type_caster_base() : type_caster_base(typeid(type)) {}
    explicit type_caster_base(const std::type_info &info) : type_caster_generic(info) {}

    static handle cast(const itype &src, return_value_policy policy, handle parent) {
        // For an lvalue, "automatic" policies resolve to a copy.
        if (policy == return_value_policy::automatic
            || policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::copy;
        }
        return cast(&src, policy, parent);
    }

    static handle cast(itype &&src, return_value_policy, handle parent) {
        // An rvalue is always moved; the requested policy is ignored.
        return cast(&src, return_value_policy::move, parent);
    }

    // Returns a (pointer, type_info) pair taking care of necessary type lookup for a
    // polymorphic type (using RTTI by default, but can be overridden by specializing
    // polymorphic_type_hook). If the instance isn't derived, returns the base version.
    static std::pair<const void *, const type_info *> src_and_type(const itype *src) {
        const auto &cast_type = typeid(itype);
        const std::type_info *instance_type = nullptr;
        const void *vsrc = polymorphic_type_hook<itype>::get(src, instance_type);
        if (instance_type && !same_type(cast_type, *instance_type)) {
            // This is a base pointer to a derived type. If the derived type is registered
            // with pybind11, we want to make the full derived object available.
            // In the typical case where itype is polymorphic, we get the correct
            // derived pointer (which may be != base pointer) by a dynamic_cast to
            // most derived type. If itype is not polymorphic, we won't get here
            // except via a user-provided specialization of polymorphic_type_hook,
            // and the user has promised that no this-pointer adjustment is
            // required in that case, so it's OK to use static_cast.
            if (const auto *tpi = get_type_info(*instance_type)) {
                return {vsrc, tpi};
            }
        }
        // Otherwise we have either a nullptr, an `itype` pointer, or an unknown derived pointer,
        // so don't do a cast
        return type_caster_generic::src_and_type(src, cast_type, instance_type);
    }

    static handle cast(const itype *src, return_value_policy policy, handle parent) {
        auto st = src_and_type(src);
        return type_caster_generic::cast(st.first,
                                         policy,
                                         parent,
                                         st.second,
                                         make_copy_constructor(src),
                                         make_move_constructor(src));
    }

    // Wrap `src` in a new instance that takes ownership via the given holder.
    static handle cast_holder(const itype *src, const void *holder) {
        auto st = src_and_type(src);
        return type_caster_generic::cast(st.first,
                                         return_value_policy::take_ownership,
                                         {},
                                         st.second,
                                         nullptr,
                                         nullptr,
                                         holder);
    }

    template <typename T>
    using cast_op_type = detail::cast_op_type<T>;

    // NOLINTNEXTLINE(google-explicit-constructor)
    operator itype *() { return (type *) value; }
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator itype &() {
        // A null value cannot be dereferenced as a reference.
        if (!value) {
            throw reference_cast_error();
        }
        return *((itype *) value);
    }

protected:
    using Constructor = void *(*) (const void *);

    /* Only enabled when the types are {copy,move}-constructible *and* when the type
       does not have a private operator new implementation. A comma operator is used in the
       decltype argument to apply SFINAE to the public copy/move constructors.*/
    template <typename T, typename = enable_if_t<is_copy_constructible<T>::value>>
    static auto make_copy_constructor(const T *)
        -> decltype(new T(std::declval<const T>()), Constructor{}) {
        return [](const void *arg) -> void * { return new T(*reinterpret_cast<const T *>(arg)); };
    }
    template <typename T, typename = enable_if_t<is_move_constructible<T>::value>>
    static auto make_move_constructor(const T *)
        -> decltype(new T(std::declval<T &&>()), Constructor{}) {
        return [](const void *arg) -> void * {
            return new T(std::move(*const_cast<T *>(reinterpret_cast<const T *>(arg))));
        };
    }

    // Fallbacks when no suitable copy/move constructor exists: report "none available".
    static Constructor make_copy_constructor(...) { return nullptr; }
    static Constructor make_move_constructor(...) { return nullptr; }
};
/// Produce a human-readable description of the C++ type `ti`: the qualified
/// Python name ("module.QualName") when the type is registered with pybind11,
/// otherwise the demangled/cleaned C++ type name.
PYBIND11_NOINLINE std::string type_info_description(const std::type_info &ti) {
    auto *type_data = get_type_info(ti);
    if (type_data == nullptr) {
        return clean_type_id(ti.name());
    }
    handle th((PyObject *) type_data->type);
    return th.attr("__module__").cast<std::string>() + '.'
           + th.attr("__qualname__").cast<std::string>();
}
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/detail/typeid.h | C/C++ Header | /*
pybind11/detail/typeid.h: Compiler-independent access to type identifiers
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include <cstdio>
#include <cstdlib>
#if defined(__GNUG__)
# include <cxxabi.h>
#endif
#include "common.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/// Erase all occurrences of `search` from `string`, in place.
inline void erase_all(std::string &string, const std::string &search) {
    std::size_t pos = string.find(search);
    while (pos != std::string::npos) {
        string.erase(pos, search.length());
        // Resume scanning at the same position: the erase shifted the
        // remainder of the string left onto `pos`.
        pos = string.find(search, pos);
    }
}
/// Clean up a type name (as produced by typeid().name()) in place.
/// On GCC-compatible compilers the name is mangled and is demangled via
/// abi::__cxa_demangle; on other compilers the name is already readable and
/// only "class "/"struct "/"enum " prefixes are erased. In both cases any
/// "pybind11::" namespace qualifiers are stripped afterwards.
PYBIND11_NOINLINE void clean_type_id(std::string &name) {
#if defined(__GNUG__)
    int status = 0;
    // unique_ptr with std::free as deleter: __cxa_demangle returns a
    // malloc()'d buffer that the caller must free.
    std::unique_ptr<char, void (*)(void *)> res{
        abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), std::free};
    if (status == 0) {
        // Demangling succeeded; on failure `name` is left as-is.
        name = res.get();
    }
#else
    detail::erase_all(name, "class ");
    detail::erase_all(name, "struct ");
    detail::erase_all(name, "enum ");
#endif
    detail::erase_all(name, "pybind11::");
}
/// Value-returning convenience overload: clean a typeid name and return the result.
inline std::string clean_type_id(const char *typeid_name) {
    std::string cleaned{typeid_name};
    detail::clean_type_id(cleaned);
    return cleaned;
}
PYBIND11_NAMESPACE_END(detail)
/// Return a string representation of a C++ type
/// (typeid name run through detail::clean_type_id, which demangles where
/// supported and strips "pybind11::" qualifiers).
template <typename T>
static std::string type_id() {
    return detail::clean_type_id(typeid(T).name());
}
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/eigen.h | C/C++ Header | /*
pybind11/eigen.h: Transparent conversion for dense and sparse Eigen matrices
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "eigen/matrix.h"
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/eigen/common.h | C/C++ Header | // Copyright (c) 2023 The pybind Community.
#pragma once
// Common message for `static_assert()`s, which are useful to easily
// preempt much less obvious errors.
#define PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED \
"Pointer types (in particular `PyObject *`) are not supported as scalar types for Eigen " \
"types."
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/eigen/matrix.h | C/C++ Header | /*
pybind11/eigen/matrix.h: Transparent conversion for dense and sparse Eigen matrices
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "../numpy.h"
#include "common.h"
/* HINT: To suppress warnings originating from the Eigen headers, use -isystem.
See also:
https://stackoverflow.com/questions/2579576/i-dir-vs-isystem-dir
https://stackoverflow.com/questions/1741816/isystem-for-ms-visual-studio-c-compiler
*/
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(5054) // https://github.com/pybind/pybind11/pull/3741
// C5054: operator '&': deprecated between enumerations of different types
#if defined(__MINGW32__)
PYBIND11_WARNING_DISABLE_GCC("-Wmaybe-uninitialized")
#endif
#include <Eigen/Core>
#include <Eigen/SparseCore>
PYBIND11_WARNING_POP
// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit
// move constructors that break things. We could detect this an explicitly copy, but an extra copy
// of matrices seems highly undesirable.
static_assert(EIGEN_VERSION_AT_LEAST(3, 2, 7),
"Eigen matrix support in pybind11 requires Eigen >= 3.2.7");
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:
using EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
template <typename MatrixType>
using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;
template <typename MatrixType>
using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;
PYBIND11_NAMESPACE_BEGIN(detail)
#if EIGEN_VERSION_AT_LEAST(3, 3, 0)
using EigenIndex = Eigen::Index;
template <typename Scalar, int Flags, typename StorageIndex>
using EigenMapSparseMatrix = Eigen::Map<Eigen::SparseMatrix<Scalar, Flags, StorageIndex>>;
#else
using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE;
template <typename Scalar, int Flags, typename StorageIndex>
using EigenMapSparseMatrix = Eigen::MappedSparseMatrix<Scalar, Flags, StorageIndex>;
#endif
// Matches Eigen::Map, Eigen::Ref, blocks, etc:
template <typename T>
using is_eigen_dense_map = all_of<is_template_base_of<Eigen::DenseBase, T>,
                                  std::is_base_of<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>>;

// Maps/Refs that additionally allow writing through (WriteAccessors MapBase).
template <typename T>
using is_eigen_mutable_map = std::is_base_of<Eigen::MapBase<T, Eigen::WriteAccessors>, T>;

// Owning dense types (PlainObjectBase descendants, e.g. Matrix/Array) that are not maps.
template <typename T>
using is_eigen_dense_plain
    = all_of<negation<is_eigen_dense_map<T>>, is_template_base_of<Eigen::PlainObjectBase, T>>;

template <typename T>
using is_eigen_sparse = is_template_base_of<Eigen::SparseMatrixBase, T>;

// Test for objects inheriting from EigenBase<Derived> that aren't captured by the above. This
// basically covers anything that can be assigned to a dense matrix but that don't have a typical
// matrix data layout that can be copied from their .data(). For example, DiagonalMatrix and
// SelfAdjointView fall into this category.
template <typename T>
using is_eigen_other
    = all_of<is_template_base_of<Eigen::EigenBase, T>,
             negation<any_of<is_eigen_dense_map<T>, is_eigen_dense_plain<T>, is_eigen_sparse<T>>>>;
// Captures numpy/eigen conformability status (returned by EigenProps::conformable()):
template <bool EigenRowMajor>
struct EigenConformable {
    bool conformable = false;
    EigenIndex rows = 0, cols = 0;
    EigenDStride stride{0, 0};    // Only valid if negativestrides is false!
    bool negativestrides = false; // If true, do not use stride!

    // NOLINTNEXTLINE(google-explicit-constructor)
    EigenConformable(bool fits = false) : conformable{fits} {}
    // Matrix type:
    EigenConformable(EigenIndex r, EigenIndex c, EigenIndex rstride, EigenIndex cstride)
        : conformable{true}, rows{r}, cols{c},
          // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity.
          // http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747
          stride{EigenRowMajor ? (rstride > 0 ? rstride : 0)
                               : (cstride > 0 ? cstride : 0) /* outer stride */,
                 EigenRowMajor ? (cstride > 0 ? cstride : 0)
                               : (rstride > 0 ? rstride : 0) /* inner stride */},
          negativestrides{rstride < 0 || cstride < 0} {}
    // Vector type: delegates to the matrix constructor, deriving the missing
    // stride from the single-dimension stride.
    EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride)
        : EigenConformable(r, c, r == 1 ? c * stride : stride, c == 1 ? r : r * stride) {}

    // Check whether the captured numpy strides are usable for an Eigen type
    // with the compile-time stride requirements described by `props`.
    template <typename props>
    bool stride_compatible() const {
        // To have compatible strides, we need (on both dimensions) one of fully dynamic strides,
        // matching strides, or a dimension size of 1 (in which case the stride value is
        // irrelevant). Alternatively, if any dimension size is 0, the strides are not relevant
        // (and numpy ≥ 1.23 sets the strides to 0 in that case, so we need to check explicitly).
        if (negativestrides) {
            return false;
        }
        if (rows == 0 || cols == 0) {
            return true;
        }
        return (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner()
                || (EigenRowMajor ? cols : rows) == 1)
               && (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer()
                   || (EigenRowMajor ? rows : cols) == 1);
    }

    // NOLINTNEXTLINE(google-explicit-constructor)
    operator bool() const { return conformable; }
};
// Extracts the StrideType parameter from Eigen::Map / Eigen::Ref; for any other
// Eigen type there is no explicit stride parameter, so the type itself is returned
// (EigenProps only interrogates its *StrideAtCompileTime members either way).
template <typename Type>
struct eigen_extract_stride {
using type = Type;
};
template <typename PlainObjectType, int MapOptions, typename StrideType>
struct eigen_extract_stride<Eigen::Map<PlainObjectType, MapOptions, StrideType>> {
using type = StrideType;
};
template <typename PlainObjectType, int Options, typename StrideType>
struct eigen_extract_stride<Eigen::Ref<PlainObjectType, Options, StrideType>> {
using type = StrideType;
};
// Helper struct for extracting information from an Eigen type: compile-time
// dimensions, storage order, stride requirements, and the human-readable type
// descriptor used in function signatures / error messages.
template <typename Type_>
struct EigenProps {
using Type = Type_;
using Scalar = typename Type::Scalar;
using StrideType = typename eigen_extract_stride<Type>::type;
static constexpr EigenIndex rows = Type::RowsAtCompileTime, cols = Type::ColsAtCompileTime,
size = Type::SizeAtCompileTime;
static constexpr bool row_major = Type::IsRowMajor,
vector
= Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1
fixed_rows = rows != Eigen::Dynamic, fixed_cols = cols != Eigen::Dynamic,
fixed = size != Eigen::Dynamic, // Fully-fixed size
dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size
// Maps a compile-time stride of 0 (Eigen's "use the default") to `ifzero`.
template <EigenIndex i, EigenIndex ifzero>
using if_zero = std::integral_constant<EigenIndex, i == 0 ? ifzero : i>;
static constexpr EigenIndex inner_stride
= if_zero<StrideType::InnerStrideAtCompileTime, 1>::value,
outer_stride = if_zero < StrideType::OuterStrideAtCompileTime,
vector ? size
: row_major ? cols
: rows > ::value;
static constexpr bool dynamic_stride
= inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic;
// A fixed inner stride of 1 forces contiguity in the storage-order direction:
static constexpr bool requires_row_major
= !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1;
static constexpr bool requires_col_major
= !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1;
// Takes an input array and determines whether we can make it fit into the Eigen type. If
// the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector
// (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type).
static EigenConformable<row_major> conformable(const array &a) {
const auto dims = a.ndim();
if (dims < 1 || dims > 2) {
return false;
}
if (dims == 2) { // Matrix type: require exact match (or dynamic)
EigenIndex np_rows = a.shape(0), np_cols = a.shape(1),
np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols)) {
return false;
}
return {np_rows, np_cols, np_rstride, np_cstride};
}
// Otherwise we're storing an n-vector. Only one of the strides will be used, but
// whichever is used, we want the (single) numpy stride value.
const EigenIndex n = a.shape(0),
stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));
if (vector) { // Eigen type is a compile-time vector
if (fixed && size != n) {
return false; // Vector size mismatch
}
return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride};
}
if (fixed) {
// The type has a fixed size, but is not a vector: abort
return false;
}
if (fixed_cols) {
// Since this isn't a vector, cols must be != 1. We allow this only if it exactly
// equals the number of elements (rows is Dynamic, and so 1 row is allowed).
if (cols != n) {
return false;
}
return {1, n, stride};
} // Otherwise it's either fully dynamic, or column dynamic; both become a column vector
if (fixed_rows && rows != n) {
return false;
}
return {n, 1, stride};
}
// Which constraint flags to show in the signature descriptor below:
static constexpr bool show_writeable
= is_eigen_dense_map<Type>::value && is_eigen_mutable_map<Type>::value;
static constexpr bool show_order = is_eigen_dense_map<Type>::value;
static constexpr bool show_c_contiguous = show_order && requires_row_major;
static constexpr bool show_f_contiguous
= !show_c_contiguous && show_order && requires_col_major;
static constexpr auto descriptor
= const_name("numpy.ndarray[") + npy_format_descriptor<Scalar>::name + const_name("[")
+ const_name<fixed_rows>(const_name<(size_t) rows>(), const_name("m")) + const_name(", ")
+ const_name<fixed_cols>(const_name<(size_t) cols>(), const_name("n")) + const_name("]")
+
// For a reference type (e.g. Ref<MatrixXd>) we have other constraints that might need to
// be satisfied: writeable=True (for a mutable reference), and, depending on the map's
// stride options, possibly f_contiguous or c_contiguous. We include them in the
// descriptor output to provide some hint as to why a TypeError is occurring (otherwise
// it can be confusing to see that a function accepts a 'numpy.ndarray[float64[3,2]]' and
// an error message that you *gave* a numpy.ndarray of the right type and dimensions.
const_name<show_writeable>(", flags.writeable", "")
+ const_name<show_c_contiguous>(", flags.c_contiguous", "")
+ const_name<show_f_contiguous>(", flags.f_contiguous", "") + const_name("]");
};
// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data,
// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array.
template <typename props>
handle
eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {
constexpr ssize_t elem_size = sizeof(typename props::Scalar);
array a;
if (props::vector) {
// 1-D output: a single dimension and a single (inner) stride.
a = array({src.size()}, {elem_size * src.innerStride()}, src.data(), base);
} else {
// 2-D output: numpy strides are expressed in bytes, hence the elem_size scaling.
a = array({src.rows(), src.cols()},
{elem_size * src.rowStride(), elem_size * src.colStride()},
src.data(),
base);
}
if (!writeable) {
array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
return a.release();
}
// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that
// reference the Eigen object's data with `base` as the python-registered base class (if omitted,
// the base will be set to None, and lifetime management is up to the caller). The numpy array is
// non-writeable if the given type is const.
template <typename props, typename Type>
handle eigen_ref_array(Type &src, handle parent = none()) {
// none here is to get past array's should-we-copy detection, which currently always
// copies when there is no base. Setting the base to None should be harmless.
return eigen_array_cast<props>(src, parent, !std::is_const<Type>::value);
}
// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a
// numpy array that references the encapsulated data with a python-side reference to the capsule to
// tie its destruction to that of any dependent python objects. Const-ness is determined by
// whether or not the Type of the pointer given is const.
template <typename props, typename Type, typename = enable_if_t<is_eigen_dense_plain<Type>::value>>
handle eigen_encapsulate(Type *src) {
// The capsule takes ownership: when the last Python reference dies, `src` is deleted.
capsule base(src, [](void *o) { delete static_cast<Type *>(o); });
return eigen_ref_array<props>(*src, base);
}
// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense
// types.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
using Scalar = typename Type::Scalar;
static_assert(!std::is_pointer<Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
using props = EigenProps<Type>;
// Python -> C++: copy the numpy data into a freshly allocated `value`.
bool load(handle src, bool convert) {
// If we're in no-convert mode, only load if given an array of the correct type
if (!convert && !isinstance<array_t<Scalar>>(src)) {
return false;
}
// Coerce into an array, but don't do type conversion yet; the copy below handles it.
auto buf = array::ensure(src);
if (!buf) {
return false;
}
auto dims = buf.ndim();
if (dims < 1 || dims > 2) {
return false;
}
auto fits = props::conformable(buf);
if (!fits) {
return false;
}
// Allocate the new type, then build a numpy reference into it
value = Type(fits.rows, fits.cols);
auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
// Match dimensionality so PyArray_CopyInto sees compatible shapes.
if (dims == 1) {
ref = ref.squeeze();
} else if (ref.ndim() == 1) {
buf = buf.squeeze();
}
int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());
if (result < 0) { // Copy failed!
PyErr_Clear();
return false;
}
return true;
}
private:
// Cast implementation: maps each return_value_policy to the matching ownership
// strategy (capsule-encapsulated, copied, or borrowed numpy view).
template <typename CType>
static handle cast_impl(CType *src, return_value_policy policy, handle parent) {
switch (policy) {
case return_value_policy::take_ownership:
case return_value_policy::automatic:
return eigen_encapsulate<props>(src);
case return_value_policy::move:
return eigen_encapsulate<props>(new CType(std::move(*src)));
case return_value_policy::copy:
return eigen_array_cast<props>(*src);
case return_value_policy::reference:
case return_value_policy::automatic_reference:
return eigen_ref_array<props>(*src);
case return_value_policy::reference_internal:
return eigen_ref_array<props>(*src, parent);
default:
throw cast_error("unhandled return_value_policy: should not happen!");
};
}
public:
// Normal returned non-reference, non-const value:
static handle cast(Type &&src, return_value_policy /* policy */, handle parent) {
return cast_impl(&src, return_value_policy::move, parent);
}
// If you return a non-reference const, we mark the numpy array readonly:
static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) {
return cast_impl(&src, return_value_policy::move, parent);
}
// lvalue reference return; default (automatic) becomes copy
static handle cast(Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast_impl(&src, policy, parent);
}
// const lvalue reference return; default (automatic) becomes copy
static handle cast(const Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast(&src, policy, parent);
}
// non-const pointer return
static handle cast(Type *src, return_value_policy policy, handle parent) {
return cast_impl(src, policy, parent);
}
// const pointer return
static handle cast(const Type *src, return_value_policy policy, handle parent) {
return cast_impl(src, policy, parent);
}
static constexpr auto name = props::descriptor;
// Conversion operators used by the generic caster machinery to hand out the
// loaded value (by pointer, lvalue, or rvalue for move-out).
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type *() { return &value; }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &() { return value; }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &&() && { return std::move(value); }
template <typename T>
using cast_op_type = movable_cast_op_type<T>;
private:
Type value;
};
// Base class for casting reference/map/block/etc. objects back to python.
template <typename MapType>
struct eigen_map_caster {
static_assert(!std::is_pointer<typename MapType::Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
private:
using props = EigenProps<MapType>;
public:
// Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has
// to stay around), but we'll allow it under the assumption that you know what you're doing
// (and have an appropriate keep_alive in place). We return a numpy array pointing directly at
// the ref's data (The numpy array ends up read-only if the ref was to a const matrix type.)
// Note that this means you need to ensure you don't destroy the object in some other way (e.g.
// with an appropriate keep_alive, or with a reference to a statically allocated matrix).
static handle cast(const MapType &src, return_value_policy policy, handle parent) {
switch (policy) {
case return_value_policy::copy:
return eigen_array_cast<props>(src);
case return_value_policy::reference_internal:
return eigen_array_cast<props>(src, parent, is_eigen_mutable_map<MapType>::value);
case return_value_policy::reference:
case return_value_policy::automatic:
case return_value_policy::automatic_reference:
return eigen_array_cast<props>(src, none(), is_eigen_mutable_map<MapType>::value);
default:
// move, take_ownership don't make any sense for a ref/map:
pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type");
}
}
static constexpr auto name = props::descriptor;
// Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return
// types but not bound arguments). We still provide them (with an explicitly delete) so that
// you end up here if you try anyway.
bool load(handle, bool) = delete;
operator MapType() = delete;
template <typename>
using cast_op_type = MapType;
};
// We can return any map-like object (but can only load Refs, specialized next):
// inherits the return-only behavior (deleted load) from eigen_map_caster.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_dense_map<Type>::value>> : eigen_map_caster<Type> {};
// Loader for Ref<...> arguments. See the documentation for info on how to make this work without
// copying (it requires some extra effort in many cases).
template <typename PlainObjectType, typename StrideType>
struct type_caster<
Eigen::Ref<PlainObjectType, 0, StrideType>,
enable_if_t<is_eigen_dense_map<Eigen::Ref<PlainObjectType, 0, StrideType>>::value>>
: public eigen_map_caster<Eigen::Ref<PlainObjectType, 0, StrideType>> {
private:
using Type = Eigen::Ref<PlainObjectType, 0, StrideType>;
using props = EigenProps<Type>;
using Scalar = typename props::Scalar;
static_assert(!std::is_pointer<Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
using MapType = Eigen::Map<PlainObjectType, 0, StrideType>;
// Array type used to hold/borrow the numpy data; the contiguity flag is only
// requested when the Ref's compile-time strides force a particular order.
using Array
= array_t<Scalar,
array::forcecast
| ((props::row_major ? props::inner_stride : props::outer_stride) == 1
? array::c_style
: (props::row_major ? props::outer_stride : props::inner_stride) == 1
? array::f_style
: 0)>;
static constexpr bool need_writeable = is_eigen_mutable_map<Type>::value;
// Delay construction (these have no default constructor)
std::unique_ptr<MapType> map;
std::unique_ptr<Type> ref;
// Our array. When possible, this is just a numpy array pointing to the source data, but
// sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an
// incompatible layout, or is an array of a type that needs to be converted). Using a numpy
// temporary (rather than an Eigen temporary) saves an extra copy when we need both type
// conversion and storage order conversion. (Note that we refuse to use this temporary copy
// when loading an argument for a Ref<M> with M non-const, i.e. a read-write reference).
Array copy_or_ref;
public:
bool load(handle src, bool convert) {
// First check whether what we have is already an array of the right type. If not, we
// can't avoid a copy (because the copy is also going to do type conversion).
bool need_copy = !isinstance<Array>(src);
EigenConformable<props::row_major> fits;
if (!need_copy) {
// We don't need a converting copy, but we also need to check whether the strides are
// compatible with the Ref's stride requirements
auto aref = reinterpret_borrow<Array>(src);
if (aref && (!need_writeable || aref.writeable())) {
fits = props::conformable(aref);
if (!fits) {
return false; // Incompatible dimensions
}
if (!fits.template stride_compatible<props>()) {
need_copy = true;
} else {
copy_or_ref = std::move(aref);
}
} else {
need_copy = true;
}
}
if (need_copy) {
// We need to copy: If we need a mutable reference, or we're not supposed to convert
// (either because we're in the no-convert overload pass, or because we're explicitly
// instructed not to copy (via `py::arg().noconvert()`) we have to fail loading.
if (!convert || need_writeable) {
return false;
}
Array copy = Array::ensure(src);
if (!copy) {
return false;
}
fits = props::conformable(copy);
if (!fits || !fits.template stride_compatible<props>()) {
return false;
}
copy_or_ref = std::move(copy);
// Keep the temporary copy alive for the duration of the bound-function call.
loader_life_support::add_patient(copy_or_ref);
}
// Build the Map over the numpy data first, then the Ref over the Map.
ref.reset();
map.reset(new MapType(data(copy_or_ref),
fits.rows,
fits.cols,
make_stride(fits.stride.outer(), fits.stride.inner())));
ref.reset(new Type(*map));
return true;
}
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type *() { return ref.get(); }
// NOLINTNEXTLINE(google-explicit-constructor)
operator Type &() { return *ref; }
template <typename _T>
using cast_op_type = pybind11::detail::cast_op_type<_T>;
private:
// Mutable vs const data access, selected by whether the Ref is writeable.
template <typename T = Type, enable_if_t<is_eigen_mutable_map<T>::value, int> = 0>
Scalar *data(Array &a) {
return a.mutable_data();
}
template <typename T = Type, enable_if_t<!is_eigen_mutable_map<T>::value, int> = 0>
const Scalar *data(Array &a) {
return a.data();
}
// Attempt to figure out a constructor of `Stride` that will work.
// If both strides are fixed, use a default constructor:
template <typename S>
using stride_ctor_default = bool_constant<S::InnerStrideAtCompileTime != Eigen::Dynamic
&& S::OuterStrideAtCompileTime != Eigen::Dynamic
&& std::is_default_constructible<S>::value>;
// Otherwise, if there is a two-index constructor, assume it is (outer,inner) like
// Eigen::Stride, and use it:
template <typename S>
using stride_ctor_dual
= bool_constant<!stride_ctor_default<S>::value
&& std::is_constructible<S, EigenIndex, EigenIndex>::value>;
// Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use
// it (passing whichever stride is dynamic).
template <typename S>
using stride_ctor_outer
= bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
&& S::OuterStrideAtCompileTime == Eigen::Dynamic
&& S::InnerStrideAtCompileTime != Eigen::Dynamic
&& std::is_constructible<S, EigenIndex>::value>;
template <typename S>
using stride_ctor_inner
= bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
&& S::InnerStrideAtCompileTime == Eigen::Dynamic
&& S::OuterStrideAtCompileTime != Eigen::Dynamic
&& std::is_constructible<S, EigenIndex>::value>;
template <typename S = StrideType, enable_if_t<stride_ctor_default<S>::value, int> = 0>
static S make_stride(EigenIndex, EigenIndex) {
return S();
}
template <typename S = StrideType, enable_if_t<stride_ctor_dual<S>::value, int> = 0>
static S make_stride(EigenIndex outer, EigenIndex inner) {
return S(outer, inner);
}
template <typename S = StrideType, enable_if_t<stride_ctor_outer<S>::value, int> = 0>
static S make_stride(EigenIndex outer, EigenIndex) {
return S(outer);
}
template <typename S = StrideType, enable_if_t<stride_ctor_inner<S>::value, int> = 0>
static S make_stride(EigenIndex, EigenIndex inner) {
return S(inner);
}
};
// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not
// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout).
// load() is not supported, but we can cast them into the python domain by first copying to a
// regular Eigen::Matrix, then casting that.
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_other<Type>::value>> {
static_assert(!std::is_pointer<typename Type::Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
protected:
using Matrix
= Eigen::Matrix<typename Type::Scalar, Type::RowsAtCompileTime, Type::ColsAtCompileTime>;
using props = EigenProps<Matrix>;
public:
static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
// Copy into a plain Matrix; the capsule built by eigen_encapsulate owns the copy.
handle h = eigen_encapsulate<props>(new Matrix(src));
return h;
}
static handle cast(const Type *src, return_value_policy policy, handle parent) {
return cast(*src, policy, parent);
}
static constexpr auto name = props::descriptor;
// Explicitly delete these: support python -> C++ conversion on these (i.e. these can be return
// types but not bound arguments). We still provide them (with an explicitly delete) so that
// you end up here if you try anyway.
bool load(handle, bool) = delete;
operator Type() = delete;
template <typename>
using cast_op_type = Type;
};
// Type caster for Eigen sparse matrices, converted to/from scipy.sparse
// csr_matrix (row-major) or csc_matrix (column-major).
template <typename Type>
struct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {
using Scalar = typename Type::Scalar;
static_assert(!std::is_pointer<Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
using StorageIndex = remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())>;
using Index = typename Type::Index;
static constexpr bool rowMajor = Type::IsRowMajor;
// Python -> C++: accept any object scipy can coerce into the matching sparse format.
bool load(handle src, bool) {
if (!src) {
return false;
}
auto obj = reinterpret_borrow<object>(src);
object sparse_module = module_::import("scipy.sparse");
object matrix_type = sparse_module.attr(rowMajor ? "csr_matrix" : "csc_matrix");
if (!type::handle_of(obj).is(matrix_type)) {
// Not already the right format; let scipy convert (may raise -> load fails).
try {
obj = matrix_type(obj);
} catch (const error_already_set &) {
return false;
}
}
// Pull the CSR/CSC component arrays out of the scipy object.
auto values = array_t<Scalar>((object) obj.attr("data"));
auto innerIndices = array_t<StorageIndex>((object) obj.attr("indices"));
auto outerIndices = array_t<StorageIndex>((object) obj.attr("indptr"));
auto shape = pybind11::tuple((pybind11::object) obj.attr("shape"));
auto nnz = obj.attr("nnz").cast<Index>();
if (!values || !innerIndices || !outerIndices) {
return false;
}
// Map the scipy buffers directly; assigning to `value` copies into the Eigen matrix.
value = EigenMapSparseMatrix<Scalar,
Type::Flags &(Eigen::RowMajor | Eigen::ColMajor),
StorageIndex>(shape[0].cast<Index>(),
shape[1].cast<Index>(),
std::move(nnz),
outerIndices.mutable_data(),
innerIndices.mutable_data(),
values.mutable_data());
return true;
}
// C++ -> Python: build a scipy sparse matrix from copies of the CSR/CSC arrays.
static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
// makeCompressed() is required so the three component arrays fully describe the matrix.
const_cast<Type &>(src).makeCompressed();
object matrix_type
= module_::import("scipy.sparse").attr(rowMajor ? "csr_matrix" : "csc_matrix");
array data(src.nonZeros(), src.valuePtr());
array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr());
array innerIndices(src.nonZeros(), src.innerIndexPtr());
return matrix_type(pybind11::make_tuple(
std::move(data), std::move(innerIndices), std::move(outerIndices)),
pybind11::make_tuple(src.rows(), src.cols()))
.release();
}
PYBIND11_TYPE_CASTER(Type,
const_name<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[",
"scipy.sparse.csc_matrix[")
+ npy_format_descriptor<Scalar>::name + const_name("]"));
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/eigen/tensor.h | C/C++ Header | /*
pybind11/eigen/tensor.h: Transparent conversion for Eigen tensors
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "../numpy.h"
#include "common.h"
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
static_assert(__GNUC__ > 5, "Eigen Tensor support in pybind11 requires GCC > 5.0");
#endif
// Disable warnings for Eigen
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(4554)
PYBIND11_WARNING_DISABLE_MSVC(4127)
#if defined(__MINGW32__)
PYBIND11_WARNING_DISABLE_GCC("-Wmaybe-uninitialized")
#endif
#include <unsupported/Eigen/CXX11/Tensor>
PYBIND11_WARNING_POP
static_assert(EIGEN_VERSION_AT_LEAST(3, 3, 0),
"Eigen Tensor support in pybind11 requires Eigen >= 3.3.0");
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
PYBIND11_NAMESPACE_BEGIN(detail)
// Returns true when `data` satisfies Eigen's default alignment requirement,
// i.e. the pointer value is an exact multiple of EIGEN_DEFAULT_ALIGN_BYTES.
inline bool is_tensor_aligned(const void *data) {
    const auto address = reinterpret_cast<std::size_t>(data);
    return address % EIGEN_DEFAULT_ALIGN_BYTES == 0;
}
// Maps an Eigen tensor's storage order onto the matching numpy ordering flag:
// row-major tensors pair with C-ordered arrays, column-major with Fortran order.
template <typename T>
constexpr int compute_array_flag_from_tensor() {
    static_assert((static_cast<int>(T::Layout) == static_cast<int>(Eigen::RowMajor))
                      || (static_cast<int>(T::Layout) == static_cast<int>(Eigen::ColMajor)),
                  "Layout must be row or column major");
    // The static_assert guarantees exactly one of the two layouts here.
    return (static_cast<int>(T::Layout) == static_cast<int>(Eigen::ColMajor)) ? array::f_style
                                                                              : array::c_style;
}
// Per-tensor-type traits used by the tensor type_caster: shape access/validation,
// the signature descriptor for the dimensions, and allocation/deallocation.
// The primary template is empty; only the specializations below are usable.
template <typename T>
struct eigen_tensor_helper {};
// Specialization for dynamically-sized Eigen::Tensor.
template <typename Scalar_, int NumIndices_, int Options_, typename IndexType>
struct eigen_tensor_helper<Eigen::Tensor<Scalar_, NumIndices_, Options_, IndexType>> {
using Type = Eigen::Tensor<Scalar_, NumIndices_, Options_, IndexType>;
using ValidType = void;
static Eigen::DSizes<typename Type::Index, Type::NumIndices> get_shape(const Type &f) {
return f.dimensions();
}
// Dynamic tensors accept any shape (only the rank is fixed).
static constexpr bool
is_correct_shape(const Eigen::DSizes<typename Type::Index, Type::NumIndices> & /*shape*/) {
return true;
}
template <typename T>
struct helper {};
// Renders NumIndices "?" placeholders (unknown extents) for the descriptor.
template <size_t... Is>
struct helper<index_sequence<Is...>> {
static constexpr auto value = concat(const_name(((void) Is, "?"))...);
};
static constexpr auto dimensions_descriptor
= helper<decltype(make_index_sequence<Type::NumIndices>())>::value;
template <typename... Args>
static Type *alloc(Args &&...args) {
return new Type(std::forward<Args>(args)...);
}
static void free(Type *tensor) { delete tensor; }
};
// Specialization for Eigen::TensorFixedSize: the shape is known at compile time,
// and instances must be allocated with Eigen's aligned allocator.
template <typename Scalar_, typename std::ptrdiff_t... Indices, int Options_, typename IndexType>
struct eigen_tensor_helper<
Eigen::TensorFixedSize<Scalar_, Eigen::Sizes<Indices...>, Options_, IndexType>> {
using Type = Eigen::TensorFixedSize<Scalar_, Eigen::Sizes<Indices...>, Options_, IndexType>;
using ValidType = void;
static constexpr Eigen::DSizes<typename Type::Index, Type::NumIndices>
get_shape(const Type & /*f*/) {
return get_shape();
}
static constexpr Eigen::DSizes<typename Type::Index, Type::NumIndices> get_shape() {
return Eigen::DSizes<typename Type::Index, Type::NumIndices>(Indices...);
}
// Fixed-size tensors only accept an exactly matching shape.
static bool
is_correct_shape(const Eigen::DSizes<typename Type::Index, Type::NumIndices> &shape) {
return get_shape() == shape;
}
static constexpr auto dimensions_descriptor = concat(const_name<Indices>()...);
// Aligned allocation + placement-new: fixed-size tensors embed their data and may
// require stronger-than-new alignment.
template <typename... Args>
static Type *alloc(Args &&...args) {
Eigen::aligned_allocator<Type> allocator;
return ::new (allocator.allocate(1)) Type(std::forward<Args>(args)...);
}
// Mirror of alloc(): manual destructor call, then aligned deallocation.
static void free(Type *tensor) {
Eigen::aligned_allocator<Type> allocator;
tensor->~Type();
allocator.deallocate(tensor, 1);
}
};
// Builds the compile-time signature string for a tensor type, e.g.
// "numpy.ndarray[float64[?, ?, ?], flags.writeable, flags.c_contiguous]".
// ShowDetails controls whether the flags suffix is included.
template <typename Type, bool ShowDetails, bool NeedsWriteable = false>
struct get_tensor_descriptor {
static constexpr auto details
= const_name<NeedsWriteable>(", flags.writeable", "")
+ const_name<static_cast<int>(Type::Layout) == static_cast<int>(Eigen::RowMajor)>(
", flags.c_contiguous", ", flags.f_contiguous");
static constexpr auto value
= const_name("numpy.ndarray[") + npy_format_descriptor<typename Type::Scalar>::name
+ const_name("[") + eigen_tensor_helper<remove_cv_t<Type>>::dimensions_descriptor
+ const_name("]") + const_name<ShowDetails>(details, const_name("")) + const_name("]");
};
// When EIGEN_AVOID_STL_ARRAY is defined, Eigen::DSizes<T, 0> does not have the begin() member
// function. Falling back to a simple loop works around this issue.
//
// We need to disable the type-limits warning for the inner loop when size = 0.
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_GCC("-Wtype-limits")
// Copies an Eigen::DSizes shape into a std::vector, element by element.
// A manual index loop (rather than begin()/end()) keeps this working when
// EIGEN_AVOID_STL_ARRAY strips the iterator interface from DSizes<T, 0>.
template <typename T, int size>
std::vector<T> convert_dsizes_to_vector(const Eigen::DSizes<T, size> &arr) {
    std::vector<T> dims(size);
    for (size_t axis = 0; axis < static_cast<size_t>(size); axis++) {
        dims[axis] = arr[axis];
    }
    return dims;
}
// Reads the first `size` numpy dimensions of `arr` into an Eigen::DSizes shape.
// The caller has already checked that arr.ndim() matches `size`.
template <typename T, int size>
Eigen::DSizes<T, size> get_shape_for_array(const array &arr) {
    Eigen::DSizes<T, size> dims;
    const T *np_shape = arr.shape();
    for (size_t axis = 0; axis < static_cast<size_t>(size); axis++) {
        dims[axis] = np_shape[axis];
    }
    return dims;
}
PYBIND11_WARNING_POP
// Type caster for owning Eigen tensor types (Tensor / TensorFixedSize).
// load() copies numpy data into `value`; cast() maps each return_value_policy
// to the matching ownership strategy via cast_impl().
template <typename Type>
struct type_caster<Type, typename eigen_tensor_helper<Type>::ValidType> {
static_assert(!std::is_pointer<typename Type::Scalar>::value,
PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
using Helper = eigen_tensor_helper<Type>;
static constexpr auto temp_name = get_tensor_descriptor<Type, false>::value;
PYBIND11_TYPE_CASTER(Type, temp_name);
bool load(handle src, bool convert) {
if (!convert) {
// No-convert pass: only accept an existing array of the exact dtype.
if (!isinstance<array>(src)) {
return false;
}
array temp = array::ensure(src);
if (!temp) {
return false;
}
if (!temp.dtype().is(dtype::of<typename Type::Scalar>())) {
return false;
}
}
// forcecast/ordering conversion happens here if needed.
array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()> arr(
reinterpret_borrow<object>(src));
if (arr.ndim() != Type::NumIndices) {
return false;
}
auto shape = get_shape_for_array<typename Type::Index, Type::NumIndices>(arr);
if (!Helper::is_correct_shape(shape)) {
return false;
}
#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
auto data_pointer = arr.data();
#else
// Handle Eigen bug
auto data_pointer = const_cast<typename Type::Scalar *>(arr.data());
#endif
// Assigning from a TensorMap copies the numpy data into `value`; the aligned
// map variant lets Eigen use aligned loads when the buffer permits it.
if (is_tensor_aligned(arr.data())) {
value = Eigen::TensorMap<const Type, Eigen::Aligned>(data_pointer, shape);
} else {
value = Eigen::TensorMap<const Type>(data_pointer, shape);
}
return true;
}
static handle cast(Type &&src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::reference
|| policy == return_value_policy::reference_internal) {
pybind11_fail("Cannot use a reference return value policy for an rvalue");
}
return cast_impl(&src, return_value_policy::move, parent);
}
static handle cast(const Type &&src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::reference
|| policy == return_value_policy::reference_internal) {
pybind11_fail("Cannot use a reference return value policy for an rvalue");
}
return cast_impl(&src, return_value_policy::move, parent);
}
// lvalue reference return; default (automatic) becomes copy
static handle cast(Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast_impl(&src, policy, parent);
}
static handle cast(const Type &src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic
|| policy == return_value_policy::automatic_reference) {
policy = return_value_policy::copy;
}
return cast(&src, policy, parent);
}
// pointer return; default (automatic) takes ownership of the pointee
static handle cast(Type *src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic) {
policy = return_value_policy::take_ownership;
} else if (policy == return_value_policy::automatic_reference) {
policy = return_value_policy::reference;
}
return cast_impl(src, policy, parent);
}
static handle cast(const Type *src, return_value_policy policy, handle parent) {
if (policy == return_value_policy::automatic) {
policy = return_value_policy::take_ownership;
} else if (policy == return_value_policy::automatic_reference) {
policy = return_value_policy::reference;
}
return cast_impl(src, policy, parent);
}
// Resolves the final policy into (numpy base object, writeable flag) and builds
// the array over the tensor's data.
template <typename C>
static handle cast_impl(C *src, return_value_policy policy, handle parent) {
object parent_object;
bool writeable = false;
switch (policy) {
case return_value_policy::move:
if (std::is_const<C>::value) {
pybind11_fail("Cannot move from a constant reference");
}
src = Helper::alloc(std::move(*src));
// The capsule owns the moved-to tensor and frees it when the array dies.
parent_object
= capsule(src, [](void *ptr) { Helper::free(reinterpret_cast<Type *>(ptr)); });
writeable = true;
break;
case return_value_policy::take_ownership:
if (std::is_const<C>::value) {
// This cast is ugly, and might be UB in some cases, but we don't have an
// alternative here as we must free that memory
Helper::free(const_cast<Type *>(src));
pybind11_fail("Cannot take ownership of a const reference");
}
parent_object
= capsule(src, [](void *ptr) { Helper::free(reinterpret_cast<Type *>(ptr)); });
writeable = true;
break;
case return_value_policy::copy:
writeable = true;
break;
case return_value_policy::reference:
parent_object = none();
writeable = !std::is_const<C>::value;
break;
case return_value_policy::reference_internal:
// Default should do the right thing
if (!parent) {
pybind11_fail("Cannot use reference internal when there is no parent");
}
parent_object = reinterpret_borrow<object>(parent);
writeable = !std::is_const<C>::value;
break;
default:
pybind11_fail("pybind11 bug in eigen.h, please file a bug report");
}
auto result = array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()>(
convert_dsizes_to_vector(Helper::get_shape(*src)), src->data(), parent_object);
if (!writeable) {
array_proxy(result.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
return result.release();
}
};
// Fetch the array's data pointer when the target map does NOT need write
// access. Selected via SFINAE on `needs_writeable`.
template <typename StoragePointerType,
          bool needs_writeable,
          enable_if_t<!needs_writeable, bool> = true>
StoragePointerType get_array_data_for_type(array &arr) {
#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
    return reinterpret_cast<StoragePointerType>(arr.data());
#else
    // Handle Eigen bug
    return reinterpret_cast<StoragePointerType>(const_cast<void *>(arr.data()));
#endif
}
// Fetch the array's data pointer when the target map needs write access;
// `mutable_data()` enforces that the numpy array is actually writeable.
template <typename StoragePointerType,
          bool needs_writeable,
          enable_if_t<needs_writeable, bool> = true>
StoragePointerType get_array_data_for_type(array &arr) {
    return reinterpret_cast<StoragePointerType>(arr.mutable_data());
}
// Trait exposing the map's storage-pointer typedef as `SPT`. Two partial
// specializations cover the two member-type names used here
// (`StoragePointerType` vs `PointerArgType`) — presumably differing across
// Eigen versions; SFINAE via void_t selects whichever member exists.
template <typename T, typename = void>
struct get_storage_pointer_type;
template <typename MapType>
struct get_storage_pointer_type<MapType, void_t<typename MapType::StoragePointerType>> {
    using SPT = typename MapType::StoragePointerType;
};
template <typename MapType>
struct get_storage_pointer_type<MapType, void_t<typename MapType::PointerArgType>> {
    using SPT = typename MapType::PointerArgType;
};
// Caster for Eigen::TensorMap: a zero-copy view over a numpy array's buffer.
// Loading never copies — it fails instead of converting — and casting back
// only supports reference semantics (the map does not own its storage).
template <typename Type, int Options>
struct type_caster<Eigen::TensorMap<Type, Options>,
                   typename eigen_tensor_helper<remove_cv_t<Type>>::ValidType> {
    static_assert(!std::is_pointer<typename Type::Scalar>::value,
                  PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
    using MapType = Eigen::TensorMap<Type, Options>;
    using Helper = eigen_tensor_helper<remove_cv_t<Type>>;
    // Accept `src` only if its layout, dtype, rank, alignment, shape and
    // writeability all permit aliasing the buffer directly.
    bool load(handle src, bool /*convert*/) {
        // Note that we have a lot more checks here as we want to make sure to avoid copies
        if (!isinstance<array>(src)) {
            return false;
        }
        auto arr = reinterpret_borrow<array>(src);
        if ((arr.flags() & compute_array_flag_from_tensor<Type>()) == 0) {
            return false;
        }
        if (!arr.dtype().is(dtype::of<typename Type::Scalar>())) {
            return false;
        }
        if (arr.ndim() != Type::NumIndices) {
            return false;
        }
        constexpr bool is_aligned = (Options & Eigen::Aligned) != 0;
        if (is_aligned && !is_tensor_aligned(arr.data())) {
            return false;
        }
        auto shape = get_shape_for_array<typename Type::Index, Type::NumIndices>(arr);
        if (!Helper::is_correct_shape(shape)) {
            return false;
        }
        if (needs_writeable && !arr.writeable()) {
            return false;
        }
        auto result = get_array_data_for_type<typename get_storage_pointer_type<MapType>::SPT,
                                              needs_writeable>(arr);
        value.reset(new MapType(std::move(result), std::move(shape)));
        return true;
    }
    static handle cast(MapType &&src, return_value_policy policy, handle parent) {
        return cast_impl(&src, policy, parent);
    }
    static handle cast(const MapType &&src, return_value_policy policy, handle parent) {
        return cast_impl(&src, policy, parent);
    }
    static handle cast(MapType &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic
            || policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::copy;
        }
        return cast_impl(&src, policy, parent);
    }
    static handle cast(const MapType &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic
            || policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::copy;
        }
        return cast(&src, policy, parent);
    }
    static handle cast(MapType *src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic) {
            policy = return_value_policy::take_ownership;
        } else if (policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::reference;
        }
        return cast_impl(src, policy, parent);
    }
    static handle cast(const MapType *src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic) {
            policy = return_value_policy::take_ownership;
        } else if (policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::reference;
        }
        return cast_impl(src, policy, parent);
    }
    // Only reference / reference_internal make sense for a non-owning map;
    // every other policy is reported as an error.
    template <typename C>
    static handle cast_impl(C *src, return_value_policy policy, handle parent) {
        object parent_object;
        constexpr bool writeable = !std::is_const<C>::value;
        switch (policy) {
            case return_value_policy::reference:
                parent_object = none();
                break;
            case return_value_policy::reference_internal:
                // Default should do the right thing
                if (!parent) {
                    pybind11_fail("Cannot use reference internal when there is no parent");
                }
                parent_object = reinterpret_borrow<object>(parent);
                break;
            case return_value_policy::take_ownership:
                delete src;
                // fallthrough
            default:
                // move, take_ownership don't make any sense for a ref/map:
                pybind11_fail("Invalid return_value_policy for Eigen Map type, must be either "
                              "reference or reference_internal");
        }
        auto result = array_t<typename Type::Scalar, compute_array_flag_from_tensor<Type>()>(
            convert_dsizes_to_vector(Helper::get_shape(*src)),
            src->data(),
            std::move(parent_object));
        if (!writeable) {
            array_proxy(result.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
        }
        return result.release();
    }
#if EIGEN_VERSION_AT_LEAST(3, 4, 0)
    // Write access is required whenever the map's storage pointer is
    // non-const.
    static constexpr bool needs_writeable = !std::is_const<typename std::remove_pointer<
        typename get_storage_pointer_type<MapType>::SPT>::type>::value;
#else
    // Handle Eigen bug
    static constexpr bool needs_writeable = !std::is_const<Type>::value;
#endif
protected:
    // TODO: Move to std::optional once std::optional has more support
    std::unique_ptr<MapType> value;
public:
    static constexpr auto name = get_tensor_descriptor<Type, true, needs_writeable>::value;
    explicit operator MapType *() { return value.get(); }
    explicit operator MapType &() { return *value; }
    explicit operator MapType &&() && { return std::move(*value); }
    template <typename T_>
    using cast_op_type = ::pybind11::detail::movable_cast_op_type<T_>;
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/embed.h | C/C++ Header | /*
pybind11/embed.h: Support for embedding the interpreter
Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include "eval.h"
#include <memory>
#include <vector>
#if defined(PYPY_VERSION)
# error Embedding the interpreter is not supported with PyPy
#endif
// Emits the extern "C" init hook registered with CPython's inittab; it simply
// forwards to the pybind11-generated wrapper for the embedded module.
#define PYBIND11_EMBEDDED_MODULE_IMPL(name)                                                       \
    extern "C" PyObject *pybind11_init_impl_##name();                                             \
    extern "C" PyObject *pybind11_init_impl_##name() { return pybind11_init_wrapper_##name(); }
/** \rst
Add a new module to the table of builtins for the interpreter. Must be
defined in global scope. The first macro parameter is the name of the
module (without quotes). The second parameter is the variable which will
be used as the interface to add functions and classes to the module.
.. code-block:: cpp
PYBIND11_EMBEDDED_MODULE(example, m) {
// ... initialize functions and classes here
m.def("foo", []() {
return "Hello, World!";
});
}
\endrst */
// Expands to: static module_def storage, a forward declaration of the user's
// init body, a wrapper that creates the extension module and translates C++
// exceptions, the extern "C" inittab hook, a global detail::embedded_module
// registrar, and finally the opening of the user's init function body.
#define PYBIND11_EMBEDDED_MODULE(name, variable)                                                  \
    static ::pybind11::module_::module_def PYBIND11_CONCAT(pybind11_module_def_, name);           \
    static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &);                     \
    static PyObject PYBIND11_CONCAT(*pybind11_init_wrapper_, name)() {                            \
        auto m = ::pybind11::module_::create_extension_module(                                    \
            PYBIND11_TOSTRING(name), nullptr, &PYBIND11_CONCAT(pybind11_module_def_, name));      \
        try {                                                                                     \
            PYBIND11_CONCAT(pybind11_init_, name)(m);                                             \
            return m.ptr();                                                                       \
        }                                                                                         \
        PYBIND11_CATCH_INIT_EXCEPTIONS                                                            \
    }                                                                                             \
    PYBIND11_EMBEDDED_MODULE_IMPL(name)                                                           \
    ::pybind11::detail::embedded_module PYBIND11_CONCAT(pybind11_module_, name)(                  \
        PYBIND11_TOSTRING(name), PYBIND11_CONCAT(pybind11_init_impl_, name));                     \
    void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_                                \
                                               & variable) // NOLINT(bugprone-macro-parentheses)
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/// Python 2.7/3.x compatible version of `PyImport_AppendInittab` and error checks.
// Registers an embedded module's init function with CPython's inittab at
// static-initialization time (via the PYBIND11_EMBEDDED_MODULE macro).
struct embedded_module {
    using init_t = PyObject *(*) ();
    embedded_module(const char *name, init_t init) {
        // Inittab entries may only be added before the interpreter starts.
        if (Py_IsInitialized() != 0) {
            pybind11_fail("Can't add new modules after the interpreter has been initialized");
        }
        if (PyImport_AppendInittab(name, init) == -1) {
            pybind11_fail("Insufficient memory to add a new module");
        }
    }
};
// Deleter for argv strings produced by Py_DecodeLocale, which allocates with
// CPython's raw allocator and must therefore be freed with PyMem_RawFree.
struct wide_char_arg_deleter {
    void operator()(wchar_t *ptr) const {
        // API docs: https://docs.python.org/3/c-api/sys.html#c.Py_DecodeLocale
        PyMem_RawFree(ptr);
    }
};
// Decode a narrow (locale-encoded) argv entry into a wide string. May return
// nullptr on decoding failure; the caller owns the result and releases it via
// wide_char_arg_deleter.
inline wchar_t *widen_chars(const char *safe_arg) {
    return Py_DecodeLocale(safe_arg, nullptr);
}
// Shared guard used by every initialize_interpreter overload: starting a
// second interpreter while one is live is a hard error.
inline void precheck_interpreter() {
    if (Py_IsInitialized() != 0) {
        pybind11_fail("The interpreter is already running");
    }
}
#if !defined(PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX)
# define PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX (0x03080000)
#endif
#if PY_VERSION_HEX < PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX
// Legacy (pre-PyConfig, i.e. Python < 3.8) interpreter startup: initialize
// the runtime, then decode and install sys.argv via PySys_SetArgvEx.
inline void initialize_interpreter_pre_pyconfig(bool init_signal_handlers,
                                                int argc,
                                                const char *const *argv,
                                                bool add_program_dir_to_path) {
    detail::precheck_interpreter();
    Py_InitializeEx(init_signal_handlers ? 1 : 0);
# if defined(WITH_THREAD) && PY_VERSION_HEX < 0x03070000
    PyEval_InitThreads();
# endif
    // Before it was special-cased in python 3.8, passing an empty or null argv
    // caused a segfault, so we have to reimplement the special case ourselves.
    bool special_case = (argv == nullptr || argc <= 0);
    const char *const empty_argv[]{"\0"};
    const char *const *safe_argv = special_case ? empty_argv : argv;
    if (special_case) {
        argc = 1;
    }
    auto argv_size = static_cast<size_t>(argc);
    // SetArgv* on python 3 takes wchar_t, so we have to convert.
    std::unique_ptr<wchar_t *[]> widened_argv(new wchar_t *[argv_size]);
    std::vector<std::unique_ptr<wchar_t[], detail::wide_char_arg_deleter>> widened_argv_entries;
    widened_argv_entries.reserve(argv_size);
    for (size_t ii = 0; ii < argv_size; ++ii) {
        widened_argv_entries.emplace_back(detail::widen_chars(safe_argv[ii]));
        if (!widened_argv_entries.back()) {
            // A null here indicates a character-encoding failure or the python
            // interpreter out of memory. Give up.
            return;
        }
        widened_argv[ii] = widened_argv_entries.back().get();
    }
    auto *pysys_argv = widened_argv.get();
    // Last argument controls whether the program directory is prepended to
    // sys.path.
    PySys_SetArgvEx(argc, pysys_argv, static_cast<int>(add_program_dir_to_path));
}
#endif
PYBIND11_NAMESPACE_END(detail)
#if PY_VERSION_HEX >= PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX
// PyConfig-based interpreter startup (Python >= 3.8). Takes ownership of
// clearing `config` on every path, including failures. Throws
// std::runtime_error (not a Python exception) on init failure.
inline void initialize_interpreter(PyConfig *config,
                                   int argc = 0,
                                   const char *const *argv = nullptr,
                                   bool add_program_dir_to_path = true) {
    detail::precheck_interpreter();
    PyStatus status = PyConfig_SetBytesArgv(config, argc, const_cast<char *const *>(argv));
    if (PyStatus_Exception(status) != 0) {
        // A failure here indicates a character-encoding failure or the python
        // interpreter out of memory. Give up.
        PyConfig_Clear(config);
        throw std::runtime_error(PyStatus_IsError(status) != 0 ? status.err_msg
                                                               : "Failed to prepare CPython");
    }
    status = Py_InitializeFromConfig(config);
    if (PyStatus_Exception(status) != 0) {
        PyConfig_Clear(config);
        throw std::runtime_error(PyStatus_IsError(status) != 0 ? status.err_msg
                                                               : "Failed to init CPython");
    }
    if (add_program_dir_to_path) {
        // Mimic the behavior of plain `python script.py`: prepend the
        // script's directory (or "") to sys.path.
        PyRun_SimpleString("import sys, os.path; "
                           "sys.path.insert(0, "
                           "os.path.abspath(os.path.dirname(sys.argv[0])) "
                           "if sys.argv and os.path.exists(sys.argv[0]) else '')");
    }
    PyConfig_Clear(config);
}
#endif
/** \rst
Initialize the Python interpreter. No other pybind11 or CPython API functions can be
called before this is done; with the exception of `PYBIND11_EMBEDDED_MODULE`. The
optional `init_signal_handlers` parameter can be used to skip the registration of
signal handlers (see the `Python documentation`_ for details). Calling this function
again after the interpreter has already been initialized is a fatal error.
If initializing the Python interpreter fails, then the program is terminated. (This
is controlled by the CPython runtime and is an exception to pybind11's normal behavior
of throwing exceptions on errors.)
The remaining optional parameters, `argc`, `argv`, and `add_program_dir_to_path` are
used to populate ``sys.argv`` and ``sys.path``.
See the |PySys_SetArgvEx documentation|_ for details.
.. _Python documentation: https://docs.python.org/3/c-api/init.html#c.Py_InitializeEx
.. |PySys_SetArgvEx documentation| replace:: ``PySys_SetArgvEx`` documentation
.. _PySys_SetArgvEx documentation: https://docs.python.org/3/c-api/init.html#c.PySys_SetArgvEx
\endrst */
// Convenience overload: dispatches to the legacy path on old Pythons, or
// builds a default PyConfig and delegates to the PyConfig overload.
inline void initialize_interpreter(bool init_signal_handlers = true,
                                   int argc = 0,
                                   const char *const *argv = nullptr,
                                   bool add_program_dir_to_path = true) {
#if PY_VERSION_HEX < PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX
    detail::initialize_interpreter_pre_pyconfig(
        init_signal_handlers, argc, argv, add_program_dir_to_path);
#else
    PyConfig config;
    PyConfig_InitPythonConfig(&config);
    // See PR #4473 for background
    config.parse_argv = 0;
    config.install_signal_handlers = init_signal_handlers ? 1 : 0;
    initialize_interpreter(&config, argc, argv, add_program_dir_to_path);
#endif
}
/** \rst
Shut down the Python interpreter. No pybind11 or CPython API functions can be called
after this. In addition, pybind11 objects must not outlive the interpreter:
.. code-block:: cpp
{ // BAD
py::initialize_interpreter();
auto hello = py::str("Hello, World!");
py::finalize_interpreter();
} // <-- BOOM, hello's destructor is called after interpreter shutdown
{ // GOOD
py::initialize_interpreter();
{ // scoped
auto hello = py::str("Hello, World!");
} // <-- OK, hello is cleaned up properly
py::finalize_interpreter();
}
{ // BETTER
py::scoped_interpreter guard{};
auto hello = py::str("Hello, World!");
}
.. warning::
The interpreter can be restarted by calling `initialize_interpreter` again.
Modules created using pybind11 can be safely re-initialized. However, Python
itself cannot completely unload binary extension modules and there are several
caveats with regard to interpreter restarting. All the details can be found
in the CPython documentation. In short, not all interpreter memory may be
freed, either due to reference cycles or user-created global data.
\endrst */
// Shut the interpreter down and free pybind11's global internals so a later
// initialize_interpreter() starts from a clean slate.
inline void finalize_interpreter() {
    // Get the internals pointer (without creating it if it doesn't exist). It's possible for the
    // internals to be created during Py_Finalize() (e.g. if a py::capsule calls `get_internals()`
    // during destruction), so we get the pointer-pointer here and check it after Py_Finalize().
    detail::internals **internals_ptr_ptr = detail::get_internals_pp();
    // It could also be stashed in state_dict, so look there too:
    if (object internals_obj
        = get_internals_obj_from_state_dict(detail::get_python_state_dict())) {
        internals_ptr_ptr = detail::get_internals_pp_from_capsule(internals_obj);
    }
    // Local internals contains data managed by the current interpreter, so we must clear them to
    // avoid undefined behaviors when initializing another interpreter
    detail::get_local_internals().registered_types_cpp.clear();
    detail::get_local_internals().registered_exception_translators.clear();
    Py_Finalize();
    // Delete the internals only after Py_Finalize(), since finalization may
    // still touch them (see comment above).
    if (internals_ptr_ptr) {
        delete *internals_ptr_ptr;
        *internals_ptr_ptr = nullptr;
    }
}
/** \rst
Scope guard version of `initialize_interpreter` and `finalize_interpreter`.
This a move-only guard and only a single instance can exist.
See `initialize_interpreter` for a discussion of its constructor arguments.
.. code-block:: cpp
#include <pybind11/embed.h>
int main() {
py::scoped_interpreter guard{};
py::print(Hello, World!);
} // <-- interpreter shutdown
\endrst */
// RAII guard pairing initialize_interpreter() with finalize_interpreter().
// Move-only; exactly one live instance owns the interpreter at a time.
class scoped_interpreter {
public:
    explicit scoped_interpreter(bool init_signal_handlers = true,
                                int argc = 0,
                                const char *const *argv = nullptr,
                                bool add_program_dir_to_path = true) {
        initialize_interpreter(init_signal_handlers, argc, argv, add_program_dir_to_path);
    }
#if PY_VERSION_HEX >= PYBIND11_PYCONFIG_SUPPORT_PY_VERSION_HEX
    explicit scoped_interpreter(PyConfig *config,
                                int argc = 0,
                                const char *const *argv = nullptr,
                                bool add_program_dir_to_path = true) {
        initialize_interpreter(config, argc, argv, add_program_dir_to_path);
    }
#endif
    scoped_interpreter(const scoped_interpreter &) = delete;
    // Moving disarms the source so only the surviving guard finalizes.
    scoped_interpreter(scoped_interpreter &&other) noexcept { other.is_valid = false; }
    scoped_interpreter &operator=(const scoped_interpreter &) = delete;
    scoped_interpreter &operator=(scoped_interpreter &&) = delete;
    ~scoped_interpreter() {
        if (is_valid) {
            finalize_interpreter();
        }
    }
private:
    // True while this instance is responsible for finalizing the interpreter.
    bool is_valid = true;
};
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/eval.h | C/C++ Header | /*
pybind11/eval.h: Support for evaluating Python expressions and statements
from strings and files
Copyright (c) 2016 Klemens Morgenstern <klemens.morgenstern@ed-chemnitz.de> and
Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include <utility>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Make sure `__builtins__` is present in the globals dict before running
// code; only needed on PyPy and CPython < 3.8 (see comment below).
inline void ensure_builtins_in_globals(object &global) {
#if defined(PYPY_VERSION) || PY_VERSION_HEX < 0x03080000
    // Running exec and eval adds `builtins` module under `__builtins__` key to
    // globals if not yet present. Python 3.8 made PyRun_String behave
    // similarly. Let's also do that for older versions, for consistency. This
    // was missing from PyPy3.8 7.3.7.
    if (!global.contains("__builtins__"))
        global["__builtins__"] = module_::import(PYBIND11_BUILTINS_MODULE);
#else
    (void) global;
#endif
}
PYBIND11_NAMESPACE_END(detail)
// Compilation mode for eval()/eval_file(); maps onto CPython's start tokens
// (Py_eval_input / Py_single_input / Py_file_input).
enum eval_mode {
    /// Evaluate a string containing an isolated expression
    eval_expr,
    /// Evaluate a string containing a single statement. Returns \c none
    eval_single_statement,
    /// Evaluate a string containing a sequence of statement. Returns \c none
    eval_statements
};
// Evaluate a Python string in the given global/local scopes; `local` defaults
// to `global`. Throws error_already_set if the Python code raises.
template <eval_mode mode = eval_expr>
object eval(const str &expr, object global = globals(), object local = object()) {
    if (!local) {
        local = global;
    }
    detail::ensure_builtins_in_globals(global);
    /* PyRun_String does not accept a PyObject / encoding specifier,
       this seems to be the only alternative */
    std::string buffer = "# -*- coding: utf-8 -*-\n" + (std::string) expr;
    // Translate the eval_mode into CPython's start token.
    int start = 0;
    switch (mode) {
        case eval_expr:
            start = Py_eval_input;
            break;
        case eval_single_statement:
            start = Py_single_input;
            break;
        case eval_statements:
            start = Py_file_input;
            break;
        default:
            pybind11_fail("invalid evaluation mode");
    }
    PyObject *result = PyRun_String(buffer.c_str(), start, global.ptr(), local.ptr());
    if (!result) {
        throw error_already_set();
    }
    return reinterpret_steal<object>(result);
}
// String-literal overload: dedents raw-string literals that begin with a
// newline, then forwards to the str overload above.
template <eval_mode mode = eval_expr, size_t N>
object eval(const char (&s)[N], object global = globals(), object local = object()) {
    /* Support raw string literals by removing common leading whitespace */
    auto expr = (s[0] == '\n') ? str(module_::import("textwrap").attr("dedent")(s)) : str(s);
    return eval<mode>(expr, std::move(global), std::move(local));
}
// Execute a sequence of statements (Python `exec` semantics); discards the
// (always-None) result.
inline void exec(const str &expr, object global = globals(), object local = object()) {
    eval<eval_statements>(expr, std::move(global), std::move(local));
}
// String-literal overload of exec(); benefits from the dedenting behavior of
// the literal eval() overload.
template <size_t N>
void exec(const char (&s)[N], object global = globals(), object local = object()) {
    eval<eval_statements>(s, std::move(global), std::move(local));
}
#if defined(PYPY_VERSION)
// PyPy stubs: eval_file relies on C APIs not available there, so all three
// arities fail loudly and direct the user to eval() instead.
template <eval_mode mode = eval_statements>
object eval_file(str, object, object) {
    pybind11_fail("eval_file not supported in PyPy3. Use eval");
}
template <eval_mode mode = eval_statements>
object eval_file(str, object) {
    pybind11_fail("eval_file not supported in PyPy3. Use eval");
}
template <eval_mode mode = eval_statements>
object eval_file(str) {
    pybind11_fail("eval_file not supported in PyPy3. Use eval");
}
#else
// Evaluate the contents of a file in the given scopes. Sets `__file__` in the
// globals if absent; throws error_already_set on Python errors.
template <eval_mode mode = eval_statements>
object eval_file(str fname, object global = globals(), object local = object()) {
    if (!local) {
        local = global;
    }
    detail::ensure_builtins_in_globals(global);
    // Translate the eval_mode into CPython's start token.
    int start = 0;
    switch (mode) {
        case eval_expr:
            start = Py_eval_input;
            break;
        case eval_single_statement:
            start = Py_single_input;
            break;
        case eval_statements:
            start = Py_file_input;
            break;
        default:
            pybind11_fail("invalid evaluation mode");
    }
    // PyRun_FileEx closes the FILE* for us (closeFile == 1).
    int closeFile = 1;
    std::string fname_str = (std::string) fname;
    FILE *f = _Py_fopen_obj(fname.ptr(), "r");
    if (!f) {
        PyErr_Clear();
        pybind11_fail("File \"" + fname_str + "\" could not be opened!");
    }
    if (!global.contains("__file__")) {
        global["__file__"] = std::move(fname);
    }
    PyObject *result
        = PyRun_FileEx(f, fname_str.c_str(), start, global.ptr(), local.ptr(), closeFile);
    if (!result) {
        throw error_already_set();
    }
    return reinterpret_steal<object>(result);
}
#endif
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/functional.h | C/C++ Header | /*
pybind11/functional.h: std::function<> support
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include <functional>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Caster between Python callables and std::function. Loading detects
// stateless pybind11-bound functions to avoid a Python round-trip; otherwise
// it wraps the Python callable in a GIL-acquiring functor.
template <typename Return, typename... Args>
struct type_caster<std::function<Return(Args...)>> {
    using type = std::function<Return(Args...)>;
    using retval_type = conditional_t<std::is_same<Return, void>::value, void_type, Return>;
    using function_type = Return (*)(Args...);
public:
    bool load(handle src, bool convert) {
        if (src.is_none()) {
            // Defer accepting None to other overloads (if we aren't in convert mode):
            if (!convert) {
                return false;
            }
            // None converts to an empty std::function.
            return true;
        }
        if (!isinstance<function>(src)) {
            return false;
        }
        auto func = reinterpret_borrow<function>(src);
        /*
           When passing a C++ function as an argument to another C++
           function via Python, every function call would normally involve
           a full C++ -> Python -> C++ roundtrip, which can be prohibitive.
           Here, we try to at least detect the case where the function is
           stateless (i.e. function pointer or lambda function without
           captured variables), in which case the roundtrip can be avoided.
         */
        if (auto cfunc = func.cpp_function()) {
            auto *cfunc_self = PyCFunction_GET_SELF(cfunc.ptr());
            if (cfunc_self == nullptr) {
                PyErr_Clear();
            } else if (isinstance<capsule>(cfunc_self)) {
                auto c = reinterpret_borrow<capsule>(cfunc_self);
                function_record *rec = nullptr;
                // Check that we can safely reinterpret the capsule into a function_record
                if (detail::is_function_record_capsule(c)) {
                    rec = c.get_pointer<function_record>();
                }
                // Walk the overload chain looking for a stateless overload
                // with exactly our signature.
                while (rec != nullptr) {
                    if (rec->is_stateless
                        && same_type(typeid(function_type),
                                     *reinterpret_cast<const std::type_info *>(rec->data[1]))) {
                        struct capture {
                            function_type f;
                        };
                        // Extract the raw function pointer: no Python
                        // round-trip needed at call time.
                        value = ((capture *) &rec->data)->f;
                        return true;
                    }
                    rec = rec->next;
                }
            }
            // PYPY segfaults here when passing builtin function like sum.
            // Raising an fail exception here works to prevent the segfault, but only on gcc.
            // See PR #1413 for full details
        }
        // ensure GIL is held during functor destruction
        struct func_handle {
            function f;
#if !(defined(_MSC_VER) && _MSC_VER == 1916 && defined(PYBIND11_CPP17))
            // This triggers a syntax error under very special conditions (very weird indeed).
            explicit
#endif
                func_handle(function &&f_) noexcept
                : f(std::move(f_)) {
            }
            func_handle(const func_handle &f_) { operator=(f_); }
            func_handle &operator=(const func_handle &f_) {
                gil_scoped_acquire acq;
                f = f_.f;
                return *this;
            }
            ~func_handle() {
                gil_scoped_acquire acq;
                function kill_f(std::move(f));
            }
        };
        // to emulate 'move initialization capture' in C++11
        struct func_wrapper {
            func_handle hfunc;
            explicit func_wrapper(func_handle &&hf) noexcept : hfunc(std::move(hf)) {}
            Return operator()(Args... args) const {
                gil_scoped_acquire acq;
                // casts the returned object as a rvalue to the return type
                return hfunc.f(std::forward<Args>(args)...).template cast<Return>();
            }
        };
        value = func_wrapper(func_handle(std::move(func)));
        return true;
    }
    template <typename Func>
    static handle cast(Func &&f_, return_value_policy policy, handle /* parent */) {
        if (!f_) {
            // An empty std::function converts back to None.
            return none().release();
        }
        auto result = f_.template target<function_type>();
        if (result) {
            // Plain function pointer: expose it directly as a cpp_function.
            return cpp_function(*result, policy).release();
        }
        return cpp_function(std::forward<Func>(f_), policy).release();
    }
    PYBIND11_TYPE_CASTER(type,
                         const_name("Callable[[") + concat(make_caster<Args>::name...)
                             + const_name("], ") + make_caster<retval_type>::name
                             + const_name("]"));
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/gil.h | C/C++ Header | /*
pybind11/gil.h: RAII helpers for managing the GIL
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
#if defined(WITH_THREAD) && !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
# include "detail/internals.h"
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// forward declarations
PyThreadState *get_thread_state_unchecked();
PYBIND11_NAMESPACE_END(detail)
#if defined(WITH_THREAD)
# if !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
/* The functions below essentially reproduce the PyGILState_* API using a RAII
* pattern, but there are a few important differences:
*
* 1. When acquiring the GIL from an non-main thread during the finalization
* phase, the GILState API blindly terminates the calling thread, which
* is often not what is wanted. This API does not do this.
*
* 2. The gil_scoped_release function can optionally cut the relationship
* of a PyThreadState and its associated thread, which allows moving it to
* another thread (this is a fairly rare/advanced use case).
*
* 3. The reference count of an acquired thread state can be controlled. This
* can be handy to prevent cases where callbacks issued from an external
* thread would otherwise constantly construct and destroy thread state data
* structures.
*
* See the Python bindings of NanoGUI (http://github.com/wjakob/nanogui) for an
* example which uses features 2 and 3 to migrate the Python thread of
* execution to another thread (to run the event loop on the original thread,
* in this case).
*/
// RAII GIL acquisition using pybind11's own per-thread state bookkeeping
// (see the block comment above for how this differs from PyGILState_*).
class gil_scoped_acquire {
public:
    PYBIND11_NOINLINE gil_scoped_acquire() {
        auto &internals = detail::get_internals();
        // Look up a thread state previously created by pybind11 for this thread.
        tstate = (PyThreadState *) PYBIND11_TLS_GET_VALUE(internals.tstate);
        if (!tstate) {
            /* Check if the GIL was acquired using the PyGILState_* API instead (e.g. if
               calling from a Python thread). Since we use a different key, this ensures
               we don't create a new thread state and deadlock in PyEval_AcquireThread
               below. Note we don't save this state with internals.tstate, since we don't
               create it we would fail to clear it (its reference count should be > 0). */
            tstate = PyGILState_GetThisThreadState();
        }
        if (!tstate) {
            // No state at all for this thread yet: create and register one.
            tstate = PyThreadState_New(internals.istate);
# if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
            if (!tstate) {
                pybind11_fail("scoped_acquire: could not create thread state!");
            }
# endif
            tstate->gilstate_counter = 0;
            PYBIND11_TLS_REPLACE_VALUE(internals.tstate, tstate);
        } else {
            // Only acquire if this thread isn't already the current holder.
            release = detail::get_thread_state_unchecked() != tstate;
        }
        if (release) {
            PyEval_AcquireThread(tstate);
        }
        inc_ref();
    }
    gil_scoped_acquire(const gil_scoped_acquire &) = delete;
    gil_scoped_acquire &operator=(const gil_scoped_acquire &) = delete;
    void inc_ref() { ++tstate->gilstate_counter; }
    PYBIND11_NOINLINE void dec_ref() {
        --tstate->gilstate_counter;
# if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
        if (detail::get_thread_state_unchecked() != tstate) {
            pybind11_fail("scoped_acquire::dec_ref(): thread state must be current!");
        }
        if (tstate->gilstate_counter < 0) {
            pybind11_fail("scoped_acquire::dec_ref(): reference count underflow!");
        }
# endif
        if (tstate->gilstate_counter == 0) {
# if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
            if (!release) {
                pybind11_fail("scoped_acquire::dec_ref(): internal error!");
            }
# endif
            // Last reference: tear down the thread state we created.
            PyThreadState_Clear(tstate);
            if (active) {
                PyThreadState_DeleteCurrent();
            }
            PYBIND11_TLS_DELETE_VALUE(detail::get_internals().tstate);
            release = false;
        }
    }
    /// This method will disable the PyThreadState_DeleteCurrent call and the
    /// GIL won't be acquired. This method should be used if the interpreter
    /// could be shutting down when this is called, as thread deletion is not
    /// allowed during shutdown. Check _Py_IsFinalizing() on Python 3.7+, and
    /// protect subsequent code.
    PYBIND11_NOINLINE void disarm() { active = false; }
    PYBIND11_NOINLINE ~gil_scoped_acquire() {
        dec_ref();
        if (release) {
            PyEval_SaveThread();
        }
    }
private:
    // Thread state used by this guard (may be created, TLS-cached, or borrowed).
    PyThreadState *tstate = nullptr;
    // Whether this guard actually acquired (and must release) the GIL.
    bool release = true;
    // Cleared by disarm() during interpreter shutdown.
    bool active = true;
};
// RAII GIL release; optionally disassociates the thread state from its TLS
// slot so it can be migrated to another thread (advanced use case).
class gil_scoped_release {
public:
    explicit gil_scoped_release(bool disassoc = false) : disassoc(disassoc) {
        // `get_internals()` must be called here unconditionally in order to initialize
        // `internals.tstate` for subsequent `gil_scoped_acquire` calls. Otherwise, an
        // initialization race could occur as multiple threads try `gil_scoped_acquire`.
        auto &internals = detail::get_internals();
        // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
        tstate = PyEval_SaveThread();
        if (disassoc) {
            // Python >= 3.7 can remove this, it's an int before 3.7
            // NOLINTNEXTLINE(readability-qualified-auto)
            auto key = internals.tstate;
            PYBIND11_TLS_DELETE_VALUE(key);
        }
    }
    gil_scoped_release(const gil_scoped_release &) = delete;
    gil_scoped_release &operator=(const gil_scoped_release &) = delete;
    /// This method will disable the PyThreadState_DeleteCurrent call and the
    /// GIL won't be acquired. This method should be used if the interpreter
    /// could be shutting down when this is called, as thread deletion is not
    /// allowed during shutdown. Check _Py_IsFinalizing() on Python 3.7+, and
    /// protect subsequent code.
    PYBIND11_NOINLINE void disarm() { active = false; }
    ~gil_scoped_release() {
        if (!tstate) {
            return;
        }
        // `PyEval_RestoreThread()` should not be called if runtime is finalizing
        if (active) {
            PyEval_RestoreThread(tstate);
        }
        if (disassoc) {
            // Re-associate the thread state with pybind11's TLS slot.
            // Python >= 3.7 can remove this, it's an int before 3.7
            // NOLINTNEXTLINE(readability-qualified-auto)
            auto key = detail::get_internals().tstate;
            PYBIND11_TLS_REPLACE_VALUE(key, tstate);
        }
    }
private:
    // Thread state saved by PyEval_SaveThread(); restored in the destructor.
    PyThreadState *tstate;
    // Whether the TLS association was severed in the constructor.
    bool disassoc;
    // Cleared by disarm() during interpreter shutdown.
    bool active = true;
};
# else // PYBIND11_SIMPLE_GIL_MANAGEMENT
// Simple-mode GIL acquisition: thin RAII wrapper over the PyGILState_* API
// (enabled by PYBIND11_SIMPLE_GIL_MANAGEMENT).
class gil_scoped_acquire {
    PyGILState_STATE state;
public:
    gil_scoped_acquire() : state{PyGILState_Ensure()} {}
    gil_scoped_acquire(const gil_scoped_acquire &) = delete;
    gil_scoped_acquire &operator=(const gil_scoped_acquire &) = delete;
    ~gil_scoped_acquire() { PyGILState_Release(state); }
    // No-op: kept for interface parity with the full implementation.
    void disarm() {}
};
// Simple-mode GIL release: thin RAII wrapper over PyEval_SaveThread /
// PyEval_RestoreThread (enabled by PYBIND11_SIMPLE_GIL_MANAGEMENT).
class gil_scoped_release {
    PyThreadState *state;
public:
    gil_scoped_release() : state{PyEval_SaveThread()} {}
    gil_scoped_release(const gil_scoped_release &) = delete;
    gil_scoped_release &operator=(const gil_scoped_release &) = delete;
    ~gil_scoped_release() { PyEval_RestoreThread(state); }
    // No-op: kept for interface parity with the full implementation.
    void disarm() {}
};
#else // WITH_THREAD
// No-op stand-in used when Python is built without threading (no GIL exists).
class gil_scoped_acquire {
public:
    gil_scoped_acquire() {
        // Trick to suppress `unused variable` error messages (at call sites).
        (void) (this != (this + 1));
    }
    gil_scoped_acquire(const gil_scoped_acquire &) = delete;
    gil_scoped_acquire &operator=(const gil_scoped_acquire &) = delete;
    void disarm() {}
};
class gil_scoped_release {
public:
gil_scoped_release() {
// Trick to suppress `unused variable` error messages (at call sites).
(void) (this != (this + 1));
}
gil_scoped_release(const gil_scoped_release &) = delete;
gil_scoped_release &operator=(const gil_scoped_release &) = delete;
void disarm() {}
};
#endif // WITH_THREAD
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/iostream.h | C/C++ Header | /*
pybind11/iostream.h -- Tools to assist with redirecting cout and cerr to Python
Copyright (c) 2017 Henry F. Schreiner
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
WARNING: The implementation in this file is NOT thread safe. Multiple
threads writing to a redirected ostream concurrently cause data races
and potentially buffer overflows. Therefore it is currently a requirement
that all (possibly) concurrent redirected ostream writes are protected by
a mutex.
#HelpAppreciated: Work on iostream.h thread safety.
For more background see the discussions under
https://github.com/pybind/pybind11/pull/2982 and
https://github.com/pybind/pybind11/pull/2995.
*/
#pragma once
#include "pybind11.h"
#include <algorithm>
#include <cstring>
#include <iostream>
#include <iterator>
#include <memory>
#include <ostream>
#include <streambuf>
#include <string>
#include <utility>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Buffer that writes to Python instead of C++
class pythonbuf : public std::streambuf {
private:
using traits_type = std::streambuf::traits_type;
const size_t buf_size;
std::unique_ptr<char[]> d_buffer;
object pywrite;
object pyflush;
int overflow(int c) override {
if (!traits_type::eq_int_type(c, traits_type::eof())) {
*pptr() = traits_type::to_char_type(c);
pbump(1);
}
return sync() == 0 ? traits_type::not_eof(c) : traits_type::eof();
}
// Computes how many bytes at the end of the buffer are part of an
// incomplete sequence of UTF-8 bytes.
// Precondition: pbase() < pptr()
size_t utf8_remainder() const {
const auto rbase = std::reverse_iterator<char *>(pbase());
const auto rpptr = std::reverse_iterator<char *>(pptr());
auto is_ascii = [](char c) { return (static_cast<unsigned char>(c) & 0x80) == 0x00; };
auto is_leading = [](char c) { return (static_cast<unsigned char>(c) & 0xC0) == 0xC0; };
auto is_leading_2b = [](char c) { return static_cast<unsigned char>(c) <= 0xDF; };
auto is_leading_3b = [](char c) { return static_cast<unsigned char>(c) <= 0xEF; };
// If the last character is ASCII, there are no incomplete code points
if (is_ascii(*rpptr)) {
return 0;
}
// Otherwise, work back from the end of the buffer and find the first
// UTF-8 leading byte
const auto rpend = rbase - rpptr >= 3 ? rpptr + 3 : rbase;
const auto leading = std::find_if(rpptr, rpend, is_leading);
if (leading == rbase) {
return 0;
}
const auto dist = static_cast<size_t>(leading - rpptr);
size_t remainder = 0;
if (dist == 0) {
remainder = 1; // 1-byte code point is impossible
} else if (dist == 1) {
remainder = is_leading_2b(*leading) ? 0 : dist + 1;
} else if (dist == 2) {
remainder = is_leading_3b(*leading) ? 0 : dist + 1;
}
// else if (dist >= 3), at least 4 bytes before encountering an UTF-8
// leading byte, either no remainder or invalid UTF-8.
// Invalid UTF-8 will cause an exception later when converting
// to a Python string, so that's not handled here.
return remainder;
}
// This function must be non-virtual to be called in a destructor.
int _sync() {
if (pbase() != pptr()) { // If buffer is not empty
gil_scoped_acquire tmp;
// This subtraction cannot be negative, so dropping the sign.
auto size = static_cast<size_t>(pptr() - pbase());
size_t remainder = utf8_remainder();
if (size > remainder) {
str line(pbase(), size - remainder);
pywrite(std::move(line));
pyflush();
}
// Copy the remainder at the end of the buffer to the beginning:
if (remainder > 0) {
std::memmove(pbase(), pptr() - remainder, remainder);
}
setp(pbase(), epptr());
pbump(static_cast<int>(remainder));
}
return 0;
}
int sync() override { return _sync(); }
public:
explicit pythonbuf(const object &pyostream, size_t buffer_size = 1024)
: buf_size(buffer_size), d_buffer(new char[buf_size]), pywrite(pyostream.attr("write")),
pyflush(pyostream.attr("flush")) {
setp(d_buffer.get(), d_buffer.get() + buf_size - 1);
}
pythonbuf(pythonbuf &&) = default;
/// Sync before destroy
~pythonbuf() override { _sync(); }
};
PYBIND11_NAMESPACE_END(detail)
/** \rst
This a move-only guard that redirects output.
.. code-block:: cpp
#include <pybind11/iostream.h>
...
{
py::scoped_ostream_redirect output;
std::cout << "Hello, World!"; // Python stdout
} // <-- return std::cout to normal
You can explicitly pass the c++ stream and the python object,
for example to guard stderr instead.
.. code-block:: cpp
{
py::scoped_ostream_redirect output{
std::cerr, py::module::import("sys").attr("stderr")};
std::cout << "Hello, World!";
}
\endrst */
class scoped_ostream_redirect {
protected:
std::streambuf *old;
std::ostream &costream;
detail::pythonbuf buffer;
public:
explicit scoped_ostream_redirect(std::ostream &costream = std::cout,
const object &pyostream
= module_::import("sys").attr("stdout"))
: costream(costream), buffer(pyostream) {
old = costream.rdbuf(&buffer);
}
~scoped_ostream_redirect() { costream.rdbuf(old); }
scoped_ostream_redirect(const scoped_ostream_redirect &) = delete;
scoped_ostream_redirect(scoped_ostream_redirect &&other) = default;
scoped_ostream_redirect &operator=(const scoped_ostream_redirect &) = delete;
scoped_ostream_redirect &operator=(scoped_ostream_redirect &&) = delete;
};
/** \rst
Like `scoped_ostream_redirect`, but redirects cerr by default. This class
is provided primary to make ``py::call_guard`` easier to make.
.. code-block:: cpp
m.def("noisy_func", &noisy_func,
py::call_guard<scoped_ostream_redirect,
scoped_estream_redirect>());
\endrst */
class scoped_estream_redirect : public scoped_ostream_redirect {
public:
explicit scoped_estream_redirect(std::ostream &costream = std::cerr,
const object &pyostream
= module_::import("sys").attr("stderr"))
: scoped_ostream_redirect(costream, pyostream) {}
};
PYBIND11_NAMESPACE_BEGIN(detail)
// Class to redirect output as a context manager. C++ backend.
class OstreamRedirect {
bool do_stdout_;
bool do_stderr_;
std::unique_ptr<scoped_ostream_redirect> redirect_stdout;
std::unique_ptr<scoped_estream_redirect> redirect_stderr;
public:
explicit OstreamRedirect(bool do_stdout = true, bool do_stderr = true)
: do_stdout_(do_stdout), do_stderr_(do_stderr) {}
void enter() {
if (do_stdout_) {
redirect_stdout.reset(new scoped_ostream_redirect());
}
if (do_stderr_) {
redirect_stderr.reset(new scoped_estream_redirect());
}
}
void exit() {
redirect_stdout.reset();
redirect_stderr.reset();
}
};
PYBIND11_NAMESPACE_END(detail)
/** \rst
This is a helper function to add a C++ redirect context manager to Python
instead of using a C++ guard. To use it, add the following to your binding code:
.. code-block:: cpp
#include <pybind11/iostream.h>
...
py::add_ostream_redirect(m, "ostream_redirect");
You now have a Python context manager that redirects your output:
.. code-block:: python
with m.ostream_redirect():
m.print_to_cout_function()
This manager can optionally be told which streams to operate on:
.. code-block:: python
with m.ostream_redirect(stdout=true, stderr=true):
m.noisy_function_with_error_printing()
\endrst */
inline class_<detail::OstreamRedirect>
add_ostream_redirect(module_ m, const std::string &name = "ostream_redirect") {
return class_<detail::OstreamRedirect>(std::move(m), name.c_str(), module_local())
.def(init<bool, bool>(), arg("stdout") = true, arg("stderr") = true)
.def("__enter__", &detail::OstreamRedirect::enter)
.def("__exit__", [](detail::OstreamRedirect &self_, const args &) { self_.exit(); });
}
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/numpy.h | C/C++ Header | /*
pybind11/numpy.h: Basic NumPy support, vectorize() wrapper
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include "complex.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <numeric>
#include <sstream>
#include <string>
#include <type_traits>
#include <typeindex>
#include <utility>
#include <vector>
/* This will be true on all flat address space platforms and allows us to reduce the
whole npy_intp / ssize_t / Py_intptr_t business down to just ssize_t for all size
and dimension types (e.g. shape, strides, indexing), instead of inflicting this
upon the library user. */
static_assert(sizeof(::pybind11::ssize_t) == sizeof(Py_intptr_t), "ssize_t != Py_intptr_t");
static_assert(std::is_signed<Py_intptr_t>::value, "Py_intptr_t must be signed");
// We now can reinterpret_cast between py::ssize_t and Py_intptr_t (MSVC + PyPy cares)
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
class array; // Forward declaration
PYBIND11_NAMESPACE_BEGIN(detail)
template <>
struct handle_type_name<array> {
static constexpr auto name = const_name("numpy.ndarray");
};
template <typename type, typename SFINAE = void>
struct npy_format_descriptor;
struct PyArrayDescr_Proxy {
PyObject_HEAD
PyObject *typeobj;
char kind;
char type;
char byteorder;
char flags;
int type_num;
int elsize;
int alignment;
char *subarray;
PyObject *fields;
PyObject *names;
};
struct PyArray_Proxy {
PyObject_HEAD
char *data;
int nd;
ssize_t *dimensions;
ssize_t *strides;
PyObject *base;
PyObject *descr;
int flags;
};
struct PyVoidScalarObject_Proxy {
PyObject_VAR_HEAD char *obval;
PyArrayDescr_Proxy *descr;
int flags;
PyObject *base;
};
struct numpy_type_info {
PyObject *dtype_ptr;
std::string format_str;
};
struct numpy_internals {
std::unordered_map<std::type_index, numpy_type_info> registered_dtypes;
numpy_type_info *get_type_info(const std::type_info &tinfo, bool throw_if_missing = true) {
auto it = registered_dtypes.find(std::type_index(tinfo));
if (it != registered_dtypes.end()) {
return &(it->second);
}
if (throw_if_missing) {
pybind11_fail(std::string("NumPy type info missing for ") + tinfo.name());
}
return nullptr;
}
template <typename T>
numpy_type_info *get_type_info(bool throw_if_missing = true) {
return get_type_info(typeid(typename std::remove_cv<T>::type), throw_if_missing);
}
};
PYBIND11_NOINLINE void load_numpy_internals(numpy_internals *&ptr) {
ptr = &get_or_create_shared_data<numpy_internals>("_numpy_internals");
}
inline numpy_internals &get_numpy_internals() {
static numpy_internals *ptr = nullptr;
if (!ptr) {
load_numpy_internals(ptr);
}
return *ptr;
}
template <typename T>
struct same_size {
template <typename U>
using as = bool_constant<sizeof(T) == sizeof(U)>;
};
template <typename Concrete>
constexpr int platform_lookup() {
return -1;
}
// Lookup a type according to its size, and return a value corresponding to the NumPy typenum.
template <typename Concrete, typename T, typename... Ts, typename... Ints>
constexpr int platform_lookup(int I, Ints... Is) {
return sizeof(Concrete) == sizeof(T) ? I : platform_lookup<Concrete, Ts...>(Is...);
}
struct npy_api {
enum constants {
NPY_ARRAY_C_CONTIGUOUS_ = 0x0001,
NPY_ARRAY_F_CONTIGUOUS_ = 0x0002,
NPY_ARRAY_OWNDATA_ = 0x0004,
NPY_ARRAY_FORCECAST_ = 0x0010,
NPY_ARRAY_ENSUREARRAY_ = 0x0040,
NPY_ARRAY_ALIGNED_ = 0x0100,
NPY_ARRAY_WRITEABLE_ = 0x0400,
NPY_BOOL_ = 0,
NPY_BYTE_,
NPY_UBYTE_,
NPY_SHORT_,
NPY_USHORT_,
NPY_INT_,
NPY_UINT_,
NPY_LONG_,
NPY_ULONG_,
NPY_LONGLONG_,
NPY_ULONGLONG_,
NPY_FLOAT_,
NPY_DOUBLE_,
NPY_LONGDOUBLE_,
NPY_CFLOAT_,
NPY_CDOUBLE_,
NPY_CLONGDOUBLE_,
NPY_OBJECT_ = 17,
NPY_STRING_,
NPY_UNICODE_,
NPY_VOID_,
// Platform-dependent normalization
NPY_INT8_ = NPY_BYTE_,
NPY_UINT8_ = NPY_UBYTE_,
NPY_INT16_ = NPY_SHORT_,
NPY_UINT16_ = NPY_USHORT_,
// `npy_common.h` defines the integer aliases. In order, it checks:
// NPY_BITSOF_LONG, NPY_BITSOF_LONGLONG, NPY_BITSOF_INT, NPY_BITSOF_SHORT, NPY_BITSOF_CHAR
// and assigns the alias to the first matching size, so we should check in this order.
NPY_INT32_
= platform_lookup<std::int32_t, long, int, short>(NPY_LONG_, NPY_INT_, NPY_SHORT_),
NPY_UINT32_ = platform_lookup<std::uint32_t, unsigned long, unsigned int, unsigned short>(
NPY_ULONG_, NPY_UINT_, NPY_USHORT_),
NPY_INT64_
= platform_lookup<std::int64_t, long, long long, int>(NPY_LONG_, NPY_LONGLONG_, NPY_INT_),
NPY_UINT64_
= platform_lookup<std::uint64_t, unsigned long, unsigned long long, unsigned int>(
NPY_ULONG_, NPY_ULONGLONG_, NPY_UINT_),
};
struct PyArray_Dims {
Py_intptr_t *ptr;
int len;
};
static npy_api &get() {
static npy_api api = lookup();
return api;
}
bool PyArray_Check_(PyObject *obj) const {
return PyObject_TypeCheck(obj, PyArray_Type_) != 0;
}
bool PyArrayDescr_Check_(PyObject *obj) const {
return PyObject_TypeCheck(obj, PyArrayDescr_Type_) != 0;
}
unsigned int (*PyArray_GetNDArrayCFeatureVersion_)();
PyObject *(*PyArray_DescrFromType_)(int);
PyObject *(*PyArray_NewFromDescr_)(PyTypeObject *,
PyObject *,
int,
Py_intptr_t const *,
Py_intptr_t const *,
void *,
int,
PyObject *);
// Unused. Not removed because that affects ABI of the class.
PyObject *(*PyArray_DescrNewFromType_)(int);
int (*PyArray_CopyInto_)(PyObject *, PyObject *);
PyObject *(*PyArray_NewCopy_)(PyObject *, int);
PyTypeObject *PyArray_Type_;
PyTypeObject *PyVoidArrType_Type_;
PyTypeObject *PyArrayDescr_Type_;
PyObject *(*PyArray_DescrFromScalar_)(PyObject *);
PyObject *(*PyArray_FromAny_)(PyObject *, PyObject *, int, int, int, PyObject *);
int (*PyArray_DescrConverter_)(PyObject *, PyObject **);
bool (*PyArray_EquivTypes_)(PyObject *, PyObject *);
int (*PyArray_GetArrayParamsFromObject_)(PyObject *,
PyObject *,
unsigned char,
PyObject **,
int *,
Py_intptr_t *,
PyObject **,
PyObject *);
PyObject *(*PyArray_Squeeze_)(PyObject *);
// Unused. Not removed because that affects ABI of the class.
int (*PyArray_SetBaseObject_)(PyObject *, PyObject *);
PyObject *(*PyArray_Resize_)(PyObject *, PyArray_Dims *, int, int);
PyObject *(*PyArray_Newshape_)(PyObject *, PyArray_Dims *, int);
PyObject *(*PyArray_View_)(PyObject *, PyObject *, PyObject *);
private:
enum functions {
API_PyArray_GetNDArrayCFeatureVersion = 211,
API_PyArray_Type = 2,
API_PyArrayDescr_Type = 3,
API_PyVoidArrType_Type = 39,
API_PyArray_DescrFromType = 45,
API_PyArray_DescrFromScalar = 57,
API_PyArray_FromAny = 69,
API_PyArray_Resize = 80,
API_PyArray_CopyInto = 82,
API_PyArray_NewCopy = 85,
API_PyArray_NewFromDescr = 94,
API_PyArray_DescrNewFromType = 96,
API_PyArray_Newshape = 135,
API_PyArray_Squeeze = 136,
API_PyArray_View = 137,
API_PyArray_DescrConverter = 174,
API_PyArray_EquivTypes = 182,
API_PyArray_GetArrayParamsFromObject = 278,
API_PyArray_SetBaseObject = 282
};
static npy_api lookup() {
module_ m = module_::import("numpy.core.multiarray");
auto c = m.attr("_ARRAY_API");
void **api_ptr = (void **) PyCapsule_GetPointer(c.ptr(), nullptr);
npy_api api;
#define DECL_NPY_API(Func) api.Func##_ = (decltype(api.Func##_)) api_ptr[API_##Func];
DECL_NPY_API(PyArray_GetNDArrayCFeatureVersion);
if (api.PyArray_GetNDArrayCFeatureVersion_() < 0x7) {
pybind11_fail("pybind11 numpy support requires numpy >= 1.7.0");
}
DECL_NPY_API(PyArray_Type);
DECL_NPY_API(PyVoidArrType_Type);
DECL_NPY_API(PyArrayDescr_Type);
DECL_NPY_API(PyArray_DescrFromType);
DECL_NPY_API(PyArray_DescrFromScalar);
DECL_NPY_API(PyArray_FromAny);
DECL_NPY_API(PyArray_Resize);
DECL_NPY_API(PyArray_CopyInto);
DECL_NPY_API(PyArray_NewCopy);
DECL_NPY_API(PyArray_NewFromDescr);
DECL_NPY_API(PyArray_DescrNewFromType);
DECL_NPY_API(PyArray_Newshape);
DECL_NPY_API(PyArray_Squeeze);
DECL_NPY_API(PyArray_View);
DECL_NPY_API(PyArray_DescrConverter);
DECL_NPY_API(PyArray_EquivTypes);
DECL_NPY_API(PyArray_GetArrayParamsFromObject);
DECL_NPY_API(PyArray_SetBaseObject);
#undef DECL_NPY_API
return api;
}
};
inline PyArray_Proxy *array_proxy(void *ptr) { return reinterpret_cast<PyArray_Proxy *>(ptr); }
inline const PyArray_Proxy *array_proxy(const void *ptr) {
return reinterpret_cast<const PyArray_Proxy *>(ptr);
}
inline PyArrayDescr_Proxy *array_descriptor_proxy(PyObject *ptr) {
return reinterpret_cast<PyArrayDescr_Proxy *>(ptr);
}
inline const PyArrayDescr_Proxy *array_descriptor_proxy(const PyObject *ptr) {
return reinterpret_cast<const PyArrayDescr_Proxy *>(ptr);
}
inline bool check_flags(const void *ptr, int flag) {
return (flag == (array_proxy(ptr)->flags & flag));
}
template <typename T>
struct is_std_array : std::false_type {};
template <typename T, size_t N>
struct is_std_array<std::array<T, N>> : std::true_type {};
template <typename T>
struct is_complex : std::false_type {};
template <typename T>
struct is_complex<std::complex<T>> : std::true_type {};
template <typename T>
struct array_info_scalar {
using type = T;
static constexpr bool is_array = false;
static constexpr bool is_empty = false;
static constexpr auto extents = const_name("");
static void append_extents(list & /* shape */) {}
};
// Computes underlying type and a comma-separated list of extents for array
// types (any mix of std::array and built-in arrays). An array of char is
// treated as scalar because it gets special handling.
template <typename T>
struct array_info : array_info_scalar<T> {};
template <typename T, size_t N>
struct array_info<std::array<T, N>> {
using type = typename array_info<T>::type;
static constexpr bool is_array = true;
static constexpr bool is_empty = (N == 0) || array_info<T>::is_empty;
static constexpr size_t extent = N;
// appends the extents to shape
static void append_extents(list &shape) {
shape.append(N);
array_info<T>::append_extents(shape);
}
static constexpr auto extents = const_name<array_info<T>::is_array>(
concat(const_name<N>(), array_info<T>::extents), const_name<N>());
};
// For numpy we have special handling for arrays of characters, so we don't include
// the size in the array extents.
template <size_t N>
struct array_info<char[N]> : array_info_scalar<char[N]> {};
template <size_t N>
struct array_info<std::array<char, N>> : array_info_scalar<std::array<char, N>> {};
template <typename T, size_t N>
struct array_info<T[N]> : array_info<std::array<T, N>> {};
template <typename T>
using remove_all_extents_t = typename array_info<T>::type;
template <typename T>
using is_pod_struct
= all_of<std::is_standard_layout<T>, // since we're accessing directly in memory
// we need a standard layout type
#if defined(__GLIBCXX__) \
&& (__GLIBCXX__ < 20150422 || __GLIBCXX__ == 20150426 || __GLIBCXX__ == 20150623 \
|| __GLIBCXX__ == 20150626 || __GLIBCXX__ == 20160803)
// libstdc++ < 5 (including versions 4.8.5, 4.9.3 and 4.9.4 which were released after
// 5) don't implement is_trivially_copyable, so approximate it
std::is_trivially_destructible<T>,
satisfies_any_of<T, std::has_trivial_copy_constructor, std::has_trivial_copy_assign>,
#else
std::is_trivially_copyable<T>,
#endif
satisfies_none_of<T,
std::is_reference,
std::is_array,
is_std_array,
std::is_arithmetic,
is_complex,
std::is_enum>>;
// Replacement for std::is_pod (deprecated in C++20)
template <typename T>
using is_pod = all_of<std::is_standard_layout<T>, std::is_trivial<T>>;
template <ssize_t Dim = 0, typename Strides>
ssize_t byte_offset_unsafe(const Strides &) {
return 0;
}
template <ssize_t Dim = 0, typename Strides, typename... Ix>
ssize_t byte_offset_unsafe(const Strides &strides, ssize_t i, Ix... index) {
return i * strides[Dim] + byte_offset_unsafe<Dim + 1>(strides, index...);
}
/**
* Proxy class providing unsafe, unchecked const access to array data. This is constructed through
* the `unchecked<T, N>()` method of `array` or the `unchecked<N>()` method of `array_t<T>`. `Dims`
* will be -1 for dimensions determined at runtime.
*/
template <typename T, ssize_t Dims>
class unchecked_reference {
protected:
static constexpr bool Dynamic = Dims < 0;
const unsigned char *data_;
// Storing the shape & strides in local variables (i.e. these arrays) allows the compiler to
// make large performance gains on big, nested loops, but requires compile-time dimensions
conditional_t<Dynamic, const ssize_t *, std::array<ssize_t, (size_t) Dims>> shape_, strides_;
const ssize_t dims_;
friend class pybind11::array;
// Constructor for compile-time dimensions:
template <bool Dyn = Dynamic>
unchecked_reference(const void *data,
const ssize_t *shape,
const ssize_t *strides,
enable_if_t<!Dyn, ssize_t>)
: data_{reinterpret_cast<const unsigned char *>(data)}, dims_{Dims} {
for (size_t i = 0; i < (size_t) dims_; i++) {
shape_[i] = shape[i];
strides_[i] = strides[i];
}
}
// Constructor for runtime dimensions:
template <bool Dyn = Dynamic>
unchecked_reference(const void *data,
const ssize_t *shape,
const ssize_t *strides,
enable_if_t<Dyn, ssize_t> dims)
: data_{reinterpret_cast<const unsigned char *>(data)}, shape_{shape}, strides_{strides},
dims_{dims} {}
public:
/**
* Unchecked const reference access to data at the given indices. For a compile-time known
* number of dimensions, this requires the correct number of arguments; for run-time
* dimensionality, this is not checked (and so is up to the caller to use safely).
*/
template <typename... Ix>
const T &operator()(Ix... index) const {
static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,
"Invalid number of indices for unchecked array reference");
return *reinterpret_cast<const T *>(data_
+ byte_offset_unsafe(strides_, ssize_t(index)...));
}
/**
* Unchecked const reference access to data; this operator only participates if the reference
* is to a 1-dimensional array. When present, this is exactly equivalent to `obj(index)`.
*/
template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>
const T &operator[](ssize_t index) const {
return operator()(index);
}
/// Pointer access to the data at the given indices.
template <typename... Ix>
const T *data(Ix... ix) const {
return &operator()(ssize_t(ix)...);
}
/// Returns the item size, i.e. sizeof(T)
constexpr static ssize_t itemsize() { return sizeof(T); }
/// Returns the shape (i.e. size) of dimension `dim`
ssize_t shape(ssize_t dim) const { return shape_[(size_t) dim]; }
/// Returns the number of dimensions of the array
ssize_t ndim() const { return dims_; }
/// Returns the total number of elements in the referenced array, i.e. the product of the
/// shapes
template <bool Dyn = Dynamic>
enable_if_t<!Dyn, ssize_t> size() const {
return std::accumulate(
shape_.begin(), shape_.end(), (ssize_t) 1, std::multiplies<ssize_t>());
}
template <bool Dyn = Dynamic>
enable_if_t<Dyn, ssize_t> size() const {
return std::accumulate(shape_, shape_ + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
}
/// Returns the total number of bytes used by the referenced data. Note that the actual span
/// in memory may be larger if the referenced array has non-contiguous strides (e.g. for a
/// slice).
ssize_t nbytes() const { return size() * itemsize(); }
};
template <typename T, ssize_t Dims>
class unchecked_mutable_reference : public unchecked_reference<T, Dims> {
friend class pybind11::array;
using ConstBase = unchecked_reference<T, Dims>;
using ConstBase::ConstBase;
using ConstBase::Dynamic;
public:
// Bring in const-qualified versions from base class
using ConstBase::operator();
using ConstBase::operator[];
/// Mutable, unchecked access to data at the given indices.
template <typename... Ix>
T &operator()(Ix... index) {
static_assert(ssize_t{sizeof...(Ix)} == Dims || Dynamic,
"Invalid number of indices for unchecked array reference");
return const_cast<T &>(ConstBase::operator()(index...));
}
/**
* Mutable, unchecked access data at the given index; this operator only participates if the
* reference is to a 1-dimensional array (or has runtime dimensions). When present, this is
* exactly equivalent to `obj(index)`.
*/
template <ssize_t D = Dims, typename = enable_if_t<D == 1 || Dynamic>>
T &operator[](ssize_t index) {
return operator()(index);
}
/// Mutable pointer access to the data at the given indices.
template <typename... Ix>
T *mutable_data(Ix... ix) {
return &operator()(ssize_t(ix)...);
}
};
template <typename T, ssize_t Dim>
struct type_caster<unchecked_reference<T, Dim>> {
static_assert(Dim == 0 && Dim > 0 /* always fail */,
"unchecked array proxy object is not castable");
};
template <typename T, ssize_t Dim>
struct type_caster<unchecked_mutable_reference<T, Dim>>
: type_caster<unchecked_reference<T, Dim>> {};
PYBIND11_NAMESPACE_END(detail)
class dtype : public object {
public:
PYBIND11_OBJECT_DEFAULT(dtype, object, detail::npy_api::get().PyArrayDescr_Check_)
explicit dtype(const buffer_info &info) {
dtype descr(_dtype_from_pep3118()(pybind11::str(info.format)));
// If info.itemsize == 0, use the value calculated from the format string
m_ptr = descr.strip_padding(info.itemsize != 0 ? info.itemsize : descr.itemsize())
.release()
.ptr();
}
explicit dtype(const pybind11::str &format) : dtype(from_args(format)) {}
explicit dtype(const std::string &format) : dtype(pybind11::str(format)) {}
explicit dtype(const char *format) : dtype(pybind11::str(format)) {}
dtype(list names, list formats, list offsets, ssize_t itemsize) {
dict args;
args["names"] = std::move(names);
args["formats"] = std::move(formats);
args["offsets"] = std::move(offsets);
args["itemsize"] = pybind11::int_(itemsize);
m_ptr = from_args(args).release().ptr();
}
/// Return dtype for the given typenum (one of the NPY_TYPES).
/// https://numpy.org/devdocs/reference/c-api/array.html#c.PyArray_DescrFromType
explicit dtype(int typenum)
: object(detail::npy_api::get().PyArray_DescrFromType_(typenum), stolen_t{}) {
if (m_ptr == nullptr) {
throw error_already_set();
}
}
/// This is essentially the same as calling numpy.dtype(args) in Python.
static dtype from_args(const object &args) {
PyObject *ptr = nullptr;
if ((detail::npy_api::get().PyArray_DescrConverter_(args.ptr(), &ptr) == 0) || !ptr) {
throw error_already_set();
}
return reinterpret_steal<dtype>(ptr);
}
/// Return dtype associated with a C++ type.
template <typename T>
static dtype of() {
return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::dtype();
}
/// Size of the data type in bytes.
ssize_t itemsize() const { return detail::array_descriptor_proxy(m_ptr)->elsize; }
/// Returns true for structured data types.
bool has_fields() const { return detail::array_descriptor_proxy(m_ptr)->names != nullptr; }
/// Single-character code for dtype's kind.
/// For example, floating point types are 'f' and integral types are 'i'.
char kind() const { return detail::array_descriptor_proxy(m_ptr)->kind; }
/// Single-character for dtype's type.
/// For example, ``float`` is 'f', ``double`` 'd', ``int`` 'i', and ``long`` 'l'.
char char_() const {
// Note: The signature, `dtype::char_` follows the naming of NumPy's
// public Python API (i.e., ``dtype.char``), rather than its internal
// C API (``PyArray_Descr::type``).
return detail::array_descriptor_proxy(m_ptr)->type;
}
/// type number of dtype.
int num() const {
// Note: The signature, `dtype::num` follows the naming of NumPy's public
// Python API (i.e., ``dtype.num``), rather than its internal
// C API (``PyArray_Descr::type_num``).
return detail::array_descriptor_proxy(m_ptr)->type_num;
}
/// Single character for byteorder
char byteorder() const { return detail::array_descriptor_proxy(m_ptr)->byteorder; }
/// Alignment of the data type
int alignment() const { return detail::array_descriptor_proxy(m_ptr)->alignment; }
/// Flags for the array descriptor
char flags() const { return detail::array_descriptor_proxy(m_ptr)->flags; }
private:
static object _dtype_from_pep3118() {
static PyObject *obj = module_::import("numpy.core._internal")
.attr("_dtype_from_pep3118")
.cast<object>()
.release()
.ptr();
return reinterpret_borrow<object>(obj);
}
dtype strip_padding(ssize_t itemsize) {
// Recursively strip all void fields with empty names that are generated for
// padding fields (as of NumPy v1.11).
if (!has_fields()) {
return *this;
}
struct field_descr {
pybind11::str name;
object format;
pybind11::int_ offset;
field_descr(pybind11::str &&name, object &&format, pybind11::int_ &&offset)
: name{std::move(name)}, format{std::move(format)}, offset{std::move(offset)} {};
};
auto field_dict = attr("fields").cast<dict>();
std::vector<field_descr> field_descriptors;
field_descriptors.reserve(field_dict.size());
for (auto field : field_dict.attr("items")()) {
auto spec = field.cast<tuple>();
auto name = spec[0].cast<pybind11::str>();
auto spec_fo = spec[1].cast<tuple>();
auto format = spec_fo[0].cast<dtype>();
auto offset = spec_fo[1].cast<pybind11::int_>();
if ((len(name) == 0u) && format.kind() == 'V') {
continue;
}
field_descriptors.emplace_back(
std::move(name), format.strip_padding(format.itemsize()), std::move(offset));
}
std::sort(field_descriptors.begin(),
field_descriptors.end(),
[](const field_descr &a, const field_descr &b) {
return a.offset.cast<int>() < b.offset.cast<int>();
});
list names, formats, offsets;
for (auto &descr : field_descriptors) {
names.append(std::move(descr.name));
formats.append(std::move(descr.format));
offsets.append(std::move(descr.offset));
}
return dtype(std::move(names), std::move(formats), std::move(offsets), itemsize);
}
};
class array : public buffer {
public:
PYBIND11_OBJECT_CVT(array, buffer, detail::npy_api::get().PyArray_Check_, raw_array)
enum {
c_style = detail::npy_api::NPY_ARRAY_C_CONTIGUOUS_,
f_style = detail::npy_api::NPY_ARRAY_F_CONTIGUOUS_,
forcecast = detail::npy_api::NPY_ARRAY_FORCECAST_
};
array() : array(0, static_cast<const double *>(nullptr)) {}
using ShapeContainer = detail::any_container<ssize_t>;
using StridesContainer = detail::any_container<ssize_t>;
// Constructs an array taking shape/strides from arbitrary container types
array(const pybind11::dtype &dt,
ShapeContainer shape,
StridesContainer strides,
const void *ptr = nullptr,
handle base = handle()) {
if (strides->empty()) {
*strides = detail::c_strides(*shape, dt.itemsize());
}
auto ndim = shape->size();
if (ndim != strides->size()) {
pybind11_fail("NumPy: shape ndim doesn't match strides ndim");
}
auto descr = dt;
int flags = 0;
if (base && ptr) {
if (isinstance<array>(base)) {
/* Copy flags from base (except ownership bit) */
flags = reinterpret_borrow<array>(base).flags()
& ~detail::npy_api::NPY_ARRAY_OWNDATA_;
} else {
/* Writable by default, easy to downgrade later on if needed */
flags = detail::npy_api::NPY_ARRAY_WRITEABLE_;
}
}
auto &api = detail::npy_api::get();
auto tmp = reinterpret_steal<object>(api.PyArray_NewFromDescr_(
api.PyArray_Type_,
descr.release().ptr(),
(int) ndim,
// Use reinterpret_cast for PyPy on Windows (remove if fixed, checked on 7.3.1)
reinterpret_cast<Py_intptr_t *>(shape->data()),
reinterpret_cast<Py_intptr_t *>(strides->data()),
const_cast<void *>(ptr),
flags,
nullptr));
if (!tmp) {
throw error_already_set();
}
if (ptr) {
if (base) {
api.PyArray_SetBaseObject_(tmp.ptr(), base.inc_ref().ptr());
} else {
tmp = reinterpret_steal<object>(
api.PyArray_NewCopy_(tmp.ptr(), -1 /* any order */));
}
}
m_ptr = tmp.release().ptr();
}
    /// Same as the main constructor, but with C-contiguous strides computed
    /// automatically from `shape`.
    array(const pybind11::dtype &dt,
          ShapeContainer shape,
          const void *ptr = nullptr,
          handle base = handle())
        : array(dt, std::move(shape), {}, ptr, base) {}
    /// One-dimensional array of `count` elements of dtype `dt`.
    /// (bool counts are excluded via enable_if — presumably to avoid overload
    /// ambiguity with the deprecated bool constructors; see array_t.)
    template <typename T,
              typename
              = detail::enable_if_t<std::is_integral<T>::value && !std::is_same<bool, T>::value>>
    array(const pybind11::dtype &dt, T count, const void *ptr = nullptr, handle base = handle())
        : array(dt, {{count}}, ptr, base) {}
    /// Array with dtype deduced from `T`, explicit shape and strides.
    template <typename T>
    array(ShapeContainer shape, StridesContainer strides, const T *ptr, handle base = handle())
        : array(pybind11::dtype::of<T>(), std::move(shape), std::move(strides), ptr, base) {}
    /// Array with dtype deduced from `T` and C-contiguous strides.
    template <typename T>
    array(ShapeContainer shape, const T *ptr, handle base = handle())
        : array(std::move(shape), {}, ptr, base) {}
    /// One-dimensional array of `count` elements with dtype deduced from `T`.
    template <typename T>
    explicit array(ssize_t count, const T *ptr, handle base = handle())
        : array({count}, {}, ptr, base) {}
    /// Array built from a buffer_info: dtype, shape, strides and data pointer
    /// are all taken from `info`.
    explicit array(const buffer_info &info, handle base = handle())
        : array(pybind11::dtype(info), info.shape, info.strides, info.ptr, base) {}
    /// Array descriptor (dtype)
    pybind11::dtype dtype() const {
        return reinterpret_borrow<pybind11::dtype>(detail::array_proxy(m_ptr)->descr);
    }
    /// Total number of elements (product of the dimension sizes; 1 for a 0-d array)
    ssize_t size() const {
        return std::accumulate(shape(), shape() + ndim(), (ssize_t) 1, std::multiplies<ssize_t>());
    }
    /// Byte size of a single element
    ssize_t itemsize() const {
        return detail::array_descriptor_proxy(detail::array_proxy(m_ptr)->descr)->elsize;
    }
    /// Total number of bytes
    ssize_t nbytes() const { return size() * itemsize(); }
    /// Number of dimensions
    ssize_t ndim() const { return detail::array_proxy(m_ptr)->nd; }
    /// Base object (the owner of the data, if this array is a view)
    object base() const { return reinterpret_borrow<object>(detail::array_proxy(m_ptr)->base); }
    /// Dimensions of the array
    const ssize_t *shape() const { return detail::array_proxy(m_ptr)->dimensions; }
    /// Dimension along a given axis; throws index_error for dim >= ndim()
    /// (negative values are not range-checked here)
    ssize_t shape(ssize_t dim) const {
        if (dim >= ndim()) {
            fail_dim_check(dim, "invalid axis");
        }
        return shape()[dim];
    }
    /// Strides (in bytes) of the array
    const ssize_t *strides() const { return detail::array_proxy(m_ptr)->strides; }
    /// Stride along a given axis; throws index_error for dim >= ndim()
    ssize_t strides(ssize_t dim) const {
        if (dim >= ndim()) {
            fail_dim_check(dim, "invalid axis");
        }
        return strides()[dim];
    }
    /// Return the NumPy array flags
    int flags() const { return detail::array_proxy(m_ptr)->flags; }
    /// If set, the array is writeable (otherwise the buffer is read-only)
    bool writeable() const {
        return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_WRITEABLE_);
    }
    /// If set, the array owns the data (will be freed when the array is deleted)
    bool owndata() const {
        return detail::check_flags(m_ptr, detail::npy_api::NPY_ARRAY_OWNDATA_);
    }
    /// Pointer to the contained data. If index is not provided, points to the
    /// beginning of the buffer. May throw if the index would lead to out of bounds access.
    template <typename... Ix>
    const void *data(Ix... index) const {
        return static_cast<const void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));
    }
    /// Mutable pointer to the contained data. If index is not provided, points to the
    /// beginning of the buffer. May throw if the index would lead to out of bounds access.
    /// May throw if the array is not writeable.
    template <typename... Ix>
    void *mutable_data(Ix... index) {
        check_writeable();
        return static_cast<void *>(detail::array_proxy(m_ptr)->data + offset_at(index...));
    }
    /// Byte offset from beginning of the array to a given index (full or partial).
    /// May throw if the index would lead to out of bounds access.
    template <typename... Ix>
    ssize_t offset_at(Ix... index) const {
        if ((ssize_t) sizeof...(index) > ndim()) {
            fail_dim_check(sizeof...(index), "too many indices for an array");
        }
        return byte_offset(ssize_t(index)...);
    }
    /// Zero-index overload: the offset of the first element is always 0.
    ssize_t offset_at() const { return 0; }
    /// Item count from beginning of the array to a given index (full or partial).
    /// May throw if the index would lead to out of bounds access.
    template <typename... Ix>
    ssize_t index_at(Ix... index) const {
        return offset_at(index...) / itemsize();
    }
    /**
     * Returns a proxy object that provides access to the array's data without bounds or
     * dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with
     * care: the array must not be destroyed or reshaped for the duration of the returned object,
     * and the caller must take care not to access invalid dimensions or dimension indices.
     */
    template <typename T, ssize_t Dims = -1>
    detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {
        // When a compile-time rank is given, verify it against the runtime ndim.
        if (Dims >= 0 && ndim() != Dims) {
            throw std::domain_error("array has incorrect number of dimensions: "
                                    + std::to_string(ndim()) + "; expected "
                                    + std::to_string(Dims));
        }
        return detail::unchecked_mutable_reference<T, Dims>(
            mutable_data(), shape(), strides(), ndim());
    }
    /**
     * Returns a proxy object that provides const access to the array's data without bounds or
     * dimensionality checking. Unlike `mutable_unchecked()`, this does not require that the
     * underlying array have the `writeable` flag. Use with care: the array must not be destroyed
     * or reshaped for the duration of the returned object, and the caller must take care not to
     * access invalid dimensions or dimension indices.
     */
    template <typename T, ssize_t Dims = -1>
    detail::unchecked_reference<T, Dims> unchecked() const & {
        if (Dims >= 0 && ndim() != Dims) {
            throw std::domain_error("array has incorrect number of dimensions: "
                                    + std::to_string(ndim()) + "; expected "
                                    + std::to_string(Dims));
        }
        return detail::unchecked_reference<T, Dims>(data(), shape(), strides(), ndim());
    }
    /// Return a new view with all of the dimensions of length 1 removed
    array squeeze() {
        auto &api = detail::npy_api::get();
        return reinterpret_steal<array>(api.PyArray_Squeeze_(m_ptr));
    }
    /// Resize array to given shape
    /// If refcheck is true and more than one reference exists to this array
    /// then resize will succeed only if it makes a reshape, i.e. original size doesn't change
    void resize(ShapeContainer new_shape, bool refcheck = true) {
        detail::npy_api::PyArray_Dims d
            = {// Use reinterpret_cast for PyPy on Windows (remove if fixed, checked on 7.3.1)
               reinterpret_cast<Py_intptr_t *>(new_shape->data()),
               int(new_shape->size())};
        // try to resize, set ordering param to -1 cause it's not used anyway
        auto new_array = reinterpret_steal<object>(
            detail::npy_api::get().PyArray_Resize_(m_ptr, &d, int(refcheck), -1));
        if (!new_array) {
            throw error_already_set();
        }
        // PyArray_Resize_ may return a non-array object (presumably None on
        // in-place success); only rebind *this when an actual array came back.
        if (isinstance<array>(new_array)) {
            *this = std::move(new_array);
        }
    }
    /// Return a new array with the given shape. Optional `order` parameter
    /// omitted, to be added as needed. Throws error_already_set on failure.
    array reshape(ShapeContainer new_shape) {
        detail::npy_api::PyArray_Dims d
            = {reinterpret_cast<Py_intptr_t *>(new_shape->data()), int(new_shape->size())};
        auto new_array
            = reinterpret_steal<array>(detail::npy_api::get().PyArray_Newshape_(m_ptr, &d, 0));
        if (!new_array) {
            throw error_already_set();
        }
        return new_array;
    }
    /// Create a view of an array in a different data type.
    /// This function may fundamentally reinterpret the data in the array.
    /// It is the responsibility of the caller to ensure that this is safe.
    /// Only supports the `dtype` argument, the `type` argument is omitted,
    /// to be added as needed.
    array view(const std::string &dtype) {
        auto &api = detail::npy_api::get();
        auto new_view = reinterpret_steal<array>(api.PyArray_View_(
            m_ptr, dtype::from_args(pybind11::str(dtype)).release().ptr(), nullptr));
        if (!new_view) {
            throw error_already_set();
        }
        return new_view;
    }
    /// Ensure that the argument is a NumPy array
    /// In case of an error, nullptr is returned and the Python error is cleared.
    static array ensure(handle h, int ExtraFlags = 0) {
        auto result = reinterpret_steal<array>(raw_array(h.ptr(), ExtraFlags));
        if (!result) {
            PyErr_Clear();
        }
        return result;
    }
protected:
    template <typename, typename>
    friend struct detail::npy_format_descriptor;
    /// Throw index_error annotating the offending dimension and the array's ndim.
    void fail_dim_check(ssize_t dim, const std::string &msg) const {
        throw index_error(msg + ": " + std::to_string(dim) + " (ndim = " + std::to_string(ndim())
                          + ')');
    }
    /// Byte offset of the element at `index...` (each index bounds-checked
    /// against its dimension first).
    template <typename... Ix>
    ssize_t byte_offset(Ix... index) const {
        check_dimensions(index...);
        return detail::byte_offset_unsafe(strides(), ssize_t(index)...);
    }
    /// Throw if the array does not carry the writeable flag.
    void check_writeable() const {
        if (!writeable()) {
            throw std::domain_error("array is not writeable");
        }
    }
    /// Check every supplied index against the corresponding dimension size.
    template <typename... Ix>
    void check_dimensions(Ix... index) const {
        check_dimensions_impl(ssize_t(0), shape(), ssize_t(index)...);
    }
    // Recursion terminator: no indices left to check.
    void check_dimensions_impl(ssize_t, const ssize_t *) const {}
    // Check the first index against *shape, then recurse on the remainder.
    template <typename... Ix>
    void check_dimensions_impl(ssize_t axis, const ssize_t *shape, ssize_t i, Ix... index) const {
        if (i >= *shape) {
            throw index_error(std::string("index ") + std::to_string(i)
                              + " is out of bounds for axis " + std::to_string(axis)
                              + " with size " + std::to_string(*shape));
        }
        check_dimensions_impl(axis + 1, shape + 1, index...);
    }
    /// Create array from any object -- always returns a new reference
    static PyObject *raw_array(PyObject *ptr, int ExtraFlags = 0) {
        if (ptr == nullptr) {
            PyErr_SetString(PyExc_ValueError, "cannot create a pybind11::array from a nullptr");
            return nullptr;
        }
        return detail::npy_api::get().PyArray_FromAny_(
            ptr, nullptr, 0, 0, detail::npy_api::NPY_ARRAY_ENSUREARRAY_ | ExtraFlags, nullptr);
    }
};
/// Typed wrapper around `array`: a NumPy array whose element type is fixed to
/// `T`.  `ExtraFlags` (e.g. `array::c_style`, `array::f_style`,
/// `array::forcecast`) constrain the layout of, or force conversion of,
/// arrays accepted from Python.
template <typename T, int ExtraFlags = array::forcecast>
class array_t : public array {
private:
    struct private_ctor {};
    // Delegating constructor needed when both moving and accessing in the same constructor
    array_t(private_ctor,
            ShapeContainer &&shape,
            StridesContainer &&strides,
            const T *ptr,
            handle base)
        : array(std::move(shape), std::move(strides), ptr, base) {}
public:
    static_assert(!detail::array_info<T>::is_array, "Array types cannot be used with array_t");
    using value_type = T;
    /// Default constructor: empty (size 0) one-dimensional array.
    array_t() : array(0, static_cast<const T *>(nullptr)) {}
    array_t(handle h, borrowed_t) : array(h, borrowed_t{}) {}
    array_t(handle h, stolen_t) : array(h, stolen_t{}) {}
    PYBIND11_DEPRECATED("Use array_t<T>::ensure() instead")
    array_t(handle h, bool is_borrowed) : array(raw_array_t(h.ptr()), stolen_t{}) {
        if (!m_ptr) {
            PyErr_Clear();
        }
        if (!is_borrowed) {
            Py_XDECREF(h.ptr());
        }
    }
    /// Convert an arbitrary Python object; throws error_already_set on failure.
    // NOLINTNEXTLINE(google-explicit-constructor)
    array_t(const object &o) : array(raw_array_t(o.ptr()), stolen_t{}) {
        if (!m_ptr) {
            throw error_already_set();
        }
    }
    explicit array_t(const buffer_info &info, handle base = handle()) : array(info, base) {}
    array_t(ShapeContainer shape,
            StridesContainer strides,
            const T *ptr = nullptr,
            handle base = handle())
        : array(std::move(shape), std::move(strides), ptr, base) {}
    /// Construct from a shape with contiguous strides: Fortran order when
    /// ExtraFlags contains f_style, otherwise C order.
    explicit array_t(ShapeContainer shape, const T *ptr = nullptr, handle base = handle())
        : array_t(private_ctor{},
                  std::move(shape),
                  (ExtraFlags & f_style) != 0 ? detail::f_strides(*shape, itemsize())
                                              : detail::c_strides(*shape, itemsize()),
                  ptr,
                  base) {}
    /// One-dimensional array of `count` elements.
    explicit array_t(ssize_t count, const T *ptr = nullptr, handle base = handle())
        : array({count}, {}, ptr, base) {}
    /// Element size is known at compile time for a typed array.
    constexpr ssize_t itemsize() const { return sizeof(T); }
    template <typename... Ix>
    ssize_t index_at(Ix... index) const {
        return offset_at(index...) / itemsize();
    }
    /// Typed const pointer to the element at `index...` (or to the buffer start).
    template <typename... Ix>
    const T *data(Ix... index) const {
        return static_cast<const T *>(array::data(index...));
    }
    /// Typed mutable pointer; may throw if the array is not writeable.
    template <typename... Ix>
    T *mutable_data(Ix... index) {
        return static_cast<T *>(array::mutable_data(index...));
    }
    // Reference to element at a given index (requires a full index; throws on
    // dimension-count mismatch or out-of-bounds access)
    template <typename... Ix>
    const T &at(Ix... index) const {
        if ((ssize_t) sizeof...(index) != ndim()) {
            fail_dim_check(sizeof...(index), "index dimension mismatch");
        }
        return *(static_cast<const T *>(array::data())
                 + byte_offset(ssize_t(index)...) / itemsize());
    }
    // Mutable reference to element at a given index
    template <typename... Ix>
    T &mutable_at(Ix... index) {
        if ((ssize_t) sizeof...(index) != ndim()) {
            fail_dim_check(sizeof...(index), "index dimension mismatch");
        }
        return *(static_cast<T *>(array::mutable_data())
                 + byte_offset(ssize_t(index)...) / itemsize());
    }
    /**
     * Returns a proxy object that provides access to the array's data without bounds or
     * dimensionality checking. Will throw if the array is missing the `writeable` flag. Use with
     * care: the array must not be destroyed or reshaped for the duration of the returned object,
     * and the caller must take care not to access invalid dimensions or dimension indices.
     */
    template <ssize_t Dims = -1>
    detail::unchecked_mutable_reference<T, Dims> mutable_unchecked() & {
        return array::mutable_unchecked<T, Dims>();
    }
    /**
     * Returns a proxy object that provides const access to the array's data without bounds or
     * dimensionality checking. Unlike `mutable_unchecked()`, this does not require that the
     * underlying array have the `writeable` flag. Use with care: the array must not be destroyed
     * or reshaped for the duration of the returned object, and the caller must take care not to
     * access invalid dimensions or dimension indices.
     */
    template <ssize_t Dims = -1>
    detail::unchecked_reference<T, Dims> unchecked() const & {
        return array::unchecked<T, Dims>();
    }
    /// Ensure that the argument is a NumPy array of the correct dtype (and if not, try to convert
    /// it). In case of an error, nullptr is returned and the Python error is cleared.
    static array_t ensure(handle h) {
        auto result = reinterpret_steal<array_t>(raw_array_t(h.ptr()));
        if (!result) {
            PyErr_Clear();
        }
        return result;
    }
    /// Non-converting check: is `h` an ndarray with an equivalent dtype whose
    /// layout satisfies the c_style/f_style bits of ExtraFlags?
    static bool check_(handle h) {
        const auto &api = detail::npy_api::get();
        return api.PyArray_Check_(h.ptr())
               && api.PyArray_EquivTypes_(detail::array_proxy(h.ptr())->descr,
                                          dtype::of<T>().ptr())
               && detail::check_flags(h.ptr(), ExtraFlags & (array::c_style | array::f_style));
    }
protected:
    /// Create array from any object -- always returns a new reference
    static PyObject *raw_array_t(PyObject *ptr) {
        if (ptr == nullptr) {
            PyErr_SetString(PyExc_ValueError, "cannot create a pybind11::array_t from a nullptr");
            return nullptr;
        }
        return detail::npy_api::get().PyArray_FromAny_(ptr,
                                                       dtype::of<T>().release().ptr(),
                                                       0,
                                                       0,
                                                       detail::npy_api::NPY_ARRAY_ENSUREARRAY_
                                                           | ExtraFlags,
                                                       nullptr);
    }
};
// Buffer-protocol format strings for types handled by the NumPy layer.
// POD structs delegate to the format string recorded when the structured
// dtype was registered (see register_structured_dtype below).
template <typename T>
struct format_descriptor<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {
    static std::string format() {
        return detail::npy_format_descriptor<typename std::remove_cv<T>::type>::format();
    }
};
// Fixed-size character arrays map to the "Ns" (fixed-length bytes) format.
template <size_t N>
struct format_descriptor<char[N]> {
    static std::string format() { return std::to_string(N) + 's'; }
};
template <size_t N>
struct format_descriptor<std::array<char, N>> {
    static std::string format() { return std::to_string(N) + 's'; }
};
// Enums use the format of their underlying integral type.
template <typename T>
struct format_descriptor<T, detail::enable_if_t<std::is_enum<T>::value>> {
    static std::string format() {
        return format_descriptor<
            typename std::remove_cv<typename std::underlying_type<T>::type>::type>::format();
    }
};
// Nested C arrays are prefixed with their parenthesized extents, e.g. "(2,3)"
// followed by the element type's format.
template <typename T>
struct format_descriptor<T, detail::enable_if_t<detail::array_info<T>::is_array>> {
    static std::string format() {
        using namespace detail;
        static constexpr auto extents = const_name("(") + array_info<T>::extents + const_name(")");
        return extents.text + format_descriptor<remove_all_extents_t<T>>::format();
    }
};
PYBIND11_NAMESPACE_BEGIN(detail)
// Type caster for array_t arguments/returns.  Loading accepts an exact-dtype
// ndarray always, and anything ensure() can convert when `convert` is true.
template <typename T, int ExtraFlags>
struct pyobject_caster<array_t<T, ExtraFlags>> {
    using type = array_t<T, ExtraFlags>;
    bool load(handle src, bool convert) {
        // Without implicit conversion, require a matching ndarray up front.
        if (!convert && !type::check_(src)) {
            return false;
        }
        value = type::ensure(src);
        return static_cast<bool>(value);
    }
    static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) {
        // Returning an array just hands back a new reference to the same object.
        return src.inc_ref();
    }
    PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name);
};
// buffer_info dtype comparison for POD structs, using NumPy's notion of
// dtype equivalence rather than exact format-string equality.
template <typename T>
struct compare_buffer_info<T, detail::enable_if_t<detail::is_pod_struct<T>::value>> {
    static bool compare(const buffer_info &b) {
        return npy_api::get().PyArray_EquivTypes_(dtype::of<T>().ptr(), dtype(b).ptr());
    }
};
// Compile-time human-readable type names ("bool", "numpy.int32",
// "numpy.float64", ...) used in generated function signatures; selected from
// the type's signedness and size.
template <typename T, typename = void>
struct npy_format_descriptor_name;
template <typename T>
struct npy_format_descriptor_name<T, enable_if_t<std::is_integral<T>::value>> {
    static constexpr auto name = const_name<std::is_same<T, bool>::value>(
        const_name("bool"),
        const_name<std::is_signed<T>::value>("numpy.int", "numpy.uint")
            + const_name<sizeof(T) * 8>());
};
template <typename T>
struct npy_format_descriptor_name<T, enable_if_t<std::is_floating_point<T>::value>> {
    // float/double get a bit-sized name; any other floating type is named
    // "numpy.longdouble".
    static constexpr auto name = const_name < std::is_same<T, float>::value
                                     || std::is_same<T, const float>::value
                                     || std::is_same<T, double>::value
                                     || std::is_same<T, const double>::value
                                 > (const_name("numpy.float") + const_name<sizeof(T) * 8>(),
                                    const_name("numpy.longdouble"));
};
template <typename T>
struct npy_format_descriptor_name<T, enable_if_t<is_complex<T>::value>> {
    // sizeof(value_type) * 16 = total bit count of the complex number
    // (two components x 8 bits per byte), e.g. complex<float> -> "numpy.complex64".
    static constexpr auto name = const_name < std::is_same<typename T::value_type, float>::value
                                     || std::is_same<typename T::value_type, const float>::value
                                     || std::is_same<typename T::value_type, double>::value
                                     || std::is_same<typename T::value_type, const double>::value
                                 > (const_name("numpy.complex")
                                        + const_name<sizeof(typename T::value_type) * 16>(),
                                    const_name("numpy.longcomplex"));
};
// npy_format_descriptor for scalar (arithmetic and complex) types: maps the
// C++ type to its NumPy type number and dtype.
template <typename T>
struct npy_format_descriptor<
    T,
    enable_if_t<satisfies_any_of<T, std::is_arithmetic, is_complex>::value>>
    : npy_format_descriptor_name<T> {
private:
    // NB: the order here must match the one in common.h
    constexpr static const int values[15] = {npy_api::NPY_BOOL_,
                                             npy_api::NPY_BYTE_,
                                             npy_api::NPY_UBYTE_,
                                             npy_api::NPY_INT16_,
                                             npy_api::NPY_UINT16_,
                                             npy_api::NPY_INT32_,
                                             npy_api::NPY_UINT32_,
                                             npy_api::NPY_INT64_,
                                             npy_api::NPY_UINT64_,
                                             npy_api::NPY_FLOAT_,
                                             npy_api::NPY_DOUBLE_,
                                             npy_api::NPY_LONGDOUBLE_,
                                             npy_api::NPY_CFLOAT_,
                                             npy_api::NPY_CDOUBLE_,
                                             npy_api::NPY_CLONGDOUBLE_};
public:
    static constexpr int value = values[detail::is_fmt_numeric<T>::index];
    static pybind11::dtype dtype() { return pybind11::dtype(/*typenum*/ value); }
};
// PyObject* fields map to NumPy object arrays.
template <typename T>
struct npy_format_descriptor<T, enable_if_t<is_same_ignoring_cvref<T, PyObject *>::value>> {
    static constexpr auto name = const_name("object");
    static constexpr int value = npy_api::NPY_OBJECT_;
    static pybind11::dtype dtype() { return pybind11::dtype(/*typenum*/ value); }
};
// Shared name/dtype boilerplate for fixed-length byte strings ("SN").
#define PYBIND11_DECL_CHAR_FMT                                                                    \
    static constexpr auto name = const_name("S") + const_name<N>();                               \
    static pybind11::dtype dtype() {                                                              \
        return pybind11::dtype(std::string("S") + std::to_string(N));                             \
    }
template <size_t N>
struct npy_format_descriptor<char[N]> {
    PYBIND11_DECL_CHAR_FMT
};
template <size_t N>
struct npy_format_descriptor<std::array<char, N>> {
    PYBIND11_DECL_CHAR_FMT
};
#undef PYBIND11_DECL_CHAR_FMT
// Nested C-array fields: dtype is a (base dtype, extents) subarray spec.
template <typename T>
struct npy_format_descriptor<T, enable_if_t<array_info<T>::is_array>> {
private:
    using base_descr = npy_format_descriptor<typename array_info<T>::type>;
public:
    static_assert(!array_info<T>::is_empty, "Zero-sized arrays are not supported");
    static constexpr auto name
        = const_name("(") + array_info<T>::extents + const_name(")") + base_descr::name;
    static pybind11::dtype dtype() {
        list shape;
        array_info<T>::append_extents(shape);
        return pybind11::dtype::from_args(
            pybind11::make_tuple(base_descr::dtype(), std::move(shape)));
    }
};
// Enum fields delegate to their underlying integral type.
template <typename T>
struct npy_format_descriptor<T, enable_if_t<std::is_enum<T>::value>> {
private:
    using base_descr = npy_format_descriptor<typename std::underlying_type<T>::type>;
public:
    static constexpr auto name = base_descr::name;
    static pybind11::dtype dtype() { return base_descr::dtype(); }
};
// Description of one field of a structured (record) dtype.
struct field_descriptor {
    const char *name;   // field name
    ssize_t offset;     // byte offset within the C++ struct
    ssize_t size;       // sizeof the field
    std::string format; // buffer-protocol format string
    dtype descr;        // NumPy dtype of the field (may be null if unsupported)
};
/// Registers a structured (record) NumPy dtype for a C++ POD type.
///
/// Builds the dtype from the given field descriptors, hand-generates a
/// matching buffer-protocol format string, verifies that NumPy parses that
/// string to an equivalent dtype, and records both in the numpy internals
/// registry together with a direct converter for void-scalar unpacking.
/// Fails via pybind11_fail if the type is already registered or a field has
/// an unsupported dtype.
PYBIND11_NOINLINE void register_structured_dtype(any_container<field_descriptor> fields,
                                                 const std::type_info &tinfo,
                                                 ssize_t itemsize,
                                                 bool (*direct_converter)(PyObject *, void *&)) {
    auto &numpy_internals = get_numpy_internals();
    if (numpy_internals.get_type_info(tinfo, false)) {
        pybind11_fail("NumPy: dtype is already registered");
    }
    // Use ordered fields because order matters as of NumPy 1.14:
    // https://docs.scipy.org/doc/numpy/release.html#multiple-field-indexing-assignment-of-structured-arrays
    std::vector<field_descriptor> ordered_fields(std::move(fields));
    std::sort(
        ordered_fields.begin(),
        ordered_fields.end(),
        [](const field_descriptor &a, const field_descriptor &b) { return a.offset < b.offset; });
    list names, formats, offsets;
    for (auto &field : ordered_fields) {
        if (!field.descr) {
            pybind11_fail(std::string("NumPy: unsupported field dtype: `") + field.name + "` @ "
                          + tinfo.name());
        }
        names.append(pybind11::str(field.name));
        formats.append(field.descr);
        offsets.append(pybind11::int_(field.offset));
    }
    auto *dtype_ptr
        = pybind11::dtype(std::move(names), std::move(formats), std::move(offsets), itemsize)
              .release()
              .ptr();
    // There is an existing bug in NumPy (as of v1.11): trailing bytes are
    // not encoded explicitly into the format string. This will supposedly
    // get fixed in v1.12; for further details, see these:
    // - https://github.com/numpy/numpy/issues/7797
    // - https://github.com/numpy/numpy/pull/7798
    // Because of this, we won't use numpy's logic to generate buffer format
    // strings and will just do it ourselves.
    ssize_t offset = 0;
    std::ostringstream oss;
    // mark the structure as unaligned with '^', because numpy and C++ don't
    // always agree about alignment (particularly for complex), and we're
    // explicitly listing all our padding. This depends on none of the fields
    // overriding the endianness. Putting the ^ in front of individual fields
    // isn't guaranteed to work due to https://github.com/numpy/numpy/issues/9049
    oss << "^T{";
    for (auto &field : ordered_fields) {
        // Emit explicit padding ("Nx") for any gap before this field.
        if (field.offset > offset) {
            oss << (field.offset - offset) << 'x';
        }
        oss << field.format << ':' << field.name << ':';
        offset = field.offset + field.size;
    }
    // Trailing padding up to the full struct size.
    if (itemsize > offset) {
        oss << (itemsize - offset) << 'x';
    }
    oss << '}';
    auto format_str = oss.str();
    // Smoke test: verify that NumPy properly parses our buffer format string
    auto &api = npy_api::get();
    auto arr = array(buffer_info(nullptr, itemsize, format_str, 1));
    if (!api.PyArray_EquivTypes_(dtype_ptr, arr.dtype().ptr())) {
        pybind11_fail("NumPy: invalid buffer descriptor!");
    }
    auto tindex = std::type_index(tinfo);
    numpy_internals.registered_dtypes[tindex] = {dtype_ptr, std::move(format_str)};
    get_internals().direct_conversions[tindex].push_back(direct_converter);
}
// Fallback npy_format_descriptor: POD structs whose dtype is registered at
// runtime via PYBIND11_NUMPY_DTYPE / register_dtype().
template <typename T, typename SFINAE>
struct npy_format_descriptor {
    static_assert(is_pod_struct<T>::value,
                  "Attempt to use a non-POD or unimplemented POD type as a numpy dtype");
    static constexpr auto name = make_caster<T>::name;
    static pybind11::dtype dtype() { return reinterpret_borrow<pybind11::dtype>(dtype_ptr()); }
    /// Buffer format string recorded at registration time (cached per type).
    static std::string format() {
        static auto format_str = get_numpy_internals().get_type_info<T>(true)->format_str;
        return format_str;
    }
    /// Registers the structured dtype built from `fields` for type T.
    static void register_dtype(any_container<field_descriptor> fields) {
        register_structured_dtype(std::move(fields),
                                  typeid(typename std::remove_cv<T>::type),
                                  sizeof(T),
                                  &direct_converter);
    }
private:
    // Cached dtype pointer looked up from the registry on first use.
    static PyObject *dtype_ptr() {
        static PyObject *ptr = get_numpy_internals().get_type_info<T>(true)->dtype_ptr;
        return ptr;
    }
    // Unpacks a NumPy void scalar of this exact (equivalent) dtype directly
    // into `value`; returns false for any other object.
    static bool direct_converter(PyObject *obj, void *&value) {
        auto &api = npy_api::get();
        if (!PyObject_TypeCheck(obj, api.PyVoidArrType_Type_)) {
            return false;
        }
        if (auto descr = reinterpret_steal<object>(api.PyArray_DescrFromScalar_(obj))) {
            if (api.PyArray_EquivTypes_(dtype_ptr(), descr.ptr())) {
                value = ((PyVoidScalarObject_Proxy *) obj)->obval;
                return true;
            }
        }
        return false;
    }
};
#ifdef __CLION_IDE__ // replace heavy macro with dummy code for the IDE (doesn't affect code)
#    define PYBIND11_NUMPY_DTYPE(Type, ...) ((void) 0)
#    define PYBIND11_NUMPY_DTYPE_EX(Type, ...) ((void) 0)
#else
#    define PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, Name)                                          \
        ::pybind11::detail::field_descriptor {                                                    \
            Name, offsetof(T, Field), sizeof(decltype(std::declval<T>().Field)),                  \
                ::pybind11::format_descriptor<decltype(std::declval<T>().Field)>::format(),       \
                ::pybind11::detail::npy_format_descriptor<                                        \
                    decltype(std::declval<T>().Field)>::dtype()                                   \
        }
// Extract name, offset and format descriptor for a struct field
#    define PYBIND11_FIELD_DESCRIPTOR(T, Field) PYBIND11_FIELD_DESCRIPTOR_EX(T, Field, #Field)
// The main idea of this macro is borrowed from https://github.com/swansontec/map-macro
// (C) William Swanson, Paul Fultz
// PYBIND11_EVAL repeatedly re-scans its argument so the MAP_LIST recursion
// below can unfold (bounded by the nesting depth of the EVALn levels).
#    define PYBIND11_EVAL0(...) __VA_ARGS__
#    define PYBIND11_EVAL1(...) PYBIND11_EVAL0(PYBIND11_EVAL0(PYBIND11_EVAL0(__VA_ARGS__)))
#    define PYBIND11_EVAL2(...) PYBIND11_EVAL1(PYBIND11_EVAL1(PYBIND11_EVAL1(__VA_ARGS__)))
#    define PYBIND11_EVAL3(...) PYBIND11_EVAL2(PYBIND11_EVAL2(PYBIND11_EVAL2(__VA_ARGS__)))
#    define PYBIND11_EVAL4(...) PYBIND11_EVAL3(PYBIND11_EVAL3(PYBIND11_EVAL3(__VA_ARGS__)))
#    define PYBIND11_EVAL(...) PYBIND11_EVAL4(PYBIND11_EVAL4(PYBIND11_EVAL4(__VA_ARGS__)))
// End-of-list detection: MAP_NEXT selects `next` until the `()` sentinel
// appended by MAP_LIST is reached.
#    define PYBIND11_MAP_END(...)
#    define PYBIND11_MAP_OUT
#    define PYBIND11_MAP_COMMA ,
#    define PYBIND11_MAP_GET_END() 0, PYBIND11_MAP_END
#    define PYBIND11_MAP_NEXT0(test, next, ...) next PYBIND11_MAP_OUT
#    define PYBIND11_MAP_NEXT1(test, next) PYBIND11_MAP_NEXT0(test, next, 0)
#    define PYBIND11_MAP_NEXT(test, next) PYBIND11_MAP_NEXT1(PYBIND11_MAP_GET_END test, next)
#    if defined(_MSC_VER) \
        && !defined(__clang__) // MSVC is not as eager to expand macros, hence this workaround
#        define PYBIND11_MAP_LIST_NEXT1(test, next)                                               \
            PYBIND11_EVAL0(PYBIND11_MAP_NEXT0(test, PYBIND11_MAP_COMMA next, 0))
#    else
#        define PYBIND11_MAP_LIST_NEXT1(test, next)                                               \
            PYBIND11_MAP_NEXT0(test, PYBIND11_MAP_COMMA next, 0)
#    endif
#    define PYBIND11_MAP_LIST_NEXT(test, next)                                                    \
        PYBIND11_MAP_LIST_NEXT1(PYBIND11_MAP_GET_END test, next)
#    define PYBIND11_MAP_LIST0(f, t, x, peek, ...)                                                \
        f(t, x) PYBIND11_MAP_LIST_NEXT(peek, PYBIND11_MAP_LIST1)(f, t, peek, __VA_ARGS__)
#    define PYBIND11_MAP_LIST1(f, t, x, peek, ...)                                                \
        f(t, x) PYBIND11_MAP_LIST_NEXT(peek, PYBIND11_MAP_LIST0)(f, t, peek, __VA_ARGS__)
// PYBIND11_MAP_LIST(f, t, a1, a2, ...) expands to f(t, a1), f(t, a2), ...
#    define PYBIND11_MAP_LIST(f, t, ...)                                                          \
        PYBIND11_EVAL(PYBIND11_MAP_LIST1(f, t, __VA_ARGS__, (), 0))
// Register `Type` as a structured dtype from its field names.
#    define PYBIND11_NUMPY_DTYPE(Type, ...)                                                       \
        ::pybind11::detail::npy_format_descriptor<Type>::register_dtype(                          \
            ::std::vector<::pybind11::detail::field_descriptor>{                                  \
                PYBIND11_MAP_LIST(PYBIND11_FIELD_DESCRIPTOR, Type, __VA_ARGS__)})
#    if defined(_MSC_VER) && !defined(__clang__)
#        define PYBIND11_MAP2_LIST_NEXT1(test, next)                                              \
            PYBIND11_EVAL0(PYBIND11_MAP_NEXT0(test, PYBIND11_MAP_COMMA next, 0))
#    else
#        define PYBIND11_MAP2_LIST_NEXT1(test, next)                                              \
            PYBIND11_MAP_NEXT0(test, PYBIND11_MAP_COMMA next, 0)
#    endif
#    define PYBIND11_MAP2_LIST_NEXT(test, next)                                                   \
        PYBIND11_MAP2_LIST_NEXT1(PYBIND11_MAP_GET_END test, next)
#    define PYBIND11_MAP2_LIST0(f, t, x1, x2, peek, ...)                                          \
        f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT(peek, PYBIND11_MAP2_LIST1)(f, t, peek, __VA_ARGS__)
#    define PYBIND11_MAP2_LIST1(f, t, x1, x2, peek, ...)                                          \
        f(t, x1, x2) PYBIND11_MAP2_LIST_NEXT(peek, PYBIND11_MAP2_LIST0)(f, t, peek, __VA_ARGS__)
// PYBIND11_MAP2_LIST(f, t, a1, a2, ...) expands to f(t, a1, a2), f(t, a3, a4), ...
#    define PYBIND11_MAP2_LIST(f, t, ...)                                                         \
        PYBIND11_EVAL(PYBIND11_MAP2_LIST1(f, t, __VA_ARGS__, (), 0))
// Like PYBIND11_NUMPY_DTYPE, but each field is a (member, name) pair so the
// Python-side field name can differ from the C++ member name.
#    define PYBIND11_NUMPY_DTYPE_EX(Type, ...)                                                    \
        ::pybind11::detail::npy_format_descriptor<Type>::register_dtype(                          \
            ::std::vector<::pybind11::detail::field_descriptor>{                                  \
                PYBIND11_MAP2_LIST(PYBIND11_FIELD_DESCRIPTOR_EX, Type, __VA_ARGS__)})
#endif // __CLION_IDE__
/// Forward iterator over a single buffer, used by multi_array_iterator.
/// Pre-computes, per dimension, the byte offset to apply when that
/// dimension's index advances, so stepping is a single pointer bump.
class common_iterator {
public:
    using container_type = std::vector<ssize_t>;
    using value_type = container_type::value_type;
    using size_type = container_type::size_type;
    common_iterator() : m_step() {}
    common_iterator(void *ptr, const container_type &strides, const container_type &shape)
        : m_data(reinterpret_cast<char *>(ptr)), m_step(strides.size()) {
        // The innermost dimension advances by its plain stride; each outer
        // dimension's step additionally rewinds the extent of the dimension
        // just inside it (strides[inner] * shape[inner] bytes) that was fully
        // traversed before the outer index ticks.
        const size_type last = m_step.size() - 1;
        m_step[last] = static_cast<value_type>(strides[last]);
        for (size_type j = last; j-- > 0;) {
            const size_type inner = j + 1;
            const auto extent = static_cast<value_type>(shape[inner]);
            m_step[j] = strides[j] + m_step[inner] - strides[inner] * extent;
        }
    }
    void increment(size_type dim) { m_data += m_step[dim]; }
    void *data() const { return m_data; }
private:
    char *m_data{nullptr};
    container_type m_step;
};
/// Iterates N buffers in lockstep over a common (broadcast) shape, producing
/// one data pointer per buffer at each position.  Buffers whose extent along
/// a dimension is 1 get a zero stride there, implementing broadcasting.
template <size_t N>
class multi_array_iterator {
public:
    using container_type = std::vector<ssize_t>;
    multi_array_iterator(const std::array<buffer_info, N> &buffers, const container_type &shape)
        : m_shape(shape.size()), m_index(shape.size(), 0), m_common_iterator() {
        // Manual copy to avoid conversion warning if using std::copy
        for (size_t i = 0; i < shape.size(); ++i) {
            m_shape[i] = shape[i];
        }
        container_type strides(shape.size());
        for (size_t i = 0; i < N; ++i) {
            init_common_iterator(buffers[i], shape, m_common_iterator[i], strides);
        }
    }
    /// Advance to the next position in row-major (last-index-fastest) order.
    multi_array_iterator &operator++() {
        // Odometer-style: bump the innermost index; on wrap, reset it and
        // carry into the next-outer dimension.
        for (size_t j = m_index.size(); j != 0; --j) {
            size_t i = j - 1;
            if (++m_index[i] != m_shape[i]) {
                increment_common_iterator(i);
                break;
            }
            m_index[i] = 0;
        }
        return *this;
    }
    /// Data pointer of buffer K at the current position, cast to T*.
    template <size_t K, class T = void>
    T *data() const {
        return reinterpret_cast<T *>(m_common_iterator[K].data());
    }
private:
    using common_iter = common_iterator;
    // Derive this buffer's per-dimension strides against the broadcast shape:
    // dimensions are matched from the trailing end; a dimension the buffer
    // does not span (extent mismatch, i.e. extent 1) gets stride 0, and
    // leading dimensions the buffer lacks entirely are also zero-filled.
    void init_common_iterator(const buffer_info &buffer,
                              const container_type &shape,
                              common_iter &iterator,
                              container_type &strides) {
        auto buffer_shape_iter = buffer.shape.rbegin();
        auto buffer_strides_iter = buffer.strides.rbegin();
        auto shape_iter = shape.rbegin();
        auto strides_iter = strides.rbegin();
        while (buffer_shape_iter != buffer.shape.rend()) {
            if (*shape_iter == *buffer_shape_iter) {
                *strides_iter = *buffer_strides_iter;
            } else {
                *strides_iter = 0;
            }
            ++buffer_shape_iter;
            ++buffer_strides_iter;
            ++shape_iter;
            ++strides_iter;
        }
        std::fill(strides_iter, strides.rend(), 0);
        iterator = common_iter(buffer.ptr, strides, shape);
    }
    void increment_common_iterator(size_t dim) {
        for (auto &iter : m_common_iterator) {
            iter.increment(dim);
        }
    }
    container_type m_shape;          // broadcast output shape
    container_type m_index;          // current multi-dimensional position
    std::array<common_iter, N> m_common_iterator; // one cursor per buffer
};
enum class broadcast_trivial { non_trivial, c_trivial, f_trivial };
// Populates the shape and number of dimensions for the set of buffers. Returns a
// broadcast_trivial enum value indicating whether the broadcast is "trivial"--that is, has each
// buffer being either a singleton or a full-size, C-contiguous (`c_trivial`) or Fortran-contiguous
// (`f_trivial`) storage buffer; returns `non_trivial` otherwise.
// Throws (pybind11_fail) if the buffer shapes are not broadcast-compatible.
template <size_t N>
broadcast_trivial
broadcast(const std::array<buffer_info, N> &buffers, ssize_t &ndim, std::vector<ssize_t> &shape) {
    // Output rank is the largest rank among the inputs.
    ndim = std::accumulate(
        buffers.begin(), buffers.end(), ssize_t(0), [](ssize_t res, const buffer_info &buf) {
            return std::max(res, buf.ndim);
        });
    shape.clear();
    shape.resize((size_t) ndim, 1);
    // Figure out the output size, and make sure all input arrays conform (i.e. are either size 1
    // or the full size).  Dimensions are aligned from the trailing end.
    for (size_t i = 0; i < N; ++i) {
        auto res_iter = shape.rbegin();
        auto end = buffers[i].shape.rend();
        for (auto shape_iter = buffers[i].shape.rbegin(); shape_iter != end;
             ++shape_iter, ++res_iter) {
            const auto &dim_size_in = *shape_iter;
            auto &dim_size_out = *res_iter;
            // Each input dimension can either be 1 or `n`, but `n` values must match across
            // buffers
            if (dim_size_out == 1) {
                dim_size_out = dim_size_in;
            } else if (dim_size_in != 1 && dim_size_in != dim_size_out) {
                pybind11_fail("pybind11::vectorize: incompatible size/dimension of inputs!");
            }
        }
    }
    bool trivial_broadcast_c = true;
    bool trivial_broadcast_f = true;
    for (size_t i = 0; i < N && (trivial_broadcast_c || trivial_broadcast_f); ++i) {
        // Singletons broadcast trivially in either layout.
        if (buffers[i].size == 1) {
            continue;
        }
        // Require the same number of dimensions:
        if (buffers[i].ndim != ndim) {
            return broadcast_trivial::non_trivial;
        }
        // Require all dimensions be full-size:
        if (!std::equal(buffers[i].shape.cbegin(), buffers[i].shape.cend(), shape.cbegin())) {
            return broadcast_trivial::non_trivial;
        }
        // Check for C contiguity (but only if previous inputs were also C contiguous)
        // by verifying strides grow as the running product of trailing extents.
        if (trivial_broadcast_c) {
            ssize_t expect_stride = buffers[i].itemsize;
            auto end = buffers[i].shape.crend();
            for (auto shape_iter = buffers[i].shape.crbegin(),
                      stride_iter = buffers[i].strides.crbegin();
                 trivial_broadcast_c && shape_iter != end;
                 ++shape_iter, ++stride_iter) {
                if (expect_stride == *stride_iter) {
                    expect_stride *= *shape_iter;
                } else {
                    trivial_broadcast_c = false;
                }
            }
        }
        // Check for Fortran contiguity (if previous inputs were also F contiguous):
        // same check, but walking dimensions front-to-back.
        if (trivial_broadcast_f) {
            ssize_t expect_stride = buffers[i].itemsize;
            auto end = buffers[i].shape.cend();
            for (auto shape_iter = buffers[i].shape.cbegin(),
                      stride_iter = buffers[i].strides.cbegin();
                 trivial_broadcast_f && shape_iter != end;
                 ++shape_iter, ++stride_iter) {
                if (expect_stride == *stride_iter) {
                    expect_stride *= *shape_iter;
                } else {
                    trivial_broadcast_f = false;
                }
            }
        }
    }
    return trivial_broadcast_c   ? broadcast_trivial::c_trivial
           : trivial_broadcast_f ? broadcast_trivial::f_trivial
                                 : broadcast_trivial::non_trivial;
}
// Per-argument traits for py::vectorize: decides whether an argument of type T is
// broadcast element-wise (vectorized) or passed through to the wrapped function as-is.
template <typename T>
struct vectorize_arg {
    static_assert(!std::is_rvalue_reference<T>::value,
                  "Functions with rvalue reference arguments cannot be vectorized");
    // The wrapped function gets called with this type:
    using call_type = remove_reference_t<T>;
    // Is this a vectorized argument? Only scalar-like types (arithmetic/complex/POD)
    // qualify, and mutable lvalue references are excluded since element values are
    // copied out of the input arrays.
    static constexpr bool vectorize
        = satisfies_any_of<call_type, std::is_arithmetic, is_complex, is_pod>::value
          && satisfies_none_of<call_type,
                               std::is_pointer,
                               std::is_array,
                               is_std_array,
                               std::is_enum>::value
          && (!std::is_reference<T>::value
              || (std::is_lvalue_reference<T>::value && std::is_const<call_type>::value));
    // Accept this type: an array for vectorized types, otherwise the type as-is:
    using type = conditional_t<vectorize, array_t<remove_cv_t<call_type>, array::forcecast>, T>;
};
// py::vectorize when a return type is present: allocates the output array, exposes its
// data pointer, and forwards per-element calls into it.
template <typename Func, typename Return, typename... Args>
struct vectorize_returned_array {
    using Type = array_t<Return>;
    // Allocate the result array; Fortran-ordered when all inputs were F-contiguous so
    // the element-by-element fill below matches the inputs' memory order.
    static Type create(broadcast_trivial trivial, const std::vector<ssize_t> &shape) {
        if (trivial == broadcast_trivial::f_trivial) {
            return array_t<Return, array::f_style>(shape);
        }
        return array_t<Return>(shape);
    }
    static Return *mutable_data(Type &array) { return array.mutable_data(); }
    // Scalar (0-dim) call path: return the value directly.
    static Return call(Func &f, Args &...args) { return f(args...); }
    // Array call path: store element i of the output.
    static void call(Return *out, size_t i, Func &f, Args &...args) { out[i] = f(args...); }
};
// py::vectorize when a return type is not present: no output array is allocated; the
// wrapped function is invoked purely for its side effects and Python sees `None`.
template <typename Func, typename... Args>
struct vectorize_returned_array<Func, void, Args...> {
    using Type = none;
    static Type create(broadcast_trivial, const std::vector<ssize_t> &) { return none(); }
    static void *mutable_data(Type &) { return nullptr; }
    // Scalar call path: void_type stands in for the missing return value.
    static detail::void_type call(Func &f, Args &...args) {
        f(args...);
        return {};
    }
    // Array call path: the out pointer and index are ignored.
    static void call(void *, size_t, Func &f, Args &...args) { f(args...); }
};
// Callable wrapper produced by py::vectorize: broadcasts NumPy array arguments over the
// scalar function `Func`, dispatching between a fast contiguous path (apply_trivial) and
// a generic strided path (apply_broadcast).
template <typename Func, typename Return, typename... Args>
struct vectorize_helper {
// NVCC for some reason breaks if NVectorized is private
#ifdef __CUDACC__
public:
#else
private:
#endif
    // Total argument count and the number of those that are vectorized.
    static constexpr size_t N = sizeof...(Args);
    static constexpr size_t NVectorized = constexpr_sum(vectorize_arg<Args>::vectorize...);
    static_assert(
        NVectorized >= 1,
        "pybind11::vectorize(...) requires a function with at least one vectorizable argument");

public:
    template <typename T,
              // SFINAE to prevent shadowing the copy constructor.
              typename = detail::enable_if_t<
                  !std::is_same<vectorize_helper, typename std::decay<T>::type>::value>>
    explicit vectorize_helper(T &&f) : f(std::forward<T>(f)) {}

    object operator()(typename vectorize_arg<Args>::type... args) {
        return run(args...,
                   make_index_sequence<N>(),
                   select_indices<vectorize_arg<Args>::vectorize...>(),
                   make_index_sequence<NVectorized>());
    }

private:
    remove_reference_t<Func> f;

    // Internal compiler error in MSVC 19.16.27025.1 (Visual Studio 2017 15.9.4), when compiling
    // with "/permissive-" flag when arg_call_types is manually inlined.
    using arg_call_types = std::tuple<typename vectorize_arg<Args>::call_type...>;
    template <size_t Index>
    using param_n_t = typename std::tuple_element<Index, arg_call_types>::type;

    using returned_array = vectorize_returned_array<Func, Return, Args...>;

    // Runs a vectorized function given arguments tuple and three index sequences:
    // - Index is the full set of 0 ... (N-1) argument indices;
    // - VIndex is the subset of argument indices with vectorized parameters, letting us access
    //   vectorized arguments (anything not in this sequence is passed through)
    // - BIndex is a incremental sequence (beginning at 0) of the same size as VIndex, so that
    //   we can store vectorized buffer_infos in an array (argument VIndex has its buffer at
    //   index BIndex in the array).
    template <size_t... Index, size_t... VIndex, size_t... BIndex>
    object run(typename vectorize_arg<Args>::type &...args,
               index_sequence<Index...> i_seq,
               index_sequence<VIndex...> vi_seq,
               index_sequence<BIndex...> bi_seq) {
        // Pointers to values the function was called with; the vectorized ones set here will
        // start out as array_t<T> pointers, but they will be changed to T pointers before we
        // call the wrapped function. Non-vectorized pointers are left as-is.
        std::array<void *, N> params{{&args...}};

        // The array of `buffer_info`s of vectorized arguments:
        std::array<buffer_info, NVectorized> buffers{
            {reinterpret_cast<array *>(params[VIndex])->request()...}};

        /* Determine dimensions parameters of output array */
        ssize_t nd = 0;
        std::vector<ssize_t> shape(0);
        auto trivial = broadcast(buffers, nd, shape);
        auto ndim = (size_t) nd;

        size_t size
            = std::accumulate(shape.begin(), shape.end(), (size_t) 1, std::multiplies<size_t>());

        // If all arguments are 0-dimension arrays (i.e. single values) return a plain value (i.e.
        // not wrapped in an array).
        if (size == 1 && ndim == 0) {
            PYBIND11_EXPAND_SIDE_EFFECTS(params[VIndex] = buffers[BIndex].ptr);
            return cast(
                returned_array::call(f, *reinterpret_cast<param_n_t<Index> *>(params[Index])...));
        }

        auto result = returned_array::create(trivial, shape);

        PYBIND11_WARNING_PUSH
#ifdef PYBIND11_DETECTED_CLANG_WITH_MISLEADING_CALL_STD_MOVE_EXPLICITLY_WARNING
        PYBIND11_WARNING_DISABLE_CLANG("-Wreturn-std-move")
#endif

        if (size == 0) {
            return result;
        }

        /* Call the function */
        auto *mutable_data = returned_array::mutable_data(result);
        if (trivial == broadcast_trivial::non_trivial) {
            apply_broadcast(buffers, params, mutable_data, size, shape, i_seq, vi_seq, bi_seq);
        } else {
            apply_trivial(buffers, params, mutable_data, size, i_seq, vi_seq, bi_seq);
        }

        return result;
        PYBIND11_WARNING_POP
    }

    // Fast path: all inputs share the output's layout (or are singletons), so each input can be
    // walked with a simple per-element pointer increment.
    template <size_t... Index, size_t... VIndex, size_t... BIndex>
    void apply_trivial(std::array<buffer_info, NVectorized> &buffers,
                       std::array<void *, N> &params,
                       Return *out,
                       size_t size,
                       index_sequence<Index...>,
                       index_sequence<VIndex...>,
                       index_sequence<BIndex...>) {

        // Initialize an array of mutable byte references and sizes with references set to the
        // appropriate pointer in `params`; as we iterate, we'll increment each pointer by its size
        // (except for singletons, which get an increment of 0).
        std::array<std::pair<unsigned char *&, const size_t>, NVectorized> vecparams{
            {std::pair<unsigned char *&, const size_t>(
                reinterpret_cast<unsigned char *&>(params[VIndex] = buffers[BIndex].ptr),
                buffers[BIndex].size == 1 ? 0 : sizeof(param_n_t<VIndex>))...}};

        for (size_t i = 0; i < size; ++i) {
            returned_array::call(
                out, i, f, *reinterpret_cast<param_n_t<Index> *>(params[Index])...);
            for (auto &x : vecparams) {
                x.first += x.second;
            }
        }
    }

    // General path: inputs have mismatched shapes/strides, so a multi_array_iterator computes
    // each input's element address for every output position.
    template <size_t... Index, size_t... VIndex, size_t... BIndex>
    void apply_broadcast(std::array<buffer_info, NVectorized> &buffers,
                         std::array<void *, N> &params,
                         Return *out,
                         size_t size,
                         const std::vector<ssize_t> &output_shape,
                         index_sequence<Index...>,
                         index_sequence<VIndex...>,
                         index_sequence<BIndex...>) {
        multi_array_iterator<NVectorized> input_iter(buffers, output_shape);

        for (size_t i = 0; i < size; ++i, ++input_iter) {
            PYBIND11_EXPAND_SIDE_EFFECTS((params[VIndex] = input_iter.template data<BIndex>()));
            returned_array::call(
                out, i, f, *reinterpret_cast<param_n_t<Index> *>(std::get<Index>(params))...);
        }
    }
};
// Helper for the lambda overload of py::vectorize: the second (unused) function-pointer
// parameter exists only so Return/Args... can be deduced from the callable's signature.
template <typename Func, typename Return, typename... Args>
vectorize_helper<Func, Return, Args...> vectorize_extractor(const Func &f, Return (*)(Args...)) {
    return detail::vectorize_helper<Func, Return, Args...>(f);
}
// Signature/docstring name for array_t arguments, e.g. "numpy.ndarray[float64]".
template <typename T, int Flags>
struct handle_type_name<array_t<T, Flags>> {
    static constexpr auto name
        = const_name("numpy.ndarray[") + npy_format_descriptor<T>::name + const_name("]");
};
PYBIND11_NAMESPACE_END(detail)
// Vanilla pointer vectorizer: wrap a plain function pointer so it broadcasts
// element-wise over NumPy array arguments.
template <typename Return, typename... Args>
detail::vectorize_helper<Return (*)(Args...), Return, Args...> vectorize(Return (*f)(Args...)) {
    return detail::vectorize_helper<Return (*)(Args...), Return, Args...>(f);
}
// lambda vectorizer: the signature is deduced from the lambda's operator() via
// function_signature_t and forwarded through vectorize_extractor.
template <typename Func, detail::enable_if_t<detail::is_lambda<Func>::value, int> = 0>
auto vectorize(Func &&f)
    -> decltype(detail::vectorize_extractor(std::forward<Func>(f),
                                            (detail::function_signature_t<Func> *) nullptr)) {
    return detail::vectorize_extractor(std::forward<Func>(f),
                                       (detail::function_signature_t<Func> *) nullptr);
}
// Vectorize a class method (non-const): the instance pointer becomes an extra leading
// (non-vectorized) Class* argument via std::mem_fn.
template <typename Return,
          typename Class,
          typename... Args,
          typename Helper = detail::vectorize_helper<
              decltype(std::mem_fn(std::declval<Return (Class::*)(Args...)>())),
              Return,
              Class *,
              Args...>>
Helper vectorize(Return (Class::*f)(Args...)) {
    return Helper(std::mem_fn(f));
}
// Vectorize a class method (const): same as above but the leading instance argument is
// `const Class *`.
template <typename Return,
          typename Class,
          typename... Args,
          typename Helper = detail::vectorize_helper<
              decltype(std::mem_fn(std::declval<Return (Class::*)(Args...) const>())),
              Return,
              const Class *,
              Args...>>
Helper vectorize(Return (Class::*f)(Args...) const) {
    return Helper(std::mem_fn(f));
}
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/operators.h | C/C++ Header | /*
pybind11/operator.h: Metatemplates for operator overloading
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/// Enumeration with all supported operator types.
/// NOTE: enumerator order is meaningful — op_impl specializations are keyed on these
/// values — so do not reorder.
enum op_id : int {
    // Binary arithmetic / bitwise operators
    op_add,
    op_sub,
    op_mul,
    op_div,
    op_mod,
    op_divmod,
    op_pow,
    op_lshift,
    op_rshift,
    op_and,
    op_xor,
    op_or,
    // Unary operators
    op_neg,
    op_pos,
    op_abs,
    op_invert,
    // Conversion slots
    op_int,
    op_long,
    op_float,
    op_str,
    // Comparison operators
    op_cmp,
    op_gt,
    op_ge,
    op_lt,
    op_le,
    op_eq,
    op_ne,
    // In-place (augmented assignment) operators
    op_iadd,
    op_isub,
    op_imul,
    op_idiv,
    op_imod,
    op_ilshift,
    op_irshift,
    op_iand,
    op_ixor,
    op_ior,
    // Miscellaneous slots
    op_complex,
    op_bool,
    op_nonzero,
    op_repr,
    op_truediv,
    op_itruediv,
    op_hash
};
// Which side of a binary expression the bound class appears on (or unary).
enum op_type : int {
    op_l, /* base type on left */
    op_r, /* base type on right */
    op_u  /* unary operator */
};
// Placeholder tag type: `py::self` stands in for the class being bound when writing
// operator expressions such as `py::self + py::self`.
struct self_t {};
static const self_t self = self_t();
/// Type for an unused type slot
struct undefined_t {};
/// Don't warn about an unused variable
inline self_t __self() { return self; }
/// base template of operator implementations; specialized per (op_id, op_type) by the
/// PYBIND11_*_OPERATOR macros below.
template <op_id, op_type, typename B, typename L, typename R>
struct op_impl {};
/// Operator implementation generator
template <op_id id, op_type ot, typename L, typename R>
struct op_ {
static constexpr bool op_enable_if_hook = true;
template <typename Class, typename... Extra>
void execute(Class &cl, const Extra &...extra) const {
using Base = typename Class::type;
using L_type = conditional_t<std::is_same<L, self_t>::value, Base, L>;
using R_type = conditional_t<std::is_same<R, self_t>::value, Base, R>;
using op = op_impl<id, ot, Base, L_type, R_type>;
cl.def(op::name(), &op::execute, is_operator(), extra...);
}
template <typename Class, typename... Extra>
void execute_cast(Class &cl, const Extra &...extra) const {
using Base = typename Class::type;
using L_type = conditional_t<std::is_same<L, self_t>::value, Base, L>;
using R_type = conditional_t<std::is_same<R, self_t>::value, Base, R>;
using op = op_impl<id, ot, Base, L_type, R_type>;
cl.def(op::name(), &op::execute_cast, is_operator(), extra...);
}
};
// Generates, for one binary operator: the op_impl specializations for the class on the
// left ("__id__") and on the right ("__rid__"), plus the three overloads of `op` that
// make `py::self <op> py::self`, `py::self <op> T` and `T <op> py::self` expressions
// yield the matching op_ descriptor. (Comments cannot go inside the macro body: a `//`
// would swallow the line-continuation backslash.)
#define PYBIND11_BINARY_OPERATOR(id, rid, op, expr) \
    template <typename B, typename L, typename R> \
    struct op_impl<op_##id, op_l, B, L, R> { \
        static char const *name() { return "__" #id "__"; } \
        static auto execute(const L &l, const R &r) -> decltype(expr) { return (expr); } \
        static B execute_cast(const L &l, const R &r) { return B(expr); } \
    }; \
    template <typename B, typename L, typename R> \
    struct op_impl<op_##id, op_r, B, L, R> { \
        static char const *name() { return "__" #rid "__"; } \
        static auto execute(const R &r, const L &l) -> decltype(expr) { return (expr); } \
        static B execute_cast(const R &r, const L &l) { return B(expr); } \
    }; \
    inline op_<op_##id, op_l, self_t, self_t> op(const self_t &, const self_t &) { \
        return op_<op_##id, op_l, self_t, self_t>(); \
    } \
    template <typename T> \
    op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) { \
        return op_<op_##id, op_l, self_t, T>(); \
    } \
    template <typename T> \
    op_<op_##id, op_r, T, self_t> op(const T &, const self_t &) { \
        return op_<op_##id, op_r, T, self_t>(); \
    }
// Generates, for one in-place operator (e.g. +=): an "__id__" op_impl taking a mutable
// left operand, plus the `op` overload matching `py::self <op>= T`. There is no
// right-hand variant since augmented assignment always mutates the left operand.
#define PYBIND11_INPLACE_OPERATOR(id, op, expr) \
    template <typename B, typename L, typename R> \
    struct op_impl<op_##id, op_l, B, L, R> { \
        static char const *name() { return "__" #id "__"; } \
        static auto execute(L &l, const R &r) -> decltype(expr) { return expr; } \
        static B execute_cast(L &l, const R &r) { return B(expr); } \
    }; \
    template <typename T> \
    op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) { \
        return op_<op_##id, op_l, self_t, T>(); \
    }
// Generates, for one unary operator: an "__id__" op_impl with the unused right-operand
// slot filled by undefined_t, plus the single-argument `op` overload for `<op>py::self`.
#define PYBIND11_UNARY_OPERATOR(id, op, expr) \
    template <typename B, typename L> \
    struct op_impl<op_##id, op_u, B, L, undefined_t> { \
        static char const *name() { return "__" #id "__"; } \
        static auto execute(const L &l) -> decltype(expr) { return expr; } \
        static B execute_cast(const L &l) { return B(expr); } \
    }; \
    inline op_<op_##id, op_u, self_t, undefined_t> op(const self_t &) { \
        return op_<op_##id, op_u, self_t, undefined_t>(); \
    }
// Instantiate the supported operators. Arguments: dunder name(s), the C++ operator
// token to overload on py::self, and the expression that implements it.
// Binary arithmetic, bitwise and comparison operators:
PYBIND11_BINARY_OPERATOR(sub, rsub, operator-, l - r)
PYBIND11_BINARY_OPERATOR(add, radd, operator+, l + r)
PYBIND11_BINARY_OPERATOR(mul, rmul, operator*, l *r)
PYBIND11_BINARY_OPERATOR(truediv, rtruediv, operator/, l / r)
PYBIND11_BINARY_OPERATOR(mod, rmod, operator%, l % r)
PYBIND11_BINARY_OPERATOR(lshift, rlshift, operator<<, l << r)
PYBIND11_BINARY_OPERATOR(rshift, rrshift, operator>>, l >> r)
PYBIND11_BINARY_OPERATOR(and, rand, operator&, l &r)
PYBIND11_BINARY_OPERATOR(xor, rxor, operator^, l ^ r)
PYBIND11_BINARY_OPERATOR(eq, eq, operator==, l == r)
PYBIND11_BINARY_OPERATOR(ne, ne, operator!=, l != r)
PYBIND11_BINARY_OPERATOR(or, ror, operator|, l | r)
PYBIND11_BINARY_OPERATOR(gt, lt, operator>, l > r)
PYBIND11_BINARY_OPERATOR(ge, le, operator>=, l >= r)
PYBIND11_BINARY_OPERATOR(lt, gt, operator<, l < r)
PYBIND11_BINARY_OPERATOR(le, ge, operator<=, l <= r)
// PYBIND11_BINARY_OPERATOR(pow, rpow, pow, std::pow(l, r))
// In-place (augmented assignment) operators:
PYBIND11_INPLACE_OPERATOR(iadd, operator+=, l += r)
PYBIND11_INPLACE_OPERATOR(isub, operator-=, l -= r)
PYBIND11_INPLACE_OPERATOR(imul, operator*=, l *= r)
PYBIND11_INPLACE_OPERATOR(itruediv, operator/=, l /= r)
PYBIND11_INPLACE_OPERATOR(imod, operator%=, l %= r)
PYBIND11_INPLACE_OPERATOR(ilshift, operator<<=, l <<= r)
PYBIND11_INPLACE_OPERATOR(irshift, operator>>=, l >>= r)
PYBIND11_INPLACE_OPERATOR(iand, operator&=, l &= r)
PYBIND11_INPLACE_OPERATOR(ixor, operator^=, l ^= r)
PYBIND11_INPLACE_OPERATOR(ior, operator|=, l |= r)
// Unary operators and conversion slots:
PYBIND11_UNARY_OPERATOR(neg, operator-, -l)
PYBIND11_UNARY_OPERATOR(pos, operator+, +l)
// WARNING: This usage of `abs` should only be done for existing STL overloads.
// Adding overloads directly in to the `std::` namespace is advised against:
// https://en.cppreference.com/w/cpp/language/extending_std
PYBIND11_UNARY_OPERATOR(abs, abs, std::abs(l))
PYBIND11_UNARY_OPERATOR(hash, hash, std::hash<L>()(l))
PYBIND11_UNARY_OPERATOR(invert, operator~, (~l))
PYBIND11_UNARY_OPERATOR(bool, operator!, !!l)
PYBIND11_UNARY_OPERATOR(int, int_, (int) l)
PYBIND11_UNARY_OPERATOR(float, float_, (double) l)
#undef PYBIND11_BINARY_OPERATOR
#undef PYBIND11_INPLACE_OPERATOR
#undef PYBIND11_UNARY_OPERATOR
PYBIND11_NAMESPACE_END(detail)
using detail::self;
// Add named operators so that they are accessible via `py::`.
using detail::hash;
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/options.h | C/C++ Header | /*
pybind11/options.h: global settings that are configurable at runtime.
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
// RAII scope for pybind11's global docstring/signature generation settings: construct
// an `options` object, toggle settings through it, and the previous global state is
// restored automatically when it goes out of scope.
class options {
public:
    // Default RAII constructor, which leaves settings as they currently are.
    options() : previous_state(global_state()) {}

    // Class is non-copyable.
    options(const options &) = delete;
    options &operator=(const options &) = delete;

    // Destructor, which restores settings that were in effect before.
    ~options() { global_state() = previous_state; }

    // Setter methods (affect the global state):
    // Each returns *this so calls can be chained; the `&` ref-qualifier forbids calling
    // on a temporary, which would revert the change immediately.
    options &disable_user_defined_docstrings() & {
        global_state().show_user_defined_docstrings = false;
        return *this;
    }

    options &enable_user_defined_docstrings() & {
        global_state().show_user_defined_docstrings = true;
        return *this;
    }

    options &disable_function_signatures() & {
        global_state().show_function_signatures = false;
        return *this;
    }

    options &enable_function_signatures() & {
        global_state().show_function_signatures = true;
        return *this;
    }

    options &disable_enum_members_docstring() & {
        global_state().show_enum_members_docstring = false;
        return *this;
    }

    options &enable_enum_members_docstring() & {
        global_state().show_enum_members_docstring = true;
        return *this;
    }

    // Getter methods (return the global state):
    static bool show_user_defined_docstrings() {
        return global_state().show_user_defined_docstrings;
    }

    static bool show_function_signatures() { return global_state().show_function_signatures; }

    static bool show_enum_members_docstring() {
        return global_state().show_enum_members_docstring;
    }

    // This type is not meant to be allocated on the heap.
    void *operator new(size_t) = delete;

private:
    struct state {
        bool show_user_defined_docstrings = true; //< Include user-supplied texts in docstrings.
        bool show_function_signatures = true;     //< Include auto-generated function signatures
                                                  //  in docstrings.
        bool show_enum_members_docstring = true;  //< Include auto-generated member list in enum
                                                  //  docstrings.
    };

    // Meyers-singleton holder for the process-wide settings.
    static state &global_state() {
        static state instance;
        return instance;
    }

    state previous_state;
};
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/pybind11.h | C/C++ Header | /*
pybind11/pybind11.h: Main header file of the C++11 python
binding generator library
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/class.h"
#include "detail/init.h"
#include "attr.h"
#include "gil.h"
#include "options.h"
#include <cstdlib>
#include <cstring>
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <vector>
#if defined(__cpp_lib_launder) && !(defined(_MSC_VER) && (_MSC_VER < 1914))
# define PYBIND11_STD_LAUNDER std::launder
# define PYBIND11_HAS_STD_LAUNDER 1
#else
# define PYBIND11_STD_LAUNDER
# define PYBIND11_HAS_STD_LAUNDER 0
#endif
#if defined(__GNUG__) && !defined(__clang__)
# include <cxxabi.h>
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
/* https://stackoverflow.com/questions/46798456/handling-gccs-noexcept-type-warning
This warning is about ABI compatibility, not code health.
It is only actually needed in a couple places, but apparently GCC 7 "generates this warning if
and only if the first template instantiation ... involves noexcept" [stackoverflow], therefore
it could get triggered from seemingly random places, depending on user code.
No other GCC version generates this warning.
*/
#if defined(__GNUC__) && __GNUC__ == 7
PYBIND11_WARNING_DISABLE_GCC("-Wnoexcept-type")
#endif
PYBIND11_WARNING_DISABLE_MSVC(4127)
PYBIND11_NAMESPACE_BEGIN(detail)
// Apply all the extensions translators from a list
// Return true if one of the translators completed without raising an exception
// itself. Return of false indicates that if there are other translators
// available, they should be tried.
inline bool apply_exception_translators(std::forward_list<ExceptionTranslator> &translators) {
auto last_exception = std::current_exception();
for (auto &translator : translators) {
try {
translator(last_exception);
return true;
} catch (...) {
last_exception = std::current_exception();
}
}
return false;
}
#if defined(_MSC_VER)
# define PYBIND11_COMPAT_STRDUP _strdup
#else
# define PYBIND11_COMPAT_STRDUP strdup
#endif
PYBIND11_NAMESPACE_END(detail)
/// Wraps an arbitrary C++ function/method/lambda function/.. into a callable Python object
class cpp_function : public function {
public:
    // Empty function object; also constructible from nullptr (a Python `None` callable
    // slot), with an extra is_setter-tagged overload used by property machinery.
    cpp_function() = default;
    // NOLINTNEXTLINE(google-explicit-constructor)
    cpp_function(std::nullptr_t) {}
    cpp_function(std::nullptr_t, const is_setter &) {}
    /// Construct a cpp_function from a vanilla function pointer.
    /// The pointer is passed twice: once as the callable and once so initialize() can
    /// deduce Return/Args... from its type.
    template <typename Return, typename... Args, typename... Extra>
    // NOLINTNEXTLINE(google-explicit-constructor)
    cpp_function(Return (*f)(Args...), const Extra &...extra) {
        initialize(f, f, extra...);
    }
    /// Construct a cpp_function from a lambda function (possibly with internal state).
    /// The signature is recovered from the lambda's operator() via function_signature_t
    /// and passed as a null function pointer purely for type deduction.
    template <typename Func,
              typename... Extra,
              typename = detail::enable_if_t<detail::is_lambda<Func>::value>>
    // NOLINTNEXTLINE(google-explicit-constructor)
    cpp_function(Func &&f, const Extra &...extra) {
        initialize(
            std::forward<Func>(f), (detail::function_signature_t<Func> *) nullptr, extra...);
    }
    /// Construct a cpp_function from a class method (non-const, no ref-qualifier).
    /// The member pointer is wrapped in a lambda taking an explicit Class* first
    /// argument, so the instance becomes an ordinary leading parameter.
    template <typename Return, typename Class, typename... Arg, typename... Extra>
    // NOLINTNEXTLINE(google-explicit-constructor)
    cpp_function(Return (Class::*f)(Arg...), const Extra &...extra) {
        initialize(
            [f](Class *c, Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
            (Return(*)(Class *, Arg...)) nullptr,
            extra...);
    }
    /// Construct a cpp_function from a class method (non-const, lvalue ref-qualifier).
    /// A copy of the overload for non-const functions without explicit ref-qualifier
    /// but with an added `&`.
    template <typename Return, typename Class, typename... Arg, typename... Extra>
    // NOLINTNEXTLINE(google-explicit-constructor)
    cpp_function(Return (Class::*f)(Arg...) &, const Extra &...extra) {
        initialize(
            [f](Class *c, Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
            (Return(*)(Class *, Arg...)) nullptr,
            extra...);
    }
    /// Construct a cpp_function from a class method (const, no ref-qualifier).
    /// Same wrapping as the non-const overload, but with `const Class *`.
    template <typename Return, typename Class, typename... Arg, typename... Extra>
    // NOLINTNEXTLINE(google-explicit-constructor)
    cpp_function(Return (Class::*f)(Arg...) const, const Extra &...extra) {
        initialize([f](const Class *c,
                       Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
                   (Return(*)(const Class *, Arg...)) nullptr,
                   extra...);
    }
    /// Construct a cpp_function from a class method (const, lvalue ref-qualifier).
    /// A copy of the overload for const functions without explicit ref-qualifier
    /// but with an added `&`.
    template <typename Return, typename Class, typename... Arg, typename... Extra>
    // NOLINTNEXTLINE(google-explicit-constructor)
    cpp_function(Return (Class::*f)(Arg...) const &, const Extra &...extra) {
        initialize([f](const Class *c,
                       Arg... args) -> Return { return (c->*f)(std::forward<Arg>(args)...); },
                   (Return(*)(const Class *, Arg...)) nullptr,
                   extra...);
    }
    /// Return the function name, as stored in the Python-side `__name__` attribute.
    object name() const { return attr("__name__"); }
protected:
    // unique_ptr deleter used while a function_record is still being built (before a
    // capsule takes ownership), so a thrown exception cleans the record up.
    struct InitializingFunctionRecordDeleter {
        // `destruct(function_record, false)`: `initialize_generic` copies strings and
        // takes care of cleaning up in case of exceptions. So pass `false` to `free_strings`.
        void operator()(detail::function_record *rec) { destruct(rec, false); }
    };
    using unique_function_record
        = std::unique_ptr<detail::function_record, InitializingFunctionRecordDeleter>;
    /// Space optimization: don't inline this frequently instantiated fragment.
    /// Allocates a fresh function_record owned by the exception-safe deleter above.
    PYBIND11_NOINLINE unique_function_record make_function_record() {
        return unique_function_record(new detail::function_record());
    }
    /// Special internal constructor for functors, lambda functions, etc.
    /// Stores the callable (small-object-optimized into rec->data when it fits),
    /// builds the type-erased dispatch lambda that converts Python arguments, invokes
    /// the callable and converts the result back, then hands the record to
    /// initialize_generic() for registration.
    template <typename Func, typename Return, typename... Args, typename... Extra>
    void initialize(Func &&f, Return (*)(Args...), const Extra &...extra) {
        using namespace detail;
        struct capture {
            remove_reference_t<Func> f;
        };

        /* Store the function including any extra state it might have (e.g. a lambda capture
         * object) */
        // The unique_ptr makes sure nothing is leaked in case of an exception.
        auto unique_rec = make_function_record();
        auto *rec = unique_rec.get();

        /* Store the capture object directly in the function record if there is enough space */
        if (sizeof(capture) <= sizeof(rec->data)) {
            /* Without these pragmas, GCC warns that there might not be
               enough space to use the placement new operator. However, the
               'if' statement above ensures that this is the case. */
            PYBIND11_WARNING_PUSH
#if defined(__GNUG__) && __GNUC__ >= 6
            PYBIND11_WARNING_DISABLE_GCC("-Wplacement-new")
#endif
            new ((capture *) &rec->data) capture{std::forward<Func>(f)};
#if !PYBIND11_HAS_STD_LAUNDER
            PYBIND11_WARNING_DISABLE_GCC("-Wstrict-aliasing")
#endif
            // UB without std::launder, but without breaking ABI and/or
            // a significant refactoring it's "impossible" to solve.
            if (!std::is_trivially_destructible<capture>::value) {
                rec->free_data = [](function_record *r) {
                    auto data = PYBIND11_STD_LAUNDER((capture *) &r->data);
                    (void) data;
                    data->~capture();
                };
            }
            PYBIND11_WARNING_POP
        } else {
            // Capture too large for in-record storage: heap-allocate it instead.
            rec->data[0] = new capture{std::forward<Func>(f)};
            rec->free_data = [](function_record *r) { delete ((capture *) r->data[0]); };
        }

        /* Type casters for the function arguments and return value */
        using cast_in = argument_loader<Args...>;
        using cast_out
            = make_caster<conditional_t<std::is_void<Return>::value, void_type, Return>>;

        static_assert(
            expected_num_args<Extra...>(
                sizeof...(Args), cast_in::args_pos >= 0, cast_in::has_kwargs),
            "The number of argument annotations does not match the number of function arguments");

        /* Dispatch code which converts function arguments and performs the actual function call */
        rec->impl = [](function_call &call) -> handle {
            cast_in args_converter;

            /* Try to cast the function arguments into the C++ domain */
            if (!args_converter.load_args(call)) {
                return PYBIND11_TRY_NEXT_OVERLOAD;
            }

            /* Invoke call policy pre-call hook */
            process_attributes<Extra...>::precall(call);

            /* Get a pointer to the capture object */
            // Mirrors the small-object-optimization branch above: in-record storage when
            // it fit, heap pointer otherwise.
            const auto *data = (sizeof(capture) <= sizeof(call.func.data) ? &call.func.data
                                                                          : call.func.data[0]);
            auto *cap = const_cast<capture *>(reinterpret_cast<const capture *>(data));

            /* Override policy for rvalues -- usually to enforce rvp::move on an rvalue */
            return_value_policy policy
                = return_value_policy_override<Return>::policy(call.func.policy);

            /* Function scope guard -- defaults to the compile-to-nothing `void_type` */
            using Guard = extract_guard_t<Extra...>;

            /* Perform the function call */
            handle result;
            if (call.func.is_setter) {
                // Property setters discard the C++ return value and yield None.
                (void) std::move(args_converter).template call<Return, Guard>(cap->f);
                result = none().release();
            } else {
                result = cast_out::cast(
                    std::move(args_converter).template call<Return, Guard>(cap->f),
                    policy,
                    call.parent);
            }

            /* Invoke call policy post-call hook */
            process_attributes<Extra...>::postcall(call, result);

            return result;
        };

        rec->nargs_pos = cast_in::args_pos >= 0
                             ? static_cast<std::uint16_t>(cast_in::args_pos)
                             : sizeof...(Args) - cast_in::has_kwargs; // Will get reduced more if
                                                                      // we have a kw_only
        rec->has_args = cast_in::args_pos >= 0;
        rec->has_kwargs = cast_in::has_kwargs;

        /* Process any user-provided function attributes */
        process_attributes<Extra...>::init(extra..., rec);

        {
            // Compile-time validation of the py::kw_only / py::pos_only annotations.
            constexpr bool has_kw_only_args = any_of<std::is_same<kw_only, Extra>...>::value,
                           has_pos_only_args = any_of<std::is_same<pos_only, Extra>...>::value,
                           has_arg_annotations = any_of<is_keyword<Extra>...>::value;
            static_assert(has_arg_annotations || !has_kw_only_args,
                          "py::kw_only requires the use of argument annotations");
            static_assert(has_arg_annotations || !has_pos_only_args,
                          "py::pos_only requires the use of argument annotations (for docstrings "
                          "and aligning the annotations to the argument)");

            static_assert(constexpr_sum(is_kw_only<Extra>::value...) <= 1,
                          "py::kw_only may be specified only once");
            static_assert(constexpr_sum(is_pos_only<Extra>::value...) <= 1,
                          "py::pos_only may be specified only once");
            constexpr auto kw_only_pos = constexpr_first<is_kw_only, Extra...>();
            constexpr auto pos_only_pos = constexpr_first<is_pos_only, Extra...>();
            static_assert(!(has_kw_only_args && has_pos_only_args) || pos_only_pos < kw_only_pos,
                          "py::pos_only must come before py::kw_only");
        }

        /* Generate a readable signature describing the function's arguments and return
           value types */
        static constexpr auto signature
            = const_name("(") + cast_in::arg_names + const_name(") -> ") + cast_out::name;
        PYBIND11_DESCR_CONSTEXPR auto types = decltype(signature)::types();

        /* Register the function with Python from generic (non-templated) code */
        // Pass on the ownership over the `unique_rec` to `initialize_generic`. `rec` stays valid.
        initialize_generic(std::move(unique_rec), signature.text, types.data(), sizeof...(Args));

        /* Stash some additional information used by an important optimization in 'functional.h' */
        using FunctionType = Return (*)(Args...);
        constexpr bool is_function_ptr
            = std::is_convertible<Func, FunctionType>::value && sizeof(capture) == sizeof(void *);
        if (is_function_ptr) {
            rec->is_stateless = true;
            rec->data[1]
                = const_cast<void *>(reinterpret_cast<const void *>(&typeid(FunctionType)));
        }
    }
// Utility class that keeps track of all duplicated strings, and cleans them up in its
// destructor, unless they are released. Basically a RAII-solution to deal with exceptions
// along the way.
class strdup_guard {
public:
strdup_guard() = default;
strdup_guard(const strdup_guard &) = delete;
strdup_guard &operator=(const strdup_guard &) = delete;
~strdup_guard() {
for (auto *s : strings) {
std::free(s);
}
}
char *operator()(const char *s) {
auto *t = PYBIND11_COMPAT_STRDUP(s);
strings.push_back(t);
return t;
}
void release() { strings.clear(); }
private:
std::vector<char *> strings;
};
/// Register a function call with Python (generic non-templated code goes here).
///
/// `unique_rec` holds the partially-filled function record; ownership is only
/// moved out once a `capsule` is created to keep it alive (see comments below).
/// `text` is a signature template where '{' / '}' delimit one argument and '%'
/// stands for a C++ type to be resolved; `types` is the matching
/// null-terminated array of type infos, and `args` the total argument count
/// (including py::args / py::kwargs slots).
void initialize_generic(unique_function_record &&unique_rec,
                        const char *text,
                        const std::type_info *const *types,
                        size_t args) {
    // Do NOT receive `unique_rec` by value. If this function fails to move out the unique_ptr,
    // we do not want this to destruct the pointer. `initialize` (the caller) still relies on
    // the pointee being alive after this call. Only move out if a `capsule` is going to keep
    // it alive.
    auto *rec = unique_rec.get();

    // Keep track of strdup'ed strings, and clean them up as long as the function's capsule
    // has not taken ownership yet (when `unique_rec.release()` is called).
    // Note: This cannot easily be fixed by a `unique_ptr` with custom deleter, because the
    // strings are only referenced before strdup'ing. So only *after* the following block could
    // `destruct` safely be called, but even then, `repr` could still throw in the middle of
    // copying all strings.
    strdup_guard guarded_strdup;

    /* Create copies of all referenced C-style strings */
    rec->name = guarded_strdup(rec->name ? rec->name : "");
    if (rec->doc) {
        rec->doc = guarded_strdup(rec->doc);
    }
    for (auto &a : rec->args) {
        if (a.name) {
            a.name = guarded_strdup(a.name);
        }
        if (a.descr) {
            a.descr = guarded_strdup(a.descr);
        } else if (a.value) {
            // No explicit description given: render the default value via Python repr().
            a.descr = guarded_strdup(repr(a.value).cast<std::string>().c_str());
        }
    }

    rec->is_constructor = (std::strcmp(rec->name, "__init__") == 0)
                          || (std::strcmp(rec->name, "__setstate__") == 0);

#if defined(PYBIND11_DETAILED_ERROR_MESSAGES) && !defined(PYBIND11_DISABLE_NEW_STYLE_INIT_WARNING)
    // Warn (debug builds only) about deprecated old-style placement-new __init__.
    if (rec->is_constructor && !rec->is_new_style_constructor) {
        const auto class_name
            = detail::get_fully_qualified_tp_name((PyTypeObject *) rec->scope.ptr());
        const auto func_name = std::string(rec->name);
        PyErr_WarnEx(PyExc_FutureWarning,
                     ("pybind11-bound class '" + class_name
                      + "' is using an old-style "
                        "placement-new '"
                      + func_name
                      + "' which has been deprecated. See "
                        "the upgrade guide in pybind11's docs. This message is only visible "
                        "when compiled in debug mode.")
                         .c_str(),
                     0);
    }
#endif

    /* Generate a proper function signature */
    std::string signature;
    size_t type_index = 0, arg_index = 0;
    bool is_starred = false;
    for (const auto *pc = text; *pc != '\0'; ++pc) {
        const auto c = *pc;
        if (c == '{') {
            // Write arg name for everything except *args and **kwargs.
            is_starred = *(pc + 1) == '*';
            if (is_starred) {
                continue;
            }
            // Separator for keyword-only arguments, placed before the kw
            // arguments start (unless we are already putting an *args)
            if (!rec->has_args && arg_index == rec->nargs_pos) {
                signature += "*, ";
            }
            if (arg_index < rec->args.size() && rec->args[arg_index].name) {
                signature += rec->args[arg_index].name;
            } else if (arg_index == 0 && rec->is_method) {
                signature += "self";
            } else {
                // Unnamed argument: synthesize "argN" (N skips the `self` slot for methods).
                signature += "arg" + std::to_string(arg_index - (rec->is_method ? 1 : 0));
            }
            signature += ": ";
        } else if (c == '}') {
            // Write default value if available.
            if (!is_starred && arg_index < rec->args.size() && rec->args[arg_index].descr) {
                signature += " = ";
                signature += rec->args[arg_index].descr;
            }
            // Separator for positional-only arguments (placed after the
            // argument, rather than before like *
            if (rec->nargs_pos_only > 0 && (arg_index + 1) == rec->nargs_pos_only) {
                signature += ", /";
            }
            if (!is_starred) {
                arg_index++;
            }
        } else if (c == '%') {
            // Type placeholder: resolve against registered pybind11 types if possible.
            const std::type_info *t = types[type_index++];
            if (!t) {
                pybind11_fail("Internal error while parsing type signature (1)");
            }
            if (auto *tinfo = detail::get_type_info(*t)) {
                handle th((PyObject *) tinfo->type);
                signature += th.attr("__module__").cast<std::string>() + "."
                             + th.attr("__qualname__").cast<std::string>();
            } else if (rec->is_new_style_constructor && arg_index == 0) {
                // A new-style `__init__` takes `self` as `value_and_holder`.
                // Rewrite it to the proper class type.
                signature += rec->scope.attr("__module__").cast<std::string>() + "."
                             + rec->scope.attr("__qualname__").cast<std::string>();
            } else {
                // Unregistered type: fall back to the demangled/cleaned C++ type name.
                std::string tname(t->name());
                detail::clean_type_id(tname);
                signature += tname;
            }
        } else {
            signature += c;
        }
    }

    // Sanity check: all argument slots and all type placeholders must be consumed.
    if (arg_index != args - rec->has_args - rec->has_kwargs || types[type_index] != nullptr) {
        pybind11_fail("Internal error while parsing type signature (2)");
    }

    rec->signature = guarded_strdup(signature.c_str());
    rec->args.shrink_to_fit();
    rec->nargs = (std::uint16_t) args;

    if (rec->sibling && PYBIND11_INSTANCE_METHOD_CHECK(rec->sibling.ptr())) {
        rec->sibling = PYBIND11_INSTANCE_METHOD_GET_FUNCTION(rec->sibling.ptr());
    }

    // Determine whether we are appending to an existing overload chain.
    detail::function_record *chain = nullptr, *chain_start = rec;
    if (rec->sibling) {
        if (PyCFunction_Check(rec->sibling.ptr())) {
            auto *self = PyCFunction_GET_SELF(rec->sibling.ptr());
            if (!isinstance<capsule>(self)) {
                chain = nullptr;
            } else {
                auto rec_capsule = reinterpret_borrow<capsule>(self);
                if (detail::is_function_record_capsule(rec_capsule)) {
                    chain = rec_capsule.get_pointer<detail::function_record>();
                    /* Never append a method to an overload chain of a parent class;
                       instead, hide the parent's overloads in this case */
                    if (!chain->scope.is(rec->scope)) {
                        chain = nullptr;
                    }
                } else {
                    chain = nullptr;
                }
            }
        }
        // Don't trigger for things like the default __init__, which are wrapper_descriptors
        // that we are intentionally replacing
        else if (!rec->sibling.is_none() && rec->name[0] != '_') {
            pybind11_fail("Cannot overload existing non-function object \""
                          + std::string(rec->name) + "\" with a function of the same name");
        }
    }

    if (!chain) {
        /* No existing overload was found, create a new function object */
        rec->def = new PyMethodDef();
        std::memset(rec->def, 0, sizeof(PyMethodDef));
        rec->def->ml_name = rec->name;
        rec->def->ml_meth
            = reinterpret_cast<PyCFunction>(reinterpret_cast<void (*)()>(dispatcher));
        rec->def->ml_flags = METH_VARARGS | METH_KEYWORDS;

        // The capsule now owns the record (and, after release(), the strdup'ed strings);
        // its destructor calls destruct() on the whole chain.
        capsule rec_capsule(unique_rec.release(),
                            detail::get_function_record_capsule_name(),
                            [](void *ptr) { destruct((detail::function_record *) ptr); });
        guarded_strdup.release();

        object scope_module;
        if (rec->scope) {
            if (hasattr(rec->scope, "__module__")) {
                scope_module = rec->scope.attr("__module__");
            } else if (hasattr(rec->scope, "__name__")) {
                scope_module = rec->scope.attr("__name__");
            }
        }

        m_ptr = PyCFunction_NewEx(rec->def, rec_capsule.ptr(), scope_module.ptr());
        if (!m_ptr) {
            pybind11_fail("cpp_function::cpp_function(): Could not allocate function object");
        }
    } else {
        /* Append at the beginning or end of the overload chain */
        m_ptr = rec->sibling.ptr();
        inc_ref();
        if (chain->is_method != rec->is_method) {
            pybind11_fail(
                "overloading a method with both static and instance methods is not supported; "
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
                "#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for more "
                "details"
#else
                "error while attempting to bind "
                + std::string(rec->is_method ? "instance" : "static") + " method "
                + std::string(pybind11::str(rec->scope.attr("__name__"))) + "."
                + std::string(rec->name) + signature
#endif
            );
        }

        if (rec->prepend) {
            // Beginning of chain; we need to replace the capsule's current head-of-the-chain
            // pointer with this one, then make this one point to the previous head of the
            // chain.
            chain_start = rec;
            rec->next = chain;
            auto rec_capsule
                = reinterpret_borrow<capsule>(((PyCFunctionObject *) m_ptr)->m_self);
            rec_capsule.set_pointer(unique_rec.release());
            guarded_strdup.release();
        } else {
            // Or end of chain (normal behavior)
            chain_start = chain;
            while (chain->next) {
                chain = chain->next;
            }
            chain->next = unique_rec.release();
            guarded_strdup.release();
        }
    }

    std::string signatures;
    int index = 0;
    /* Create a nice pydoc rec including all signatures and
       docstrings of the functions in the overload chain */
    if (chain && options::show_function_signatures()) {
        // First a generic signature
        signatures += rec->name;
        signatures += "(*args, **kwargs)\n";
        signatures += "Overloaded function.\n\n";
    }
    // Then specific overload signatures
    bool first_user_def = true;
    for (auto *it = chain_start; it != nullptr; it = it->next) {
        if (options::show_function_signatures()) {
            if (index > 0) {
                signatures += '\n';
            }
            if (chain) {
                signatures += std::to_string(++index) + ". ";
            }
            signatures += rec->name;
            signatures += it->signature;
            signatures += '\n';
        }
        if (it->doc && it->doc[0] != '\0' && options::show_user_defined_docstrings()) {
            // If we're appending another docstring, and aren't printing function signatures,
            // we need to append a newline first:
            if (!options::show_function_signatures()) {
                if (first_user_def) {
                    first_user_def = false;
                } else {
                    signatures += '\n';
                }
            }
            if (options::show_function_signatures()) {
                signatures += '\n';
            }
            signatures += it->doc;
            if (options::show_function_signatures()) {
                signatures += '\n';
            }
        }
    }

    /* Install docstring */
    auto *func = (PyCFunctionObject *) m_ptr;
    // Free any previously-installed docstring before replacing it.
    std::free(const_cast<char *>(func->m_ml->ml_doc));
    // Install docstring if it's non-empty (when at least one option is enabled)
    func->m_ml->ml_doc
        = signatures.empty() ? nullptr : PYBIND11_COMPAT_STRDUP(signatures.c_str());

    if (rec->is_method) {
        // Wrap as an instance method bound to the scope (class) object.
        m_ptr = PYBIND11_INSTANCE_METHOD_NEW(m_ptr, rec->scope.ptr());
        if (!m_ptr) {
            pybind11_fail(
                "cpp_function::cpp_function(): Could not allocate instance method object");
        }
        Py_DECREF(func);
    }
}
/// When a cpp_function is GCed, release any memory allocated by pybind11.
///
/// Walks and deletes the whole overload chain starting at `rec`: per-record
/// user data (via `free_data`), the strdup'ed name/doc/signature and argument
/// strings (only when `free_strings` is true — during a failed initialization
/// the strings may not have been copied yet and must not be freed), the
/// default-argument references, and the `PyMethodDef`.
static void destruct(detail::function_record *rec, bool free_strings = true) {
    // If on Python 3.9, check the interpreter "MICRO" (patch) version.
    // If this is running on 3.9.0, we have to work around a bug.
#if !defined(PYPY_VERSION) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 9
    static bool is_zero = Py_GetVersion()[4] == '0';
#endif

    while (rec) {
        // Save the link before deleting the current record.
        detail::function_record *next = rec->next;
        if (rec->free_data) {
            rec->free_data(rec);
        }
        // During initialization, these strings might not have been copied yet,
        // so they cannot be freed. Once the function has been created, they can.
        // Check `make_function_record` for more details.
        if (free_strings) {
            std::free((char *) rec->name);
            std::free((char *) rec->doc);
            std::free((char *) rec->signature);
            for (auto &arg : rec->args) {
                std::free(const_cast<char *>(arg.name));
                std::free(const_cast<char *>(arg.descr));
            }
        }
        // Drop the references held on default-argument values.
        for (auto &arg : rec->args) {
            arg.value.dec_ref();
        }
        if (rec->def) {
            std::free(const_cast<char *>(rec->def->ml_doc));
            // Python 3.9.0 decref's these in the wrong order; rec->def
            // If loaded on 3.9.0, let these leak (use Python 3.9.1 at runtime to fix)
            // See https://github.com/python/cpython/pull/22670
#if !defined(PYPY_VERSION) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 9
            if (!is_zero) {
                delete rec->def;
            }
#else
            delete rec->def;
#endif
        }
        delete rec;
        rec = next;
    }
}
/// Main dispatch logic for calls to functions bound using pybind11.
///
/// `self` is the capsule holding the head of the overload chain; `args_in`
/// and `kwargs_in` are the raw Python call arguments. Overload resolution is
/// done in two passes: first without implicit conversions, then (for
/// overloaded functions) with conversions allowed. Returns a new reference,
/// or nullptr with a Python exception set.
static PyObject *dispatcher(PyObject *self, PyObject *args_in, PyObject *kwargs_in) {
    using namespace detail;
    assert(isinstance<capsule>(self));

    /* Iterator over the list of potentially admissible overloads */
    const function_record *overloads = reinterpret_cast<function_record *>(
                              PyCapsule_GetPointer(self, get_function_record_capsule_name())),
                          *it = overloads;
    assert(overloads != nullptr);

    /* Need to know how many arguments + keyword arguments there are to pick the right
       overload */
    const auto n_args_in = (size_t) PyTuple_GET_SIZE(args_in);

    handle parent = n_args_in > 0 ? PyTuple_GET_ITEM(args_in, 0) : nullptr,
           result = PYBIND11_TRY_NEXT_OVERLOAD;

    auto self_value_and_holder = value_and_holder();
    if (overloads->is_constructor) {
        // Constructors require a valid `self` of the bound class as the first argument.
        if (!parent
            || !PyObject_TypeCheck(parent.ptr(), (PyTypeObject *) overloads->scope.ptr())) {
            PyErr_SetString(
                PyExc_TypeError,
                "__init__(self, ...) called with invalid or missing `self` argument");
            return nullptr;
        }

        auto *const tinfo = get_type_info((PyTypeObject *) overloads->scope.ptr());
        auto *const pi = reinterpret_cast<instance *>(parent.ptr());
        self_value_and_holder = pi->get_value_and_holder(tinfo, true);

        // If this value is already registered it must mean __init__ is invoked multiple times;
        // we really can't support that in C++, so just ignore the second __init__.
        if (self_value_and_holder.instance_registered()) {
            return none().release().ptr();
        }
    }

    try {
        // We do this in two passes: in the first pass, we load arguments with `convert=false`;
        // in the second, we allow conversion (except for arguments with an explicit
        // py::arg().noconvert()). This lets us prefer calls without conversion, with
        // conversion as a fallback.
        std::vector<function_call> second_pass;

        // However, if there are no overloads, we can just skip the no-convert pass entirely
        const bool overloaded = it != nullptr && it->next != nullptr;

        for (; it != nullptr; it = it->next) {

            /* For each overload:
               1. Copy all positional arguments we were given, also checking to make sure that
                  named positional arguments weren't *also* specified via kwarg.
               2. If we weren't given enough, try to make up the omitted ones by checking
                  whether they were provided by a kwarg matching the `py::arg("name")` name. If
                  so, use it (and remove it from kwargs); if not, see if the function binding
                  provided a default that we can use.
               3. Ensure that either all keyword arguments were "consumed", or that the
                  function takes a kwargs argument to accept unconsumed kwargs.
               4. Any positional arguments still left get put into a tuple (for args), and any
                  leftover kwargs get put into a dict.
               5. Pack everything into a vector; if we have py::args or py::kwargs, they are an
                  extra tuple or dict at the end of the positional arguments.
               6. Call the function call dispatcher (function_record::impl)

               If one of these fail, move on to the next overload and keep trying until we get
               a result other than PYBIND11_TRY_NEXT_OVERLOAD.
             */

            const function_record &func = *it;
            size_t num_args = func.nargs; // Number of positional arguments that we need
            if (func.has_args) {
                --num_args; // (but don't count py::args
            }
            if (func.has_kwargs) {
                --num_args; //  or py::kwargs)
            }
            size_t pos_args = func.nargs_pos;

            if (!func.has_args && n_args_in > pos_args) {
                continue; // Too many positional arguments for this overload
            }

            if (n_args_in < pos_args && func.args.size() < pos_args) {
                continue; // Not enough positional arguments given, and not enough defaults to
                          // fill in the blanks
            }

            function_call call(func, parent);

            // Protect std::min with parentheses
            size_t args_to_copy = (std::min)(pos_args, n_args_in);
            size_t args_copied = 0;

            // 0. Inject new-style `self` argument
            if (func.is_new_style_constructor) {
                // The `value` may have been preallocated by an old-style `__init__`
                // if it was a preceding candidate for overload resolution.
                if (self_value_and_holder) {
                    self_value_and_holder.type->dealloc(self_value_and_holder);
                }

                call.init_self = PyTuple_GET_ITEM(args_in, 0);
                call.args.emplace_back(reinterpret_cast<PyObject *>(&self_value_and_holder));
                call.args_convert.push_back(false);
                ++args_copied;
            }

            // 1. Copy any position arguments given.
            bool bad_arg = false;
            for (; args_copied < args_to_copy; ++args_copied) {
                const argument_record *arg_rec
                    = args_copied < func.args.size() ? &func.args[args_copied] : nullptr;
                // Reject if the same named positional argument was also passed as a kwarg.
                if (kwargs_in && arg_rec && arg_rec->name
                    && dict_getitemstring(kwargs_in, arg_rec->name)) {
                    bad_arg = true;
                    break;
                }

                handle arg(PyTuple_GET_ITEM(args_in, args_copied));
                // Reject None for arguments declared .none(false).
                if (arg_rec && !arg_rec->none && arg.is_none()) {
                    bad_arg = true;
                    break;
                }
                call.args.push_back(arg);
                call.args_convert.push_back(arg_rec ? arg_rec->convert : true);
            }
            if (bad_arg) {
                continue; // Maybe it was meant for another overload (issue #688)
            }

            // Keep track of how many position args we copied out in case we need to come back
            // to copy the rest into a py::args argument.
            size_t positional_args_copied = args_copied;

            // We'll need to copy this if we steal some kwargs for defaults
            dict kwargs = reinterpret_borrow<dict>(kwargs_in);

            // 1.5. Fill in any missing pos_only args from defaults if they exist
            if (args_copied < func.nargs_pos_only) {
                for (; args_copied < func.nargs_pos_only; ++args_copied) {
                    const auto &arg_rec = func.args[args_copied];
                    handle value;

                    if (arg_rec.value) {
                        value = arg_rec.value;
                    }
                    if (value) {
                        call.args.push_back(value);
                        call.args_convert.push_back(arg_rec.convert);
                    } else {
                        break;
                    }
                }

                if (args_copied < func.nargs_pos_only) {
                    continue; // Not enough defaults to fill the positional arguments
                }
            }

            // 2. Check kwargs and, failing that, defaults that may help complete the list
            if (args_copied < num_args) {
                bool copied_kwargs = false;

                for (; args_copied < num_args; ++args_copied) {
                    const auto &arg_rec = func.args[args_copied];

                    handle value;
                    if (kwargs_in && arg_rec.name) {
                        value = dict_getitemstring(kwargs.ptr(), arg_rec.name);
                    }

                    if (value) {
                        // Consume a kwargs value
                        // Copy-on-write: only copy the kwargs dict the first time we consume.
                        if (!copied_kwargs) {
                            kwargs = reinterpret_steal<dict>(PyDict_Copy(kwargs.ptr()));
                            copied_kwargs = true;
                        }
                        if (PyDict_DelItemString(kwargs.ptr(), arg_rec.name) == -1) {
                            throw error_already_set();
                        }
                    } else if (arg_rec.value) {
                        value = arg_rec.value;
                    }

                    if (!arg_rec.none && value.is_none()) {
                        break;
                    }

                    if (value) {
                        // If we're at the py::args index then first insert a stub for it to be
                        // replaced later
                        if (func.has_args && call.args.size() == func.nargs_pos) {
                            call.args.push_back(none());
                        }

                        call.args.push_back(value);
                        call.args_convert.push_back(arg_rec.convert);
                    } else {
                        break;
                    }
                }

                if (args_copied < num_args) {
                    continue; // Not enough arguments, defaults, or kwargs to fill the
                              // positional arguments
                }
            }

            // 3. Check everything was consumed (unless we have a kwargs arg)
            if (kwargs && !kwargs.empty() && !func.has_kwargs) {
                continue; // Unconsumed kwargs, but no py::kwargs argument to accept them
            }

            // 4a. If we have a py::args argument, create a new tuple with leftovers
            if (func.has_args) {
                tuple extra_args;
                if (args_to_copy == 0) {
                    // We didn't copy out any position arguments from the args_in tuple, so we
                    // can reuse it directly without copying:
                    extra_args = reinterpret_borrow<tuple>(args_in);
                } else if (positional_args_copied >= n_args_in) {
                    extra_args = tuple(0);
                } else {
                    size_t args_size = n_args_in - positional_args_copied;
                    extra_args = tuple(args_size);
                    for (size_t i = 0; i < args_size; ++i) {
                        extra_args[i] = PyTuple_GET_ITEM(args_in, positional_args_copied + i);
                    }
                }
                // Place the extra tuple either at the end or in the reserved stub slot.
                if (call.args.size() <= func.nargs_pos) {
                    call.args.push_back(extra_args);
                } else {
                    call.args[func.nargs_pos] = extra_args;
                }
                call.args_convert.push_back(false);
                call.args_ref = std::move(extra_args);
            }

            // 4b. If we have a py::kwargs, pass on any remaining kwargs
            if (func.has_kwargs) {
                if (!kwargs.ptr()) {
                    kwargs = dict(); // If we didn't get one, send an empty one
                }
                call.args.push_back(kwargs);
                call.args_convert.push_back(false);
                call.kwargs_ref = std::move(kwargs);
            }

// 5. Put everything in a vector.  Not technically step 5, we've been building it
// in `call.args` all along.
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
            if (call.args.size() != func.nargs || call.args_convert.size() != func.nargs) {
                pybind11_fail("Internal error: function call dispatcher inserted wrong number "
                              "of arguments!");
            }
#endif

            std::vector<bool> second_pass_convert;
            if (overloaded) {
                // We're in the first no-convert pass, so swap out the conversion flags for a
                // set of all-false flags.  If the call fails, we'll swap the flags back in for
                // the conversion-allowed call below.
                second_pass_convert.resize(func.nargs, false);
                call.args_convert.swap(second_pass_convert);
            }

            // 6. Call the function.
            try {
                loader_life_support guard{};
                result = func.impl(call);
            } catch (reference_cast_error &) {
                result = PYBIND11_TRY_NEXT_OVERLOAD;
            }

            if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD) {
                break;
            }

            if (overloaded) {
                // The (overloaded) call failed; if the call has at least one argument that
                // permits conversion (i.e. it hasn't been explicitly specified `.noconvert()`)
                // then add this call to the list of second pass overloads to try.
                for (size_t i = func.is_method ? 1 : 0; i < pos_args; i++) {
                    if (second_pass_convert[i]) {
                        // Found one: swap the converting flags back in and store the call for
                        // the second pass.
                        call.args_convert.swap(second_pass_convert);
                        second_pass.push_back(std::move(call));
                        break;
                    }
                }
            }
        }

        if (overloaded && !second_pass.empty() && result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) {
            // The no-conversion pass finished without success, try again with conversion
            // allowed
            for (auto &call : second_pass) {
                try {
                    loader_life_support guard{};
                    result = call.func.impl(call);
                } catch (reference_cast_error &) {
                    result = PYBIND11_TRY_NEXT_OVERLOAD;
                }

                if (result.ptr() != PYBIND11_TRY_NEXT_OVERLOAD) {
                    // The error reporting logic below expects 'it' to be valid, as it would be
                    // if we'd encountered this failure in the first-pass loop.
                    if (!result) {
                        it = &call.func;
                    }
                    break;
                }
            }
        }
    } catch (error_already_set &e) {
        e.restore();
        return nullptr;
#ifdef __GLIBCXX__
    } catch (abi::__forced_unwind &) {
        // Never swallow forced stack unwinding (e.g. thread cancellation) on libstdc++.
        throw;
#endif
    } catch (...) {
        /* When an exception is caught, give each registered exception
           translator a chance to translate it to a Python exception. First
           all module-local translators will be tried in reverse order of
           registration. If none of the module-locale translators handle
           the exception (or there are no module-locale translators) then
           the global translators will be tried, also in reverse order of
           registration.

           A translator may choose to do one of the following:

            - catch the exception and call PyErr_SetString or PyErr_SetObject
              to set a standard (or custom) Python exception, or
            - do nothing and let the exception fall through to the next translator, or
            - delegate translation to the next translator by throwing a new type of exception.
         */

        auto &local_exception_translators
            = get_local_internals().registered_exception_translators;
        if (detail::apply_exception_translators(local_exception_translators)) {
            return nullptr;
        }
        auto &exception_translators = get_internals().registered_exception_translators;
        if (detail::apply_exception_translators(exception_translators)) {
            return nullptr;
        }

        PyErr_SetString(PyExc_SystemError,
                        "Exception escaped from default exception translator!");
        return nullptr;
    }

    auto append_note_if_missing_header_is_suspected = [](std::string &msg) {
        if (msg.find("std::") != std::string::npos) {
            msg += "\n\n"
                   "Did you forget to `#include <pybind11/stl.h>`? Or <pybind11/complex.h>,\n"
                   "<pybind11/functional.h>, <pybind11/chrono.h>, etc. Some automatic\n"
                   "conversions are optional and require extra headers to be included\n"
                   "when compiling your pybind11 module.";
        }
    };

    if (result.ptr() == PYBIND11_TRY_NEXT_OVERLOAD) {
        // No overload matched: build a detailed TypeError listing all signatures.
        if (overloads->is_operator) {
            return handle(Py_NotImplemented).inc_ref().ptr();
        }

        std::string msg = std::string(overloads->name) + "(): incompatible "
                          + std::string(overloads->is_constructor ? "constructor" : "function")
                          + " arguments. The following argument types are supported:\n";

        int ctr = 0;
        for (const function_record *it2 = overloads; it2 != nullptr; it2 = it2->next) {
            msg += "    " + std::to_string(++ctr) + ". ";

            bool wrote_sig = false;
            if (overloads->is_constructor) {
                // For a constructor, rewrite `(self: Object, arg0, ...) -> NoneType` as
                // `Object(arg0, ...)`
                std::string sig = it2->signature;
                size_t start = sig.find('(') + 7; // skip "(self: "
                if (start < sig.size()) {
                    // End at the , for the next argument
                    size_t end = sig.find(", "), next = end + 2;
                    size_t ret = sig.rfind(" -> ");
                    // Or the ), if there is no comma:
                    if (end >= sig.size()) {
                        next = end = sig.find(')');
                    }
                    if (start < end && next < sig.size()) {
                        msg.append(sig, start, end - start);
                        msg += '(';
                        msg.append(sig, next, ret - next);
                        wrote_sig = true;
                    }
                }
            }
            if (!wrote_sig) {
                msg += it2->signature;
            }

            msg += '\n';
        }
        msg += "\nInvoked with: ";
        auto args_ = reinterpret_borrow<tuple>(args_in);
        bool some_args = false;
        for (size_t ti = overloads->is_constructor ? 1 : 0; ti < args_.size(); ++ti) {
            if (!some_args) {
                some_args = true;
            } else {
                msg += ", ";
            }
            try {
                msg += pybind11::repr(args_[ti]);
            } catch (const error_already_set &) {
                msg += "<repr raised Error>";
            }
        }
        if (kwargs_in) {
            auto kwargs = reinterpret_borrow<dict>(kwargs_in);
            if (!kwargs.empty()) {
                if (some_args) {
                    msg += "; ";
                }
                msg += "kwargs: ";
                bool first = true;
                for (auto kwarg : kwargs) {
                    if (first) {
                        first = false;
                    } else {
                        msg += ", ";
                    }
                    msg += pybind11::str("{}=").format(kwarg.first);
                    try {
                        msg += pybind11::repr(kwarg.second);
                    } catch (const error_already_set &) {
                        msg += "<repr raised Error>";
                    }
                }
            }
        }

        append_note_if_missing_header_is_suspected(msg);
        // Attach additional error info to the exception if supported
        if (PyErr_Occurred()) {
            // #HelpAppreciated: unit test coverage for this branch.
            raise_from(PyExc_TypeError, msg.c_str());
            return nullptr;
        }
        PyErr_SetString(PyExc_TypeError, msg.c_str());
        return nullptr;
    }
    if (!result) {
        // An overload ran but its return value could not be converted to Python.
        std::string msg = "Unable to convert function return value to a "
                          "Python type! The signature was\n\t";
        msg += it->signature;
        append_note_if_missing_header_is_suspected(msg);
        // Attach additional error info to the exception if supported
        if (PyErr_Occurred()) {
            raise_from(PyExc_TypeError, msg.c_str());
            return nullptr;
        }
        PyErr_SetString(PyExc_TypeError, msg.c_str());
        return nullptr;
    }
    if (overloads->is_constructor && !self_value_and_holder.holder_constructed()) {
        // If the constructor did not construct the holder itself, do it now.
        auto *pi = reinterpret_cast<instance *>(parent.ptr());
        self_value_and_holder.type->init_instance(pi, nullptr);
    }
    return result.ptr();
}
};
/// Wrapper for Python extension modules
class module_ : public object {
public:
    PYBIND11_OBJECT_DEFAULT(module_, object, PyModule_Check)

    /// Create a new top-level Python module with the given name and docstring
    PYBIND11_DEPRECATED("Use PYBIND11_MODULE or module_::create_extension_module instead")
    explicit module_(const char *name, const char *doc = nullptr) {
        // NOTE(review): the PyModuleDef allocated here is never freed — it must
        // outlive the module, and this constructor is deprecated anyway.
        *this = create_extension_module(name, doc, new PyModuleDef());
    }

    /** \rst
        Create Python binding for a new function within the module scope. ``Func``
        can be a plain C++ function, a function pointer, or a lambda function. For
        details on the ``Extra&& ... extra`` argument, see section :ref:`extras`.
    \endrst */
    template <typename Func, typename... Extra>
    module_ &def(const char *name_, Func &&f, const Extra &...extra) {
        // An existing attribute with the same name (if any) is passed as the
        // sibling so cpp_function can build an overload chain.
        cpp_function func(std::forward<Func>(f),
                          name(name_),
                          scope(*this),
                          sibling(getattr(*this, name_, none())),
                          extra...);
        // NB: allow overwriting here because cpp_function sets up a chain with the intention of
        // overwriting (and has already checked internally that it isn't overwriting
        // non-functions).
        add_object(name_, func, true /* overwrite */);
        return *this;
    }

    /** \rst
        Create and return a new Python submodule with the given name and docstring.
        This also works recursively, i.e.

        .. code-block:: cpp

            py::module_ m("example", "pybind11 example plugin");
            py::module_ m2 = m.def_submodule("sub", "A submodule of 'example'");
            py::module_ m3 = m2.def_submodule("subsub", "A submodule of 'example.sub'");
    \endrst */
    module_ def_submodule(const char *name, const char *doc = nullptr) {
        const char *this_name = PyModule_GetName(m_ptr);
        if (this_name == nullptr) {
            throw error_already_set();
        }
        // Register under the dotted name so `import parent.sub` works.
        std::string full_name = std::string(this_name) + '.' + name;
        handle submodule = PyImport_AddModule(full_name.c_str());
        if (!submodule) {
            throw error_already_set();
        }
        auto result = reinterpret_borrow<module_>(submodule);
        if (doc && options::show_user_defined_docstrings()) {
            result.attr("__doc__") = pybind11::str(doc);
        }
        // Also expose the submodule as an attribute of this module.
        attr(name) = result;
        return result;
    }

    /// Import and return a module or throws `error_already_set`.
    static module_ import(const char *name) {
        PyObject *obj = PyImport_ImportModule(name);
        if (!obj) {
            throw error_already_set();
        }
        return reinterpret_steal<module_>(obj);
    }

    /// Reload the module or throws `error_already_set`.
    void reload() {
        PyObject *obj = PyImport_ReloadModule(ptr());
        if (!obj) {
            throw error_already_set();
        }
        *this = reinterpret_steal<module_>(obj);
    }

    /** \rst
        Adds an object to the module using the given name.  Throws if an object with the given name
        already exists.

        ``overwrite`` should almost always be false: attempting to overwrite objects that pybind11
        has established will, in most cases, break things.
    \endrst */
    PYBIND11_NOINLINE void add_object(const char *name, handle obj, bool overwrite = false) {
        if (!overwrite && hasattr(*this, name)) {
            pybind11_fail(
                "Error during initialization: multiple incompatible definitions with name \""
                + std::string(name) + "\"");
        }
        // PyModule_AddObject steals a reference on success, hence the inc_ref().
        PyModule_AddObject(ptr(), name, obj.inc_ref().ptr() /* steals a reference */);
    }

    using module_def = PyModuleDef; // TODO: Can this be removed (it was needed only for Python 2)?

    /** \rst
        Create a new top-level module that can be used as the main module of a C extension.

        ``def`` should point to a statically allocated module_def.
    \endrst */
    static module_ create_extension_module(const char *name, const char *doc, module_def *def) {
        // module_def is PyModuleDef
        // Placement new (not an allocation).
        def = new (def)
            PyModuleDef{/* m_base */ PyModuleDef_HEAD_INIT,
                        /* m_name */ name,
                        /* m_doc */ options::show_user_defined_docstrings() ? doc : nullptr,
                        /* m_size */ -1,
                        /* m_methods */ nullptr,
                        /* m_slots */ nullptr,
                        /* m_traverse */ nullptr,
                        /* m_clear */ nullptr,
                        /* m_free */ nullptr};
        auto *m = PyModule_Create(def);
        if (m == nullptr) {
            if (PyErr_Occurred()) {
                throw error_already_set();
            }
            pybind11_fail("Internal error in module_::create_extension_module()");
        }
        // TODO: Should be reinterpret_steal for Python 3, but Python also steals it again when
        //       returned from PyInit_...
        //       For Python 2, reinterpret_borrow was correct.
        return reinterpret_borrow<module_>(m);
    }
};
// When inside a namespace (or anywhere as long as it's not the first item on a line),
// C++20 allows "module" to be used. This is provided for backward compatibility, and for
// simplicity, if someone wants to use py::module for example, that is perfectly safe.
// NOTE: prefer py::module_ in new code; this alias exists purely so that older
// code spelling py::module keeps compiling.
using module = module_;
/// \ingroup python_builtins
/// Return a dictionary representing the global variables in the current execution frame,
/// or ``__main__.__dict__`` if there is no frame (usually when the interpreter is embedded).
inline dict globals() {
    // PyEval_GetGlobals returns a borrowed reference, or nullptr without a frame.
    PyObject *frame_globals = PyEval_GetGlobals();
    if (frame_globals != nullptr) {
        return reinterpret_borrow<dict>(frame_globals);
    }
    // No execution frame: fall back to the __main__ module's dictionary.
    return reinterpret_borrow<dict>(module_::import("__main__").attr("__dict__").ptr());
}
// Deprecated helper: construct a types.SimpleNamespace from keyword arguments.
template <typename... Args, typename = detail::enable_if_t<args_are_all_keyword_or_ds<Args...>()>>
PYBIND11_DEPRECATED("make_simple_namespace should be replaced with "
                    "py::module_::import(\"types\").attr(\"SimpleNamespace\") ")
object make_simple_namespace(Args &&...args_) {
    // Look up the SimpleNamespace type, then call it with the forwarded kwargs.
    object ns_factory = module_::import("types").attr("SimpleNamespace");
    return ns_factory(std::forward<Args>(args_)...);
}
PYBIND11_NAMESPACE_BEGIN(detail)
/// Generic support for creating new Python heap types
class generic_type : public object {
public:
PYBIND11_OBJECT_DEFAULT(generic_type, object, PyType_Check)
protected:
void initialize(const type_record &rec) {
if (rec.scope && hasattr(rec.scope, "__dict__")
&& rec.scope.attr("__dict__").contains(rec.name)) {
pybind11_fail("generic_type: cannot initialize type \"" + std::string(rec.name)
+ "\": an object with that name is already defined");
}
if ((rec.module_local ? get_local_type_info(*rec.type) : get_global_type_info(*rec.type))
!= nullptr) {
pybind11_fail("generic_type: type \"" + std::string(rec.name)
+ "\" is already registered!");
}
m_ptr = make_new_python_type(rec);
/* Register supplemental type information in C++ dict */
auto *tinfo = new detail::type_info();
tinfo->type = (PyTypeObject *) m_ptr;
tinfo->cpptype = rec.type;
tinfo->type_size = rec.type_size;
tinfo->type_align = rec.type_align;
tinfo->operator_new = rec.operator_new;
tinfo->holder_size_in_ptrs = size_in_ptrs(rec.holder_size);
tinfo->init_instance = rec.init_instance;
tinfo->dealloc = rec.dealloc;
tinfo->simple_type = true;
tinfo->simple_ancestors = true;
tinfo->default_holder = rec.default_holder;
tinfo->module_local = rec.module_local;
auto &internals = get_internals();
auto tindex = std::type_index(*rec.type);
tinfo->direct_conversions = &internals.direct_conversions[tindex];
if (rec.module_local) {
get_local_internals().registered_types_cpp[tindex] = tinfo;
} else {
internals.registered_types_cpp[tindex] = tinfo;
}
internals.registered_types_py[(PyTypeObject *) m_ptr] = {tinfo};
if (rec.bases.size() > 1 || rec.multiple_inheritance) {
mark_parents_nonsimple(tinfo->type);
tinfo->simple_ancestors = false;
} else if (rec.bases.size() == 1) {
auto *parent_tinfo = get_type_info((PyTypeObject *) rec.bases[0].ptr());
assert(parent_tinfo != nullptr);
bool parent_simple_ancestors = parent_tinfo->simple_ancestors;
tinfo->simple_ancestors = parent_simple_ancestors;
// The parent can no longer be a simple type if it has MI and has a child
parent_tinfo->simple_type = parent_tinfo->simple_type && parent_simple_ancestors;
}
if (rec.module_local) {
// Stash the local typeinfo and loader so that external modules can access it.
tinfo->module_local_load = &type_caster_generic::local_load;
setattr(m_ptr, PYBIND11_MODULE_LOCAL_ID, capsule(tinfo));
}
}
/// Helper function which tags all parents of a type using mult. inheritance
void mark_parents_nonsimple(PyTypeObject *value) {
auto t = reinterpret_borrow<tuple>(value->tp_bases);
for (handle h : t) {
auto *tinfo2 = get_type_info((PyTypeObject *) h.ptr());
if (tinfo2) {
tinfo2->simple_type = false;
}
mark_parents_nonsimple((PyTypeObject *) h.ptr());
}
}
void install_buffer_funcs(buffer_info *(*get_buffer)(PyObject *, void *),
void *get_buffer_data) {
auto *type = (PyHeapTypeObject *) m_ptr;
auto *tinfo = detail::get_type_info(&type->ht_type);
if (!type->ht_type.tp_as_buffer) {
pybind11_fail("To be able to register buffer protocol support for the type '"
+ get_fully_qualified_tp_name(tinfo->type)
+ "' the associated class<>(..) invocation must "
"include the pybind11::buffer_protocol() annotation!");
}
tinfo->get_buffer = get_buffer;
tinfo->get_buffer_data = get_buffer_data;
}
// rec_func must be set for either fget or fset.
// Creates the actual Python property object: a pybind11 static_property for
// static properties (so it works on the class itself), or the builtin
// PyProperty_Type otherwise, and binds it as an attribute of this type.
void def_property_static_impl(const char *name,
                              handle fget,
                              handle fset,
                              detail::function_record *rec_func) {
    // "static" here means: not an ordinary method bound to an instance scope.
    const auto is_static = (rec_func != nullptr) && !(rec_func->is_method && rec_func->scope);
    // Only forward the user docstring if the global option allows it.
    const auto has_doc = (rec_func != nullptr) && (rec_func->doc != nullptr)
                         && pybind11::options::show_user_defined_docstrings();
    auto property = handle(
        (PyObject *) (is_static ? get_internals().static_property_type : &PyProperty_Type));
    // Missing getter/setter slots are filled with None, matching property(...).
    attr(name) = property(fget.ptr() ? fget : none(),
                          fset.ptr() ? fset : none(),
                          /*deleter*/ none(),
                          pybind11::str(has_doc ? rec_func->doc : ""));
}
};
/// Set the pointer to operator new if it exists. The cast is needed because it can be overloaded.
template <typename T,
          typename = void_t<decltype(static_cast<void *(*) (size_t)>(T::operator new))>>
void set_operator_new(type_record *r) {
    r->operator_new = &T::operator new;
}
// SFINAE fallback: chosen when T has no usable `operator new(size_t)`;
// leaves r->operator_new at its default (global operator new will be used).
template <typename>
void set_operator_new(...) {}
// Trait: true iff T declares a class-specific `operator delete(void *)`.
template <typename T, typename SFINAE = void>
struct has_operator_delete : std::false_type {};
template <typename T>
struct has_operator_delete<T, void_t<decltype(static_cast<void (*)(void *)>(T::operator delete))>>
    : std::true_type {};
// Trait: true iff T declares a sized `operator delete(void *, size_t)`.
template <typename T, typename SFINAE = void>
struct has_operator_delete_size : std::false_type {};
template <typename T>
struct has_operator_delete_size<
    T,
    void_t<decltype(static_cast<void (*)(void *, size_t)>(T::operator delete))>> : std::true_type {
};
/// Call class-specific delete if it exists or global otherwise. Can also be an overload set.
// Preferred overload: T has its own unsized operator delete.
template <typename T, enable_if_t<has_operator_delete<T>::value, int> = 0>
void call_operator_delete(T *p, size_t, size_t) {
    T::operator delete(p);
}
// Next best: T only has a sized operator delete; forward the size.
template <typename T,
          enable_if_t<!has_operator_delete<T>::value && has_operator_delete_size<T>::value, int>
          = 0>
void call_operator_delete(T *p, size_t s, size_t) {
    T::operator delete(p, s);
}
// Fallback: global operator delete, selecting the aligned and/or sized forms
// when the compiler/runtime supports them (alignment `a` only matters when it
// exceeds the default new alignment).
inline void call_operator_delete(void *p, size_t s, size_t a) {
    (void) s;
    (void) a;
#if defined(__cpp_aligned_new) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
    if (a > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
#    ifdef __cpp_sized_deallocation
        ::operator delete(p, s, std::align_val_t(a));
#    else
        ::operator delete(p, std::align_val_t(a));
#    endif
        return;
    }
#endif
#ifdef __cpp_sized_deallocation
    ::operator delete(p, s);
#else
    ::operator delete(p);
#endif
}
/// Attach a bound method to a class object. Mirrors CPython's data-model rule:
/// a class that defines __eq__ without also defining __hash__ becomes
/// unhashable, which is expressed by setting __hash__ to None.
inline void add_class_method(object &cls, const char *name_, const cpp_function &cf) {
    cls.attr(cf.name()) = cf;
    const bool defines_eq = std::strcmp(name_, "__eq__") == 0;
    if (defines_eq) {
        object ns = cls.attr("__dict__");
        if (!ns.contains("__hash__")) {
            cls.attr("__hash__") = none();
        }
    }
}
PYBIND11_NAMESPACE_END(detail)
/// Given a pointer to a member function, cast it to its `Derived` version.
/// Forward everything else unchanged.
template <typename /*Derived*/, typename F>
auto method_adaptor(F &&f) -> decltype(std::forward<F>(f)) {
    return std::forward<F>(f);
}
// Overload for non-const member function pointers: implicitly converts a
// Base::* pointer to Derived::* so that `class_<Derived>` can bind inherited
// methods; rejects inaccessible (private/ambiguous) bases at compile time.
template <typename Derived, typename Return, typename Class, typename... Args>
auto method_adaptor(Return (Class::*pmf)(Args...)) -> Return (Derived::*)(Args...) {
    static_assert(
        detail::is_accessible_base_of<Class, Derived>::value,
        "Cannot bind an inaccessible base class method; use a lambda definition instead");
    return pmf;
}
// Same as above for const member function pointers.
template <typename Derived, typename Return, typename Class, typename... Args>
auto method_adaptor(Return (Class::*pmf)(Args...) const) -> Return (Derived::*)(Args...) const {
    static_assert(
        detail::is_accessible_base_of<Class, Derived>::value,
        "Cannot bind an inaccessible base class method; use a lambda definition instead");
    return pmf;
}
/// Wrapper used to bind a C++ type (and optionally an alias/trampoline type,
/// holder type, and base classes, all passed as extra template `options`) as a
/// new Python type. Most member functions return *this to allow chaining.
template <typename type_, typename... options>
class class_ : public detail::generic_type {
    // Classification helpers for the `options...` pack:
    template <typename T>
    using is_holder = detail::is_holder_type<type_, T>;
    template <typename T>
    using is_subtype = detail::is_strict_base_of<type_, T>;
    template <typename T>
    using is_base = detail::is_strict_base_of<T, type_>;
    // struct instead of using here to help MSVC:
    template <typename T>
    struct is_valid_class_option : detail::any_of<is_holder<T>, is_subtype<T>, is_base<T>> {};

public:
    using type = type_;
    // The alias (trampoline) type, if one was given as a template option; void otherwise.
    using type_alias = detail::exactly_one_t<is_subtype, void, options...>;
    constexpr static bool has_alias = !std::is_void<type_alias>::value;
    // Holder defaults to unique_ptr unless a holder option (e.g. shared_ptr) was given.
    using holder_type = detail::exactly_one_t<is_holder, std::unique_ptr<type>, options...>;
    static_assert(detail::all_of<is_valid_class_option<options>...>::value,
                  "Unknown/invalid class_ template parameters provided");
    static_assert(!has_alias || std::is_polymorphic<type>::value,
                  "Cannot use an alias class with a non-polymorphic type");
    PYBIND11_OBJECT(class_, generic_type, PyType_Check)

    /// Register the type: fills a type_record from the template parameters and
    /// `extra...` annotations, then hands it to generic_type::initialize().
    template <typename... Extra>
    class_(handle scope, const char *name, const Extra &...extra) {
        using namespace detail;
        // MI can only be specified via class_ template options, not constructor parameters
        static_assert(
            none_of<is_pyobject<Extra>...>::value || // no base class arguments, or:
                (constexpr_sum(is_pyobject<Extra>::value...) == 1 && // Exactly one base
                 constexpr_sum(is_base<options>::value...) == 0 &&   // no template option bases
                 // no multiple_inheritance attr
                 none_of<std::is_same<multiple_inheritance, Extra>...>::value),
            "Error: multiple inheritance bases must be specified via class_ template options");
        type_record record;
        record.scope = scope;
        record.name = name;
        record.type = &typeid(type);
        // Instances are sized/aligned for the alias type when one exists, since
        // that's what may actually be constructed for Python-derived classes.
        record.type_size = sizeof(conditional_t<has_alias, type_alias, type>);
        record.type_align = alignof(conditional_t<has_alias, type_alias, type> &);
        record.holder_size = sizeof(holder_type);
        record.init_instance = init_instance;
        record.dealloc = dealloc;
        record.default_holder = detail::is_instantiation<std::unique_ptr, holder_type>::value;
        set_operator_new<type>(&record);
        /* Register base classes specified via template arguments to class_, if any */
        PYBIND11_EXPAND_SIDE_EFFECTS(add_base<options>(record));
        /* Process optional arguments, if any */
        process_attributes<Extra...>::init(extra..., &record);
        generic_type::initialize(record);
        if (has_alias) {
            // Map the alias type to the same registered type_info as the main
            // type, so casts involving the trampoline resolve identically.
            auto &instances = record.module_local ? get_local_internals().registered_types_cpp
                                                  : get_internals().registered_types_cpp;
            instances[std::type_index(typeid(type_alias))]
                = instances[std::type_index(typeid(type))];
        }
    }

    // Record a base class in the type_record, along with an upcasting thunk
    // (the static_cast handles any pointer adjustment for MI).
    template <typename Base, detail::enable_if_t<is_base<Base>::value, int> = 0>
    static void add_base(detail::type_record &rec) {
        rec.add_base(typeid(Base), [](void *src) -> void * {
            return static_cast<Base *>(reinterpret_cast<type *>(src));
        });
    }
    // No-op overload for non-base template options (holders, alias types).
    template <typename Base, detail::enable_if_t<!is_base<Base>::value, int> = 0>
    static void add_base(detail::type_record &) {}

    /// Bind a (member) function or functor as a method named `name_`.
    template <typename Func, typename... Extra>
    class_ &def(const char *name_, Func &&f, const Extra &...extra) {
        cpp_function cf(method_adaptor<type>(std::forward<Func>(f)),
                        name(name_),
                        is_method(*this),
                        // sibling: existing attribute of the same name becomes an overload chain.
                        sibling(getattr(*this, name_, none())),
                        extra...);
        add_class_method(*this, name_, cf);
        return *this;
    }

    /// Bind a free function as a static method of this class.
    template <typename Func, typename... Extra>
    class_ &def_static(const char *name_, Func &&f, const Extra &...extra) {
        static_assert(!std::is_member_function_pointer<Func>::value,
                      "def_static(...) called with a non-static member function pointer");
        cpp_function cf(std::forward<Func>(f),
                        name(name_),
                        scope(*this),
                        sibling(getattr(*this, name_, none())),
                        extra...);
        auto cf_name = cf.name();
        attr(std::move(cf_name)) = staticmethod(std::move(cf));
        return *this;
    }

    // Operator overload helpers (py::self + py::self etc.) dispatch through
    // their own execute()/execute_cast() hooks.
    template <typename T, typename... Extra, detail::enable_if_t<T::op_enable_if_hook, int> = 0>
    class_ &def(const T &op, const Extra &...extra) {
        op.execute(*this, extra...);
        return *this;
    }
    template <typename T, typename... Extra, detail::enable_if_t<T::op_enable_if_hook, int> = 0>
    class_ &def_cast(const T &op, const Extra &...extra) {
        op.execute_cast(*this, extra...);
        return *this;
    }

    /// Bind a constructor (py::init<Args...>()).
    template <typename... Args, typename... Extra>
    class_ &def(const detail::initimpl::constructor<Args...> &init, const Extra &...extra) {
        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(init);
        init.execute(*this, extra...);
        return *this;
    }
    /// Bind an alias-only constructor (py::init_alias<Args...>()).
    template <typename... Args, typename... Extra>
    class_ &def(const detail::initimpl::alias_constructor<Args...> &init, const Extra &...extra) {
        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(init);
        init.execute(*this, extra...);
        return *this;
    }
    /// Bind a factory-function constructor (py::init(callable)).
    template <typename... Args, typename... Extra>
    class_ &def(detail::initimpl::factory<Args...> &&init, const Extra &...extra) {
        std::move(init).execute(*this, extra...);
        return *this;
    }
    /// Bind pickling support (py::pickle(getstate, setstate)).
    template <typename... Args, typename... Extra>
    class_ &def(detail::initimpl::pickle_factory<Args...> &&pf, const Extra &...extra) {
        std::move(pf).execute(*this, extra...);
        return *this;
    }

    /// Register buffer-protocol support from a callable producing a buffer_info.
    /// The callable is heap-captured; a weakref to the type frees it when the
    /// type itself is garbage collected.
    template <typename Func>
    class_ &def_buffer(Func &&func) {
        struct capture {
            Func func;
        };
        auto *ptr = new capture{std::forward<Func>(func)};
        install_buffer_funcs(
            [](PyObject *obj, void *ptr) -> buffer_info * {
                detail::make_caster<type> caster;
                if (!caster.load(obj, false)) {
                    return nullptr;
                }
                return new buffer_info(((capture *) ptr)->func(std::move(caster)));
            },
            ptr);
        weakref(m_ptr, cpp_function([ptr](handle wr) {
                    delete ptr;
                    wr.dec_ref();
                }))
            .release();
        return *this;
    }
    // Member-function-pointer conveniences for def_buffer.
    template <typename Return, typename Class, typename... Args>
    class_ &def_buffer(Return (Class::*func)(Args...)) {
        return def_buffer([func](type &obj) { return (obj.*func)(); });
    }
    template <typename Return, typename Class, typename... Args>
    class_ &def_buffer(Return (Class::*func)(Args...) const) {
        return def_buffer([func](const type &obj) { return (obj.*func)(); });
    }

    /// Expose a data member as a read/write Python attribute.
    template <typename C, typename D, typename... Extra>
    class_ &def_readwrite(const char *name, D C::*pm, const Extra &...extra) {
        static_assert(std::is_same<C, type>::value || std::is_base_of<C, type>::value,
                      "def_readwrite() requires a class member (or base class member)");
        cpp_function fget([pm](const type &c) -> const D & { return c.*pm; }, is_method(*this)),
            fset([pm](type &c, const D &value) { c.*pm = value; }, is_method(*this));
        def_property(name, fget, fset, return_value_policy::reference_internal, extra...);
        return *this;
    }

    /// Expose a data member as a read-only Python attribute.
    template <typename C, typename D, typename... Extra>
    class_ &def_readonly(const char *name, const D C::*pm, const Extra &...extra) {
        static_assert(std::is_same<C, type>::value || std::is_base_of<C, type>::value,
                      "def_readonly() requires a class member (or base class member)");
        cpp_function fget([pm](const type &c) -> const D & { return c.*pm; }, is_method(*this));
        def_property_readonly(name, fget, return_value_policy::reference_internal, extra...);
        return *this;
    }

    /// Expose a static/global variable as a read/write class attribute.
    template <typename D, typename... Extra>
    class_ &def_readwrite_static(const char *name, D *pm, const Extra &...extra) {
        cpp_function fget([pm](const object &) -> const D & { return *pm; }, scope(*this)),
            fset([pm](const object &, const D &value) { *pm = value; }, scope(*this));
        def_property_static(name, fget, fset, return_value_policy::reference, extra...);
        return *this;
    }

    /// Expose a static/global variable as a read-only class attribute.
    template <typename D, typename... Extra>
    class_ &def_readonly_static(const char *name, const D *pm, const Extra &...extra) {
        cpp_function fget([pm](const object &) -> const D & { return *pm; }, scope(*this));
        def_property_readonly_static(name, fget, return_value_policy::reference, extra...);
        return *this;
    }

    /// Uses return_value_policy::reference_internal by default
    template <typename Getter, typename... Extra>
    class_ &def_property_readonly(const char *name, const Getter &fget, const Extra &...extra) {
        return def_property_readonly(name,
                                     cpp_function(method_adaptor<type>(fget)),
                                     return_value_policy::reference_internal,
                                     extra...);
    }
    /// Uses cpp_function's return_value_policy by default
    template <typename... Extra>
    class_ &
    def_property_readonly(const char *name, const cpp_function &fget, const Extra &...extra) {
        return def_property(name, fget, nullptr, extra...);
    }
    /// Uses return_value_policy::reference by default
    template <typename Getter, typename... Extra>
    class_ &
    def_property_readonly_static(const char *name, const Getter &fget, const Extra &...extra) {
        return def_property_readonly_static(
            name, cpp_function(fget), return_value_policy::reference, extra...);
    }
    /// Uses cpp_function's return_value_policy by default
    template <typename... Extra>
    class_ &def_property_readonly_static(const char *name,
                                         const cpp_function &fget,
                                         const Extra &...extra) {
        return def_property_static(name, fget, nullptr, extra...);
    }
    /// Uses return_value_policy::reference_internal by default
    template <typename Getter, typename Setter, typename... Extra>
    class_ &
    def_property(const char *name, const Getter &fget, const Setter &fset, const Extra &...extra) {
        return def_property(
            name, fget, cpp_function(method_adaptor<type>(fset), is_setter()), extra...);
    }
    template <typename Getter, typename... Extra>
    class_ &def_property(const char *name,
                         const Getter &fget,
                         const cpp_function &fset,
                         const Extra &...extra) {
        return def_property(name,
                            cpp_function(method_adaptor<type>(fget)),
                            fset,
                            return_value_policy::reference_internal,
                            extra...);
    }
    /// Uses cpp_function's return_value_policy by default
    template <typename... Extra>
    class_ &def_property(const char *name,
                         const cpp_function &fget,
                         const cpp_function &fset,
                         const Extra &...extra) {
        return def_property_static(name, fget, fset, is_method(*this), extra...);
    }
    /// Uses return_value_policy::reference by default
    template <typename Getter, typename... Extra>
    class_ &def_property_static(const char *name,
                                const Getter &fget,
                                const cpp_function &fset,
                                const Extra &...extra) {
        return def_property_static(
            name, cpp_function(fget), fset, return_value_policy::reference, extra...);
    }
    /// Uses cpp_function's return_value_policy by default.
    /// Terminal overload: applies `extra...` annotations to the getter/setter
    /// function records, then creates the actual property object.
    template <typename... Extra>
    class_ &def_property_static(const char *name,
                                const cpp_function &fget,
                                const cpp_function &fset,
                                const Extra &...extra) {
        static_assert(0 == detail::constexpr_sum(std::is_base_of<arg, Extra>::value...),
                      "Argument annotations are not allowed for properties");
        auto rec_fget = get_function_record(fget), rec_fset = get_function_record(fset);
        auto *rec_active = rec_fget;
        if (rec_fget) {
            char *doc_prev = rec_fget->doc; /* 'extra' field may include a property-specific
                                               documentation string */
            detail::process_attributes<Extra...>::init(extra..., rec_fget);
            if (rec_fget->doc && rec_fget->doc != doc_prev) {
                // A new docstring was supplied: free the old one and take an
                // owned copy of the new one (the record owns its doc pointer).
                std::free(doc_prev);
                rec_fget->doc = PYBIND11_COMPAT_STRDUP(rec_fget->doc);
            }
        }
        if (rec_fset) {
            char *doc_prev = rec_fset->doc;
            detail::process_attributes<Extra...>::init(extra..., rec_fset);
            if (rec_fset->doc && rec_fset->doc != doc_prev) {
                std::free(doc_prev);
                rec_fset->doc = PYBIND11_COMPAT_STRDUP(rec_fset->doc);
            }
            if (!rec_active) {
                rec_active = rec_fset;
            }
        }
        def_property_static_impl(name, fget, fset, rec_active);
        return *this;
    }

private:
    /// Initialize holder object, variant 1: object derives from enable_shared_from_this
    template <typename T>
    static void init_holder(detail::instance *inst,
                            detail::value_and_holder &v_h,
                            const holder_type * /* unused */,
                            const std::enable_shared_from_this<T> * /* dummy */) {
        // Reuse an existing shared_ptr control block if the object already has one.
        auto sh = std::dynamic_pointer_cast<typename holder_type::element_type>(
            detail::try_get_shared_from_this(v_h.value_ptr<type>()));
        if (sh) {
            new (std::addressof(v_h.holder<holder_type>())) holder_type(std::move(sh));
            v_h.set_holder_constructed();
        }
        if (!v_h.holder_constructed() && inst->owned) {
            new (std::addressof(v_h.holder<holder_type>())) holder_type(v_h.value_ptr<type>());
            v_h.set_holder_constructed();
        }
    }

    // Copy-construct the holder from an existing one.
    static void init_holder_from_existing(const detail::value_and_holder &v_h,
                                          const holder_type *holder_ptr,
                                          std::true_type /*is_copy_constructible*/) {
        new (std::addressof(v_h.holder<holder_type>()))
            holder_type(*reinterpret_cast<const holder_type *>(holder_ptr));
    }
    // Move-construct the holder when it is not copy-constructible (e.g. unique_ptr).
    static void init_holder_from_existing(const detail::value_and_holder &v_h,
                                          const holder_type *holder_ptr,
                                          std::false_type /*is_copy_constructible*/) {
        new (std::addressof(v_h.holder<holder_type>()))
            holder_type(std::move(*const_cast<holder_type *>(holder_ptr)));
    }

    /// Initialize holder object, variant 2: try to construct from existing holder object, if
    /// possible
    static void init_holder(detail::instance *inst,
                            detail::value_and_holder &v_h,
                            const holder_type *holder_ptr,
                            const void * /* dummy -- not enable_shared_from_this<T>) */) {
        if (holder_ptr) {
            init_holder_from_existing(v_h, holder_ptr, std::is_copy_constructible<holder_type>());
            v_h.set_holder_constructed();
        } else if (detail::always_construct_holder<holder_type>::value || inst->owned) {
            new (std::addressof(v_h.holder<holder_type>())) holder_type(v_h.value_ptr<type>());
            v_h.set_holder_constructed();
        }
    }

    /// Performs instance initialization including constructing a holder and registering the known
    /// instance.  Should be called as soon as the `type` value_ptr is set for an instance.  Takes
    /// an optional pointer to an existing holder to use; if not specified and the instance is
    /// `.owned`, a new holder will be constructed to manage the value pointer.
    static void init_instance(detail::instance *inst, const void *holder_ptr) {
        auto v_h = inst->get_value_and_holder(detail::get_type_info(typeid(type)));
        if (!v_h.instance_registered()) {
            register_instance(inst, v_h.value_ptr(), v_h.type);
            v_h.set_instance_registered();
        }
        // The last argument's type selects variant 1 or 2 of init_holder above.
        init_holder(inst, v_h, (const holder_type *) holder_ptr, v_h.value_ptr<type>());
    }

    /// Deallocates an instance; via holder, if constructed; otherwise via operator delete.
    static void dealloc(detail::value_and_holder &v_h) {
        // We could be deallocating because we are cleaning up after a Python exception.
        // If so, the Python error indicator will be set. We need to clear that before
        // running the destructor, in case the destructor code calls more Python.
        // If we don't, the Python API will exit with an exception, and pybind11 will
        // throw error_already_set from the C++ destructor which is forbidden and triggers
        // std::terminate().
        error_scope scope;
        if (v_h.holder_constructed()) {
            v_h.holder<holder_type>().~holder_type();
            v_h.set_holder_constructed(false);
        } else {
            detail::call_operator_delete(
                v_h.value_ptr<type>(), v_h.type->type_size, v_h.type->type_align);
        }
        v_h.value_ptr() = nullptr;
    }

    // Extract the pybind11 function_record from a bound function handle, or
    // nullptr when the handle is not a pybind11-created function (the record
    // lives in a capsule stored as the PyCFunction's "self" slot).
    static detail::function_record *get_function_record(handle h) {
        h = detail::get_function(h);
        if (!h) {
            return nullptr;
        }
        handle func_self = PyCFunction_GET_SELF(h.ptr());
        if (!func_self) {
            throw error_already_set();
        }
        if (!isinstance<capsule>(func_self)) {
            return nullptr;
        }
        auto cap = reinterpret_borrow<capsule>(func_self);
        if (!detail::is_function_record_capsule(cap)) {
            return nullptr;
        }
        return cap.get_pointer<detail::function_record>();
    }
};
/// Binds an existing constructor taking arguments Args...
template <typename... Args>
detail::initimpl::constructor<Args...> init() {
    return {};
}
/// Like `init<Args...>()`, but the instance is always constructed through the alias class (even
/// when not inheriting on the Python side).
template <typename... Args>
detail::initimpl::alias_constructor<Args...> init_alias() {
    return {};
}
/// Binds a factory function as a constructor
template <typename Func, typename Ret = detail::initimpl::factory<Func>>
Ret init(Func &&f) {
    return {std::forward<Func>(f)};
}
/// Dual-argument factory function: the first function is called when no alias is needed, the
/// second when an alias is needed (i.e. due to python-side inheritance).  Arguments must be
/// identical.
template <typename CFunc, typename AFunc, typename Ret = detail::initimpl::factory<CFunc, AFunc>>
Ret init(CFunc &&c, AFunc &&a) {
    return {std::forward<CFunc>(c), std::forward<AFunc>(a)};
}
/// Binds pickling functions `__getstate__` and `__setstate__` and ensures that the type
/// returned by `__getstate__` is the same as the argument accepted by `__setstate__`.
template <typename GetState, typename SetState>
detail::initimpl::pickle_factory<GetState, SetState> pickle(GetState &&g, SetState &&s) {
    return {std::forward<GetState>(g), std::forward<SetState>(s)};
}
PYBIND11_NAMESPACE_BEGIN(detail)
// Look up the name of an enum member by linear search over the type's
// `__entries` dict (name -> (value, doc)); returns "???" for unknown values.
inline str enum_name(handle arg) {
    dict entries = arg.get_type().attr("__entries");
    for (auto kv : entries) {
        if (handle(kv.second[int_(0)]).equal(arg)) {
            return pybind11::str(kv.first);
        }
    }
    return "???";
}
// Non-template backend for enum_<T>: installs the Python-side machinery
// (__entries dict, repr/str, comparison operators, pickling, hashing) on the
// already-created enum class object `base`, within parent scope `parent`.
struct enum_base {
    enum_base(const handle &base, const handle &parent) : m_base(base), m_parent(parent) {}

    // Set up dunder methods; `is_arithmetic`/`is_convertible` mirror the
    // py::arithmetic() annotation and the C++ Type->Underlying convertibility.
    PYBIND11_NOINLINE void init(bool is_arithmetic, bool is_convertible) {
        m_base.attr("__entries") = dict();
        auto property = handle((PyObject *) &PyProperty_Type);
        auto static_property = handle((PyObject *) get_internals().static_property_type);
        m_base.attr("__repr__") = cpp_function(
            [](const object &arg) -> str {
                handle type = type::handle_of(arg);
                object type_name = type.attr("__name__");
                return pybind11::str("<{}.{}: {}>")
                    .format(std::move(type_name), enum_name(arg), int_(arg));
            },
            name("__repr__"),
            is_method(m_base));
        m_base.attr("name") = property(cpp_function(&enum_name, name("name"), is_method(m_base)));
        // NOTE(review): the name annotation below is "name", not "__str__" —
        // looks copy-pasted from the property above; confirm against upstream
        // before changing (only affects the function's reported __name__).
        m_base.attr("__str__") = cpp_function(
            [](handle arg) -> str {
                object type_name = type::handle_of(arg).attr("__name__");
                return pybind11::str("{}.{}").format(std::move(type_name), enum_name(arg));
            },
            name("name"),
            is_method(m_base));
        if (options::show_enum_members_docstring()) {
            // __doc__ is a static property built lazily from tp_doc + member docs.
            m_base.attr("__doc__") = static_property(
                cpp_function(
                    [](handle arg) -> std::string {
                        std::string docstring;
                        dict entries = arg.attr("__entries");
                        if (((PyTypeObject *) arg.ptr())->tp_doc) {
                            docstring += std::string(
                                reinterpret_cast<PyTypeObject *>(arg.ptr())->tp_doc);
                            docstring += "\n\n";
                        }
                        docstring += "Members:";
                        for (auto kv : entries) {
                            auto key = std::string(pybind11::str(kv.first));
                            auto comment = kv.second[int_(1)];
                            docstring += "\n\n  ";
                            docstring += key;
                            if (!comment.is_none()) {
                                docstring += " : ";
                                docstring += pybind11::str(comment).cast<std::string>();
                            }
                        }
                        return docstring;
                    },
                    name("__doc__")),
                none(),
                none(),
                "");
        }
        // __members__: a fresh name -> value dict derived from __entries.
        m_base.attr("__members__") = static_property(cpp_function(
                                                         [](handle arg) -> dict {
                                                             dict entries = arg.attr("__entries"),
                                                                  m;
                                                             for (auto kv : entries) {
                                                                 m[kv.first] = kv.second[int_(0)];
                                                             }
                                                             return m;
                                                         },
                                                         name("__members__")),
                                                     none(),
                                                     none(),
                                                     "");

// Comparison against mismatched enum types either throws or returns a fixed
// result (strict_behavior), depending on the operator.
#define PYBIND11_ENUM_OP_STRICT(op, expr, strict_behavior)                                        \
    m_base.attr(op) = cpp_function(                                                               \
        [](const object &a, const object &b) {                                                    \
            if (!type::handle_of(a).is(type::handle_of(b)))                                       \
                strict_behavior; /* NOLINT(bugprone-macro-parentheses) */                         \
            return expr;                                                                          \
        },                                                                                        \
        name(op),                                                                                 \
        is_method(m_base),                                                                        \
        arg("other"))

// Convertible enums: coerce both operands to int_ before applying the operator.
#define PYBIND11_ENUM_OP_CONV(op, expr)                                                           \
    m_base.attr(op) = cpp_function(                                                               \
        [](const object &a_, const object &b_) {                                                  \
            int_ a(a_), b(b_);                                                                    \
            return expr;                                                                          \
        },                                                                                        \
        name(op),                                                                                 \
        is_method(m_base),                                                                        \
        arg("other"))

// Like PYBIND11_ENUM_OP_CONV but only the left-hand side is coerced.
#define PYBIND11_ENUM_OP_CONV_LHS(op, expr)                                                       \
    m_base.attr(op) = cpp_function(                                                               \
        [](const object &a_, const object &b) {                                                   \
            int_ a(a_);                                                                           \
            return expr;                                                                          \
        },                                                                                        \
        name(op),                                                                                 \
        is_method(m_base),                                                                        \
        arg("other"))

        if (is_convertible) {
            PYBIND11_ENUM_OP_CONV_LHS("__eq__", !b.is_none() && a.equal(b));
            PYBIND11_ENUM_OP_CONV_LHS("__ne__", b.is_none() || !a.equal(b));
            if (is_arithmetic) {
                PYBIND11_ENUM_OP_CONV("__lt__", a < b);
                PYBIND11_ENUM_OP_CONV("__gt__", a > b);
                PYBIND11_ENUM_OP_CONV("__le__", a <= b);
                PYBIND11_ENUM_OP_CONV("__ge__", a >= b);
                PYBIND11_ENUM_OP_CONV("__and__", a & b);
                PYBIND11_ENUM_OP_CONV("__rand__", a & b);
                PYBIND11_ENUM_OP_CONV("__or__", a | b);
                PYBIND11_ENUM_OP_CONV("__ror__", a | b);
                PYBIND11_ENUM_OP_CONV("__xor__", a ^ b);
                PYBIND11_ENUM_OP_CONV("__rxor__", a ^ b);
                m_base.attr("__invert__")
                    = cpp_function([](const object &arg) { return ~(int_(arg)); },
                                   name("__invert__"),
                                   is_method(m_base));
            }
        } else {
            PYBIND11_ENUM_OP_STRICT("__eq__", int_(a).equal(int_(b)), return false);
            PYBIND11_ENUM_OP_STRICT("__ne__", !int_(a).equal(int_(b)), return true);
            if (is_arithmetic) {
#define PYBIND11_THROW throw type_error("Expected an enumeration of matching type!");
                PYBIND11_ENUM_OP_STRICT("__lt__", int_(a) < int_(b), PYBIND11_THROW);
                PYBIND11_ENUM_OP_STRICT("__gt__", int_(a) > int_(b), PYBIND11_THROW);
                PYBIND11_ENUM_OP_STRICT("__le__", int_(a) <= int_(b), PYBIND11_THROW);
                PYBIND11_ENUM_OP_STRICT("__ge__", int_(a) >= int_(b), PYBIND11_THROW);
#undef PYBIND11_THROW
            }
        }
#undef PYBIND11_ENUM_OP_CONV_LHS
#undef PYBIND11_ENUM_OP_CONV
#undef PYBIND11_ENUM_OP_STRICT
        // Pickle and hash via the integer value of the enum member.
        m_base.attr("__getstate__") = cpp_function(
            [](const object &arg) { return int_(arg); }, name("__getstate__"), is_method(m_base));
        m_base.attr("__hash__") = cpp_function(
            [](const object &arg) { return int_(arg); }, name("__hash__"), is_method(m_base));
    }

    /// Register a new enum member `name_` with the given value and optional docstring.
    /// Raises if a member of the same name already exists.
    PYBIND11_NOINLINE void value(char const *name_, object value, const char *doc = nullptr) {
        dict entries = m_base.attr("__entries");
        str name(name_);
        if (entries.contains(name)) {
            std::string type_name = (std::string) str(m_base.attr("__name__"));
            throw value_error(std::move(type_name) + ": element \"" + std::string(name_)
                              + "\" already exists!");
        }
        entries[name] = pybind11::make_tuple(value, doc);
        m_base.attr(std::move(name)) = std::move(value);
    }

    /// Copy all members into the parent scope (C-enum style unscoped access).
    PYBIND11_NOINLINE void export_values() {
        dict entries = m_base.attr("__entries");
        for (auto kv : entries) {
            m_parent.attr(kv.first) = kv.second[int_(0)];
        }
    }

    handle m_base;   // the enum class object being populated
    handle m_parent; // enclosing scope (module or class)
};
// Maps (signedness, byte width) to the matching fixed-width integer type; used
// to pick an integer representation for enums whose underlying type is a char
// variant or bool (which have no direct Python integer equivalent).
template <bool is_signed, size_t length>
struct equivalent_integer {};
template <>
struct equivalent_integer<true, 1> {
    using type = int8_t;
};
template <>
struct equivalent_integer<false, 1> {
    using type = uint8_t;
};
template <>
struct equivalent_integer<true, 2> {
    using type = int16_t;
};
template <>
struct equivalent_integer<false, 2> {
    using type = uint16_t;
};
template <>
struct equivalent_integer<true, 4> {
    using type = int32_t;
};
template <>
struct equivalent_integer<false, 4> {
    using type = uint32_t;
};
template <>
struct equivalent_integer<true, 8> {
    using type = int64_t;
};
template <>
struct equivalent_integer<false, 8> {
    using type = uint64_t;
};
// Convenience alias: fixed-width integer with the same signedness and size as IntLike.
template <typename IntLike>
using equivalent_integer_t =
    typename equivalent_integer<std::is_signed<IntLike>::value, sizeof(IntLike)>::type;
PYBIND11_NAMESPACE_END(detail)
/// Binds C++ enumerations and enumeration classes to Python
template <typename Type>
class enum_ : public class_<Type> {
public:
    using Base = class_<Type>;
    using Base::attr;
    using Base::def;
    using Base::def_property_readonly;
    using Base::def_property_readonly_static;
    using Underlying = typename std::underlying_type<Type>::type;
    // Scalar is the integer representation of underlying type
    // (char variants and bool are mapped to a same-sized fixed-width integer).
    using Scalar = detail::conditional_t<detail::any_of<detail::is_std_char_type<Underlying>,
                                                        std::is_same<Underlying, bool>>::value,
                                         detail::equivalent_integer_t<Underlying>,
                                         Underlying>;

    template <typename... Extra>
    enum_(const handle &scope, const char *name, const Extra &...extra)
        : class_<Type>(scope, name, extra...), m_base(*this, scope) {
        // py::arithmetic() passed as an Extra enables <,>,&,|,^,~ etc.
        constexpr bool is_arithmetic = detail::any_of<std::is_same<arithmetic, Extra>...>::value;
        constexpr bool is_convertible = std::is_convertible<Type, Underlying>::value;
        m_base.init(is_arithmetic, is_convertible);
        // Constructor from the integer value, plus integer conversions.
        def(init([](Scalar i) { return static_cast<Type>(i); }), arg("value"));
        def_property_readonly("value", [](Type value) { return (Scalar) value; });
        def("__int__", [](Type value) { return (Scalar) value; });
        def("__index__", [](Type value) { return (Scalar) value; });
        // Unpickling counterpart of enum_base's __getstate__ (the int value).
        attr("__setstate__") = cpp_function(
            [](detail::value_and_holder &v_h, Scalar arg) {
                detail::initimpl::setstate<Base>(
                    v_h, static_cast<Type>(arg), Py_TYPE(v_h.inst) != v_h.type->type);
            },
            detail::is_new_style_constructor(),
            pybind11::name("__setstate__"),
            is_method(*this),
            arg("state"));
    }

    /// Export enumeration entries into the parent scope
    enum_ &export_values() {
        m_base.export_values();
        return *this;
    }

    /// Add an enumeration entry
    enum_ &value(char const *name, Type value, const char *doc = nullptr) {
        m_base.value(name, pybind11::cast(value, return_value_policy::copy), doc);
        return *this;
    }

private:
    detail::enum_base m_base;
};
PYBIND11_NAMESPACE_BEGIN(detail)
// Core of py::keep_alive: ensure `patient` stays alive at least as long as
// `nurse`. Fails loudly on null handles; silently no-ops on None.
PYBIND11_NOINLINE void keep_alive_impl(handle nurse, handle patient) {
    if (!nurse || !patient) {
        pybind11_fail("Could not activate keep_alive!");
    }
    if (patient.is_none() || nurse.is_none()) {
        return; /* Nothing to keep alive or nothing to be kept alive by */
    }
    auto tinfo = all_type_info(Py_TYPE(nurse.ptr()));
    if (!tinfo.empty()) {
        /* It's a pybind-registered type, so we can store the patient in the
         * internal list. */
        add_patient(nurse.ptr(), patient.ptr());
    } else {
        /* Fall back to clever approach based on weak references taken from
         * Boost.Python. This is not used for pybind-registered types because
         * the objects can be destroyed out-of-order in a GC pass. */
        // The weakref callback fires when the nurse dies and drops the extra
        // reference we take on the patient below.
        cpp_function disable_lifesupport([patient](handle weakref) {
            patient.dec_ref();
            weakref.dec_ref();
        });
        weakref wr(nurse, disable_lifesupport);
        patient.inc_ref(); /* reference patient and leak the weak reference */
        (void) wr.release();
    }
}
// Index-based keep_alive front-end: argument index 0 denotes the return value,
// index 1 denotes the implicit `self` during __init__ (when present), and
// indices 1..N otherwise map onto the positional call arguments. Out-of-range
// indices resolve to a null handle, which keep_alive_impl rejects.
PYBIND11_NOINLINE void
keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret) {
    auto resolve = [&](size_t index) -> handle {
        if (index == 0) {
            return ret;
        }
        if (index == 1 && call.init_self) {
            return call.init_self;
        }
        return index <= call.args.size() ? call.args[index - 1] : handle();
    };
    keep_alive_impl(resolve(Nurse), resolve(Patient));
}
// Fetch (or create) the per-PyTypeObject cache slot in registered_types_py.
// Returns the map iterator plus a bool that is true when the entry was just
// created (the caller then populates the type_info vector).
inline std::pair<decltype(internals::registered_types_py)::iterator, bool>
all_type_info_get_cache(PyTypeObject *type) {
    auto res = get_internals()
                   .registered_types_py
#ifdef __cpp_lib_unordered_map_try_emplace
                   .try_emplace(type);
#else
                   .emplace(type, std::vector<detail::type_info *>());
#endif
    if (res.second) {
        // New cache entry created; set up a weak reference to automatically remove it if the type
        // gets destroyed:
        weakref((PyObject *) type, cpp_function([type](handle wr) {
                    get_internals().registered_types_py.erase(type);
                    // TODO consolidate the erasure code in pybind11_meta_dealloc() in class.h
                    // Also purge any override-cache entries keyed on this type.
                    auto &cache = get_internals().inactive_override_cache;
                    for (auto it = cache.begin(), last = cache.end(); it != last;) {
                        if (it->first == reinterpret_cast<PyObject *>(type)) {
                            it = cache.erase(it);
                        } else {
                            ++it;
                        }
                    }
                    wr.dec_ref();
                }))
            .release();
    }
    return res;
}
/* There are a large number of apparently unused template arguments because
 * each combination requires a separate py::class_ registration.
 */
// State captured by a Python iterator made from a C++ [first, last) range.
template <typename Access,
          return_value_policy Policy,
          typename Iterator,
          typename Sentinel,
          typename ValueType,
          typename... Extra>
struct iterator_state {
    Iterator it;
    Sentinel end;
    // True before the first __next__ call and again once `it == end`
    // (doubles as the "don't pre-increment" flag for the first element).
    bool first_or_done;
};
// Note: these helpers take the iterator by non-const reference because some
// iterators in the wild can't be dereferenced when const. The & after Iterator
// is required for MSVC < 16.9. SFINAE cannot be reused for result_type due to
// bugs in ICC, NVCC, and PGI compilers. See PR #3293.
// Accessor that yields the dereferenced element itself (used by make_iterator).
template <typename Iterator, typename SFINAE = decltype(*std::declval<Iterator &>())>
struct iterator_access {
    using result_type = decltype(*std::declval<Iterator &>());
    // NOLINTNEXTLINE(readability-const-return-type) // PR #3263
    result_type operator()(Iterator &it) const { return *it; }
};
// Accessor that yields `.first` of a pair-valued iterator (used by make_key_iterator).
template <typename Iterator, typename SFINAE = decltype((*std::declval<Iterator &>()).first)>
class iterator_key_access {
private:
    using pair_type = decltype(*std::declval<Iterator &>());

public:
    /* If either the pair itself or the element of the pair is a reference, we
     * want to return a reference, otherwise a value. When the decltype
     * expression is parenthesized it is based on the value category of the
     * expression; otherwise it is the declared type of the pair member.
     * The use of declval<pair_type> in the second branch rather than directly
     * using *std::declval<Iterator &>() is a workaround for nvcc
     * (it's not used in the first branch because going via decltype and back
     * through declval does not perfectly preserve references).
     */
    using result_type
        = conditional_t<std::is_reference<decltype(*std::declval<Iterator &>())>::value,
                        decltype(((*std::declval<Iterator &>()).first)),
                        decltype(std::declval<pair_type>().first)>;
    result_type operator()(Iterator &it) const { return (*it).first; }
};
// Accessor that yields `.second` of a pair-valued iterator (used by
// make_value_iterator); result_type logic mirrors iterator_key_access.
template <typename Iterator, typename SFINAE = decltype((*std::declval<Iterator &>()).second)>
class iterator_value_access {
private:
    using pair_type = decltype(*std::declval<Iterator &>());

public:
    using result_type
        = conditional_t<std::is_reference<decltype(*std::declval<Iterator &>())>::value,
                        decltype(((*std::declval<Iterator &>()).second)),
                        decltype(std::declval<pair_type>().second)>;
    result_type operator()(Iterator &it) const { return (*it).second; }
};
// Shared implementation behind make_iterator / make_key_iterator /
// make_value_iterator: lazily registers (once per distinct state type) a
// module-local Python "iterator" class wrapping the range [first, last), then
// returns a new instance of it as a py::iterator.
template <typename Access,
          return_value_policy Policy,
          typename Iterator,
          typename Sentinel,
          typename ValueType,
          typename... Extra>
iterator make_iterator_impl(Iterator first, Sentinel last, Extra &&...extra) {
    using state = detail::iterator_state<Access, Policy, Iterator, Sentinel, ValueType, Extra...>;
    // TODO: state captures only the types of Extra, not the values
    if (!detail::get_type_info(typeid(state), false)) {
        class_<state>(handle(), "iterator", pybind11::module_local())
            .def("__iter__", [](state &s) -> state & { return s; })
            .def(
                "__next__",
                [](state &s) -> ValueType {
                    // first_or_done delays the increment so that the first
                    // element is yielded before ++s.it is ever applied; it is
                    // also reused to make further __next__ calls after
                    // exhaustion keep raising StopIteration.
                    if (!s.first_or_done) {
                        ++s.it;
                    } else {
                        s.first_or_done = false;
                    }
                    if (s.it == s.end) {
                        s.first_or_done = true;
                        throw stop_iteration();
                    }
                    return Access()(s.it);
                    // NOLINTNEXTLINE(readability-const-return-type) // PR #3263
                },
                std::forward<Extra>(extra)...,
                Policy);
    }
    return cast(state{first, last, true});
}
PYBIND11_NAMESPACE_END(detail)
/// Makes a python iterator from a first and past-the-end C++ InputIterator.
template <return_value_policy Policy = return_value_policy::reference_internal,
          typename Iterator,
          typename Sentinel,
          typename ValueType = typename detail::iterator_access<Iterator>::result_type,
          typename... Extra>
iterator make_iterator(Iterator first, Sentinel last, Extra &&...extra) {
    // Plain dereference of each element via iterator_access.
    return detail::make_iterator_impl<detail::iterator_access<Iterator>,
                                      Policy,
                                      Iterator,
                                      Sentinel,
                                      ValueType,
                                      Extra...>(first, last, std::forward<Extra>(extra)...);
}
/// Makes a python iterator over the keys (`.first`) of a iterator over pairs from a
/// first and past-the-end InputIterator.
template <return_value_policy Policy = return_value_policy::reference_internal,
          typename Iterator,
          typename Sentinel,
          typename KeyType = typename detail::iterator_key_access<Iterator>::result_type,
          typename... Extra>
iterator make_key_iterator(Iterator first, Sentinel last, Extra &&...extra) {
    return detail::make_iterator_impl<detail::iterator_key_access<Iterator>,
                                      Policy,
                                      Iterator,
                                      Sentinel,
                                      KeyType,
                                      Extra...>(first, last, std::forward<Extra>(extra)...);
}
/// Makes a python iterator over the values (`.second`) of a iterator over pairs from a
/// first and past-the-end InputIterator.
template <return_value_policy Policy = return_value_policy::reference_internal,
          typename Iterator,
          typename Sentinel,
          typename ValueType = typename detail::iterator_value_access<Iterator>::result_type,
          typename... Extra>
iterator make_value_iterator(Iterator first, Sentinel last, Extra &&...extra) {
    return detail::make_iterator_impl<detail::iterator_value_access<Iterator>,
                                      Policy,
                                      Iterator,
                                      Sentinel,
                                      ValueType,
                                      Extra...>(first, last, std::forward<Extra>(extra)...);
}
/// Makes an iterator over values of an stl container or other container supporting
/// `std::begin()`/`std::end()`
// The iterators obtained from `value` are stored in the Python iterator, so
// the container must outlive it (the default reference_internal policy).
template <return_value_policy Policy = return_value_policy::reference_internal,
          typename Type,
          typename... Extra>
iterator make_iterator(Type &value, Extra &&...extra) {
    return make_iterator<Policy>(
        std::begin(value), std::end(value), std::forward<Extra>(extra)...);
}
/// Makes an iterator over the keys (`.first`) of a stl map-like container supporting
/// `std::begin()`/`std::end()`
// As above, the container must outlive the returned Python iterator.
template <return_value_policy Policy = return_value_policy::reference_internal,
          typename Type,
          typename... Extra>
iterator make_key_iterator(Type &value, Extra &&...extra) {
    return make_key_iterator<Policy>(
        std::begin(value), std::end(value), std::forward<Extra>(extra)...);
}
/// Makes an iterator over the values (`.second`) of a stl map-like container supporting
/// `std::begin()`/`std::end()`
// As above, the container must outlive the returned Python iterator.
template <return_value_policy Policy = return_value_policy::reference_internal,
          typename Type,
          typename... Extra>
iterator make_value_iterator(Type &value, Extra &&...extra) {
    return make_value_iterator<Policy>(
        std::begin(value), std::end(value), std::forward<Extra>(extra)...);
}
// Register an implicit conversion from InputType to OutputType: whenever a
// bound function expecting OutputType receives a Python object that is
// loadable as InputType, pybind11 will invoke OutputType's Python constructor
// with that object to produce the argument. OutputType must already be
// registered, otherwise this fails immediately.
template <typename InputType, typename OutputType>
void implicitly_convertible() {
    // RAII guard: marks the conversion as "in use" for the guard's lifetime.
    struct set_flag {
        bool &flag;
        explicit set_flag(bool &flag_) : flag(flag_) { flag_ = true; }
        ~set_flag() { flag = false; }
    };
    auto implicit_caster = [](PyObject *obj, PyTypeObject *type) -> PyObject * {
        // Function-local static shared by all invocations of this lambda.
        static bool currently_used = false;
        if (currently_used) { // implicit conversions are non-reentrant
            return nullptr;
        }
        set_flag flag_helper(currently_used);
        if (!detail::make_caster<InputType>().load(obj, false)) {
            return nullptr;
        }
        tuple args(1);
        args[0] = obj;
        // Call OutputType's Python type with obj as the sole argument.
        PyObject *result = PyObject_Call((PyObject *) type, args.ptr(), nullptr);
        if (result == nullptr) {
            // Conversion failure is swallowed: other casters may still apply.
            PyErr_Clear();
        }
        return result;
    };
    if (auto *tinfo = detail::get_type_info(typeid(OutputType))) {
        tinfo->implicit_conversions.emplace_back(std::move(implicit_caster));
    } else {
        pybind11_fail("implicitly_convertible: Unable to find type " + type_id<OutputType>());
    }
}
/// Install a global exception translator. Translators are consulted newest
/// first (push_front), so a later registration takes precedence.
inline void register_exception_translator(ExceptionTranslator &&translator) {
    auto &translators = detail::get_internals().registered_exception_translators;
    translators.push_front(std::forward<ExceptionTranslator>(translator));
}
/**
 * Add a new module-local exception translator. Locally registered translators
 * take precedence over globally registered ones: the global set is only
 * consulted when no module-local handler dealt with the exception.
 */
inline void register_local_exception_translator(ExceptionTranslator &&translator) {
    auto &local_translators = detail::get_local_internals().registered_exception_translators;
    local_translators.push_front(std::forward<ExceptionTranslator>(translator));
}
/**
 * Wrapper to generate a new Python exception type.
 *
 * This should only be used with PyErr_SetString for now.
 * It is not (yet) possible to use as a py::base.
 * Template type argument is reserved for future use.
 */
template <typename type>
class exception : public object {
public:
    exception() = default;
    // Creates a new Python exception class "<scope>.<name>" deriving from
    // `base` and publishes it as an attribute of `scope`.
    exception(handle scope, const char *name, handle base = PyExc_Exception) {
        std::string full_name
            = scope.attr("__name__").cast<std::string>() + std::string(".") + name;
        m_ptr = PyErr_NewException(const_cast<char *>(full_name.c_str()), base.ptr(), nullptr);
        // NOTE(review): the duplicate-name check runs after the exception
        // object was created; pybind11_fail throws without releasing it, so
        // the new object appears to leak on this error path — confirm whether
        // that is acceptable (registration errors are typically fatal).
        if (hasattr(scope, "__dict__") && scope.attr("__dict__").contains(name)) {
            pybind11_fail("Error during initialization: multiple incompatible "
                          "definitions with name \""
                          + std::string(name) + "\"");
        }
        scope.attr(name) = *this;
    }
    // Sets the current python exception to this exception object with the given message
    void operator()(const char *message) { PyErr_SetString(m_ptr, message); }
};
PYBIND11_NAMESPACE_BEGIN(detail)
// Returns a reference to a function-local static exception object used in the simple
// register_exception approach below. (It would be simpler to have the static local variable
// directly in register_exception, but that makes clang <3.5 segfault - issue #1349).
template <typename CppException>
exception<CppException> &get_exception_object() {
    // Meyers singleton: one shared Python exception object per C++ exception
    // type (see the comment above for why it lives here and not in
    // register_exception).
    static exception<CppException> ex;
    return ex;
}
// Helper function for register_exception and register_local_exception.
// Creates (on first use) the Python exception class for CppException,
// publishes it on `scope`, and installs a translator — module-local when
// isLocal is true, global otherwise — that converts a thrown CppException
// into the Python exception using e.what().
//
// Fix: the original line selecting the registration function contained the
// mojibake byte sequence `®ister_...` (a mis-encoded `&reg`), which does not
// compile; restored the intended address-of expressions.
template <typename CppException>
exception<CppException> &
register_exception_impl(handle scope, const char *name, handle base, bool isLocal) {
    auto &ex = detail::get_exception_object<CppException>();
    if (!ex) {
        // First registration for this C++ exception type.
        ex = exception<CppException>(scope, name, base);
    }
    auto register_func
        = isLocal ? &register_local_exception_translator : &register_exception_translator;
    register_func([](std::exception_ptr p) {
        if (!p) {
            return;
        }
        try {
            std::rethrow_exception(p);
        } catch (const CppException &e) {
            // Translate: set the shared Python exception with the C++ message.
            detail::get_exception_object<CppException>()(e.what());
        }
    });
    return ex;
}
PYBIND11_NAMESPACE_END(detail)
/**
* Registers a Python exception in `m` of the given `name` and installs a translator to
* translate the C++ exception to the created Python exception using the what() method.
* This is intended for simple exception translations; for more complex translation, register the
* exception object and translator directly.
*/
template <typename CppException>
exception<CppException> &
register_exception(handle scope, const char *name, handle base = PyExc_Exception) {
    // Global registration: the translator is visible to all modules.
    constexpr bool is_local = false;
    return detail::register_exception_impl<CppException>(scope, name, base, is_local);
}
/**
* Registers a Python exception in `m` of the given `name` and installs a translator to
* translate the C++ exception to the created Python exception using the what() method.
* This translator will only be used for exceptions that are thrown in this module and will be
* tried before global exception translators, including those registered with register_exception.
* This is intended for simple exception translations; for more complex translation, register the
* exception object and translator directly.
*/
template <typename CppException>
exception<CppException> &
register_local_exception(handle scope, const char *name, handle base = PyExc_Exception) {
    // Module-local registration: tried before any global translators.
    constexpr bool is_local = true;
    return detail::register_exception_impl<CppException>(scope, name, base, is_local);
}
PYBIND11_NAMESPACE_BEGIN(detail)
// Implements Python print() semantics: stringify every positional argument,
// join with the `sep` keyword (default " "), append `end` (default "\n"),
// write to `file` (default sys.stdout), and honour an optional `flush` flag.
PYBIND11_NOINLINE void print(const tuple &args, const dict &kwargs) {
    auto strings = tuple(args.size());
    for (size_t i = 0; i < args.size(); ++i) {
        strings[i] = str(args[i]);
    }
    auto sep = kwargs.contains("sep") ? kwargs["sep"] : str(" ");
    auto line = sep.attr("join")(std::move(strings));
    object file;
    if (kwargs.contains("file")) {
        file = kwargs["file"].cast<object>();
    } else {
        try {
            file = module_::import("sys").attr("stdout");
        } catch (const error_already_set &) {
            /* If print() is called from code that is executed as
               part of garbage collection during interpreter shutdown,
               importing 'sys' can fail. Give up rather than crashing the
               interpreter in this case. */
            return;
        }
    }
    auto write = file.attr("write");
    write(std::move(line));
    write(kwargs.contains("end") ? kwargs["end"] : str("\n"));
    if (kwargs.contains("flush") && kwargs["flush"].cast<bool>()) {
        file.attr("flush")();
    }
}
PYBIND11_NAMESPACE_END(detail)
// Python-style print() for C++ callers: collects arbitrary positional and
// keyword arguments (sep/end/file/flush) and forwards to detail::print.
template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>
void print(Args &&...args) {
    auto c = detail::collect_arguments<policy>(std::forward<Args>(args)...);
    detail::print(c.args(), c.kwargs());
}
// Deleter for error_already_set's shared fetched-error state: the GIL must be
// held while Python objects inside it are destroyed, and error_scope
// preserves any ambient Python error state across the deletion.
inline void
error_already_set::m_fetched_error_deleter(detail::error_fetch_and_normalize *raw_ptr) {
    gil_scoped_acquire gil;
    error_scope scope;
    delete raw_ptr;
}
// Formats the stored Python error as a C string; acquires the GIL because
// building the message touches Python objects. The pointer remains valid for
// the lifetime of the shared fetched-error state.
inline const char *error_already_set::what() const noexcept {
    gil_scoped_acquire gil;
    error_scope scope;
    return m_fetched_error->error_string().c_str();
}
PYBIND11_NAMESPACE_BEGIN(detail)
// Returns the Python-side override of method `name` for the instance at
// this_ptr, or an empty function when: the instance is unknown to pybind11,
// no override exists (cached for speed), or the lookup is happening from
// inside the override itself (to let the Python override call the C++ base
// implementation without infinite recursion).
inline function
get_type_override(const void *this_ptr, const type_info *this_type, const char *name) {
    handle self = get_object_handle(this_ptr, this_type);
    if (!self) {
        return function();
    }
    handle type = type::handle_of(self);
    auto key = std::make_pair(type.ptr(), name);
    /* Cache functions that aren't overridden in Python to avoid
       many costly Python dictionary lookups below */
    auto &cache = get_internals().inactive_override_cache;
    if (cache.find(key) != cache.end()) {
        return function();
    }
    function override = getattr(self, name, function());
    if (override.is_cpp_function()) {
        // The attribute resolved back to the bound C++ function, i.e. there
        // is no Python override for this (type, name) pair: remember that.
        cache.insert(std::move(key));
        return function();
    }
    /* Don't call dispatch code if invoked from overridden function.
       Unfortunately this doesn't work on PyPy. */
#if !defined(PYPY_VERSION)
#    if PY_VERSION_HEX >= 0x03090000
    // CPython 3.9+: use the limited frame accessors (struct fields are no
    // longer directly accessible). Both PyFrame_GetCode and
    // PyThreadState_GetFrame return new references that must be released.
    PyFrameObject *frame = PyThreadState_GetFrame(PyThreadState_Get());
    if (frame != nullptr) {
        PyCodeObject *f_code = PyFrame_GetCode(frame);
        // f_code is guaranteed to not be NULL
        if ((std::string) str(f_code->co_name) == name && f_code->co_argcount > 0) {
            PyObject *locals = PyEval_GetLocals();
            if (locals != nullptr) {
                PyObject *co_varnames = PyObject_GetAttrString((PyObject *) f_code, "co_varnames");
                // First varname of a method is its self argument.
                PyObject *self_arg = PyTuple_GET_ITEM(co_varnames, 0);
                Py_DECREF(co_varnames);
                PyObject *self_caller = dict_getitem(locals, self_arg);
                if (self_caller == self.ptr()) {
                    // The calling frame *is* the override: fall back to C++.
                    Py_DECREF(f_code);
                    Py_DECREF(frame);
                    return function();
                }
            }
        }
        Py_DECREF(f_code);
        Py_DECREF(frame);
    }
#    else
    // Older CPython: frame/code struct fields are accessed directly
    // (borrowed references, nothing to release).
    PyFrameObject *frame = PyThreadState_Get()->frame;
    if (frame != nullptr && (std::string) str(frame->f_code->co_name) == name
        && frame->f_code->co_argcount > 0) {
        PyFrame_FastToLocals(frame);
        PyObject *self_caller
            = dict_getitem(frame->f_locals, PyTuple_GET_ITEM(frame->f_code->co_varnames, 0));
        if (self_caller == self.ptr()) {
            return function();
        }
    }
#    endif
#else
    /* PyPy currently doesn't provide a detailed cpyext emulation of
       frame objects, so we have to emulate this using Python. This
       is going to be slow..*/
    dict d;
    d["self"] = self;
    d["name"] = pybind11::str(name);
    PyObject *result
        = PyRun_String("import inspect\n"
                       "frame = inspect.currentframe()\n"
                       "if frame is not None:\n"
                       "    frame = frame.f_back\n"
                       "    if frame is not None and str(frame.f_code.co_name) == name and "
                       "frame.f_code.co_argcount > 0:\n"
                       "        self_caller = frame.f_locals[frame.f_code.co_varnames[0]]\n"
                       "        if self_caller == self:\n"
                       "            self = None\n",
                       Py_file_input,
                       d.ptr(),
                       d.ptr());
    if (result == nullptr)
        throw error_already_set();
    Py_DECREF(result);
    // The snippet above sets d["self"] to None when the caller is the
    // override itself.
    if (d["self"].is_none())
        return function();
#endif
    return override;
}
PYBIND11_NAMESPACE_END(detail)
/** \rst
Try to retrieve a python method by the provided name from the instance pointed to by the
this_ptr.
:this_ptr: The pointer to the object the overridden method should be retrieved for. This should
be the first non-trampoline class encountered in the inheritance chain.
:name: The name of the overridden Python method to retrieve.
:return: The Python method by this name from the object or an empty function wrapper.
\endrst */
template <class T>
function get_override(const T *this_ptr, const char *name) {
    // Without a pybind11 registration record for T there can be no
    // Python-side override.
    if (auto *type_record = detail::get_type_info(typeid(T))) {
        return detail::get_type_override(this_ptr, type_record, name);
    }
    return function();
}
// Core dispatcher used by all PYBIND11_OVERRIDE_* macros: acquire the GIL,
// look up a Python override of `name` on this trampoline instance and, if one
// exists, call it and `return` its result (cast to ret_type) from the
// enclosing virtual function. Falls through when no override is found.
#define PYBIND11_OVERRIDE_IMPL(ret_type, cname, name, ...)                                        \
    do {                                                                                          \
        pybind11::gil_scoped_acquire gil;                                                         \
        pybind11::function override                                                               \
            = pybind11::get_override(static_cast<const cname *>(this), name);                     \
        if (override) {                                                                           \
            auto o = override(__VA_ARGS__);                                                       \
            if (pybind11::detail::cast_is_temporary_value_reference<ret_type>::value) {           \
                static pybind11::detail::override_caster_t<ret_type> caster;                      \
                return pybind11::detail::cast_ref<ret_type>(std::move(o), caster);                \
            }                                                                                     \
            return pybind11::detail::cast_safe<ret_type>(std::move(o));                           \
        }                                                                                         \
    } while (false)
/** \rst
Macro to populate the virtual method in the trampoline class. This macro tries to look up a
method named 'fn' from the Python side, deals with the :ref:`gil` and necessary argument
conversions to call this method and return the appropriate type.
See :ref:`overriding_virtuals` for more information. This macro should be used when the method
name in C is not the same as the method name in Python. For example with `__str__`.
.. code-block:: cpp
std::string toString() override {
PYBIND11_OVERRIDE_NAME(
std::string, // Return type (ret_type)
Animal, // Parent class (cname)
"__str__", // Name of method in Python (name)
toString, // Name of function in C++ (fn)
);
}
\endrst */
// Try the Python override first; if none exists, fall back to the C++ base
// implementation cname::fn.
#define PYBIND11_OVERRIDE_NAME(ret_type, cname, name, fn, ...)                                    \
    do {                                                                                          \
        PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__); \
        return cname::fn(__VA_ARGS__);                                                            \
    } while (false)
/** \rst
Macro for pure virtual functions, this function is identical to
:c:macro:`PYBIND11_OVERRIDE_NAME`, except that it throws if no override can be found.
\endrst */
// Pure-virtual variant: with no Python override there is no C++ fallback, so
// fail with a descriptive error instead.
#define PYBIND11_OVERRIDE_PURE_NAME(ret_type, cname, name, fn, ...)                               \
    do {                                                                                          \
        PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__); \
        pybind11::pybind11_fail(                                                                  \
            "Tried to call pure virtual function \"" PYBIND11_STRINGIFY(cname) "::" name "\"");   \
    } while (false)
/** \rst
Macro to populate the virtual method in the trampoline class. This macro tries to look up the
method from the Python side, deals with the :ref:`gil` and necessary argument conversions to
call this method and return the appropriate type. This macro should be used if the method name
in C and in Python are identical.
See :ref:`overriding_virtuals` for more information.
.. code-block:: cpp
class PyAnimal : public Animal {
public:
// Inherit the constructors
using Animal::Animal;
// Trampoline (need one for each virtual function)
std::string go(int n_times) override {
PYBIND11_OVERRIDE_PURE(
std::string, // Return type (ret_type)
Animal, // Parent class (cname)
go, // Name of function in C++ (must match Python name) (fn)
n_times // Argument(s) (...)
);
}
};
\endrst */
// Convenience wrapper for the common case where the Python method name equals
// the C++ function name: stringifies `fn` and delegates.
#define PYBIND11_OVERRIDE(ret_type, cname, fn, ...)                                               \
    PYBIND11_OVERRIDE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)
/** \rst
Macro for pure virtual functions, this function is identical to :c:macro:`PYBIND11_OVERRIDE`,
except that it throws if no override can be found.
\endrst */
// Pure-virtual counterpart of PYBIND11_OVERRIDE (same name stringification).
#define PYBIND11_OVERRIDE_PURE(ret_type, cname, fn, ...)                                          \
    PYBIND11_OVERRIDE_PURE_NAME(                                                                  \
        PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), #fn, fn, __VA_ARGS__)
// Deprecated versions
// Deprecated alias kept for backwards compatibility ("overload" was renamed
// to "override" in pybind11 2.6).
PYBIND11_DEPRECATED("get_type_overload has been deprecated")
inline function
get_type_overload(const void *this_ptr, const detail::type_info *this_type, const char *name) {
    return detail::get_type_override(this_ptr, this_type, name);
}
// Deprecated spelling of get_override, retained for pre-2.6 callers.
template <class T>
inline function get_overload(const T *this_ptr, const char *name) {
    return get_override<T>(this_ptr, name);
}
// Deprecated spellings of the PYBIND11_OVERRIDE_* macros, kept for backwards
// compatibility with pre-2.6 code ("overload" was renamed to "override").
#define PYBIND11_OVERLOAD_INT(ret_type, cname, name, ...)                                         \
    PYBIND11_OVERRIDE_IMPL(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, __VA_ARGS__)
#define PYBIND11_OVERLOAD_NAME(ret_type, cname, name, fn, ...)                                    \
    PYBIND11_OVERRIDE_NAME(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, fn, __VA_ARGS__)
#define PYBIND11_OVERLOAD_PURE_NAME(ret_type, cname, name, fn, ...)                               \
    PYBIND11_OVERRIDE_PURE_NAME(                                                                  \
        PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), name, fn, __VA_ARGS__);
#define PYBIND11_OVERLOAD(ret_type, cname, fn, ...)                                               \
    PYBIND11_OVERRIDE(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), fn, __VA_ARGS__)
#define PYBIND11_OVERLOAD_PURE(ret_type, cname, fn, ...)                                          \
    PYBIND11_OVERRIDE_PURE(PYBIND11_TYPE(ret_type), PYBIND11_TYPE(cname), fn, __VA_ARGS__);
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
/*
pybind11/pytypes.h: Convenience wrapper classes for basic Python types
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
#include "buffer_info.h"
#include <assert.h>
#include <cstddef>
#include <exception>
#include <frameobject.h>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <typeinfo>
#include <utility>
#if defined(PYBIND11_HAS_OPTIONAL)
# include <optional>
#endif
#ifdef PYBIND11_HAS_STRING_VIEW
# include <string_view>
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_WARNING_DISABLE_MSVC(4127)
/* A few forward declarations */
class handle;
class object;
class str;
class iterator;
class type;
struct arg;
struct arg_v;
PYBIND11_NAMESPACE_BEGIN(detail)
class args_proxy;
bool isinstance_generic(handle obj, const std::type_info &tp);
// Accessor forward declarations
template <typename Policy>
class accessor;
namespace accessor_policies {
struct obj_attr;
struct str_attr;
struct generic_item;
struct sequence_item;
struct list_item;
struct tuple_item;
} // namespace accessor_policies
// Convenience aliases: one accessor type per lookup policy (attribute by
// handle or string, item by generic key, and index-based sequence, list and
// tuple element access).
using obj_attr_accessor = accessor<accessor_policies::obj_attr>;
using str_attr_accessor = accessor<accessor_policies::str_attr>;
using item_accessor = accessor<accessor_policies::generic_item>;
using sequence_accessor = accessor<accessor_policies::sequence_item>;
using list_accessor = accessor<accessor_policies::list_item>;
using tuple_accessor = accessor<accessor_policies::tuple_item>;
/// Tag and check to identify a class which implements the Python object API
class pyobject_tag {};
// Trait: true when T (after reference removal) derives from pyobject_tag.
template <typename T>
using is_pyobject = std::is_base_of<pyobject_tag, remove_reference_t<T>>;
/** \rst
A mixin class which adds common functions to `handle`, `object` and various accessors.
The only requirement for `Derived` is to implement ``PyObject *Derived::ptr() const``.
\endrst */
// CRTP mixin providing the common Python-object operations; Derived must
// implement `PyObject *ptr() const`.
template <typename Derived>
class object_api : public pyobject_tag {
    const Derived &derived() const { return static_cast<const Derived &>(*this); }

public:
    /** \rst
        Return an iterator equivalent to calling ``iter()`` in Python. The object
        must be a collection which supports the iteration protocol.
    \endrst */
    iterator begin() const;
    /// Return a sentinel which ends iteration.
    iterator end() const;
    /** \rst
        Return an internal functor to invoke the object's sequence protocol. Casting
        the returned ``detail::item_accessor`` instance to a `handle` or `object`
        subclass causes a corresponding call to ``__getitem__``. Assigning a `handle`
        or `object` subclass causes a call to ``__setitem__``.
    \endrst */
    item_accessor operator[](handle key) const;
    /// See above (the only difference is that the key's reference is stolen)
    item_accessor operator[](object &&key) const;
    /// See above (the only difference is that the key is provided as a string literal)
    item_accessor operator[](const char *key) const;
    /** \rst
        Return an internal functor to access the object's attributes. Casting the
        returned ``detail::obj_attr_accessor`` instance to a `handle` or `object`
        subclass causes a corresponding call to ``getattr``. Assigning a `handle`
        or `object` subclass causes a call to ``setattr``.
    \endrst */
    obj_attr_accessor attr(handle key) const;
    /// See above (the only difference is that the key's reference is stolen)
    obj_attr_accessor attr(object &&key) const;
    /// See above (the only difference is that the key is provided as a string literal)
    str_attr_accessor attr(const char *key) const;
    /** \rst
        Matches * unpacking in Python, e.g. to unpack arguments out of a ``tuple``
        or ``list`` for a function call. Applying another * to the result yields
        ** unpacking, e.g. to unpack a dict as function keyword arguments.
        See :ref:`calling_python_functions`.
    \endrst */
    args_proxy operator*() const;
    /// Check if the given item is contained within this object, i.e. ``item in obj``.
    template <typename T>
    bool contains(T &&item) const;
    /** \rst
        Assuming the Python object is a function or implements the ``__call__``
        protocol, ``operator()`` invokes the underlying function, passing an
        arbitrary set of parameters. The result is returned as a `object` and
        may need to be converted back into a Python object using `handle::cast()`.
        When some of the arguments cannot be converted to Python objects, the
        function will throw a `cast_error` exception. When the Python function
        call fails, a `error_already_set` exception is thrown.
    \endrst */
    template <return_value_policy policy = return_value_policy::automatic_reference,
              typename... Args>
    object operator()(Args &&...args) const;
    template <return_value_policy policy = return_value_policy::automatic_reference,
              typename... Args>
    PYBIND11_DEPRECATED("call(...) was deprecated in favor of operator()(...)")
    object call(Args &&...args) const;
    /// Equivalent to ``obj is other`` in Python.
    bool is(object_api const &other) const { return derived().ptr() == other.derived().ptr(); }
    /// Equivalent to ``obj is None`` in Python.
    bool is_none() const { return derived().ptr() == Py_None; }
    /// Equivalent to obj == other in Python
    bool equal(object_api const &other) const { return rich_compare(other, Py_EQ); }
    bool not_equal(object_api const &other) const { return rich_compare(other, Py_NE); }
    bool operator<(object_api const &other) const { return rich_compare(other, Py_LT); }
    bool operator<=(object_api const &other) const { return rich_compare(other, Py_LE); }
    bool operator>(object_api const &other) const { return rich_compare(other, Py_GT); }
    bool operator>=(object_api const &other) const { return rich_compare(other, Py_GE); }
    // Arithmetic/bitwise operators delegating to the Python number protocol.
    object operator-() const;
    object operator~() const;
    object operator+(object_api const &other) const;
    object operator+=(object_api const &other);
    object operator-(object_api const &other) const;
    object operator-=(object_api const &other);
    object operator*(object_api const &other) const;
    object operator*=(object_api const &other);
    object operator/(object_api const &other) const;
    object operator/=(object_api const &other);
    object operator|(object_api const &other) const;
    object operator|=(object_api const &other);
    object operator&(object_api const &other) const;
    object operator&=(object_api const &other);
    object operator^(object_api const &other) const;
    object operator^=(object_api const &other);
    object operator<<(object_api const &other) const;
    object operator<<=(object_api const &other);
    object operator>>(object_api const &other) const;
    object operator>>=(object_api const &other);
    PYBIND11_DEPRECATED("Use py::str(obj) instead")
    pybind11::str str() const;
    /// Get or set the object's docstring, i.e. ``obj.__doc__``.
    str_attr_accessor doc() const;
    /// Return the object's current reference count
    int ref_count() const { return static_cast<int>(Py_REFCNT(derived().ptr())); }
    // TODO PYBIND11_DEPRECATED(
    //     "Call py::type::handle_of(h) or py::type::of(h) instead of h.get_type()")
    handle get_type() const;

private:
    // Shared implementation for the comparison operators above; `value` is a
    // CPython rich-comparison opcode (Py_EQ, Py_LT, ...).
    bool rich_compare(object_api const &other, int value) const;
};
// True when T is exactly PyObject * (possibly const-qualified) or
// std::nullptr_t; used to select handle's implicit-conversion constructors.
template <typename T>
using is_pyobj_ptr_or_nullptr_t = detail::any_of<std::is_same<T, PyObject *>,
                                                 std::is_same<T, PyObject *const>,
                                                 std::is_same<T, std::nullptr_t>>;
PYBIND11_NAMESPACE_END(detail)
#if !defined(PYBIND11_HANDLE_REF_DEBUG) && !defined(NDEBUG)
# define PYBIND11_HANDLE_REF_DEBUG
#endif
/** \rst
Holds a reference to a Python object (no reference counting)
The `handle` class is a thin wrapper around an arbitrary Python object (i.e. a
``PyObject *`` in Python's C API). It does not perform any automatic reference
counting and merely provides a basic C++ interface to various Python API functions.
.. seealso::
The `object` class inherits from `handle` and adds automatic reference
counting features.
\endrst */
// Thin non-owning wrapper around a PyObject * (no automatic reference
// counting); see the \rst block above. Fix applied: the GIL diagnostic in
// throw_gilstate_error had two adjacent string literals with no separator
// ("...INCREF_DECREF" "to disable..."), printing "DECREFto disable"; a space
// was inserted.
class handle : public detail::object_api<handle> {
public:
    /// The default constructor creates a handle with a ``nullptr``-valued pointer
    handle() = default;

    /// Enable implicit conversion from ``PyObject *`` and ``nullptr``.
    /// Not using ``handle(PyObject *ptr)`` to avoid implicit conversion from ``0``.
    template <typename T,
              detail::enable_if_t<detail::is_pyobj_ptr_or_nullptr_t<T>::value, int> = 0>
    // NOLINTNEXTLINE(google-explicit-constructor)
    handle(T ptr) : m_ptr(ptr) {}

    /// Enable implicit conversion through ``T::operator PyObject *()``.
    template <
        typename T,
        detail::enable_if_t<detail::all_of<detail::none_of<std::is_base_of<handle, T>,
                                                           detail::is_pyobj_ptr_or_nullptr_t<T>>,
                                           std::is_convertible<T, PyObject *>>::value,
                            int>
        = 0>
    // NOLINTNEXTLINE(google-explicit-constructor)
    handle(T &obj) : m_ptr(obj) {}

    /// Return the underlying ``PyObject *`` pointer
    PyObject *ptr() const { return m_ptr; }
    /// Mutable access to the underlying pointer (for APIs that fill it in place).
    PyObject *&ptr() { return m_ptr; }

    /** \rst
        Manually increase the reference count of the Python object. Usually, it is
        preferable to use the `object` class which derives from `handle` and calls
        this function automatically. Returns a reference to itself.
    \endrst */
    const handle &inc_ref() const & {
#ifdef PYBIND11_HANDLE_REF_DEBUG
        inc_ref_counter(1);
#endif
#ifdef PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF
        if (m_ptr != nullptr && !PyGILState_Check()) {
            throw_gilstate_error("pybind11::handle::inc_ref()");
        }
#endif
        Py_XINCREF(m_ptr);
        return *this;
    }

    /** \rst
        Manually decrease the reference count of the Python object. Usually, it is
        preferable to use the `object` class which derives from `handle` and calls
        this function automatically. Returns a reference to itself.
    \endrst */
    const handle &dec_ref() const & {
#ifdef PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF
        if (m_ptr != nullptr && !PyGILState_Check()) {
            throw_gilstate_error("pybind11::handle::dec_ref()");
        }
#endif
        Py_XDECREF(m_ptr);
        return *this;
    }

    /** \rst
        Attempt to cast the Python object into the given C++ type. A `cast_error`
        will be throw upon failure.
    \endrst */
    template <typename T>
    T cast() const;
    /// Return ``true`` when the `handle` wraps a valid Python object
    explicit operator bool() const { return m_ptr != nullptr; }
    /** \rst
        Deprecated: Check that the underlying pointers are the same.
        Equivalent to ``obj1 is obj2`` in Python.
    \endrst */
    PYBIND11_DEPRECATED("Use obj1.is(obj2) instead")
    bool operator==(const handle &h) const { return m_ptr == h.m_ptr; }
    PYBIND11_DEPRECATED("Use !obj1.is(obj2) instead")
    bool operator!=(const handle &h) const { return m_ptr != h.m_ptr; }
    PYBIND11_DEPRECATED("Use handle::operator bool() instead")
    bool check() const { return m_ptr != nullptr; }

protected:
    PyObject *m_ptr = nullptr;

private:
#ifdef PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF
    // Prints a detailed diagnostic to stderr, then throws: called when a
    // refcount operation runs without the GIL under the opt-in assert mode.
    void throw_gilstate_error(const std::string &function_name) const {
        fprintf(
            stderr,
            "%s is being called while the GIL is either not held or invalid. Please see "
            "https://pybind11.readthedocs.io/en/stable/advanced/"
            "misc.html#common-sources-of-global-interpreter-lock-errors for debugging advice.\n"
            "If you are convinced there is no bug in your code, you can #define "
            "PYBIND11_NO_ASSERT_GIL_HELD_INCREF_DECREF "
            "to disable this check. In that case you have to ensure this #define is consistently "
            "used for all translation units linked into a given pybind11 extension, otherwise "
            "there will be ODR violations.",
            function_name.c_str());
        fflush(stderr);
        if (Py_TYPE(m_ptr)->tp_name != nullptr) {
            fprintf(stderr,
                    "The failing %s call was triggered on a %s object.\n",
                    function_name.c_str(),
                    Py_TYPE(m_ptr)->tp_name);
            fflush(stderr);
        }
        throw std::runtime_error(function_name + " PyGILState_Check() failure.");
    }
#endif
#ifdef PYBIND11_HANDLE_REF_DEBUG
    // Thread-local running total of inc_ref calls (debug builds only).
    static std::size_t inc_ref_counter(std::size_t add) {
        thread_local std::size_t counter = 0;
        counter += add;
        return counter;
    }

public:
    static std::size_t inc_ref_counter() { return inc_ref_counter(0); }
#endif
};
/** \rst
Holds a reference to a Python object (with reference counting)
Like `handle`, the `object` class is a thin wrapper around an arbitrary Python
object (i.e. a ``PyObject *`` in Python's C API). In contrast to `handle`, it
optionally increases the object's reference count upon construction, and it
*always* decreases the reference count when the `object` instance goes out of
scope and is destructed. When using `object` instances consistently, it is much
easier to get reference counting right at the first attempt.
\endrst */
// Owning counterpart of `handle`: decrements the reference count on
// destruction and manages it through copy/move/assignment (see the \rst
// block above).
class object : public handle {
public:
    object() = default;
    PYBIND11_DEPRECATED("Use reinterpret_borrow<object>() or reinterpret_steal<object>()")
    object(handle h, bool is_borrowed) : handle(h) {
        if (is_borrowed) {
            inc_ref();
        }
    }
    /// Copy constructor; always increases the reference count
    object(const object &o) : handle(o) { inc_ref(); }
    /// Move constructor; steals the object from ``other`` and preserves its reference count
    object(object &&other) noexcept : handle(other) { other.m_ptr = nullptr; }
    /// Destructor; automatically calls `handle::dec_ref()`
    ~object() { dec_ref(); }
    /** \rst
        Resets the internal pointer to ``nullptr`` without decreasing the
        object's reference count. The function returns a raw handle to the original
        Python object.
    \endrst */
    handle release() {
        PyObject *tmp = m_ptr;
        m_ptr = nullptr;
        return handle(tmp);
    }
    object &operator=(const object &other) {
        // Skip inc_ref and dec_ref if both objects are the same
        if (!this->is(other)) {
            other.inc_ref();
            // Use temporary variable to ensure `*this` remains valid while
            // `Py_XDECREF` executes, in case `*this` is accessible from Python.
            handle temp(m_ptr);
            m_ptr = other.m_ptr;
            temp.dec_ref();
        }
        return *this;
    }
    object &operator=(object &&other) noexcept {
        if (this != &other) {
            // Same temporary trick as copy assignment: release the old
            // pointer only after *this already holds the new one.
            handle temp(m_ptr);
            m_ptr = other.m_ptr;
            other.m_ptr = nullptr;
            temp.dec_ref();
        }
        return *this;
    }
// Route the in-place operators through operator= so the reference count of
// the newly produced result is managed correctly.
#define PYBIND11_INPLACE_OP(iop)                                                                  \
    object iop(object_api const &other) { return operator=(handle::iop(other)); }
    PYBIND11_INPLACE_OP(operator+=)
    PYBIND11_INPLACE_OP(operator-=)
    PYBIND11_INPLACE_OP(operator*=)
    PYBIND11_INPLACE_OP(operator/=)
    PYBIND11_INPLACE_OP(operator|=)
    PYBIND11_INPLACE_OP(operator&=)
    PYBIND11_INPLACE_OP(operator^=)
    PYBIND11_INPLACE_OP(operator<<=)
    PYBIND11_INPLACE_OP(operator>>=)
#undef PYBIND11_INPLACE_OP
    // Calling cast() on an object lvalue just copies (via handle::cast)
    template <typename T>
    T cast() const &;
    // Calling on an object rvalue does a move, if needed and/or possible
    template <typename T>
    T cast() &&;

protected:
    // Tags for choosing constructors from raw PyObject *
    struct borrowed_t {};
    struct stolen_t {};
    /// @cond BROKEN
    template <typename T>
    friend T reinterpret_borrow(handle);
    template <typename T>
    friend T reinterpret_steal(handle);
    /// @endcond

public:
    // Only accessible from derived classes and the reinterpret_* functions
    object(handle h, borrowed_t) : handle(h) { inc_ref(); }
    object(handle h, stolen_t) : handle(h) {}
};
/** \rst
    Declare that a `handle` or ``PyObject *`` is a certain type and borrow the reference.
    The target type ``T`` must be `object` or one of its derived classes. No conversion
    or type check of any kind is performed; the caller is responsible for ensuring that
    the pointee really is of type ``T``.

    .. code-block:: cpp

        PyObject *p = PyList_GetItem(obj, index);
        py::object o = reinterpret_borrow<py::object>(p);
        // or
        py::tuple t = reinterpret_borrow<py::tuple>(p); // <-- `p` must be already be a `tuple`
\endrst */
template <typename T>
T reinterpret_borrow(handle h) {
    // The borrowed_t tag constructor increments the reference count.
    return T{h, object::borrowed_t{}};
}
/** \rst
    Like `reinterpret_borrow`, but steals the reference (i.e. takes ownership
    without incrementing the reference count).

    .. code-block:: cpp

        PyObject *p = PyObject_Str(obj);
        py::str s = reinterpret_steal<py::str>(p); // <-- `p` must be already be a `str`
\endrst */
template <typename T>
T reinterpret_steal(handle h) {
    // The stolen_t tag constructor adopts the pointer without touching the refcount.
    return T{h, object::stolen_t{}};
}
PYBIND11_NAMESPACE_BEGIN(detail)
// Equivalent to obj.__class__.__name__ (or obj.__name__ if obj is a class).
inline const char *obj_class_name(PyObject *obj) {
    // A type object reports its own name; any other object reports its type's name.
    return PyType_Check(obj) ? reinterpret_cast<PyTypeObject *>(obj)->tp_name
                             : Py_TYPE(obj)->tp_name;
}

// Renders the currently-set Python error as text; defined further below.
std::string error_string();
// The code in this struct is very unusual, to minimize the chances of
// masking bugs (elsewhere) by errors during the error handling (here).
// This is meant to be a lifeline for troubleshooting long-running processes
// that crash under conditions that are virtually impossible to reproduce.
// Low-level implementation alternatives are preferred to higher-level ones
// that might raise cascading exceptions. Last-ditch-kind-of attempts are made
// to report as much of the original error as possible, even if there are
// secondary issues obtaining some of the details.
struct error_fetch_and_normalize {
    // This comment only applies to Python <= 3.11:
    // Immediate normalization is long-established behavior (starting with
    // https://github.com/pybind/pybind11/commit/135ba8deafb8bf64a15b24d1513899eb600e2011
    // from Sep 2016) and safest. Normalization could be deferred, but this could mask
    // errors elsewhere, the performance gain is very minor in typical situations
    // (usually the dominant bottleneck is EH unwinding), and the implementation here
    // would be more complex.
    // Starting with Python 3.12, PyErr_Fetch() normalizes exceptions immediately.
    // Any errors during normalization are tracked under __notes__.

    /// Fetches (and, pre-3.12, normalizes) the active Python error into m_type,
    /// m_value, m_trace. `called` names the calling function for diagnostics.
    explicit error_fetch_and_normalize(const char *called) {
        PyErr_Fetch(&m_type.ptr(), &m_value.ptr(), &m_trace.ptr());
        if (!m_type) {
            pybind11_fail("Internal error: " + std::string(called)
                          + " called while "
                            "Python error indicator not set.");
        }
        const char *exc_type_name_orig = detail::obj_class_name(m_type.ptr());
        if (exc_type_name_orig == nullptr) {
            pybind11_fail("Internal error: " + std::string(called)
                          + " failed to obtain the name "
                            "of the original active exception type.");
        }
        m_lazy_error_string = exc_type_name_orig;
#if PY_VERSION_HEX >= 0x030C0000
        // The presence of __notes__ is likely due to exception normalization
        // errors, although that is not necessarily true, therefore insert a
        // hint only:
        if (PyObject_HasAttrString(m_value.ptr(), "__notes__")) {
            m_lazy_error_string += "[WITH __notes__]";
        }
#else
        // PyErr_NormalizeException() may change the exception type if there are cascading
        // failures. This can potentially be extremely confusing.
        PyErr_NormalizeException(&m_type.ptr(), &m_value.ptr(), &m_trace.ptr());
        if (m_type.ptr() == nullptr) {
            pybind11_fail("Internal error: " + std::string(called)
                          + " failed to normalize the "
                            "active exception.");
        }
        const char *exc_type_name_norm = detail::obj_class_name(m_type.ptr());
        if (exc_type_name_norm == nullptr) {
            pybind11_fail("Internal error: " + std::string(called)
                          + " failed to obtain the name "
                            "of the normalized active exception type.");
        }
#    if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x07030a00
        // This behavior runs the risk of masking errors in the error handling, but avoids a
        // conflict with PyPy, which relies on the normalization here to change OSError to
        // FileNotFoundError (https://github.com/pybind/pybind11/issues/4075).
        m_lazy_error_string = exc_type_name_norm;
#    else
        if (exc_type_name_norm != m_lazy_error_string) {
            std::string msg = std::string(called)
                              + ": MISMATCH of original and normalized "
                                "active exception types: ";
            msg += "ORIGINAL ";
            msg += m_lazy_error_string;
            msg += " REPLACED BY ";
            msg += exc_type_name_norm;
            msg += ": " + format_value_and_trace();
            pybind11_fail(msg);
        }
#    endif
#endif
    }

    error_fetch_and_normalize(const error_fetch_and_normalize &) = delete;
    error_fetch_and_normalize(error_fetch_and_normalize &&) = delete;

    /// Renders str(m_value), any __notes__, and the traceback as one string.
    /// Never throws for message-extraction failures; placeholders are used instead.
    std::string format_value_and_trace() const {
        std::string result;
        std::string message_error_string;
        if (m_value) {
            auto value_str = reinterpret_steal<object>(PyObject_Str(m_value.ptr()));
            constexpr const char *message_unavailable_exc
                = "<MESSAGE UNAVAILABLE DUE TO ANOTHER EXCEPTION>";
            if (!value_str) {
                message_error_string = detail::error_string();
                result = message_unavailable_exc;
            } else {
                // Not using `value_str.cast<std::string>()`, to not potentially throw a secondary
                // error_already_set that will then result in process termination (#4288).
                auto value_bytes = reinterpret_steal<object>(
                    PyUnicode_AsEncodedString(value_str.ptr(), "utf-8", "backslashreplace"));
                if (!value_bytes) {
                    message_error_string = detail::error_string();
                    result = message_unavailable_exc;
                } else {
                    char *buffer = nullptr;
                    Py_ssize_t length = 0;
                    if (PyBytes_AsStringAndSize(value_bytes.ptr(), &buffer, &length) == -1) {
                        message_error_string = detail::error_string();
                        result = message_unavailable_exc;
                    } else {
                        result = std::string(buffer, static_cast<std::size_t>(length));
                    }
                }
            }
#if PY_VERSION_HEX >= 0x030B0000
            // Append any exception __notes__ (PEP 678, Python 3.11+), each on its own line.
            auto notes
                = reinterpret_steal<object>(PyObject_GetAttrString(m_value.ptr(), "__notes__"));
            if (!notes) {
                PyErr_Clear(); // No notes is good news.
            } else {
                auto len_notes = PyList_Size(notes.ptr());
                if (len_notes < 0) {
                    result += "\nFAILURE obtaining len(__notes__): " + detail::error_string();
                } else {
                    result += "\n__notes__ (len=" + std::to_string(len_notes) + "):";
                    for (ssize_t i = 0; i < len_notes; i++) {
                        PyObject *note = PyList_GET_ITEM(notes.ptr(), i);
                        auto note_bytes = reinterpret_steal<object>(
                            PyUnicode_AsEncodedString(note, "utf-8", "backslashreplace"));
                        if (!note_bytes) {
                            result += "\nFAILURE obtaining __notes__[" + std::to_string(i)
                                      + "]: " + detail::error_string();
                        } else {
                            char *buffer = nullptr;
                            Py_ssize_t length = 0;
                            if (PyBytes_AsStringAndSize(note_bytes.ptr(), &buffer, &length)
                                == -1) {
                                result += "\nFAILURE formatting __notes__[" + std::to_string(i)
                                          + "]: " + detail::error_string();
                            } else {
                                result += '\n';
                                result += std::string(buffer, static_cast<std::size_t>(length));
                            }
                        }
                    }
                }
            }
#endif
        } else {
            result = "<MESSAGE UNAVAILABLE>";
        }
        if (result.empty()) {
            result = "<EMPTY MESSAGE>";
        }

        bool have_trace = false;
        if (m_trace) {
#if !defined(PYPY_VERSION)
            auto *tb = reinterpret_cast<PyTracebackObject *>(m_trace.ptr());

            // Get the deepest trace possible.
            while (tb->tb_next) {
                tb = tb->tb_next;
            }

            PyFrameObject *frame = tb->tb_frame;
            Py_XINCREF(frame);
            result += "\n\nAt:\n";
            // Walk the frame chain outward, formatting "  file(line): function" per frame.
            while (frame) {
#    if PY_VERSION_HEX >= 0x030900B1
                PyCodeObject *f_code = PyFrame_GetCode(frame);
#    else
                PyCodeObject *f_code = frame->f_code;
                Py_INCREF(f_code);
#    endif
                int lineno = PyFrame_GetLineNumber(frame);
                result += "  ";
                result += handle(f_code->co_filename).cast<std::string>();
                result += '(';
                result += std::to_string(lineno);
                result += "): ";
                result += handle(f_code->co_name).cast<std::string>();
                result += '\n';
                Py_DECREF(f_code);
#    if PY_VERSION_HEX >= 0x030900B1
                auto *b_frame = PyFrame_GetBack(frame);
#    else
                auto *b_frame = frame->f_back;
                Py_XINCREF(b_frame);
#    endif
                Py_DECREF(frame);
                frame = b_frame;
            }

            have_trace = true;
#endif //! defined(PYPY_VERSION)
        }

        if (!message_error_string.empty()) {
            if (!have_trace) {
                result += '\n';
            }
            result += "\nMESSAGE UNAVAILABLE DUE TO EXCEPTION: " + message_error_string;
        }

        return result;
    }

    /// Lazily-built full error text ("Type: message ... traceback"); cached after
    /// the first call (hence the mutable members below).
    std::string const &error_string() const {
        if (!m_lazy_error_string_completed) {
            m_lazy_error_string += ": " + format_value_and_trace();
            m_lazy_error_string_completed = true;
        }
        return m_lazy_error_string;
    }

    /// Re-installs the held error as the active Python error indicator.
    /// Must not be called twice on the same instance.
    void restore() {
        if (m_restore_called) {
            pybind11_fail("Internal error: pybind11::detail::error_fetch_and_normalize::restore() "
                          "called a second time. ORIGINAL ERROR: "
                          + error_string());
        }
        // PyErr_Restore steals references, so hand over an extra reference to each.
        PyErr_Restore(m_type.inc_ref().ptr(), m_value.inc_ref().ptr(), m_trace.inc_ref().ptr());
        m_restore_called = true;
    }

    /// True if the held exception type matches `exc` (a class or tuple of classes).
    bool matches(handle exc) const {
        return (PyErr_GivenExceptionMatches(m_type.ptr(), exc.ptr()) != 0);
    }

    // Not protecting these for simplicity.
    object m_type, m_value, m_trace;

private:
    // Only protecting invariants.
    mutable std::string m_lazy_error_string;
    mutable bool m_lazy_error_string_completed = false;
    mutable bool m_restore_called = false;
};
inline std::string error_string() {
return error_fetch_and_normalize("pybind11::detail::error_string").error_string();
}
PYBIND11_NAMESPACE_END(detail)
/// Fetch and hold an error which was already set in Python. An instance of this is typically
/// thrown to propagate python-side errors back through C++ which can either be caught manually or
/// else falls back to the function dispatcher (which then raises the captured error back to
/// python).
class PYBIND11_EXPORT_EXCEPTION error_already_set : public std::exception {
public:
    /// Fetches the current Python exception (using PyErr_Fetch()), which will clear the
    /// current Python error indicator.
    error_already_set()
        : m_fetched_error{new detail::error_fetch_and_normalize("pybind11::error_already_set"),
                          m_fetched_error_deleter} {}

    /// The what() result is built lazily on demand.
    /// WARNING: This member function needs to acquire the Python GIL. This can lead to
    ///          crashes (undefined behavior) if the Python interpreter is finalizing.
    const char *what() const noexcept override;

    /// Restores the currently-held Python error (which will clear the Python error indicator first
    /// if already set).
    /// NOTE: This member function will always restore the normalized exception, which may or may
    ///       not be the original Python exception.
    /// WARNING: The GIL must be held when this member function is called!
    void restore() { m_fetched_error->restore(); }

    /// If it is impossible to raise the currently-held error, such as in a destructor, we can
    /// write it out using Python's unraisable hook (`sys.unraisablehook`). The error context
    /// should be some object whose `repr()` helps identify the location of the error. Python
    /// already knows the type and value of the error, so there is no need to repeat that.
    void discard_as_unraisable(object err_context) {
        restore();
        PyErr_WriteUnraisable(err_context.ptr());
    }
    /// An alternate version of `discard_as_unraisable()`, where a string provides information on
    /// the location of the error. For example, `__func__` could be helpful.
    /// WARNING: The GIL must be held when this member function is called!
    void discard_as_unraisable(const char *err_context) {
        discard_as_unraisable(reinterpret_steal<object>(PYBIND11_FROM_STRING(err_context)));
    }

    // Does nothing; provided for backwards compatibility.
    PYBIND11_DEPRECATED("Use of error_already_set.clear() is deprecated")
    void clear() {}

    /// Check if the currently trapped error type matches the given Python exception class (or a
    /// subclass thereof). May also be passed a tuple to search for any exception class matches in
    /// the given tuple.
    bool matches(handle exc) const { return m_fetched_error->matches(exc); }

    /// Accessors for the captured (normalized) exception type, value, and traceback.
    const object &type() const { return m_fetched_error->m_type; }
    const object &value() const { return m_fetched_error->m_value; }
    const object &trace() const { return m_fetched_error->m_trace; }

private:
    // shared_ptr so that copies of error_already_set share one fetched error and
    // its lazily-built what() text.
    std::shared_ptr<detail::error_fetch_and_normalize> m_fetched_error;

    /// WARNING: This custom deleter needs to acquire the Python GIL. This can lead to
    ///          crashes (undefined behavior) if the Python interpreter is finalizing.
    static void m_fetched_error_deleter(detail::error_fetch_and_normalize *raw_ptr);
};
/// Replaces the current Python error indicator with the chosen error, performing a
/// 'raise from' to indicate that the chosen error was caused by the original error.
inline void raise_from(PyObject *type, const char *message) {
    // Based on _PyErr_FormatVFromCause:
    // https://github.com/python/cpython/blob/467ab194fc6189d9f7310c89937c51abeac56839/Python/errors.c#L405
    // See https://github.com/pybind/pybind11/pull/2112 for details.
    PyObject *exc = nullptr, *val = nullptr, *val2 = nullptr, *tb = nullptr;

    // An error must already be set (it becomes the cause/context of the new error).
    assert(PyErr_Occurred());
    PyErr_Fetch(&exc, &val, &tb);
    PyErr_NormalizeException(&exc, &val, &tb);
    if (tb != nullptr) {
        // Attach the traceback to the original exception value, then drop our
        // reference to the traceback.
        PyException_SetTraceback(val, tb);
        Py_DECREF(tb);
    }
    Py_DECREF(exc);
    assert(!PyErr_Occurred());

    // Raise the new error, then fetch and normalize it so we can link the two.
    PyErr_SetString(type, message);

    PyErr_Fetch(&exc, &val2, &tb);
    PyErr_NormalizeException(&exc, &val2, &tb);
    // SetCause and SetContext each steal a reference to 'val'; we hold one
    // reference from PyErr_Fetch, so add one more to cover both calls.
    Py_INCREF(val);
    PyException_SetCause(val2, val);
    PyException_SetContext(val2, val);
    PyErr_Restore(exc, val2, tb);
}
/// Sets the current Python error indicator with the chosen error, performing a 'raise from'
/// from the error contained in error_already_set to indicate that the chosen error was
/// caused by the original error.
inline void raise_from(error_already_set &err, PyObject *type, const char *message) {
    // Re-install the captured error as the active Python error, then chain the
    // new error onto it.
    err.restore();
    raise_from(type, message);
}
/** \defgroup python_builtins const_name
    Unless stated otherwise, the following C++ functions behave the same
    as their Python counterparts.
 */

/** \ingroup python_builtins
    \rst
    Return true if ``obj`` is an instance of ``T``. Type ``T`` must be a subclass of
    `object` or a class which was exposed to Python as ``py::class_<T>``.
\endrst */
// Overload for pybind11 wrapper types: delegate to the type's own check_().
template <typename T, detail::enable_if_t<std::is_base_of<object, T>::value, int> = 0>
bool isinstance(handle obj) {
    return T::check_(obj);
}

// Overload for C++ types bound via py::class_: look up the registered type info.
template <typename T, detail::enable_if_t<!std::is_base_of<object, T>::value, int> = 0>
bool isinstance(handle obj) {
    return detail::isinstance_generic(obj, typeid(T));
}

// isinstance<handle> is meaningless (handle carries no type); forbid it outright.
template <>
inline bool isinstance<handle>(handle) = delete;

// Everything non-null is an object, so this degenerates to a null check.
template <>
inline bool isinstance<object>(handle obj) {
    return obj.ptr() != nullptr;
}
/// \ingroup python_builtins
/// Return true if ``obj`` is an instance of the ``type``.
inline bool isinstance(handle obj, handle type) {
    const int status = PyObject_IsInstance(obj.ptr(), type.ptr());
    if (status < 0) {
        // The isinstance check itself raised; propagate the Python error.
        throw error_already_set();
    }
    return status == 1;
}
/// \addtogroup python_builtins
/// @{

/// Return true if ``obj`` has the attribute ``name``; does not raise.
inline bool hasattr(handle obj, handle name) {
    return PyObject_HasAttr(obj.ptr(), name.ptr()) == 1;
}

inline bool hasattr(handle obj, const char *name) {
    return PyObject_HasAttrString(obj.ptr(), name) == 1;
}

/// Delete attribute ``name`` from ``obj``; throws error_already_set on failure.
inline void delattr(handle obj, handle name) {
    if (PyObject_DelAttr(obj.ptr(), name.ptr()) != 0) {
        throw error_already_set();
    }
}

inline void delattr(handle obj, const char *name) {
    if (PyObject_DelAttrString(obj.ptr(), name) != 0) {
        throw error_already_set();
    }
}

/// Get attribute ``name`` from ``obj``; throws error_already_set if missing.
inline object getattr(handle obj, handle name) {
    PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr());
    if (!result) {
        throw error_already_set();
    }
    return reinterpret_steal<object>(result);
}

inline object getattr(handle obj, const char *name) {
    PyObject *result = PyObject_GetAttrString(obj.ptr(), name);
    if (!result) {
        throw error_already_set();
    }
    return reinterpret_steal<object>(result);
}

/// Like 3-argument getattr() in Python: return ``default_`` instead of raising
/// if the attribute is missing (any lookup error is cleared).
inline object getattr(handle obj, handle name, handle default_) {
    if (PyObject *result = PyObject_GetAttr(obj.ptr(), name.ptr())) {
        return reinterpret_steal<object>(result);
    }
    PyErr_Clear();
    return reinterpret_borrow<object>(default_);
}

inline object getattr(handle obj, const char *name, handle default_) {
    if (PyObject *result = PyObject_GetAttrString(obj.ptr(), name)) {
        return reinterpret_steal<object>(result);
    }
    PyErr_Clear();
    return reinterpret_borrow<object>(default_);
}

/// Set attribute ``name`` on ``obj``; throws error_already_set on failure.
inline void setattr(handle obj, handle name, handle value) {
    if (PyObject_SetAttr(obj.ptr(), name.ptr(), value.ptr()) != 0) {
        throw error_already_set();
    }
}

inline void setattr(handle obj, const char *name, handle value) {
    if (PyObject_SetAttrString(obj.ptr(), name, value.ptr()) != 0) {
        throw error_already_set();
    }
}

/// Equivalent to Python's hash(); throws error_already_set for unhashable objects.
inline ssize_t hash(handle obj) {
    auto h = PyObject_Hash(obj.ptr());
    if (h == -1) {
        throw error_already_set();
    }
    return h;
}

/// @} python_builtins
PYBIND11_NAMESPACE_BEGIN(detail)
// Unwrap a bound or instance method to its underlying function object; any other
// value (including a null handle) is returned unchanged.
inline handle get_function(handle value) {
    if (!value) {
        return value;
    }
    PyObject *raw = value.ptr();
    if (PyInstanceMethod_Check(raw)) {
        return PyInstanceMethod_GET_FUNCTION(raw);
    }
    if (PyMethod_Check(raw)) {
        return PyMethod_GET_FUNCTION(raw);
    }
    return value;
}
// Reimplementation of python's dict helper functions to ensure that exceptions
// aren't swallowed (see #2862)

// copied from cpython _PyDict_GetItemStringWithError
// Returns a BORROWED reference, or nullptr (without an error set) if the key is absent.
inline PyObject *dict_getitemstring(PyObject *v, const char *key) {
    PyObject *key_obj = PyUnicode_FromString(key);
    if (key_obj == nullptr) {
        throw error_already_set();
    }

    PyObject *item = PyDict_GetItemWithError(v, key_obj); // borrowed reference
    Py_DECREF(key_obj);
    if (item == nullptr && PyErr_Occurred()) {
        // Distinguish "key missing" (no error set) from a real lookup failure.
        throw error_already_set();
    }
    return item;
}
// Dict lookup that propagates lookup errors instead of clearing them.
// Returns a BORROWED reference, or nullptr (without an error set) if the key is absent.
inline PyObject *dict_getitem(PyObject *v, PyObject *key) {
    PyObject *item = PyDict_GetItemWithError(v, key);
    if (item != nullptr) {
        return item;
    }
    if (PyErr_Occurred()) {
        throw error_already_set();
    }
    return nullptr;
}
// Helper aliases/functions to support implicit casting of values given to python
// accessors/methods. When given a pyobject, this simply returns the pyobject as-is; for other C++
// type, the value goes through pybind11::cast(obj) to convert it to an `object`.
template <typename T, enable_if_t<is_pyobject<T>::value, int> = 0>
auto object_or_cast(T &&o) -> decltype(std::forward<T>(o)) {
    // Already a pyobject type: perfect-forward unchanged.
    return std::forward<T>(o);
}

// The following casting version is implemented in cast.h:
template <typename T, enable_if_t<!is_pyobject<T>::value, int> = 0>
object object_or_cast(T &&o);

// Match a PyObject*, which we want to convert directly to handle via its converting constructor
inline handle object_or_cast(PyObject *ptr) { return ptr; }
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(4522) // warning C4522: multiple assignment operators specified
/// Proxy returned by expressions like ``obj.attr("x")`` or ``obj[key]``. Reads go
/// through Policy::get (cached after the first access); assigning to an rvalue
/// accessor writes through via Policy::set.
template <typename Policy>
class accessor : public object_api<accessor<Policy>> {
    using key_type = typename Policy::key_type;

public:
    accessor(handle obj, key_type key) : obj(obj), key(std::move(key)) {}
    accessor(const accessor &) = default;
    accessor(accessor &&) noexcept = default;

    // accessor overload required to override default assignment operator (templates are not
    // allowed to replace default compiler-generated assignments).
    void operator=(const accessor &a) && { std::move(*this).operator=(handle(a)); }
    void operator=(const accessor &a) & { operator=(handle(a)); }

    // Assignment to an rvalue accessor (e.g. `obj.attr("x") = v`): writes through
    // to the underlying Python object.
    template <typename T>
    void operator=(T &&value) && {
        Policy::set(obj, key, object_or_cast(std::forward<T>(value)));
    }
    // Assignment to an lvalue accessor: only replaces the local cache; no
    // Policy::set is performed.
    template <typename T>
    void operator=(T &&value) & {
        get_cache() = ensure_object(object_or_cast(std::forward<T>(value)));
    }

    template <typename T = Policy>
    PYBIND11_DEPRECATED(
        "Use of obj.attr(...) as bool is deprecated in favor of pybind11::hasattr(obj, ...)")
    explicit
    operator enable_if_t<std::is_same<T, accessor_policies::str_attr>::value
                             || std::is_same<T, accessor_policies::obj_attr>::value,
                         bool>() const {
        return hasattr(obj, key);
    }
    template <typename T = Policy>
    PYBIND11_DEPRECATED("Use of obj[key] as bool is deprecated in favor of obj.contains(key)")
    explicit
    operator enable_if_t<std::is_same<T, accessor_policies::generic_item>::value, bool>() const {
        return obj.contains(key);
    }

    // NOLINTNEXTLINE(google-explicit-constructor)
    operator object() const { return get_cache(); }
    PyObject *ptr() const { return get_cache().ptr(); }
    template <typename T>
    T cast() const {
        return get_cache().template cast<T>();
    }

private:
    static object ensure_object(object &&o) { return std::move(o); }
    static object ensure_object(handle h) { return reinterpret_borrow<object>(h); }

    // Perform the Policy::get lookup at most once; subsequent reads reuse the cache.
    object &get_cache() const {
        if (!cache) {
            cache = Policy::get(obj, key);
        }
        return cache;
    }

private:
    handle obj;
    key_type key;
    mutable object cache;
};
PYBIND11_WARNING_POP
PYBIND11_NAMESPACE_BEGIN(accessor_policies)
/// Attribute access keyed by a Python object.
struct obj_attr {
    using key_type = object;
    static object get(handle obj, handle key) { return getattr(obj, key); }
    static void set(handle obj, handle key, handle val) { setattr(obj, key, val); }
};

/// Attribute access keyed by a C string (avoids creating a Python str for the key).
struct str_attr {
    using key_type = const char *;
    static object get(handle obj, const char *key) { return getattr(obj, key); }
    static void set(handle obj, const char *key, handle val) { setattr(obj, key, val); }
};

/// obj[key] via the generic object protocol (works for dicts, sequences, etc.).
struct generic_item {
    using key_type = object;

    static object get(handle obj, handle key) {
        PyObject *result = PyObject_GetItem(obj.ptr(), key.ptr());
        if (!result) {
            throw error_already_set();
        }
        return reinterpret_steal<object>(result);
    }

    static void set(handle obj, handle key, handle val) {
        if (PyObject_SetItem(obj.ptr(), key.ptr(), val.ptr()) != 0) {
            throw error_already_set();
        }
    }
};

/// Integer-indexed access via the sequence protocol.
struct sequence_item {
    using key_type = size_t;

    template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
    static object get(handle obj, const IdxType &index) {
        PyObject *result = PySequence_GetItem(obj.ptr(), ssize_t_cast(index));
        if (!result) {
            throw error_already_set();
        }
        // PySequence_GetItem returns a new reference; steal it.
        return reinterpret_steal<object>(result);
    }

    template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
    static void set(handle obj, const IdxType &index, handle val) {
        // PySequence_SetItem does not steal a reference to 'val'
        if (PySequence_SetItem(obj.ptr(), ssize_t_cast(index), val.ptr()) != 0) {
            throw error_already_set();
        }
    }
};

/// Integer-indexed access specialized for Python lists.
struct list_item {
    using key_type = size_t;

    template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
    static object get(handle obj, const IdxType &index) {
        PyObject *result = PyList_GetItem(obj.ptr(), ssize_t_cast(index));
        if (!result) {
            throw error_already_set();
        }
        // PyList_GetItem returns a borrowed reference; borrow it.
        return reinterpret_borrow<object>(result);
    }

    template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
    static void set(handle obj, const IdxType &index, handle val) {
        // PyList_SetItem steals a reference to 'val'
        if (PyList_SetItem(obj.ptr(), ssize_t_cast(index), val.inc_ref().ptr()) != 0) {
            throw error_already_set();
        }
    }
};

/// Integer-indexed access specialized for Python tuples.
struct tuple_item {
    using key_type = size_t;

    template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
    static object get(handle obj, const IdxType &index) {
        PyObject *result = PyTuple_GetItem(obj.ptr(), ssize_t_cast(index));
        if (!result) {
            throw error_already_set();
        }
        // PyTuple_GetItem returns a borrowed reference; borrow it.
        return reinterpret_borrow<object>(result);
    }

    template <typename IdxType, detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
    static void set(handle obj, const IdxType &index, handle val) {
        // PyTuple_SetItem steals a reference to 'val'
        if (PyTuple_SetItem(obj.ptr(), ssize_t_cast(index), val.inc_ref().ptr()) != 0) {
            throw error_already_set();
        }
    }
};
PYBIND11_NAMESPACE_END(accessor_policies)
/// STL iterator template used for tuple, list, sequence and dict
///
/// The Policy base supplies dereference/increment/decrement/advance/equal/
/// distance_to plus the iterator typedefs; this template turns that into a
/// full STL-style (random-access or forward) iterator interface.
template <typename Policy>
class generic_iterator : public Policy {
    using It = generic_iterator;

public:
    using difference_type = ssize_t;
    using iterator_category = typename Policy::iterator_category;
    using value_type = typename Policy::value_type;
    using reference = typename Policy::reference;
    using pointer = typename Policy::pointer;

    generic_iterator() = default;
    generic_iterator(handle seq, ssize_t index) : Policy(seq, index) {}

    // NOLINTNEXTLINE(readability-const-return-type) // PR #3263
    reference operator*() const { return Policy::dereference(); }
    // NOLINTNEXTLINE(readability-const-return-type) // PR #3263
    reference operator[](difference_type n) const { return *(*this + n); }
    pointer operator->() const { return **this; }

    It &operator++() {
        Policy::increment();
        return *this;
    }
    It operator++(int) {
        auto copy = *this;
        Policy::increment();
        return copy;
    }
    It &operator--() {
        Policy::decrement();
        return *this;
    }
    It operator--(int) {
        auto copy = *this;
        Policy::decrement();
        return copy;
    }
    It &operator+=(difference_type n) {
        Policy::advance(n);
        return *this;
    }
    It &operator-=(difference_type n) {
        Policy::advance(-n);
        return *this;
    }

    friend It operator+(const It &a, difference_type n) {
        auto copy = a;
        return copy += n;
    }
    friend It operator+(difference_type n, const It &b) { return b + n; }
    friend It operator-(const It &a, difference_type n) {
        auto copy = a;
        return copy -= n;
    }

    friend difference_type operator-(const It &a, const It &b) { return a.distance_to(b); }

    friend bool operator==(const It &a, const It &b) { return a.equal(b); }
    friend bool operator!=(const It &a, const It &b) { return !(a == b); }
    // Orderings are all derived from distance_to / operator< for consistency.
    friend bool operator<(const It &a, const It &b) { return b - a > 0; }
    friend bool operator>(const It &a, const It &b) { return b < a; }
    friend bool operator>=(const It &a, const It &b) { return !(a < b); }
    friend bool operator<=(const It &a, const It &b) { return !(a > b); }
};
PYBIND11_NAMESPACE_BEGIN(iterator_policies)
/// Quick proxy class needed to implement ``operator->`` for iterators which can't return pointers
/// (holds the value by copy and hands out its address).
template <typename T>
struct arrow_proxy {
    T value;

    // NOLINTNEXTLINE(google-explicit-constructor)
    arrow_proxy(T &&value) noexcept : value(std::move(value)) {}
    T *operator->() const { return &value; }
};
/// Lightweight iterator policy using just a simple pointer: see ``PySequence_Fast_ITEMS``
///
/// Iterates directly over the object's internal PyObject* item array, so the
/// handles it yields are not owned by the iterator.
class sequence_fast_readonly {
protected:
    using iterator_category = std::random_access_iterator_tag;
    using value_type = handle;
    using reference = const handle; // PR #3263
    using pointer = arrow_proxy<const handle>;

    // Points 'ptr' at element n of the sequence's item array.
    sequence_fast_readonly(handle obj, ssize_t n) : ptr(PySequence_Fast_ITEMS(obj.ptr()) + n) {}

    // NOLINTNEXTLINE(readability-const-return-type) // PR #3263
    reference dereference() const { return *ptr; }
    void increment() { ++ptr; }
    void decrement() { --ptr; }
    void advance(ssize_t n) { ptr += n; }
    bool equal(const sequence_fast_readonly &b) const { return ptr == b.ptr; }
    ssize_t distance_to(const sequence_fast_readonly &b) const { return ptr - b.ptr; }

private:
    PyObject **ptr;
};
/// Full read and write access using the sequence protocol: see ``detail::sequence_accessor``
///
/// Dereferencing yields a sequence_accessor proxy, so elements can be both read
/// and assigned through the iterator.
class sequence_slow_readwrite {
protected:
    using iterator_category = std::random_access_iterator_tag;
    using value_type = object;
    using reference = sequence_accessor;
    using pointer = arrow_proxy<const sequence_accessor>;

    sequence_slow_readwrite(handle obj, ssize_t index) : obj(obj), index(index) {}

    reference dereference() const { return {obj, static_cast<size_t>(index)}; }
    void increment() { ++index; }
    void decrement() { --index; }
    void advance(ssize_t n) { index += n; }
    bool equal(const sequence_slow_readwrite &b) const { return index == b.index; }
    ssize_t distance_to(const sequence_slow_readwrite &b) const { return index - b.index; }

private:
    handle obj;
    ssize_t index;
};
/// Python's dictionary protocol permits this to be a forward iterator
///
/// Wraps PyDict_Next: each step yields a (key, value) pair of handles that are
/// not owned by the iterator; pos == -1 is the end sentinel.
class dict_readonly {
protected:
    using iterator_category = std::forward_iterator_tag;
    using value_type = std::pair<handle, handle>;
    using reference = const value_type; // PR #3263
    using pointer = arrow_proxy<const value_type>;

    dict_readonly() = default;
    // The constructor immediately advances once to load the first key/value pair.
    dict_readonly(handle obj, ssize_t pos) : obj(obj), pos(pos) { increment(); }

    // NOLINTNEXTLINE(readability-const-return-type) // PR #3263
    reference dereference() const { return {key, value}; }
    void increment() {
        // PyDict_Next returns 0 when the dict is exhausted; mark end with pos = -1.
        if (PyDict_Next(obj.ptr(), &pos, &key, &value) == 0) {
            pos = -1;
        }
    }
    bool equal(const dict_readonly &b) const { return pos == b.pos; }

private:
    handle obj;
    PyObject *key = nullptr, *value = nullptr;
    ssize_t pos = -1;
};
PYBIND11_NAMESPACE_END(iterator_policies)
// On CPython, tuples and lists can be iterated via the fast pointer-based policy;
// on PyPy that policy is not used — presumably PySequence_Fast_ITEMS is not
// safe/available there (TODO confirm) — so the generic sequence protocol is used.
#if !defined(PYPY_VERSION)
using tuple_iterator = generic_iterator<iterator_policies::sequence_fast_readonly>;
using list_iterator = generic_iterator<iterator_policies::sequence_fast_readonly>;
#else
using tuple_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
using list_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
#endif

using sequence_iterator = generic_iterator<iterator_policies::sequence_slow_readwrite>;
using dict_iterator = generic_iterator<iterator_policies::dict_readonly>;
// True if iter(obj) would succeed, i.e. the object supports the iterator protocol.
inline bool PyIterable_Check(PyObject *obj) {
    PyObject *iter = PyObject_GetIter(obj);
    if (iter == nullptr) {
        // GetIter raised; swallow the error — the answer is simply "no".
        PyErr_Clear();
        return false;
    }
    Py_DECREF(iter);
    return true;
}
// Identity checks against the None / Ellipsis singletons.
inline bool PyNone_Check(PyObject *o) { return o == Py_None; }
inline bool PyEllipsis_Check(PyObject *o) { return o == Py_Ellipsis; }

#ifdef PYBIND11_STR_LEGACY_PERMISSIVE
// Legacy mode: also accept bytes objects where a str is expected.
inline bool PyUnicode_Check_Permissive(PyObject *o) {
    return PyUnicode_Check(o) || PYBIND11_BYTES_CHECK(o);
}
#    define PYBIND11_STR_CHECK_FUN detail::PyUnicode_Check_Permissive
#else
#    define PYBIND11_STR_CHECK_FUN PyUnicode_Check
#endif

// True if o is exactly a staticmethod object (direct type comparison, no subclasses).
inline bool PyStaticMethod_Check(PyObject *o) { return o->ob_type == &PyStaticMethod_Type; }
/// Tag type marking a handle as **kwargs (double-star) unpacking in a call expression.
class kwargs_proxy : public handle {
public:
    explicit kwargs_proxy(handle h) : handle(h) {}
};

/// Tag type marking a handle as *args (single-star) unpacking in a call expression;
/// applying unary * again upgrades it to **kwargs unpacking.
class args_proxy : public handle {
public:
    explicit args_proxy(handle h) : handle(h) {}
    kwargs_proxy operator*() const { return kwargs_proxy(*this); }
};
/// Python argument categories (using PEP 448 terms)
// Named/keyword argument (py::arg or derived).
template <typename T>
using is_keyword = std::is_base_of<arg, T>;
template <typename T>
using is_s_unpacking = std::is_same<args_proxy, T>; // * unpacking
template <typename T>
using is_ds_unpacking = std::is_same<kwargs_proxy, T>; // ** unpacking
// Positional = anything that is none of the above.
template <typename T>
using is_positional = satisfies_none_of<T, is_keyword, is_s_unpacking, is_ds_unpacking>;
template <typename T>
using is_keyword_or_ds = satisfies_any_of<T, is_keyword, is_ds_unpacking>;

// Call argument collector forward declarations
template <return_value_policy policy = return_value_policy::automatic_reference>
class simple_collector;
template <return_value_policy policy = return_value_policy::automatic_reference>
class unpacking_collector;
PYBIND11_NAMESPACE_END(detail)
// TODO: After the deprecated constructors are removed, this macro can be simplified by
//       inheriting ctors: `using Parent::Parent`. It's not an option right now because
//       the `using` statement triggers the parent deprecation warning even if the ctor
//       isn't even used.

// Boilerplate shared by every concrete Python-type wrapper class: the
// borrowed/stolen tag constructors, the deprecated (handle, bool) constructor,
// the static check_() type test, and conversion from an accessor proxy.
#define PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun)                                            \
public:                                                                                           \
    PYBIND11_DEPRECATED("Use reinterpret_borrow<" #Name ">() or reinterpret_steal<" #Name ">()")  \
    Name(handle h, bool is_borrowed)                                                              \
        : Parent(is_borrowed ? Parent(h, borrowed_t{}) : Parent(h, stolen_t{})) {}                \
    Name(handle h, borrowed_t) : Parent(h, borrowed_t{}) {}                                       \
    Name(handle h, stolen_t) : Parent(h, stolen_t{}) {}                                           \
    PYBIND11_DEPRECATED("Use py::isinstance<py::python_type>(obj) instead")                       \
    bool check() const { return m_ptr != nullptr && (CheckFun(m_ptr) != 0); }                     \
    static bool check_(handle h) { return h.ptr() != nullptr && CheckFun(h.ptr()); }              \
    template <typename Policy_> /* NOLINTNEXTLINE(google-explicit-constructor) */                 \
    Name(const ::pybind11::detail::accessor<Policy_> &a) : Name(object(a)) {}

// Variant for wrappers whose type can be CONVERTED from a generic object
// (e.g. int("3")): if check_() fails, ConvertFun is tried; a null result
// raises the pending Python error.
#define PYBIND11_OBJECT_CVT(Name, Parent, CheckFun, ConvertFun)                                   \
    PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun)                                                \
    /* This is deliberately not 'explicit' to allow implicit conversion from object: */           \
    /* NOLINTNEXTLINE(google-explicit-constructor) */                                             \
    Name(const object &o)                                                                         \
        : Parent(check_(o) ? o.inc_ref().ptr() : ConvertFun(o.ptr()), stolen_t{}) {               \
        if (!m_ptr)                                                                               \
            throw ::pybind11::error_already_set();                                                \
    }                                                                                             \
    /* NOLINTNEXTLINE(google-explicit-constructor) */                                             \
    Name(object &&o) : Parent(check_(o) ? o.release().ptr() : ConvertFun(o.ptr()), stolen_t{}) {  \
        if (!m_ptr)                                                                               \
            throw ::pybind11::error_already_set();                                                \
    }

// Same as PYBIND11_OBJECT_CVT, plus a defaulted default constructor.
#define PYBIND11_OBJECT_CVT_DEFAULT(Name, Parent, CheckFun, ConvertFun)                           \
    PYBIND11_OBJECT_CVT(Name, Parent, CheckFun, ConvertFun)                                       \
    Name() = default;

// Builds the type_error thrown when an object fails a wrapper's type check.
#define PYBIND11_OBJECT_CHECK_FAILED(Name, o_ptr)                                                 \
    ::pybind11::type_error("Object of type '"                                                     \
                           + ::pybind11::detail::get_fully_qualified_tp_name(Py_TYPE(o_ptr))      \
                           + "' is not an instance of '" #Name "'")

// Variant WITHOUT conversion: constructing from an object of the wrong type
// throws a type_error instead of attempting a conversion.
#define PYBIND11_OBJECT(Name, Parent, CheckFun)                                                   \
    PYBIND11_OBJECT_COMMON(Name, Parent, CheckFun)                                                \
    /* This is deliberately not 'explicit' to allow implicit conversion from object: */           \
    /* NOLINTNEXTLINE(google-explicit-constructor) */                                             \
    Name(const object &o) : Parent(o) {                                                           \
        if (m_ptr && !check_(m_ptr))                                                              \
            throw PYBIND11_OBJECT_CHECK_FAILED(Name, m_ptr);                                      \
    }                                                                                             \
    /* NOLINTNEXTLINE(google-explicit-constructor) */                                             \
    Name(object &&o) : Parent(std::move(o)) {                                                     \
        if (m_ptr && !check_(m_ptr))                                                              \
            throw PYBIND11_OBJECT_CHECK_FAILED(Name, m_ptr);                                      \
    }

// Same as PYBIND11_OBJECT, plus a defaulted default constructor.
#define PYBIND11_OBJECT_DEFAULT(Name, Parent, CheckFun)                                           \
    PYBIND11_OBJECT(Name, Parent, CheckFun)                                                      \
    Name() = default;
/// \addtogroup pytypes
/// @{
/** \rst
Wraps a Python iterator so that it can also be used as a C++ input iterator
Caveat: copying an iterator does not (and cannot) clone the internal
state of the Python iterable. This also applies to the post-increment
operator. This iterator should only be used to retrieve the current
value using ``operator*()``.
\endrst */
class iterator : public object {
public:
    using iterator_category = std::input_iterator_tag;
    using difference_type = ssize_t;
    using value_type = handle;
    using reference = const handle; // PR #3263
    using pointer = const handle *;
    PYBIND11_OBJECT_DEFAULT(iterator, object, PyIter_Check)
    iterator &operator++() {
        advance();
        return *this;
    }
    iterator operator++(int) {
        auto rv = *this;
        advance();
        return rv;
    }
    // NOLINTNEXTLINE(readability-const-return-type) // PR #3263
    // Lazily fetches the first item: a freshly-constructed iterator has a null
    // cached `value`, so the first dereference triggers advance().
    reference operator*() const {
        if (m_ptr && !value.ptr()) {
            auto &self = const_cast<iterator &>(*this);
            self.advance();
        }
        return value;
    }
    pointer operator->() const {
        operator*();
        return &value;
    }
    /** \rst
         The value which marks the end of the iteration. ``it == iterator::sentinel()``
         is equivalent to catching ``StopIteration`` in Python.
         .. code-block:: cpp
             void foo(py::iterator it) {
                 while (it != py::iterator::sentinel()) {
                    // use `*it`
                    ++it;
                 }
             }
    \endrst */
    static iterator sentinel() { return {}; }
    friend bool operator==(const iterator &a, const iterator &b) { return a->ptr() == b->ptr(); }
    friend bool operator!=(const iterator &a, const iterator &b) { return a->ptr() != b->ptr(); }
private:
    // Advance to the next item. PyIter_Next returns nullptr both at exhaustion and
    // on error; only the error case (PyErr_Occurred) raises.
    void advance() {
        value = reinterpret_steal<object>(PyIter_Next(m_ptr));
        if (value.ptr() == nullptr && PyErr_Occurred()) {
            throw error_already_set();
        }
    }
private:
    // Cached current item; null both before the first advance and after exhaustion.
    object value = {};
};
/// Wrapper around a Python ``type`` object (checked via PyType_Check).
class type : public object {
public:
    PYBIND11_OBJECT(type, object, PyType_Check)
    /// Return a type handle from a handle or an object
    static handle handle_of(handle h) { return handle((PyObject *) Py_TYPE(h.ptr())); }
    /// Return a type object from a handle or an object
    static type of(handle h) { return type(type::handle_of(h), borrowed_t{}); }
    // Defined in pybind11/cast.h
    /// Convert C++ type to handle if previously registered. Does not convert
    /// standard types, like int, float. etc. yet.
    /// See https://github.com/pybind/pybind11/issues/2486
    template <typename T>
    static handle handle_of();
    /// Convert C++ type to type if previously registered. Does not convert
    /// standard types, like int, float. etc. yet.
    /// See https://github.com/pybind/pybind11/issues/2486
    template <typename T>
    static type of() {
        return type(type::handle_of<T>(), borrowed_t{});
    }
};
/// Wrapper for any object satisfying detail::PyIterable_Check (i.e. iterable from Python).
class iterable : public object {
public:
    PYBIND11_OBJECT_DEFAULT(iterable, object, detail::PyIterable_Check)
};
class bytes;
/// Wrapper around a Python ``str``; non-str objects are converted via raw_str (PyObject_Str).
class str : public object {
public:
    PYBIND11_OBJECT_CVT(str, object, PYBIND11_STR_CHECK_FUN, raw_str)
    /// Construct from a character buffer with explicit length (need not be null-terminated).
    template <typename SzType, detail::enable_if_t<std::is_integral<SzType>::value, int> = 0>
    str(const char *c, const SzType &n)
        : object(PyUnicode_FromStringAndSize(c, ssize_t_cast(n)), stolen_t{}) {
        if (!m_ptr) {
            if (PyErr_Occurred()) {
                throw error_already_set();
            }
            pybind11_fail("Could not allocate string object!");
        }
    }
    // 'explicit' is explicitly omitted from the following constructors to allow implicit
    // conversion to py::str from C++ string-like objects
    // NOLINTNEXTLINE(google-explicit-constructor)
    str(const char *c = "") : object(PyUnicode_FromString(c), stolen_t{}) {
        if (!m_ptr) {
            if (PyErr_Occurred()) {
                throw error_already_set();
            }
            pybind11_fail("Could not allocate string object!");
        }
    }
    // NOLINTNEXTLINE(google-explicit-constructor)
    str(const std::string &s) : str(s.data(), s.size()) {}
#ifdef PYBIND11_HAS_STRING_VIEW
    // enable_if is needed to avoid "ambiguous conversion" errors (see PR #3521).
    template <typename T, detail::enable_if_t<std::is_same<T, std::string_view>::value, int> = 0>
    // NOLINTNEXTLINE(google-explicit-constructor)
    str(T s) : str(s.data(), s.size()) {}
# ifdef PYBIND11_HAS_U8STRING
    // reinterpret_cast here is safe (C++20 guarantees char8_t has the same size/alignment as char)
    // NOLINTNEXTLINE(google-explicit-constructor)
    str(std::u8string_view s) : str(reinterpret_cast<const char *>(s.data()), s.size()) {}
# endif
#endif
    explicit str(const bytes &b);
    /** \rst
        Return a string representation of the object. This is analogous to
        the ``str()`` function in Python.
    \endrst */
    explicit str(handle h) : object(raw_str(h.ptr()), stolen_t{}) {
        if (!m_ptr) {
            throw error_already_set();
        }
    }
    /// Decode to a C++ std::string; throws error_already_set if the CPython calls fail.
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator std::string() const {
        object temp = *this;
        if (PyUnicode_Check(m_ptr)) {
            // Encode to a bytes object first; `temp` keeps the byte buffer alive below.
            temp = reinterpret_steal<object>(PyUnicode_AsUTF8String(m_ptr));
            if (!temp) {
                throw error_already_set();
            }
        }
        char *buffer = nullptr;
        ssize_t length = 0;
        if (PyBytes_AsStringAndSize(temp.ptr(), &buffer, &length) != 0) {
            throw error_already_set();
        }
        return std::string(buffer, (size_t) length);
    }
    /// Equivalent to Python ``self.format(*args)``.
    template <typename... Args>
    str format(Args &&...args) const {
        return attr("format")(std::forward<Args>(args)...);
    }
private:
    /// Return string representation -- always returns a new reference, even if already a str
    static PyObject *raw_str(PyObject *op) {
        PyObject *str_value = PyObject_Str(op);
        return str_value;
    }
};
/// @} pytypes
inline namespace literals {
/** \rst
    String literal version of `str`
 \endrst */
// Usage: auto s = "hello"_s;  (constructs a py::str from the literal)
inline str operator"" _s(const char *s, size_t size) { return {s, size}; }
} // namespace literals
/// \addtogroup pytypes
/// @{
/// Wrapper around a Python ``bytes`` object.
class bytes : public object {
public:
    PYBIND11_OBJECT(bytes, object, PYBIND11_BYTES_CHECK)
    // Allow implicit conversion:
    // NOLINTNEXTLINE(google-explicit-constructor)
    bytes(const char *c = "") : object(PYBIND11_BYTES_FROM_STRING(c), stolen_t{}) {
        if (!m_ptr) {
            pybind11_fail("Could not allocate bytes object!");
        }
    }
    /// Construct from a buffer with explicit length (may contain embedded NULs).
    template <typename SzType, detail::enable_if_t<std::is_integral<SzType>::value, int> = 0>
    bytes(const char *c, const SzType &n)
        : object(PYBIND11_BYTES_FROM_STRING_AND_SIZE(c, ssize_t_cast(n)), stolen_t{}) {
        if (!m_ptr) {
            pybind11_fail("Could not allocate bytes object!");
        }
    }
    // Allow implicit conversion:
    // NOLINTNEXTLINE(google-explicit-constructor)
    bytes(const std::string &s) : bytes(s.data(), s.size()) {}
    explicit bytes(const pybind11::str &s);
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator std::string() const { return string_op<std::string>(); }
#ifdef PYBIND11_HAS_STRING_VIEW
    // enable_if is needed to avoid "ambiguous conversion" errors (see PR #3521).
    template <typename T, detail::enable_if_t<std::is_same<T, std::string_view>::value, int> = 0>
    // NOLINTNEXTLINE(google-explicit-constructor)
    bytes(T s) : bytes(s.data(), s.size()) {}
    // Obtain a string view that views the current `bytes` buffer value. Note that this is only
    // valid so long as the `bytes` instance remains alive and so generally should not outlive the
    // lifetime of the `bytes` instance.
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator std::string_view() const { return string_op<std::string_view>(); }
#endif
private:
    // Shared implementation for the std::string / std::string_view conversions:
    // extracts the internal char buffer and constructs T from (pointer, length).
    template <typename T>
    T string_op() const {
        char *buffer = nullptr;
        ssize_t length = 0;
        if (PyBytes_AsStringAndSize(m_ptr, &buffer, &length) != 0) {
            throw error_already_set();
        }
        return {buffer, static_cast<size_t>(length)};
    }
};
// Note: breathe >= 4.17.0 will fail to build docs if the below two constructors
// are included in the doxygen group; close here and reopen after as a workaround
/// @} pytypes
// Construct py::bytes from py::str by UTF-8-encoding the string. Note that
// `buffer` points into `temp`'s storage, so `temp` must stay alive until the
// new bytes object has been created from it.
inline bytes::bytes(const pybind11::str &s) {
    object temp = s;
    if (PyUnicode_Check(s.ptr())) {
        temp = reinterpret_steal<object>(PyUnicode_AsUTF8String(s.ptr()));
        if (!temp) {
            throw error_already_set();
        }
    }
    char *buffer = nullptr;
    ssize_t length = 0;
    if (PyBytes_AsStringAndSize(temp.ptr(), &buffer, &length) != 0) {
        throw error_already_set();
    }
    auto obj = reinterpret_steal<object>(PYBIND11_BYTES_FROM_STRING_AND_SIZE(buffer, length));
    if (!obj) {
        pybind11_fail("Could not allocate bytes object!");
    }
    m_ptr = obj.release().ptr();
}
// Construct py::str from py::bytes by decoding the raw byte buffer
// (PyUnicode_FromStringAndSize). Throws error_already_set on decode/extraction
// failure; any other allocation failure aborts via pybind11_fail.
inline str::str(const bytes &b) {
    char *data = nullptr;
    ssize_t size = 0;
    if (PyBytes_AsStringAndSize(b.ptr(), &data, &size) != 0) {
        throw error_already_set();
    }
    auto decoded = reinterpret_steal<object>(PyUnicode_FromStringAndSize(data, size));
    if (!decoded) {
        if (PyErr_Occurred()) {
            throw error_already_set();
        }
        pybind11_fail("Could not allocate string object!");
    }
    m_ptr = decoded.release().ptr();
}
/// \addtogroup pytypes
/// @{
/// Wrapper around a Python ``bytearray`` object.
class bytearray : public object {
public:
    PYBIND11_OBJECT_CVT(bytearray, object, PyByteArray_Check, PyByteArray_FromObject)
    /// Construct from a buffer with explicit length.
    template <typename SzType, detail::enable_if_t<std::is_integral<SzType>::value, int> = 0>
    bytearray(const char *c, const SzType &n)
        : object(PyByteArray_FromStringAndSize(c, ssize_t_cast(n)), stolen_t{}) {
        if (!m_ptr) {
            pybind11_fail("Could not allocate bytearray object!");
        }
    }
    bytearray() : bytearray("", 0) {}
    explicit bytearray(const std::string &s) : bytearray(s.data(), s.size()) {}
    size_t size() const { return static_cast<size_t>(PyByteArray_Size(m_ptr)); }
    /// Copy the bytearray's contents into a std::string.
    explicit operator std::string() const {
        char *buffer = PyByteArray_AS_STRING(m_ptr);
        ssize_t size = PyByteArray_GET_SIZE(m_ptr);
        return std::string(buffer, static_cast<size_t>(size));
    }
};
// Note: breathe >= 4.17.0 will fail to build docs if the below two constructors
// are included in the doxygen group; close here and reopen after as a workaround
/// @} pytypes
/// \addtogroup pytypes
/// @{
/// Wrapper around the Python ``None`` singleton (held as a borrowed reference).
class none : public object {
public:
    PYBIND11_OBJECT(none, object, detail::PyNone_Check)
    none() : object(Py_None, borrowed_t{}) {}
};
/// Wrapper around the Python ``Ellipsis`` singleton (held as a borrowed reference).
class ellipsis : public object {
public:
    PYBIND11_OBJECT(ellipsis, object, detail::PyEllipsis_Check)
    ellipsis() : object(Py_Ellipsis, borrowed_t{}) {}
};
/// Wrapper around a Python ``bool``; defaults to ``False``.
class bool_ : public object {
public:
    PYBIND11_OBJECT_CVT(bool_, object, PyBool_Check, raw_bool)
    bool_() : object(Py_False, borrowed_t{}) {}
    // Allow implicit conversion from and to `bool`:
    // NOLINTNEXTLINE(google-explicit-constructor)
    bool_(bool value) : object(value ? Py_True : Py_False, borrowed_t{}) {}
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator bool() const { return (m_ptr != nullptr) && PyLong_AsLong(m_ptr) != 0; }
private:
    /// Return the truth value of an object -- always returns a new reference
    static PyObject *raw_bool(PyObject *op) {
        const auto value = PyObject_IsTrue(op);
        if (value == -1) {
            // Truth-testing raised; propagate by returning nullptr (caller raises).
            return nullptr;
        }
        return handle(value != 0 ? Py_True : Py_False).inc_ref().ptr();
    }
};
PYBIND11_NAMESPACE_BEGIN(detail)
// Converts a value to the given unsigned type. If an error occurs, you get back (Unsigned) -1;
// otherwise you get back the unsigned long or unsigned long long value cast to (Unsigned).
// (The distinction is critically important when casting a returned -1 error value to some other
// unsigned type: (A)-1 != (B)-1 when A and B are unsigned types of different sizes).
// Convert a Python int to Unsigned via the narrowest sufficient CPython API.
// On error the pending Python exception is left set and (Unsigned) -1 is
// returned; callers must distinguish that from a legitimate max value by
// checking PyErr_Occurred (see the size-mismatch caveat in the comment above).
template <typename Unsigned>
Unsigned as_unsigned(PyObject *o) {
    if (sizeof(Unsigned) <= sizeof(unsigned long)) {
        const unsigned long result = PyLong_AsUnsignedLong(o);
        if (result == (unsigned long) -1 && PyErr_Occurred()) {
            return (Unsigned) -1;
        }
        return (Unsigned) result;
    }
    const unsigned long long result = PyLong_AsUnsignedLongLong(o);
    if (result == (unsigned long long) -1 && PyErr_Occurred()) {
        return (Unsigned) -1;
    }
    return (Unsigned) result;
}
PYBIND11_NAMESPACE_END(detail)
/// Wrapper around a Python ``int``; converts non-int objects via PyNumber_Long.
class int_ : public object {
public:
    PYBIND11_OBJECT_CVT(int_, object, PYBIND11_LONG_CHECK, PyNumber_Long)
    int_() : object(PyLong_FromLong(0), stolen_t{}) {}
    // Allow implicit conversion from C++ integral types:
    template <typename T, detail::enable_if_t<std::is_integral<T>::value, int> = 0>
    // NOLINTNEXTLINE(google-explicit-constructor)
    int_(T value) {
        // Pick the CPython constructor matching the argument's width and signedness.
        if (sizeof(T) <= sizeof(long)) {
            if (std::is_signed<T>::value) {
                m_ptr = PyLong_FromLong((long) value);
            } else {
                m_ptr = PyLong_FromUnsignedLong((unsigned long) value);
            }
        } else {
            if (std::is_signed<T>::value) {
                m_ptr = PyLong_FromLongLong((long long) value);
            } else {
                m_ptr = PyLong_FromUnsignedLongLong((unsigned long long) value);
            }
        }
        if (!m_ptr) {
            pybind11_fail("Could not allocate int object!");
        }
    }
    /// Convert to a C++ integral type, dispatching on signedness and width.
    template <typename T, detail::enable_if_t<std::is_integral<T>::value, int> = 0>
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator T() const {
        return std::is_unsigned<T>::value ? detail::as_unsigned<T>(m_ptr)
               : sizeof(T) <= sizeof(long) ? (T) PyLong_AsLong(m_ptr)
                                           : (T) PYBIND11_LONG_AS_LONGLONG(m_ptr);
    }
};
/// Wrapper around a Python ``float``; converts non-float objects via PyNumber_Float.
class float_ : public object {
public:
    PYBIND11_OBJECT_CVT(float_, object, PyFloat_Check, PyNumber_Float)
    // Allow implicit conversion from float/double:
    // NOLINTNEXTLINE(google-explicit-constructor)
    float_(float value) : object(PyFloat_FromDouble((double) value), stolen_t{}) {
        if (!m_ptr) {
            pybind11_fail("Could not allocate float object!");
        }
    }
    // NOLINTNEXTLINE(google-explicit-constructor)
    float_(double value = .0) : object(PyFloat_FromDouble((double) value), stolen_t{}) {
        if (!m_ptr) {
            pybind11_fail("Could not allocate float object!");
        }
    }
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator float() const { return (float) PyFloat_AsDouble(m_ptr); }
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator double() const { return (double) PyFloat_AsDouble(m_ptr); }
};
/// Wrapper around a Python weak reference; optionally invokes ``callback`` when
/// the referent is collected.
class weakref : public object {
public:
    PYBIND11_OBJECT_CVT_DEFAULT(weakref, object, PyWeakref_Check, raw_weakref)
    explicit weakref(handle obj, handle callback = {})
        : object(PyWeakref_NewRef(obj.ptr(), callback.ptr()), stolen_t{}) {
        if (!m_ptr) {
            if (PyErr_Occurred()) {
                throw error_already_set();
            }
            pybind11_fail("Could not allocate weak reference!");
        }
    }
private:
    // Conversion hook used by the CVT macro: create a callback-less weakref.
    static PyObject *raw_weakref(PyObject *o) { return PyWeakref_NewRef(o, nullptr); }
};
/// Wrapper around a Python ``slice`` object.
class slice : public object {
public:
    PYBIND11_OBJECT_DEFAULT(slice, object, PySlice_Check)
    slice(handle start, handle stop, handle step)
        : object(PySlice_New(start.ptr(), stop.ptr(), step.ptr()), stolen_t{}) {
        if (!m_ptr) {
            pybind11_fail("Could not allocate slice object!");
        }
    }
#ifdef PYBIND11_HAS_OPTIONAL
    // std::nullopt maps to Python None for the corresponding bound.
    slice(std::optional<ssize_t> start, std::optional<ssize_t> stop, std::optional<ssize_t> step)
        : slice(index_to_object(start), index_to_object(stop), index_to_object(step)) {}
#else
    slice(ssize_t start_, ssize_t stop_, ssize_t step_)
        : slice(int_(start_), int_(stop_), int_(step_)) {}
#endif
    /// Resolve the slice against a sequence of `length` items; returns false on error.
    bool
    compute(size_t length, size_t *start, size_t *stop, size_t *step, size_t *slicelength) const {
        return PySlice_GetIndicesEx((PYBIND11_SLICE_OBJECT *) m_ptr,
                                    (ssize_t) length,
                                    (ssize_t *) start,
                                    (ssize_t *) stop,
                                    (ssize_t *) step,
                                    (ssize_t *) slicelength)
               == 0;
    }
    /// Signed-index overload of compute().
    bool compute(
        ssize_t length, ssize_t *start, ssize_t *stop, ssize_t *step, ssize_t *slicelength) const {
        return PySlice_GetIndicesEx(
                   (PYBIND11_SLICE_OBJECT *) m_ptr, length, start, stop, step, slicelength)
               == 0;
    }
private:
    // Map an optional index to int_ (engaged) or None (disengaged).
    template <typename T>
    static object index_to_object(T index) {
        return index ? object(int_(*index)) : object(none());
    }
};
/// Wrapper around a Python capsule: an opaque C pointer with an optional name
/// and destructor, commonly used to tie C/C++ lifetimes to Python objects.
class capsule : public object {
public:
    PYBIND11_OBJECT_DEFAULT(capsule, object, PyCapsule_CheckExact)
    PYBIND11_DEPRECATED("Use reinterpret_borrow<capsule>() or reinterpret_steal<capsule>()")
    capsule(PyObject *ptr, bool is_borrowed)
        : object(is_borrowed ? object(ptr, borrowed_t{}) : object(ptr, stolen_t{})) {}
    explicit capsule(const void *value,
                     const char *name = nullptr,
                     PyCapsule_Destructor destructor = nullptr)
        : object(PyCapsule_New(const_cast<void *>(value), name, destructor), stolen_t{}) {
        if (!m_ptr) {
            throw error_already_set();
        }
    }
    PYBIND11_DEPRECATED("Please use the ctor with value, name, destructor args")
    capsule(const void *value, PyCapsule_Destructor destructor)
        : object(PyCapsule_New(const_cast<void *>(value), nullptr, destructor), stolen_t{}) {
        if (!m_ptr) {
            throw error_already_set();
        }
    }
    /// Capsule name is nullptr.
    capsule(const void *value, void (*destructor)(void *)) {
        initialize_with_void_ptr_destructor(value, nullptr, destructor);
    }
    capsule(const void *value, const char *name, void (*destructor)(void *)) {
        initialize_with_void_ptr_destructor(value, name, destructor);
    }
    /// Capsule holding only a nullary destructor (stored as the capsule pointer itself).
    explicit capsule(void (*destructor)()) {
        m_ptr = PyCapsule_New(reinterpret_cast<void *>(destructor), nullptr, [](PyObject *o) {
            const char *name = get_name_in_error_scope(o);
            auto destructor = reinterpret_cast<void (*)()>(PyCapsule_GetPointer(o, name));
            if (destructor == nullptr) {
                throw error_already_set();
            }
            destructor();
        });
        if (!m_ptr) {
            throw error_already_set();
        }
    }
    template <typename T>
    operator T *() const { // NOLINT(google-explicit-constructor)
        return get_pointer<T>();
    }
    /// Get the pointer the capsule holds.
    template <typename T = void>
    T *get_pointer() const {
        const auto *name = this->name();
        T *result = static_cast<T *>(PyCapsule_GetPointer(m_ptr, name));
        if (!result) {
            throw error_already_set();
        }
        return result;
    }
    /// Replaces a capsule's pointer *without* calling the destructor on the existing one.
    void set_pointer(const void *value) {
        if (PyCapsule_SetPointer(m_ptr, const_cast<void *>(value)) != 0) {
            throw error_already_set();
        }
    }
    /// Return the capsule's name (may legitimately be nullptr).
    const char *name() const {
        const char *name = PyCapsule_GetName(m_ptr);
        if ((name == nullptr) && PyErr_Occurred()) {
            throw error_already_set();
        }
        return name;
    }
    /// Replaces a capsule's name *without* calling the destructor on the existing one.
    void set_name(const char *new_name) {
        if (PyCapsule_SetName(m_ptr, new_name) != 0) {
            throw error_already_set();
        }
    }
private:
    // Like name(), but safe to call from a capsule destructor where a Python
    // error indicator may already be set: errors are written out, not thrown.
    static const char *get_name_in_error_scope(PyObject *o) {
        error_scope error_guard;
        const char *name = PyCapsule_GetName(o);
        if ((name == nullptr) && PyErr_Occurred()) {
            // write out and consume error raised by call to PyCapsule_GetName
            PyErr_WriteUnraisable(o);
        }
        return name;
    }
    // Shared setup for the void(*)(void*) destructor ctors: the user destructor is
    // stashed in the capsule *context* so the capsule pointer stays the user value.
    void initialize_with_void_ptr_destructor(const void *value,
                                             const char *name,
                                             void (*destructor)(void *)) {
        m_ptr = PyCapsule_New(const_cast<void *>(value), name, [](PyObject *o) {
            // guard if destructor called while err indicator is set
            error_scope error_guard;
            auto destructor = reinterpret_cast<void (*)(void *)>(PyCapsule_GetContext(o));
            if (destructor == nullptr && PyErr_Occurred()) {
                throw error_already_set();
            }
            const char *name = get_name_in_error_scope(o);
            void *ptr = PyCapsule_GetPointer(o, name);
            if (ptr == nullptr) {
                throw error_already_set();
            }
            if (destructor != nullptr) {
                destructor(ptr);
            }
        });
        if (!m_ptr || PyCapsule_SetContext(m_ptr, reinterpret_cast<void *>(destructor)) != 0) {
            throw error_already_set();
        }
    }
};
/// Wrapper around a Python ``tuple``; converts sequences via PySequence_Tuple.
class tuple : public object {
public:
    PYBIND11_OBJECT_CVT(tuple, object, PyTuple_Check, PySequence_Tuple)
    template <typename SzType = ssize_t,
              detail::enable_if_t<std::is_integral<SzType>::value, int> = 0>
    // Some compilers generate link errors when using `const SzType &` here:
    explicit tuple(SzType size = 0) : object(PyTuple_New(ssize_t_cast(size)), stolen_t{}) {
        if (!m_ptr) {
            pybind11_fail("Could not allocate tuple object!");
        }
    }
    size_t size() const { return (size_t) PyTuple_Size(m_ptr); }
    bool empty() const { return size() == 0; }
    detail::tuple_accessor operator[](size_t index) const { return {*this, index}; }
    template <typename T, detail::enable_if_t<detail::is_pyobject<T>::value, int> = 0>
    detail::item_accessor operator[](T &&o) const {
        return object::operator[](std::forward<T>(o));
    }
    detail::tuple_iterator begin() const { return {*this, 0}; }
    detail::tuple_iterator end() const { return {*this, PyTuple_GET_SIZE(m_ptr)}; }
};
// We need to put this into a separate function because the Intel compiler
// fails to compile enable_if_t<all_of<is_keyword_or_ds<Args>...>::value> part below
// (tested with ICC 2021.1 Beta 20200827).
/// True iff every Args type is a keyword argument or a dict/set unpacking marker.
template <typename... Args>
constexpr bool args_are_all_keyword_or_ds() {
    return detail::all_of<detail::is_keyword_or_ds<Args>...>::value;
}
/// Wrapper around a Python ``dict``; converts other objects via raw_dict (dict(op)).
class dict : public object {
public:
    PYBIND11_OBJECT_CVT(dict, object, PyDict_Check, raw_dict)
    dict() : object(PyDict_New(), stolen_t{}) {
        if (!m_ptr) {
            pybind11_fail("Could not allocate dict object!");
        }
    }
    /// Construct from keyword arguments, e.g. dict("a"_a=1, "b"_a=2).
    template <typename... Args,
              typename = detail::enable_if_t<args_are_all_keyword_or_ds<Args...>()>,
              // MSVC workaround: it can't compile an out-of-line definition, so defer the
              // collector
              typename collector = detail::deferred_t<detail::unpacking_collector<>, Args...>>
    explicit dict(Args &&...args) : dict(collector(std::forward<Args>(args)...).kwargs()) {}
    size_t size() const { return (size_t) PyDict_Size(m_ptr); }
    bool empty() const { return size() == 0; }
    detail::dict_iterator begin() const { return {*this, 0}; }
    detail::dict_iterator end() const { return {}; }
    void clear() /* py-non-const */ { PyDict_Clear(ptr()); }
    template <typename T>
    bool contains(T &&key) const {
        auto result = PyDict_Contains(m_ptr, detail::object_or_cast(std::forward<T>(key)).ptr());
        if (result == -1) {
            throw error_already_set();
        }
        return result == 1;
    }
private:
    /// Call the `dict` Python type -- always returns a new reference
    static PyObject *raw_dict(PyObject *op) {
        if (PyDict_Check(op)) {
            return handle(op).inc_ref().ptr();
        }
        return PyObject_CallFunctionObjArgs((PyObject *) &PyDict_Type, op, nullptr);
    }
};
/// Wrapper for any object supporting the Python sequence protocol.
class sequence : public object {
public:
    PYBIND11_OBJECT_DEFAULT(sequence, object, PySequence_Check)
    size_t size() const {
        ssize_t result = PySequence_Size(m_ptr);
        if (result == -1) {
            throw error_already_set();
        }
        return (size_t) result;
    }
    bool empty() const { return size() == 0; }
    detail::sequence_accessor operator[](size_t index) const { return {*this, index}; }
    template <typename T, detail::enable_if_t<detail::is_pyobject<T>::value, int> = 0>
    detail::item_accessor operator[](T &&o) const {
        return object::operator[](std::forward<T>(o));
    }
    detail::sequence_iterator begin() const { return {*this, 0}; }
    detail::sequence_iterator end() const { return {*this, PySequence_Size(m_ptr)}; }
};
/// Wrapper around a Python ``list``; converts sequences via PySequence_List.
class list : public object {
public:
    PYBIND11_OBJECT_CVT(list, object, PyList_Check, PySequence_List)
    template <typename SzType = ssize_t,
              detail::enable_if_t<std::is_integral<SzType>::value, int> = 0>
    // Some compilers generate link errors when using `const SzType &` here:
    explicit list(SzType size = 0) : object(PyList_New(ssize_t_cast(size)), stolen_t{}) {
        if (!m_ptr) {
            pybind11_fail("Could not allocate list object!");
        }
    }
    size_t size() const { return (size_t) PyList_Size(m_ptr); }
    bool empty() const { return size() == 0; }
    detail::list_accessor operator[](size_t index) const { return {*this, index}; }
    template <typename T, detail::enable_if_t<detail::is_pyobject<T>::value, int> = 0>
    detail::item_accessor operator[](T &&o) const {
        return object::operator[](std::forward<T>(o));
    }
    detail::list_iterator begin() const { return {*this, 0}; }
    detail::list_iterator end() const { return {*this, PyList_GET_SIZE(m_ptr)}; }
    /// Append `val` to the list (equivalent to list.append).
    template <typename T>
    void append(T &&val) /* py-non-const */ {
        if (PyList_Append(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr()) != 0) {
            throw error_already_set();
        }
    }
    /// Insert `val` before position `index` (equivalent to list.insert).
    template <typename IdxType,
              typename ValType,
              detail::enable_if_t<std::is_integral<IdxType>::value, int> = 0>
    void insert(const IdxType &index, ValType &&val) /* py-non-const */ {
        if (PyList_Insert(m_ptr,
                          ssize_t_cast(index),
                          detail::object_or_cast(std::forward<ValType>(val)).ptr())
            != 0) {
            throw error_already_set();
        }
    }
};
/// Tuple subtype marking a function's variadic positional arguments (*args).
class args : public tuple {
    PYBIND11_OBJECT_DEFAULT(args, tuple, PyTuple_Check)
};
/// Dict subtype marking a function's variadic keyword arguments (**kwargs).
class kwargs : public dict {
    PYBIND11_OBJECT_DEFAULT(kwargs, dict, PyDict_Check)
};
/// Common base wrapper for Python ``set`` and ``frozenset``.
class anyset : public object {
public:
    PYBIND11_OBJECT(anyset, object, PyAnySet_Check)
    size_t size() const { return static_cast<size_t>(PySet_Size(m_ptr)); }
    bool empty() const { return size() == 0; }
    template <typename T>
    bool contains(T &&val) const {
        auto result = PySet_Contains(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr());
        if (result == -1) {
            throw error_already_set();
        }
        return result == 1;
    }
};
/// Wrapper around a mutable Python ``set``.
class set : public anyset {
public:
    PYBIND11_OBJECT_CVT(set, anyset, PySet_Check, PySet_New)
    set() : anyset(PySet_New(nullptr), stolen_t{}) {
        if (!m_ptr) {
            pybind11_fail("Could not allocate set object!");
        }
    }
    /// Add `val`; returns true on success (no exception is thrown on failure).
    template <typename T>
    bool add(T &&val) /* py-non-const */ {
        return PySet_Add(m_ptr, detail::object_or_cast(std::forward<T>(val)).ptr()) == 0;
    }
    // NOTE(review): PySet_Clear's return value is ignored here — failures pass silently.
    void clear() /* py-non-const */ { PySet_Clear(m_ptr); }
};
/// Wrapper around an immutable Python ``frozenset``.
class frozenset : public anyset {
public:
    PYBIND11_OBJECT_CVT(frozenset, anyset, PyFrozenSet_Check, PyFrozenSet_New)
};
/// Wrapper for any Python callable.
class function : public object {
public:
    PYBIND11_OBJECT_DEFAULT(function, object, PyCallable_Check)
    /// Return the underlying function if it is a pybind11/C function; null handle otherwise.
    handle cpp_function() const {
        handle fun = detail::get_function(m_ptr);
        if (fun && PyCFunction_Check(fun.ptr())) {
            return fun;
        }
        return handle();
    }
    bool is_cpp_function() const { return (bool) cpp_function(); }
};
/// Wrapper around a Python ``staticmethod`` object; wraps callables via PyStaticMethod_New.
class staticmethod : public object {
public:
    PYBIND11_OBJECT_CVT(staticmethod, object, detail::PyStaticMethod_Check, PyStaticMethod_New)
};
/// Wrapper for any object implementing the Python buffer protocol.
class buffer : public object {
public:
    PYBIND11_OBJECT_DEFAULT(buffer, object, PyObject_CheckBuffer)
    /// Request a strided, format-annotated view of the underlying buffer.
    buffer_info request(bool writable = false) const {
        int flags = PyBUF_STRIDES | PyBUF_FORMAT;
        if (writable) {
            flags |= PyBUF_WRITABLE;
        }
        // On success, ownership of the heap-allocated Py_buffer passes to the
        // returned buffer_info (presumably released there — TODO confirm).
        auto *view = new Py_buffer();
        if (PyObject_GetBuffer(m_ptr, view, flags) != 0) {
            delete view;
            throw error_already_set();
        }
        return buffer_info(view);
    }
};
/// Wrapper around a Python ``memoryview``; converts buffer objects via
/// PyMemoryView_FromObject.
class memoryview : public object {
public:
    PYBIND11_OBJECT_CVT(memoryview, object, PyMemoryView_Check, PyMemoryView_FromObject)
    /** \rst
        Creates ``memoryview`` from ``buffer_info``.
        ``buffer_info`` must be created from ``buffer::request()``. Otherwise
        throws an exception.
        For creating a ``memoryview`` from objects that support buffer protocol,
        use ``memoryview(const object& obj)`` instead of this constructor.
    \endrst */
    explicit memoryview(const buffer_info &info) {
        if (!info.view()) {
            pybind11_fail("Prohibited to create memoryview without Py_buffer");
        }
        // Note: PyMemoryView_FromBuffer never increments obj reference.
        m_ptr = (info.view()->obj) ? PyMemoryView_FromObject(info.view()->obj)
                                   : PyMemoryView_FromBuffer(info.view());
        if (!m_ptr) {
            pybind11_fail("Unable to create memoryview from buffer descriptor");
        }
    }
    /** \rst
        Creates ``memoryview`` from static buffer.
        This method is meant for providing a ``memoryview`` for C/C++ buffer not
        managed by Python. The caller is responsible for managing the lifetime
        of ``ptr`` and ``format``, which MUST outlive the memoryview constructed
        here.
        See also: Python C API documentation for `PyMemoryView_FromBuffer`_.
        .. _PyMemoryView_FromBuffer:
            https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromBuffer
        :param ptr: Pointer to the buffer.
        :param itemsize: Byte size of an element.
        :param format: Pointer to the null-terminated format string. For
            homogeneous Buffers, this should be set to
            ``format_descriptor<T>::value``.
        :param shape: Shape of the tensor (1 entry per dimension).
        :param strides: Number of bytes between adjacent entries (for each
            per dimension).
        :param readonly: Flag to indicate if the underlying storage may be
            written to.
    \endrst */
    static memoryview from_buffer(void *ptr,
                                  ssize_t itemsize,
                                  const char *format,
                                  detail::any_container<ssize_t> shape,
                                  detail::any_container<ssize_t> strides,
                                  bool readonly = false);
    /// const overload: forwards to the mutable overload with readonly=true.
    static memoryview from_buffer(const void *ptr,
                                  ssize_t itemsize,
                                  const char *format,
                                  detail::any_container<ssize_t> shape,
                                  detail::any_container<ssize_t> strides) {
        return memoryview::from_buffer(
            const_cast<void *>(ptr), itemsize, format, std::move(shape), std::move(strides), true);
    }
    /// Typed overload: derives itemsize and format from T.
    template <typename T>
    static memoryview from_buffer(T *ptr,
                                  detail::any_container<ssize_t> shape,
                                  detail::any_container<ssize_t> strides,
                                  bool readonly = false) {
        return memoryview::from_buffer(reinterpret_cast<void *>(ptr),
                                       sizeof(T),
                                       format_descriptor<T>::value,
                                       std::move(shape),
                                       std::move(strides),
                                       readonly);
    }
    /// Typed const overload: always read-only.
    template <typename T>
    static memoryview from_buffer(const T *ptr,
                                  detail::any_container<ssize_t> shape,
                                  detail::any_container<ssize_t> strides) {
        return memoryview::from_buffer(
            const_cast<T *>(ptr), std::move(shape), std::move(strides), true);
    }
    /** \rst
        Creates ``memoryview`` from static memory.
        This method is meant for providing a ``memoryview`` for C/C++ buffer not
        managed by Python. The caller is responsible for managing the lifetime
        of ``mem``, which MUST outlive the memoryview constructed here.
        See also: Python C API documentation for `PyMemoryView_FromBuffer`_.
        .. _PyMemoryView_FromMemory:
            https://docs.python.org/c-api/memoryview.html#c.PyMemoryView_FromMemory
    \endrst */
    static memoryview from_memory(void *mem, ssize_t size, bool readonly = false) {
        PyObject *ptr = PyMemoryView_FromMemory(
            reinterpret_cast<char *>(mem), size, (readonly) ? PyBUF_READ : PyBUF_WRITE);
        if (!ptr) {
            pybind11_fail("Could not allocate memoryview object!");
        }
        return memoryview(object(ptr, stolen_t{}));
    }
    static memoryview from_memory(const void *mem, ssize_t size) {
        return memoryview::from_memory(const_cast<void *>(mem), size, true);
    }
#ifdef PYBIND11_HAS_STRING_VIEW
    static memoryview from_memory(std::string_view mem) {
        return from_memory(const_cast<char *>(mem.data()), static_cast<ssize_t>(mem.size()), true);
    }
#endif
};
/// @cond DUPLICATE
// Out-of-line definition: build a memoryview over caller-managed memory by
// filling a stack Py_buffer and handing it to PyMemoryView_FromBuffer. The
// caller owns ptr/format/shape/strides for the lifetime of the result.
inline memoryview memoryview::from_buffer(void *ptr,
                                          ssize_t itemsize,
                                          const char *format,
                                          detail::any_container<ssize_t> shape,
                                          detail::any_container<ssize_t> strides,
                                          bool readonly) {
    const size_t ndim = shape->size();
    if (ndim != strides->size()) {
        pybind11_fail("memoryview: shape length doesn't match strides length");
    }
    // Element count is the product of all extents (0 for a 0-dim descriptor,
    // matching the original implementation).
    ssize_t num_elements = ndim != 0u ? 1 : 0;
    for (size_t dim = 0; dim < ndim; ++dim) {
        num_elements *= (*shape)[dim];
    }
    Py_buffer descr;
    descr.buf = ptr;
    descr.obj = nullptr;
    descr.len = num_elements * itemsize;
    descr.readonly = static_cast<int>(readonly);
    descr.itemsize = itemsize;
    descr.format = const_cast<char *>(format);
    descr.ndim = static_cast<int>(ndim);
    descr.shape = shape->data();
    descr.strides = strides->data();
    descr.suboffsets = nullptr;
    descr.internal = nullptr;
    PyObject *raw = PyMemoryView_FromBuffer(&descr);
    if (!raw) {
        throw error_already_set();
    }
    return memoryview(object(raw, stolen_t{}));
}
/// @endcond
/// @} pytypes
/// \addtogroup python_builtins
/// @{
/// Get the length of a Python object; throws error_already_set if the object
/// does not support len().
inline size_t len(handle h) {
    const ssize_t length = PyObject_Length(h.ptr());
    if (length < 0) {
        throw error_already_set();
    }
    return static_cast<size_t>(length);
}
/// Get the length hint of a Python object.
/// Returns 0 when this cannot be determined.
inline size_t len_hint(handle h) {
    const ssize_t hint = PyObject_LengthHint(h.ptr(), 0);
    if (hint >= 0) {
        return static_cast<size_t>(hint);
    }
    // Some objects (e.g. generators) have no determinable length; swallow the
    // error and report 0 rather than throwing.
    PyErr_Clear();
    return 0;
}
/// Equivalent of Python's built-in ``repr()``; throws on failure.
inline str repr(handle h) {
    auto *result = PyObject_Repr(h.ptr());
    if (result == nullptr) {
        throw error_already_set();
    }
    return reinterpret_steal<str>(result);
}
/// Equivalent of Python's built-in ``iter()``; throws if the object is not iterable.
inline iterator iter(handle obj) {
    auto *it = PyObject_GetIter(obj.ptr());
    if (it == nullptr) {
        throw error_already_set();
    }
    return reinterpret_steal<iterator>(it);
}
/// @} python_builtins
PYBIND11_NAMESPACE_BEGIN(detail)
// --- object_api<D> member definitions -------------------------------------------------------
// Deferred until here because they need the concrete wrapper types declared above
// (iterator, str, type, ...).
// Begin iteration: equivalent to Python iter(self).
template <typename D>
iterator object_api<D>::begin() const {
    return iter(derived());
}
// End of iteration is the sentinel (exhausted) iterator.
template <typename D>
iterator object_api<D>::end() const {
    return iterator::sentinel();
}
template <typename D>
item_accessor object_api<D>::operator[](handle key) const {
    return {derived(), reinterpret_borrow<object>(key)};
}
template <typename D>
item_accessor object_api<D>::operator[](object &&key) const {
    return {derived(), std::move(key)};
}
template <typename D>
item_accessor object_api<D>::operator[](const char *key) const {
    return {derived(), pybind11::str(key)};
}
template <typename D>
obj_attr_accessor object_api<D>::attr(handle key) const {
    return {derived(), reinterpret_borrow<object>(key)};
}
template <typename D>
obj_attr_accessor object_api<D>::attr(object &&key) const {
    return {derived(), std::move(key)};
}
template <typename D>
str_attr_accessor object_api<D>::attr(const char *key) const {
    return {derived(), key};
}
// `*obj` produces an args_proxy used for Python-style argument unpacking.
template <typename D>
args_proxy object_api<D>::operator*() const {
    return args_proxy(derived().ptr());
}
// Membership test via the object's own __contains__.
template <typename D>
template <typename T>
bool object_api<D>::contains(T &&item) const {
    return attr("__contains__")(std::forward<T>(item)).template cast<bool>();
}
template <typename D>
pybind11::str object_api<D>::str() const {
    return pybind11::str(derived());
}
template <typename D>
str_attr_accessor object_api<D>::doc() const {
    return attr("__doc__");
}
template <typename D>
handle object_api<D>::get_type() const {
    return type::handle_of(derived());
}
// Rich comparison (==, <, ...) via PyObject_RichCompareBool; throws on error.
template <typename D>
bool object_api<D>::rich_compare(object_api const &other, int value) const {
    int rv = PyObject_RichCompareBool(derived().ptr(), other.derived().ptr(), value);
    if (rv == -1) {
        throw error_already_set();
    }
    return rv == 1;
}
#define PYBIND11_MATH_OPERATOR_UNARY(op, fn) \
template <typename D> \
object object_api<D>::op() const { \
object result = reinterpret_steal<object>(fn(derived().ptr())); \
if (!result.ptr()) \
throw error_already_set(); \
return result; \
}
#define PYBIND11_MATH_OPERATOR_BINARY(op, fn) \
template <typename D> \
object object_api<D>::op(object_api const &other) const { \
object result = reinterpret_steal<object>(fn(derived().ptr(), other.derived().ptr())); \
if (!result.ptr()) \
throw error_already_set(); \
return result; \
}
#define PYBIND11_MATH_OPERATOR_BINARY_INPLACE(iop, fn) \
template <typename D> \
object object_api<D>::iop(object_api const &other) { \
object result = reinterpret_steal<object>(fn(derived().ptr(), other.derived().ptr())); \
if (!result.ptr()) \
throw error_already_set(); \
return result; \
}
PYBIND11_MATH_OPERATOR_UNARY(operator~, PyNumber_Invert)
PYBIND11_MATH_OPERATOR_UNARY(operator-, PyNumber_Negative)
PYBIND11_MATH_OPERATOR_BINARY(operator+, PyNumber_Add)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator+=, PyNumber_InPlaceAdd)
PYBIND11_MATH_OPERATOR_BINARY(operator-, PyNumber_Subtract)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator-=, PyNumber_InPlaceSubtract)
PYBIND11_MATH_OPERATOR_BINARY(operator*, PyNumber_Multiply)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator*=, PyNumber_InPlaceMultiply)
PYBIND11_MATH_OPERATOR_BINARY(operator/, PyNumber_TrueDivide)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator/=, PyNumber_InPlaceTrueDivide)
PYBIND11_MATH_OPERATOR_BINARY(operator|, PyNumber_Or)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator|=, PyNumber_InPlaceOr)
PYBIND11_MATH_OPERATOR_BINARY(operator&, PyNumber_And)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator&=, PyNumber_InPlaceAnd)
PYBIND11_MATH_OPERATOR_BINARY(operator^, PyNumber_Xor)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator^=, PyNumber_InPlaceXor)
PYBIND11_MATH_OPERATOR_BINARY(operator<<, PyNumber_Lshift)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator<<=, PyNumber_InPlaceLshift)
PYBIND11_MATH_OPERATOR_BINARY(operator>>, PyNumber_Rshift)
PYBIND11_MATH_OPERATOR_BINARY_INPLACE(operator>>=, PyNumber_InPlaceRshift)
#undef PYBIND11_MATH_OPERATOR_UNARY
#undef PYBIND11_MATH_OPERATOR_BINARY
#undef PYBIND11_MATH_OPERATOR_BINARY_INPLACE
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/stl.h | C/C++ Header | /*
pybind11/stl.h: Transparent conversion for STL data types
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "pybind11.h"
#include "detail/common.h"
#include <deque>
#include <list>
#include <map>
#include <ostream>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <valarray>
// See `detail/common.h` for implementation of these guards.
#if defined(PYBIND11_HAS_OPTIONAL)
# include <optional>
#elif defined(PYBIND11_HAS_EXP_OPTIONAL)
# include <experimental/optional>
#endif
#if defined(PYBIND11_HAS_VARIANT)
# include <variant>
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/// Extracts a const lvalue reference or rvalue reference for U based on the type of T (e.g. for
/// forwarding a container element). Typically used indirectly via forward_like(), below.
template <typename T, typename U>
using forwarded_type = conditional_t<std::is_lvalue_reference<T>::value,
                                     remove_reference_t<U> &,
                                     remove_reference_t<U> &&>;
/// Forwards a value U as rvalue or lvalue according to whether T is rvalue or lvalue; typically
/// used for forwarding a container's elements.
template <typename T, typename U>
constexpr forwarded_type<T, U> forward_like(U &&u) {
    return std::forward<detail::forwarded_type<T, U>>(std::forward<U>(u));
}
// Checks if a container has a STL style reserve method.
// This will only return true for a `reserve()` with a `void` return.
template <typename C>
using has_reserve_method = std::is_same<decltype(std::declval<C>().reserve(0)), void>;
/// Type caster for set-like C++ containers (used for std::set / std::unordered_set).
/// `Type` is the concrete container type, `Key` its element type.
template <typename Type, typename Key>
struct set_caster {
    using type = Type;
    using key_conv = make_caster<Key>;

private:
    // Pre-allocate when the container exposes `void reserve()`; the void* overload
    // is a no-op fallback selected when it does not.
    template <typename T = Type, enable_if_t<has_reserve_method<T>::value, int> = 0>
    void reserve_maybe(const anyset &s, Type *) {
        value.reserve(s.size());
    }
    void reserve_maybe(const anyset &, void *) {}

public:
    // Python set/frozenset -> C++ container. Returns false (no exception) when the
    // source is not a set or an element fails to convert.
    bool load(handle src, bool convert) {
        if (!isinstance<anyset>(src)) {
            return false;
        }
        auto s = reinterpret_borrow<anyset>(src);
        value.clear();
        reserve_maybe(s, &value);
        for (auto entry : s) {
            key_conv conv;
            if (!conv.load(entry, convert)) {
                return false;
            }
            value.insert(cast_op<Key &&>(std::move(conv)));
        }
        return true;
    }

    // C++ container -> new Python set. Returns a null handle when an element cast
    // or the set insertion fails.
    template <typename T>
    static handle cast(T &&src, return_value_policy policy, handle parent) {
        if (!std::is_lvalue_reference<T>::value) {
            policy = return_value_policy_override<Key>::policy(policy);
        }
        pybind11::set s;
        for (auto &&value : src) {
            auto value_ = reinterpret_steal<object>(
                key_conv::cast(detail::forward_like<T>(value), policy, parent));
            if (!value_ || !s.add(std::move(value_))) {
                return handle();
            }
        }
        return s.release();
    }

    PYBIND11_TYPE_CASTER(type, const_name("Set[") + key_conv::name + const_name("]"));
};
/// Type caster for map-like C++ containers (used for std::map / std::unordered_map).
template <typename Type, typename Key, typename Value>
struct map_caster {
    using key_conv = make_caster<Key>;
    using value_conv = make_caster<Value>;

private:
    // Pre-allocate when the container exposes `void reserve()`; no-op otherwise.
    template <typename T = Type, enable_if_t<has_reserve_method<T>::value, int> = 0>
    void reserve_maybe(const dict &d, Type *) {
        value.reserve(d.size());
    }
    void reserve_maybe(const dict &, void *) {}

public:
    // Python dict -> C++ map. Returns false when the source is not a dict or a
    // key/value fails to convert.
    bool load(handle src, bool convert) {
        if (!isinstance<dict>(src)) {
            return false;
        }
        auto d = reinterpret_borrow<dict>(src);
        value.clear();
        reserve_maybe(d, &value);
        for (auto it : d) {
            key_conv kconv;
            value_conv vconv;
            if (!kconv.load(it.first.ptr(), convert) || !vconv.load(it.second.ptr(), convert)) {
                return false;
            }
            value.emplace(cast_op<Key &&>(std::move(kconv)), cast_op<Value &&>(std::move(vconv)));
        }
        return true;
    }

    // C++ map -> new Python dict. Key and value return policies are overridden
    // independently when casting from an rvalue.
    template <typename T>
    static handle cast(T &&src, return_value_policy policy, handle parent) {
        dict d;
        return_value_policy policy_key = policy;
        return_value_policy policy_value = policy;
        if (!std::is_lvalue_reference<T>::value) {
            policy_key = return_value_policy_override<Key>::policy(policy_key);
            policy_value = return_value_policy_override<Value>::policy(policy_value);
        }
        for (auto &&kv : src) {
            auto key = reinterpret_steal<object>(
                key_conv::cast(detail::forward_like<T>(kv.first), policy_key, parent));
            auto value = reinterpret_steal<object>(
                value_conv::cast(detail::forward_like<T>(kv.second), policy_value, parent));
            if (!key || !value) {
                return handle();
            }
            d[std::move(key)] = std::move(value);
        }
        return d.release();
    }

    PYBIND11_TYPE_CASTER(Type,
                         const_name("Dict[") + key_conv::name + const_name(", ") + value_conv::name
                             + const_name("]"));
};
/// Type caster for list-like C++ containers (std::vector / std::deque / std::list).
template <typename Type, typename Value>
struct list_caster {
    using value_conv = make_caster<Value>;

    // Python sequence -> C++ container. str/bytes are sequences but are explicitly
    // rejected so a string is not silently split into characters.
    bool load(handle src, bool convert) {
        if (!isinstance<sequence>(src) || isinstance<bytes>(src) || isinstance<str>(src)) {
            return false;
        }
        auto s = reinterpret_borrow<sequence>(src);
        value.clear();
        reserve_maybe(s, &value);
        for (auto it : s) {
            value_conv conv;
            if (!conv.load(it, convert)) {
                return false;
            }
            value.push_back(cast_op<Value &&>(std::move(conv)));
        }
        return true;
    }

private:
    // Pre-allocate when the container exposes `void reserve()`; no-op otherwise.
    template <typename T = Type, enable_if_t<has_reserve_method<T>::value, int> = 0>
    void reserve_maybe(const sequence &s, Type *) {
        value.reserve(s.size());
    }
    void reserve_maybe(const sequence &, void *) {}

public:
    // C++ container -> new Python list, filled slot-by-slot via PyList_SET_ITEM.
    template <typename T>
    static handle cast(T &&src, return_value_policy policy, handle parent) {
        if (!std::is_lvalue_reference<T>::value) {
            policy = return_value_policy_override<Value>::policy(policy);
        }
        list l(src.size());
        ssize_t index = 0;
        for (auto &&value : src) {
            auto value_ = reinterpret_steal<object>(
                value_conv::cast(detail::forward_like<T>(value), policy, parent));
            if (!value_) {
                return handle();
            }
            PyList_SET_ITEM(l.ptr(), index++, value_.release().ptr()); // steals a reference
        }
        return l.release();
    }

    PYBIND11_TYPE_CASTER(Type, const_name("List[") + value_conv::name + const_name("]"));
};

// Hook the list caster up for the three standard sequence containers.
template <typename Type, typename Alloc>
struct type_caster<std::vector<Type, Alloc>> : list_caster<std::vector<Type, Alloc>, Type> {};
template <typename Type, typename Alloc>
struct type_caster<std::deque<Type, Alloc>> : list_caster<std::deque<Type, Alloc>, Type> {};
template <typename Type, typename Alloc>
struct type_caster<std::list<Type, Alloc>> : list_caster<std::list<Type, Alloc>, Type> {};
/// Type caster for fixed- or variable-size array-like containers
/// (std::array with Resizable=false, std::valarray with Resizable=true).
template <typename ArrayType, typename Value, bool Resizable, size_t Size = 0>
struct array_caster {
    using value_conv = make_caster<Value>;

private:
    // Resizable: grow/shrink to the incoming length and always succeed.
    template <bool R = Resizable>
    bool require_size(enable_if_t<R, size_t> size) {
        if (value.size() != size) {
            value.resize(size);
        }
        return true;
    }
    // Fixed-size: only accept an exact length match.
    template <bool R = Resizable>
    bool require_size(enable_if_t<!R, size_t> size) {
        return size == Size;
    }

public:
    // Python sequence -> C++ array. Fails (returns false) on a size mismatch for
    // fixed-size arrays or on any element conversion failure.
    bool load(handle src, bool convert) {
        if (!isinstance<sequence>(src)) {
            return false;
        }
        auto l = reinterpret_borrow<sequence>(src);
        if (!require_size(l.size())) {
            return false;
        }
        size_t ctr = 0;
        for (auto it : l) {
            value_conv conv;
            if (!conv.load(it, convert)) {
                return false;
            }
            value[ctr++] = cast_op<Value &&>(std::move(conv));
        }
        return true;
    }

    // C++ array -> new Python list.
    template <typename T>
    static handle cast(T &&src, return_value_policy policy, handle parent) {
        list l(src.size());
        ssize_t index = 0;
        for (auto &&value : src) {
            auto value_ = reinterpret_steal<object>(
                value_conv::cast(detail::forward_like<T>(value), policy, parent));
            if (!value_) {
                return handle();
            }
            PyList_SET_ITEM(l.ptr(), index++, value_.release().ptr()); // steals a reference
        }
        return l.release();
    }

    // Signature renders as List[V] when resizable, Annotated[List[V], FixedSize(N)] otherwise.
    PYBIND11_TYPE_CASTER(ArrayType,
                         const_name<Resizable>(const_name(""), const_name("Annotated["))
                             + const_name("List[") + value_conv::name + const_name("]")
                             + const_name<Resizable>(const_name(""),
                                                     const_name(", FixedSize(")
                                                         + const_name<Size>() + const_name(")]")));
};

// Hook the array/set/map casters up for the standard containers.
template <typename Type, size_t Size>
struct type_caster<std::array<Type, Size>>
    : array_caster<std::array<Type, Size>, Type, false, Size> {};
template <typename Type>
struct type_caster<std::valarray<Type>> : array_caster<std::valarray<Type>, Type, true> {};
template <typename Key, typename Compare, typename Alloc>
struct type_caster<std::set<Key, Compare, Alloc>>
    : set_caster<std::set<Key, Compare, Alloc>, Key> {};
template <typename Key, typename Hash, typename Equal, typename Alloc>
struct type_caster<std::unordered_set<Key, Hash, Equal, Alloc>>
    : set_caster<std::unordered_set<Key, Hash, Equal, Alloc>, Key> {};
template <typename Key, typename Value, typename Compare, typename Alloc>
struct type_caster<std::map<Key, Value, Compare, Alloc>>
    : map_caster<std::map<Key, Value, Compare, Alloc>, Key, Value> {};
template <typename Key, typename Value, typename Hash, typename Equal, typename Alloc>
struct type_caster<std::unordered_map<Key, Value, Hash, Equal, Alloc>>
    : map_caster<std::unordered_map<Key, Value, Hash, Equal, Alloc>, Key, Value> {};
// This type caster is intended to be used for std::optional and std::experimental::optional
template <typename Type, typename Value = typename Type::value_type>
struct optional_caster {
    using value_conv = make_caster<Value>;

    // Empty optional -> Python None; otherwise cast the contained value.
    template <typename T>
    static handle cast(T &&src, return_value_policy policy, handle parent) {
        if (!src) {
            return none().release();
        }
        if (!std::is_lvalue_reference<T>::value) {
            policy = return_value_policy_override<Value>::policy(policy);
        }
        // NOLINTNEXTLINE(bugprone-unchecked-optional-access)
        return value_conv::cast(*std::forward<T>(src), policy, parent);
    }

    // Python None -> empty optional; any other object is loaded into the value slot.
    bool load(handle src, bool convert) {
        if (!src) {
            return false;
        }
        if (src.is_none()) {
            return true; // default-constructed value is already empty
        }
        value_conv inner_caster;
        if (!inner_caster.load(src, convert)) {
            return false;
        }
        value.emplace(cast_op<Value &&>(std::move(inner_caster)));
        return true;
    }

    PYBIND11_TYPE_CASTER(Type, const_name("Optional[") + value_conv::name + const_name("]"));
};

// Hook up for std::optional / std::experimental::optional when available
// (guards defined in detail/common.h).
#if defined(PYBIND11_HAS_OPTIONAL)
template <typename T>
struct type_caster<std::optional<T>> : public optional_caster<std::optional<T>> {};
template <>
struct type_caster<std::nullopt_t> : public void_caster<std::nullopt_t> {};
#endif

#if defined(PYBIND11_HAS_EXP_OPTIONAL)
template <typename T>
struct type_caster<std::experimental::optional<T>>
    : public optional_caster<std::experimental::optional<T>> {};
template <>
struct type_caster<std::experimental::nullopt_t>
    : public void_caster<std::experimental::nullopt_t> {};
#endif
/// Visit a variant and cast any found type to Python
struct variant_caster_visitor {
    return_value_policy policy;
    handle parent;

    using result_type = handle; // required by boost::variant in C++11

    template <typename T>
    result_type operator()(T &&src) const {
        return make_caster<T>::cast(std::forward<T>(src), policy, parent);
    }
};

/// Helper class which abstracts away variant's `visit` function. `std::variant` and similar
/// `namespace::variant` types which provide a `namespace::visit()` function are handled here
/// automatically using argument-dependent lookup. Users can provide specializations for other
/// variant-like classes, e.g. `boost::variant` and `boost::apply_visitor`.
template <template <typename...> class Variant>
struct visit_helper {
    template <typename... Args>
    static auto call(Args &&...args) -> decltype(visit(std::forward<Args>(args)...)) {
        return visit(std::forward<Args>(args)...);
    }
};

/// Generic variant caster
template <typename Variant>
struct variant_caster;

template <template <typename...> class V, typename... Ts>
struct variant_caster<V<Ts...>> {
    static_assert(sizeof...(Ts) > 0, "Variant must consist of at least one alternative.");

    // Try each alternative in declaration order; the first successful load wins.
    template <typename U, typename... Us>
    bool load_alternative(handle src, bool convert, type_list<U, Us...>) {
        auto caster = make_caster<U>();
        if (caster.load(src, convert)) {
            value = cast_op<U>(std::move(caster));
            return true;
        }
        return load_alternative(src, convert, type_list<Us...>{});
    }

    // Recursion terminator: no alternative matched.
    bool load_alternative(handle, bool, type_list<>) { return false; }

    bool load(handle src, bool convert) {
        // Do a first pass without conversions to improve constructor resolution.
        // E.g. `py::int_(1).cast<variant<double, int>>()` needs to fill the `int`
        // slot of the variant. Without two-pass loading `double` would be filled
        // because it appears first and a conversion is possible.
        if (convert && load_alternative(src, false, type_list<Ts...>{})) {
            return true;
        }
        return load_alternative(src, convert, type_list<Ts...>{});
    }

    // Cast whichever alternative is currently held, via visit_helper.
    template <typename Variant>
    static handle cast(Variant &&src, return_value_policy policy, handle parent) {
        return visit_helper<V>::call(variant_caster_visitor{policy, parent},
                                     std::forward<Variant>(src));
    }

    using Type = V<Ts...>;
    PYBIND11_TYPE_CASTER(Type,
                         const_name("Union[") + detail::concat(make_caster<Ts>::name...)
                             + const_name("]"));
};

#if defined(PYBIND11_HAS_VARIANT)
template <typename... Ts>
struct type_caster<std::variant<Ts...>> : variant_caster<std::variant<Ts...>> {};
template <>
struct type_caster<std::monostate> : public void_caster<std::monostate> {};
#endif
PYBIND11_NAMESPACE_END(detail)
/// Stream-insert any Python handle by writing its str() representation.
inline std::ostream &operator<<(std::ostream &os, const handle &obj) {
#ifdef PYBIND11_HAS_STRING_VIEW
    // Avoids an extra std::string copy when string_view is available.
    os << str(obj).cast<std::string_view>();
#else
    os << (std::string) str(obj);
#endif
    return os;
}
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/stl/filesystem.h | C/C++ Header | // Copyright (c) 2021 The Pybind Development Team.
// All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#pragma once
#include "../pybind11.h"
#include "../detail/common.h"
#include "../detail/descr.h"
#include "../cast.h"
#include "../pytypes.h"
#include <string>
#ifdef __has_include
# if defined(PYBIND11_CPP17)
# if __has_include(<filesystem>) && \
PY_VERSION_HEX >= 0x03060000
# include <filesystem>
# define PYBIND11_HAS_FILESYSTEM 1
# elif __has_include(<experimental/filesystem>)
# include <experimental/filesystem>
# define PYBIND11_HAS_EXPERIMENTAL_FILESYSTEM 1
# endif
# endif
#endif
#if !defined(PYBIND11_HAS_FILESYSTEM) && !defined(PYBIND11_HAS_EXPERIMENTAL_FILESYSTEM) \
&& !defined(PYBIND11_HAS_FILESYSTEM_IS_OPTIONAL)
# error \
"Neither #include <filesystem> nor #include <experimental/filesystem is available. (Use -DPYBIND11_HAS_FILESYSTEM_IS_OPTIONAL to ignore.)"
#endif
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
#if defined(PYBIND11_HAS_FILESYSTEM) || defined(PYBIND11_HAS_EXPERIMENTAL_FILESYSTEM)
/// Type caster between C++ filesystem::path and Python os.PathLike / pathlib.Path.
/// T is std::filesystem::path or std::experimental::filesystem::path.
template <typename T>
struct path_caster {

private:
    // Decode a native narrow path string using Python's filesystem encoding.
    static PyObject *unicode_from_fs_native(const std::string &w) {
#    if !defined(PYPY_VERSION)
        return PyUnicode_DecodeFSDefaultAndSize(w.c_str(), ssize_t(w.size()));
#    else
        // PyPy mistakenly declares the first parameter as non-const.
        return PyUnicode_DecodeFSDefaultAndSize(const_cast<char *>(w.c_str()), ssize_t(w.size()));
#    endif
    }

    // Decode a native wide path string (Windows).
    static PyObject *unicode_from_fs_native(const std::wstring &w) {
        return PyUnicode_FromWideChar(w.c_str(), ssize_t(w.size()));
    }

public:
    // C++ path -> pathlib.Path; returns a null handle if decoding fails.
    static handle cast(const T &path, return_value_policy, handle) {
        if (auto py_str = unicode_from_fs_native(path.native())) {
            return module_::import("pathlib")
                .attr("Path")(reinterpret_steal<object>(py_str))
                .release();
        }
        return nullptr;
    }

    // Python os.PathLike -> C++ path; returns false (error cleared) on failure.
    bool load(handle handle, bool) {
        // PyUnicode_FSConverter and PyUnicode_FSDecoder normally take care of
        // calling PyOS_FSPath themselves, but that's broken on PyPy (PyPy
        // issue #3168) so we do it ourselves instead.
        PyObject *buf = PyOS_FSPath(handle.ptr());
        if (!buf) {
            PyErr_Clear();
            return false;
        }
        PyObject *native = nullptr;
        if constexpr (std::is_same_v<typename T::value_type, char>) {
            if (PyUnicode_FSConverter(buf, &native) != 0) {
                if (auto *c_str = PyBytes_AsString(native)) {
                    // AsString returns a pointer to the internal buffer, which
                    // must not be free'd.
                    value = c_str;
                }
            }
        } else if constexpr (std::is_same_v<typename T::value_type, wchar_t>) {
            if (PyUnicode_FSDecoder(buf, &native) != 0) {
                if (auto *c_str = PyUnicode_AsWideCharString(native, nullptr)) {
                    // AsWideCharString returns a new string that must be free'd.
                    value = c_str; // Copies the string.
                    PyMem_Free(c_str);
                }
            }
        }
        Py_XDECREF(native);
        Py_DECREF(buf);
        if (PyErr_Occurred()) {
            PyErr_Clear();
            return false;
        }
        return true;
    }

    PYBIND11_TYPE_CASTER(T, const_name("os.PathLike"));
};
#endif // PYBIND11_HAS_FILESYSTEM || defined(PYBIND11_HAS_EXPERIMENTAL_FILESYSTEM)

// Register whichever filesystem implementation is available.
#if defined(PYBIND11_HAS_FILESYSTEM)
template <>
struct type_caster<std::filesystem::path> : public path_caster<std::filesystem::path> {};
#elif defined(PYBIND11_HAS_EXPERIMENTAL_FILESYSTEM)
template <>
struct type_caster<std::experimental::filesystem::path>
    : public path_caster<std::experimental::filesystem::path> {};
#endif
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/stl_bind.h | C/C++ Header | /*
pybind11/std_bind.h: Binding generators for STL data types
Copyright (c) 2016 Sergey Lyskov and Wenzel Jakob
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#pragma once
#include "detail/common.h"
#include "detail/type_caster_base.h"
#include "cast.h"
#include "operators.h"
#include <algorithm>
#include <sstream>
#include <type_traits>
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
/* SFINAE helper class used by `is_comparable` below */
template <typename T>
struct container_traits {
    // Overload resolution probes: the pointer-taking overloads are viable only
    // when the probed expression/type exists, otherwise the `...` fallback wins.
    template <typename T2>
    static std::true_type
    test_comparable(decltype(std::declval<const T2 &>() == std::declval<const T2 &>()) *);
    template <typename T2>
    static std::false_type test_comparable(...);
    template <typename T2>
    static std::true_type test_value(typename T2::value_type *);
    template <typename T2>
    static std::false_type test_value(...);
    template <typename T2>
    static std::true_type test_pair(typename T2::first_type *, typename T2::second_type *);
    template <typename T2>
    static std::false_type test_pair(...);

    static constexpr const bool is_comparable
        = std::is_same<std::true_type, decltype(test_comparable<T>(nullptr))>::value;
    static constexpr const bool is_pair
        = std::is_same<std::true_type, decltype(test_pair<T>(nullptr, nullptr))>::value;
    static constexpr const bool is_vector
        = std::is_same<std::true_type, decltype(test_value<T>(nullptr))>::value;
    static constexpr const bool is_element = !is_pair && !is_vector;
};

/* Default: is_comparable -> std::false_type */
template <typename T, typename SFINAE = void>
struct is_comparable : std::false_type {};

/* For non-map data structures, check whether operator== can be instantiated */
template <typename T>
struct is_comparable<
    T,
    enable_if_t<container_traits<T>::is_element && container_traits<T>::is_comparable>>
    : std::true_type {};

/* For a vector/map data structure, recursively check the value type
   (which is std::pair for maps) */
template <typename T>
struct is_comparable<T, enable_if_t<container_traits<T>::is_vector>>
    : is_comparable<typename recursive_container_traits<T>::type_to_check_recursively> {};

// recursive_bottom (declared elsewhere) terminates the recursion as comparable.
template <>
struct is_comparable<recursive_bottom> : std::true_type {};

/* For pairs, recursively check the two data types */
template <typename T>
struct is_comparable<T, enable_if_t<container_traits<T>::is_pair>> {
    static constexpr const bool value = is_comparable<typename T::first_type>::value
                                        && is_comparable<typename T::second_type>::value;
};
/* Fallback functions: selected by overload resolution when the SFINAE-enabled
   overloads below (copy-constructible / comparable element types) do not apply;
   they deliberately bind nothing on the class. */
template <typename, typename, typename... Args>
void vector_if_copy_constructible(const Args &...) {}
template <typename, typename, typename... Args>
void vector_if_equal_operator(const Args &...) {}
template <typename, typename, typename... Args>
void vector_if_insertion_operator(const Args &...) {}
template <typename, typename, typename... Args>
void vector_modifiers(const Args &...) {}

// Bind a copy constructor when the vector type is copy-constructible.
template <typename Vector, typename Class_>
void vector_if_copy_constructible(enable_if_t<is_copy_constructible<Vector>::value, Class_> &cl) {
    cl.def(init<const Vector &>(), "Copy constructor");
}

// Bind equality-dependent methods (==, !=, count, remove, __contains__) when the
// element type supports operator==.
template <typename Vector, typename Class_>
void vector_if_equal_operator(enable_if_t<is_comparable<Vector>::value, Class_> &cl) {
    using T = typename Vector::value_type;
    cl.def(self == self);
    cl.def(self != self);
    cl.def(
        "count",
        [](const Vector &v, const T &x) { return std::count(v.begin(), v.end(), x); },
        arg("x"),
        "Return the number of times ``x`` appears in the list");
    cl.def(
        "remove",
        [](Vector &v, const T &x) {
            auto p = std::find(v.begin(), v.end(), x);
            if (p != v.end()) {
                v.erase(p);
            } else {
                // Matches Python list.remove(): missing value raises ValueError.
                throw value_error();
            }
        },
        arg("x"),
        "Remove the first item from the list whose value is x. "
        "It is an error if there is no such item.");
    cl.def(
        "__contains__",
        [](const Vector &v, const T &x) { return std::find(v.begin(), v.end(), x) != v.end(); },
        arg("x"),
        "Return true the container contains ``x``");
}
// Vector modifiers -- requires a copyable vector_type:
// (Technically, some of these (pop and __delitem__) don't actually require copyability, but it
// seems silly to allow deletion but not insertion, so include them here too.)
template <typename Vector, typename Class_>
void vector_modifiers(
    enable_if_t<is_copy_constructible<typename Vector::value_type>::value, Class_> &cl) {
    using T = typename Vector::value_type;
    using SizeType = typename Vector::size_type;
    using DiffType = typename Vector::difference_type;

    // Normalize a (possibly negative) Python-style index into [0, n);
    // throws IndexError when out of range.
    auto wrap_i = [](DiffType i, SizeType n) {
        if (i < 0) {
            i += n;
        }
        if (i < 0 || (SizeType) i >= n) {
            throw index_error();
        }
        return i;
    };
    cl.def(
        "append",
        [](Vector &v, const T &value) { v.push_back(value); },
        arg("x"),
        "Add an item to the end of the list");
    // Construct from an arbitrary Python iterable.
    cl.def(init([](const iterable &it) {
        auto v = std::unique_ptr<Vector>(new Vector());
        v->reserve(len_hint(it));
        for (handle h : it) {
            v->push_back(h.cast<T>());
        }
        return v.release();
    }));
    cl.def(
        "clear", [](Vector &v) { v.clear(); }, "Clear the contents");
    cl.def(
        "extend",
        [](Vector &v, const Vector &src) { v.insert(v.end(), src.begin(), src.end()); },
        arg("L"),
        "Extend the list by appending all the items in the given list");
    cl.def(
        "extend",
        [](Vector &v, const iterable &it) {
            const size_t old_size = v.size();
            v.reserve(old_size + len_hint(it));
            try {
                for (handle h : it) {
                    v.push_back(h.cast<T>());
                }
            } catch (const cast_error &) {
                // Strong-ish guarantee: roll back the partially appended elements
                // before re-raising the conversion error.
                v.erase(v.begin() + static_cast<typename Vector::difference_type>(old_size),
                        v.end());
                try {
                    v.shrink_to_fit();
                } catch (const std::exception &) {
                    // Do nothing
                }
                throw;
            }
        },
        arg("L"),
        "Extend the list by appending all the items in the given list");
    cl.def(
        "insert",
        [](Vector &v, DiffType i, const T &x) {
            // Can't use wrap_i; i == v.size() is OK
            if (i < 0) {
                i += v.size();
            }
            if (i < 0 || (SizeType) i > v.size()) {
                throw index_error();
            }
            v.insert(v.begin() + i, x);
        },
        arg("i"),
        arg("x"),
        "Insert an item at a given position.");
    cl.def(
        "pop",
        [](Vector &v) {
            if (v.empty()) {
                throw index_error();
            }
            T t = std::move(v.back());
            v.pop_back();
            return t;
        },
        "Remove and return the last item");
    cl.def(
        "pop",
        [wrap_i](Vector &v, DiffType i) {
            i = wrap_i(i, v.size());
            T t = std::move(v[(SizeType) i]);
            v.erase(std::next(v.begin(), i));
            return t;
        },
        arg("i"),
        "Remove and return the item at index ``i``");
    cl.def("__setitem__", [wrap_i](Vector &v, DiffType i, const T &t) {
        i = wrap_i(i, v.size());
        v[(SizeType) i] = t;
    });
    /// Slicing protocol
    cl.def(
        "__getitem__",
        [](const Vector &v, const slice &slice) -> Vector * {
            size_t start = 0, stop = 0, step = 0, slicelength = 0;
            if (!slice.compute(v.size(), &start, &stop, &step, &slicelength)) {
                throw error_already_set();
            }
            auto *seq = new Vector();
            seq->reserve((size_t) slicelength);
            for (size_t i = 0; i < slicelength; ++i) {
                seq->push_back(v[start]);
                start += step;
            }
            return seq;
        },
        arg("s"),
        "Retrieve list elements using a slice object");
    cl.def(
        "__setitem__",
        [](Vector &v, const slice &slice, const Vector &value) {
            size_t start = 0, stop = 0, step = 0, slicelength = 0;
            if (!slice.compute(v.size(), &start, &stop, &step, &slicelength)) {
                throw error_already_set();
            }
            if (slicelength != value.size()) {
                throw std::runtime_error(
                    "Left and right hand size of slice assignment have different sizes!");
            }
            for (size_t i = 0; i < slicelength; ++i) {
                v[start] = value[i];
                start += step;
            }
        },
        "Assign list elements using a slice object");
    cl.def(
        "__delitem__",
        [wrap_i](Vector &v, DiffType i) {
            i = wrap_i(i, v.size());
            v.erase(v.begin() + i);
        },
        "Delete the list elements at index ``i``");
    cl.def(
        "__delitem__",
        [](Vector &v, const slice &slice) {
            size_t start = 0, stop = 0, step = 0, slicelength = 0;
            if (!slice.compute(v.size(), &start, &stop, &step, &slicelength)) {
                throw error_already_set();
            }
            // NOTE(review): the `&& false` makes the O(n) contiguous range-erase
            // branch dead code; the generic loop below also handles step == 1
            // (start += step - 1 advances by 0) but erases element-by-element
            // (O(n^2) for a contiguous slice). This matches upstream pybind11 --
            // confirm upstream intent before enabling the fast path.
            if (step == 1 && false) {
                v.erase(v.begin() + (DiffType) start, v.begin() + DiffType(start + slicelength));
            } else {
                for (size_t i = 0; i < slicelength; ++i) {
                    v.erase(v.begin() + DiffType(start));
                    start += step - 1;
                }
            }
        },
        "Delete list elements using a slice object");
}
// If the type has an operator[] that doesn't return a reference (most notably std::vector<bool>),
// we have to access by copying; otherwise we return by reference.
template <typename Vector>
using vector_needs_copy
    = negation<std::is_same<decltype(std::declval<Vector>()[typename Vector::size_type()]),
                            typename Vector::value_type &>>;

// The usual case: access and iterate by reference
template <typename Vector, typename Class_>
void vector_accessor(enable_if_t<!vector_needs_copy<Vector>::value, Class_> &cl) {
    using T = typename Vector::value_type;
    using SizeType = typename Vector::size_type;
    using DiffType = typename Vector::difference_type;
    using ItType = typename Vector::iterator;

    // Normalize a (possibly negative) Python-style index into [0, n);
    // throws IndexError when out of range.
    auto wrap_i = [](DiffType i, SizeType n) {
        if (i < 0) {
            i += n;
        }
        if (i < 0 || (SizeType) i >= n) {
            throw index_error();
        }
        return i;
    };
    cl.def(
        "__getitem__",
        [wrap_i](Vector &v, DiffType i) -> T & {
            i = wrap_i(i, v.size());
            return v[(SizeType) i];
        },
        return_value_policy::reference_internal // ref + keepalive
    );
    cl.def(
        "__iter__",
        [](Vector &v) {
            return make_iterator<return_value_policy::reference_internal, ItType, ItType, T &>(
                v.begin(), v.end());
        },
        keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */
    );
}

// The case for special objects, like std::vector<bool>, that have to be returned-by-copy:
template <typename Vector, typename Class_>
void vector_accessor(enable_if_t<vector_needs_copy<Vector>::value, Class_> &cl) {
    using T = typename Vector::value_type;
    using SizeType = typename Vector::size_type;
    using DiffType = typename Vector::difference_type;
    using ItType = typename Vector::iterator;
    cl.def("__getitem__", [](const Vector &v, DiffType i) -> T {
        // Inline index normalization (no wrap_i here): negative indices count
        // from the end; anything still out of range raises IndexError.
        if (i < 0) {
            i += v.size();
            if (i < 0) {
                throw index_error();
            }
        }
        auto i_st = static_cast<SizeType>(i);
        if (i_st >= v.size()) {
            throw index_error();
        }
        return v[i_st];
    });
    cl.def(
        "__iter__",
        [](Vector &v) {
            return make_iterator<return_value_policy::copy, ItType, ItType, T>(v.begin(), v.end());
        },
        keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */
    );
}
// Bind __repr__ when the element type is stream-insertable (SFINAE on the
// decltype in the trailing return type).
template <typename Vector, typename Class_>
auto vector_if_insertion_operator(Class_ &cl, std::string const &name)
    -> decltype(std::declval<std::ostream &>() << std::declval<typename Vector::value_type>(),
                void()) {
    using size_type = typename Vector::size_type;
    cl.def(
        "__repr__",
        [name](Vector &v) {
            std::ostringstream s;
            s << name << '[';
            for (size_type i = 0; i < v.size(); ++i) {
                s << v[i];
                if (i != v.size() - 1) {
                    s << ", ";
                }
            }
            s << ']';
            return s.str();
        },
        "Return the canonical string representation of this list.");
}

// Provide the buffer interface for vectors if we have data() and we have a format for it
// GCC seems to have "void std::vector<bool>::data()" - doing SFINAE on the existence of data()
// is insufficient, we need to check it returns an appropriate pointer
template <typename Vector, typename = void>
struct vector_has_data_and_format : std::false_type {};
template <typename Vector>
struct vector_has_data_and_format<
    Vector,
    enable_if_t<std::is_same<decltype(format_descriptor<typename Vector::value_type>::format(),
                                      std::declval<Vector>().data()),
                             typename Vector::value_type *>::value>> : std::true_type {};

// [workaround(intel)] Separate function required here
// Workaround as the Intel compiler does not compile the enable_if_t part below
// (tested with icc (ICC) 2021.1 Beta 20200827)
template <typename... Args>
constexpr bool args_any_are_buffer() {
    // True when any of Args is buffer_protocol.
    return detail::any_of<std::is_same<Args, buffer_protocol>...>::value;
}
// [workaround(intel)] Separate function required here
// [workaround(msvc)] Can't use constexpr bool in return type
// Add the buffer interface to a vector
// Active (std::true_type) variant: registers def_buffer plus an init from any
// compatible 1-D Python buffer (numpy array, memoryview, ...).
template <typename Vector, typename Class_, typename... Args>
void vector_buffer_impl(Class_ &cl, std::true_type) {
    using T = typename Vector::value_type;
    static_assert(vector_has_data_and_format<Vector>::value,
                  "There is not an appropriate format descriptor for this vector");
    // numpy.h declares this for arbitrary types, but it may raise an exception and crash hard
    // at runtime if PYBIND11_NUMPY_DTYPE hasn't been called, so check here
    format_descriptor<T>::format();
    // Expose the vector's contiguous storage as a 1-D buffer.
    cl.def_buffer([](Vector &v) -> buffer_info {
        return buffer_info(v.data(),
                           static_cast<ssize_t>(sizeof(T)),
                           format_descriptor<T>::format(),
                           1,
                           {v.size()},
                           {sizeof(T)});
    });
    // Construct a vector by copying from a 1-D buffer with a matching format.
    cl.def(init([](const buffer &buf) {
        auto info = buf.request();
        // Stride must be a whole multiple of the element size.
        if (info.ndim != 1 || info.strides[0] % static_cast<ssize_t>(sizeof(T))) {
            throw type_error("Only valid 1D buffers can be copied to a vector");
        }
        if (!detail::compare_buffer_info<T>::compare(info)
            || (ssize_t) sizeof(T) != info.itemsize) {
            throw type_error("Format mismatch (Python: " + info.format
                             + " C++: " + format_descriptor<T>::format() + ")");
        }
        T *p = static_cast<T *>(info.ptr);
        ssize_t step = info.strides[0] / static_cast<ssize_t>(sizeof(T));
        T *end = p + info.shape[0] * step;
        // Contiguous source: construct directly from the pointer range.
        if (step == 1) {
            return Vector(p, end);
        }
        // Strided source: copy element by element.
        Vector vec;
        vec.reserve((size_t) info.shape[0]);
        for (; p != end; p += step) {
            vec.push_back(*p);
        }
        return vec;
    }));
    return;
}
// No-op (std::false_type) variant: buffer_protocol was not requested.
template <typename Vector, typename Class_, typename... Args>
void vector_buffer_impl(Class_ &, std::false_type) {}
// Dispatch to the active/no-op impl based on whether buffer_protocol appears
// among the extra bind_vector() arguments.
template <typename Vector, typename Class_, typename... Args>
void vector_buffer(Class_ &cl) {
    vector_buffer_impl<Vector, Class_, Args...>(
        cl, detail::any_of<std::is_same<Args, buffer_protocol>...>{});
}
PYBIND11_NAMESPACE_END(detail)
//
// std::vector
//
// Bind a std::vector-like container as a Python class named `name` under
// `scope`, with list-like behavior (__getitem__/__setitem__/__iter__/__len__,
// append/extend via vector_modifiers, optional buffer protocol, repr, ...).
template <typename Vector, typename holder_type = std::unique_ptr<Vector>, typename... Args>
class_<Vector, holder_type> bind_vector(handle scope, std::string const &name, Args &&...args) {
    using Class_ = class_<Vector, holder_type>;
    // If the value_type is unregistered (e.g. a converting type) or is itself registered
    // module-local then make the vector binding module-local as well:
    using vtype = typename Vector::value_type;
    auto *vtype_info = detail::get_type_info(typeid(vtype));
    bool local = !vtype_info || vtype_info->module_local;
    Class_ cl(scope, name.c_str(), pybind11::module_local(local), std::forward<Args>(args)...);
    // Declare the buffer interface if a buffer_protocol() is passed in
    detail::vector_buffer<Vector, Class_, Args...>(cl);
    cl.def(init<>());
    // Register copy constructor (if possible)
    detail::vector_if_copy_constructible<Vector, Class_>(cl);
    // Register comparison-related operators and functions (if possible)
    detail::vector_if_equal_operator<Vector, Class_>(cl);
    // Register stream insertion operator (if possible)
    detail::vector_if_insertion_operator<Vector, Class_>(cl, name);
    // Modifiers require copyable vector value type
    detail::vector_modifiers<Vector, Class_>(cl);
    // Accessor and iterator; return by value if copyable, otherwise we return by ref + keep-alive
    detail::vector_accessor<Vector, Class_>(cl);
    cl.def(
        "__bool__",
        [](const Vector &v) -> bool { return !v.empty(); },
        "Check whether the list is nonempty");
    cl.def("__len__", &Vector::size);
#if 0
    // C++ style functions deprecated, leaving it here as an example
    cl.def(init<size_type>());
    cl.def("resize",
         (void (Vector::*) (size_type count)) & Vector::resize,
         "changes the number of elements stored");
    cl.def("erase",
        [](Vector &v, SizeType i) {
        if (i >= v.size())
            throw index_error();
        v.erase(v.begin() + i);
    }, "erases element at index ``i``");
    cl.def("empty", &Vector::empty, "checks whether the container is empty");
    cl.def("size", &Vector::size, "returns the number of elements");
    cl.def("push_back", (void (Vector::*)(const T&)) &Vector::push_back, "adds an element to the end");
    cl.def("pop_back", &Vector::pop_back, "removes the last element");
    cl.def("max_size", &Vector::max_size, "returns the maximum possible number of elements");
    cl.def("reserve", &Vector::reserve, "reserves storage");
    cl.def("capacity", &Vector::capacity, "returns the number of elements that can be held in currently allocated storage");
    cl.def("shrink_to_fit", &Vector::shrink_to_fit, "reduces memory usage by freeing unused memory");
    cl.def("clear", &Vector::clear, "clears the contents");
    cl.def("swap", &Vector::swap, "swaps the contents");
    cl.def("front", [](Vector &v) {
        if (v.size()) return v.front();
        else throw index_error();
    }, "access the first element");
    cl.def("back", [](Vector &v) {
        if (v.size()) return v.back();
        else throw index_error();
    }, "access the last element ");
#endif
    return cl;
}
//
// std::map, std::unordered_map
//
PYBIND11_NAMESPACE_BEGIN(detail)
/* Fallback functions */
// Variadic no-op fallbacks, selected when the SFINAE'd overloads below do not
// apply (e.g. mapped_type is not streamable / not copyable).
template <typename, typename, typename... Args>
void map_if_insertion_operator(const Args &...) {}
template <typename, typename, typename... Args>
void map_assignment(const Args &...) {}
// Map assignment when copy-assignable: just copy the value
template <typename Map, typename Class_>
void map_assignment(
    enable_if_t<is_copy_assignable<typename Map::mapped_type>::value, Class_> &cl) {
    using KeyType = typename Map::key_type;
    using MappedType = typename Map::mapped_type;
    // __setitem__: overwrite in place when the key exists, insert otherwise.
    cl.def("__setitem__", [](Map &m, const KeyType &k, const MappedType &v) {
        auto it = m.find(k);
        if (it != m.end()) {
            it->second = v;
        } else {
            m.emplace(k, v);
        }
    });
}
// Not copy-assignable, but still copy-constructible: we can update the value by erasing and
// reinserting
template <typename Map, typename Class_>
void map_assignment(enable_if_t<!is_copy_assignable<typename Map::mapped_type>::value
                                    && is_copy_constructible<typename Map::mapped_type>::value,
                                Class_> &cl) {
    using KeyType = typename Map::key_type;
    using MappedType = typename Map::mapped_type;
    cl.def("__setitem__", [](Map &m, const KeyType &k, const MappedType &v) {
        // We can't use m[k] = v; because value type might not be default constructable
        auto r = m.emplace(k, v);
        if (!r.second) {
            // value type is not copy assignable so the only way to insert it is to erase it
            // first...
            m.erase(r.first);
            m.emplace(k, v);
        }
    });
}
// Bind __repr__ for the map; participates (via SFINAE on the trailing return
// type) only when both key_type and mapped_type are streamable to std::ostream.
template <typename Map, typename Class_>
auto map_if_insertion_operator(Class_ &cl, std::string const &name)
    -> decltype(std::declval<std::ostream &>() << std::declval<typename Map::key_type>()
                                               << std::declval<typename Map::mapped_type>(),
                void()) {
    cl.def(
        "__repr__",
        [name](Map &m) {
            // Produces e.g. "Name{k1: v1, k2: v2}" using the bound class name.
            std::ostringstream s;
            s << name << '{';
            bool f = false;
            for (auto const &kv : m) {
                if (f) {
                    s << ", ";
                }
                s << kv.first << ": " << kv.second;
                f = true;
            }
            s << '}';
            return s.str();
        },
        "Return the canonical string representation of this map.");
}
// Abstract Python dict-view interfaces (keys/values/items). They are keyed
// only on the stripped key/mapped types so one KeysView[T] (etc.) Python
// class can be shared by every map binding with the same element types.
template <typename KeyType>
struct keys_view {
    virtual size_t len() = 0;
    virtual iterator iter() = 0;
    virtual bool contains(const KeyType &k) = 0;
    // Fallback overload for objects that are not convertible to KeyType.
    virtual bool contains(const object &k) = 0;
    virtual ~keys_view() = default;
};
template <typename MappedType>
struct values_view {
    virtual size_t len() = 0;
    virtual iterator iter() = 0;
    virtual ~values_view() = default;
};
template <typename KeyType, typename MappedType>
struct items_view {
    virtual size_t len() = 0;
    virtual iterator iter() = 0;
    virtual ~items_view() = default;
};
// Concrete view implementations holding a reference to the bound map. The
// `Map &map;` members below were corrupted in this copy to "Map ↦" — the
// source text "&map;" was mis-decoded as the HTML entity &map; (U+21A6).
// Restored here; without the member these structs do not compile.
template <typename Map, typename KeysView>
struct KeysViewImpl : public KeysView {
    explicit KeysViewImpl(Map &map) : map(map) {}
    size_t len() override { return map.size(); }
    iterator iter() override { return make_key_iterator(map.begin(), map.end()); }
    bool contains(const typename Map::key_type &k) override { return map.find(k) != map.end(); }
    bool contains(const object &) override { return false; }
    Map &map;
};
template <typename Map, typename ValuesView>
struct ValuesViewImpl : public ValuesView {
    explicit ValuesViewImpl(Map &map) : map(map) {}
    size_t len() override { return map.size(); }
    iterator iter() override { return make_value_iterator(map.begin(), map.end()); }
    Map &map;
};
template <typename Map, typename ItemsView>
struct ItemsViewImpl : public ItemsView {
    explicit ItemsViewImpl(Map &map) : map(map) {}
    size_t len() override { return map.size(); }
    iterator iter() override { return make_iterator(map.begin(), map.end()); }
    Map &map;
};
PYBIND11_NAMESPACE_END(detail)
// Bind a std::map / std::unordered_map-like container as a Python class named
// `name` under `scope`, with dict-like behavior: __getitem__/__setitem__/
// __delitem__/__contains__/__iter__/__len__ plus keys()/values()/items() views.
template <typename Map, typename holder_type = std::unique_ptr<Map>, typename... Args>
class_<Map, holder_type> bind_map(handle scope, const std::string &name, Args &&...args) {
    using KeyType = typename Map::key_type;
    using MappedType = typename Map::mapped_type;
    using StrippedKeyType = detail::remove_cvref_t<KeyType>;
    using StrippedMappedType = detail::remove_cvref_t<MappedType>;
    using KeysView = detail::keys_view<StrippedKeyType>;
    using ValuesView = detail::values_view<StrippedMappedType>;
    using ItemsView = detail::items_view<StrippedKeyType, StrippedMappedType>;
    using Class_ = class_<Map, holder_type>;
    // If either type is a non-module-local bound type then make the map binding non-local as well;
    // otherwise (e.g. both types are either module-local or converting) the map will be
    // module-local.
    auto *tinfo = detail::get_type_info(typeid(MappedType));
    bool local = !tinfo || tinfo->module_local;
    if (local) {
        tinfo = detail::get_type_info(typeid(KeyType));
        local = !tinfo || tinfo->module_local;
    }
    Class_ cl(scope, name.c_str(), pybind11::module_local(local), std::forward<Args>(args)...);
    static constexpr auto key_type_descr = detail::make_caster<KeyType>::name;
    static constexpr auto mapped_type_descr = detail::make_caster<MappedType>::name;
    std::string key_type_name(key_type_descr.text), mapped_type_name(mapped_type_descr.text);
    // If key type isn't properly wrapped, fall back to C++ names
    if (key_type_name == "%") {
        key_type_name = detail::type_info_description(typeid(KeyType));
    }
    // Similarly for value type:
    if (mapped_type_name == "%") {
        mapped_type_name = detail::type_info_description(typeid(MappedType));
    }
    // Wrap KeysView[KeyType] if it wasn't already wrapped
    if (!detail::get_type_info(typeid(KeysView))) {
        class_<KeysView> keys_view(
            scope, ("KeysView[" + key_type_name + "]").c_str(), pybind11::module_local(local));
        keys_view.def("__len__", &KeysView::len);
        keys_view.def("__iter__",
                      &KeysView::iter,
                      keep_alive<0, 1>() /* Essential: keep view alive while iterator exists */
        );
        keys_view.def("__contains__",
                      static_cast<bool (KeysView::*)(const KeyType &)>(&KeysView::contains));
        // Fallback for when the object is not of the key type
        keys_view.def("__contains__",
                      static_cast<bool (KeysView::*)(const object &)>(&KeysView::contains));
    }
    // Similarly for ValuesView:
    if (!detail::get_type_info(typeid(ValuesView))) {
        class_<ValuesView> values_view(scope,
                                       ("ValuesView[" + mapped_type_name + "]").c_str(),
                                       pybind11::module_local(local));
        values_view.def("__len__", &ValuesView::len);
        values_view.def("__iter__",
                        &ValuesView::iter,
                        keep_alive<0, 1>() /* Essential: keep view alive while iterator exists */
        );
    }
    // Similarly for ItemsView:
    if (!detail::get_type_info(typeid(ItemsView))) {
        class_<ItemsView> items_view(
            scope,
            ("ItemsView[" + key_type_name + ", ").append(mapped_type_name + "]").c_str(),
            pybind11::module_local(local));
        items_view.def("__len__", &ItemsView::len);
        items_view.def("__iter__",
                       &ItemsView::iter,
                       keep_alive<0, 1>() /* Essential: keep view alive while iterator exists */
        );
    }
    cl.def(init<>());
    // Register stream insertion operator (if possible)
    detail::map_if_insertion_operator<Map, Class_>(cl, name);
    cl.def(
        "__bool__",
        [](const Map &m) -> bool { return !m.empty(); },
        "Check whether the map is nonempty");
    // Iterating the map itself yields keys, matching Python dict semantics.
    cl.def(
        "__iter__",
        [](Map &m) { return make_key_iterator(m.begin(), m.end()); },
        keep_alive<0, 1>() /* Essential: keep map alive while iterator exists */
    );
    cl.def(
        "keys",
        [](Map &m) {
            return std::unique_ptr<KeysView>(new detail::KeysViewImpl<Map, KeysView>(m));
        },
        keep_alive<0, 1>() /* Essential: keep map alive while view exists */
    );
    cl.def(
        "values",
        [](Map &m) {
            return std::unique_ptr<ValuesView>(new detail::ValuesViewImpl<Map, ValuesView>(m));
        },
        keep_alive<0, 1>() /* Essential: keep map alive while view exists */
    );
    cl.def(
        "items",
        [](Map &m) {
            return std::unique_ptr<ItemsView>(new detail::ItemsViewImpl<Map, ItemsView>(m));
        },
        keep_alive<0, 1>() /* Essential: keep map alive while view exists */
    );
    // __getitem__ raises KeyError (key_error) for missing keys and returns a
    // reference into the map kept alive by reference_internal.
    cl.def(
        "__getitem__",
        [](Map &m, const KeyType &k) -> MappedType & {
            auto it = m.find(k);
            if (it == m.end()) {
                throw key_error();
            }
            return it->second;
        },
        return_value_policy::reference_internal // ref + keepalive
    );
    cl.def("__contains__", [](Map &m, const KeyType &k) -> bool {
        auto it = m.find(k);
        if (it == m.end()) {
            return false;
        }
        return true;
    });
    // Fallback for when the object is not of the key type
    cl.def("__contains__", [](Map &, const object &) -> bool { return false; });
    // Assignment provided only if the type is copyable
    detail::map_assignment<Map, Class_>(cl);
    cl.def("__delitem__", [](Map &m, const KeyType &k) {
        auto it = m.find(k);
        if (it == m.end()) {
            throw key_error();
        }
        m.erase(it);
    });
    cl.def("__len__", &Map::size);
    return cl;
}
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/include/pybind11/type_caster_pyobject_ptr.h | C/C++ Header | // Copyright (c) 2023 The pybind Community.
#pragma once
#include "detail/common.h"
#include "detail/descr.h"
#include "cast.h"
#include "pytypes.h"
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)
// Caster allowing bound functions to take/return raw PyObject* directly,
// bypassing pybind11's usual type conversion (opt-in via this header).
template <>
class type_caster<PyObject> {
public:
    static constexpr auto name = const_name("object"); // See discussion under PR #4601.
    // This overload is purely to guard against accidents.
    template <typename T,
              detail::enable_if_t<!is_same_ignoring_cvref<T, PyObject *>::value, int> = 0>
    static handle cast(T &&, return_value_policy, handle /*parent*/) {
        static_assert(is_same_ignoring_cvref<T, PyObject *>::value,
                      "Invalid C++ type T for to-Python conversion (type_caster<PyObject>).");
        return nullptr; // Unreachable.
    }
    // C++ -> Python: nullptr means an error was set by the callee; a pending
    // Python error alongside a non-null result is treated as a bug.
    static handle cast(PyObject *src, return_value_policy policy, handle /*parent*/) {
        if (src == nullptr) {
            throw error_already_set();
        }
        if (PyErr_Occurred()) {
            raise_from(PyExc_SystemError, "src != nullptr but PyErr_Occurred()");
            throw error_already_set();
        }
        // take_ownership steals the reference; reference/automatic_reference
        // increments it. Other policies are not meaningful for PyObject*.
        if (policy == return_value_policy::take_ownership) {
            return src;
        }
        if (policy == return_value_policy::reference
            || policy == return_value_policy::automatic_reference) {
            return handle(src).inc_ref();
        }
        pybind11_fail("type_caster<PyObject>::cast(): unsupported return_value_policy: "
                      + std::to_string(static_cast<int>(policy)));
    }
    // Python -> C++: always succeeds; keeps a borrowed-then-owned reference in
    // `value` so the pointer stays valid for the duration of the call.
    bool load(handle src, bool) {
        value = reinterpret_borrow<object>(src);
        return true;
    }
    template <typename T>
    using cast_op_type = PyObject *;
    explicit operator PyObject *() { return value.ptr(); }
private:
    object value;
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/noxfile.py | Python | import os
import nox
nox.needs_version = ">=2022.1.7"
nox.options.sessions = ["lint", "tests", "tests_packaging"]
PYTHON_VERSIONS = [
"3.6",
"3.7",
"3.8",
"3.9",
"3.10",
"3.11",
"pypy3.7",
"pypy3.8",
"pypy3.9",
]
if os.environ.get("CI", None):
nox.options.error_on_missing_interpreters = True
@nox.session(reuse_venv=True)
def lint(session: nox.Session) -> None:
    """
    Lint the codebase (except for clang-format/tidy).
    """
    session.install("pre-commit")
    extra_args = list(session.posargs)
    session.run("pre-commit", "run", "-a", *extra_args)
@nox.session(python=PYTHON_VERSIONS)
def tests(session: nox.Session) -> None:
    """
    Run the tests (requires a compiler).
    """
    build_dir = session.create_tmp()
    session.install("cmake")
    session.install("-r", "tests/requirements.txt")
    configure_args = [
        "-S.",
        f"-B{build_dir}",
        "-DPYBIND11_WERROR=ON",
        "-DDOWNLOAD_CATCH=ON",
        "-DDOWNLOAD_EIGEN=ON",
    ]
    session.run("cmake", *configure_args, *session.posargs)
    session.run("cmake", "--build", build_dir)
    session.run("cmake", "--build", build_dir, "--config=Release", "--target", "check")
@nox.session
def tests_packaging(session: nox.Session) -> None:
    """
    Run the packaging tests.
    """
    session.install("-r", "tests/requirements.txt", "--prefer-binary")
    pytest_args = ["tests/extra_python_package", *session.posargs]
    session.run("pytest", *pytest_args)
@nox.session(reuse_venv=True)
def docs(session: nox.Session) -> None:
    """
    Build the docs. Pass "serve" to serve.
    """
    session.install("-r", "docs/requirements.txt")
    session.chdir("docs")
    wants_pdf = "pdf" in session.posargs
    builder = "latexpdf" if wants_pdf else "html"
    session.run("sphinx-build", "-M", builder, ".", "_build")
    if wants_pdf:
        # PDF builds never serve.
        return
    if "serve" in session.posargs:
        session.log("Launching docs at http://localhost:8000/ - use Ctrl-C to quit")
        session.run("python", "-m", "http.server", "8000", "-d", "_build/html")
    elif session.posargs:
        session.error("Unsupported argument to docs")
@nox.session(reuse_venv=True)
def make_changelog(session: nox.Session) -> None:
    """
    Inspect the closed issues and make entries for a changelog.
    """
    session.install("ghapi", "rich")
    script = "tools/make_changelog.py"
    session.run("python", script)
@nox.session(reuse_venv=True)
def build(session: nox.Session) -> None:
    """
    Build SDists and wheels.
    """
    session.install("build")
    build_cmd = ("python", "-m", "build", *session.posargs)
    session.log("Building normal files")
    session.run(*build_cmd)
    session.log("Building pybind11-global files (PYBIND11_GLOBAL_SDIST=1)")
    session.run(*build_cmd, env={"PYBIND11_GLOBAL_SDIST": "1"})
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/pybind11/__init__.py | Python | import sys
if sys.version_info < (3, 6): # noqa: UP036
msg = "pybind11 does not support Python < 3.6. 2.9 was the last release supporting Python 2.7 and 3.5."
raise ImportError(msg)
from ._version import __version__, version_info
from .commands import get_cmake_dir, get_include, get_pkgconfig_dir
__all__ = (
"version_info",
"__version__",
"get_include",
"get_cmake_dir",
"get_pkgconfig_dir",
)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/pybind11/__main__.py | Python | # pylint: disable=missing-function-docstring
import argparse
import sys
import sysconfig
from ._version import __version__
from .commands import get_cmake_dir, get_include, get_pkgconfig_dir
def print_includes() -> None:
    """Print the ``-I`` compiler flags for Python and pybind11 headers."""
    candidates = [
        sysconfig.get_path("include"),
        sysconfig.get_path("platinclude"),
        get_include(),
    ]
    # Deduplicate while keeping first-seen order; drop empty entries.
    ordered = []
    for path in candidates:
        if path and path not in ordered:
            ordered.append(path)
    print(" ".join(f"-I{path}" for path in ordered))
def main() -> None:
    """Entry point for ``python -m pybind11``: parse flags and print the requested paths."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--version",
        action="version",
        version=__version__,
        help="Print the version and exit.",
    )
    # All remaining options are independent boolean flags.
    flag_specs = (
        ("--includes", "Include flags for both pybind11 and Python headers."),
        (
            "--cmakedir",
            "Print the CMake module directory, ideal for setting -Dpybind11_ROOT in CMake.",
        ),
        (
            "--pkgconfigdir",
            "Print the pkgconfig directory, ideal for setting $PKG_CONFIG_PATH.",
        ),
    )
    for flag, help_text in flag_specs:
        parser.add_argument(flag, action="store_true", help=help_text)
    args = parser.parse_args()
    # With no arguments at all, show usage instead of silence.
    if not sys.argv[1:]:
        parser.print_help()
    if args.includes:
        print_includes()
    if args.cmakedir:
        print(get_cmake_dir())
    if args.pkgconfigdir:
        print(get_pkgconfig_dir())
if __name__ == "__main__":
    main()
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/pybind11/_version.py | Python | from typing import Union
def _to_int(s: str) -> Union[int, str]:
try:
return int(s)
except ValueError:
return s
__version__ = "2.11.1"
version_info = tuple(_to_int(s) for s in __version__.split("."))
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/pybind11/commands.py | Python | import os
DIR = os.path.abspath(os.path.dirname(__file__))


def get_include(user: bool = False) -> str:  # noqa: ARG001
    """
    Return the path to the pybind11 include directory. The historical "user"
    argument is unused, and may be removed.
    """
    # Prefer the headers bundled inside the installed package; fall back to
    # the source checkout layout (include/ beside the package directory).
    bundled = os.path.join(DIR, "include")
    if os.path.exists(bundled):
        return bundled
    return os.path.join(os.path.dirname(DIR), "include")
def get_cmake_dir() -> str:
    """
    Return the path to the pybind11 CMake module directory.
    """
    candidate = os.path.join(DIR, "share", "cmake", "pybind11")
    if not os.path.exists(candidate):
        msg = "pybind11 not installed, installation required to access the CMake files"
        raise ImportError(msg)
    return candidate
def get_pkgconfig_dir() -> str:
    """
    Return the path to the pybind11 pkgconfig directory.
    """
    candidate = os.path.join(DIR, "share", "pkgconfig")
    if not os.path.exists(candidate):
        msg = "pybind11 not installed, installation required to access the pkgconfig files"
        raise ImportError(msg)
    return candidate
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/pybind11/setup_helpers.py | Python | """
This module provides helpers for C++11+ projects using pybind11.
LICENSE:
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# IMPORTANT: If you change this file in the pybind11 repo, also review
# setup_helpers.pyi for matching changes.
#
# If you copy this file in, you don't
# need the .pyi file; it's just an interface file for static type checkers.
import contextlib
import os
import platform
import shlex
import shutil
import sys
import sysconfig
import tempfile
import threading
import warnings
from functools import lru_cache
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Tuple,
TypeVar,
Union,
)
try:
from setuptools import Extension as _Extension
from setuptools.command.build_ext import build_ext as _build_ext
except ImportError:
from distutils.command.build_ext import build_ext as _build_ext # type: ignore[assignment]
from distutils.extension import Extension as _Extension # type: ignore[assignment]
import distutils.ccompiler
import distutils.errors
WIN = sys.platform.startswith("win32") and "mingw" not in sysconfig.get_platform()
MACOS = sys.platform.startswith("darwin")
STD_TMPL = "/std:c++{}" if WIN else "-std=c++{}"
# It is recommended to use PEP 518 builds if using this module. However, this
# file explicitly supports being copied into a user's project directory
# standalone, and pulling pybind11 with the deprecated setup_requires feature.
# If you copy the file, remember to add it to your MANIFEST.in, and add the current
# directory into your path if it sits beside your setup.py.
class Pybind11Extension(_Extension):
    """
    Build a C++11+ Extension module with pybind11. This automatically adds the
    recommended flags when you init the extension and assumes C++ sources - you
    can further modify the options yourself.
    The customizations are:
    * ``/EHsc`` and ``/bigobj`` on Windows
    * ``stdlib=libc++`` on macOS
    * ``visibility=hidden`` and ``-g0`` on Unix
    Finally, you can set ``cxx_std`` via constructor or afterwards to enable
    flags for C++ std, and a few extra helper flags related to the C++ standard
    level. It is _highly_ recommended you either set this, or use the provided
    ``build_ext``, which will search for the highest supported extension for
    you if the ``cxx_std`` property is not set. Do not set the ``cxx_std``
    property more than once, as flags are added when you set it. Set the
    property to None to disable the addition of C++ standard flags.
    If you want to add pybind11 headers manually, for example for an exact
    git checkout, then set ``include_pybind11=False``.
    """
    # flags are prepended, so that they can be further overridden, e.g. by
    # ``extra_compile_args=["-g"]``.
    def _add_cflags(self, flags: List[str]) -> None:
        self.extra_compile_args[:0] = flags
    def _add_ldflags(self, flags: List[str]) -> None:
        self.extra_link_args[:0] = flags
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # _cxx_level == 0 means "not set yet"; build_ext auto-detects in that
        # case. Pop our custom kwargs before handing the rest to setuptools.
        self._cxx_level = 0
        cxx_std = kwargs.pop("cxx_std", 0)
        if "language" not in kwargs:
            kwargs["language"] = "c++"
        include_pybind11 = kwargs.pop("include_pybind11", True)
        super().__init__(*args, **kwargs)
        # Include the installed package pybind11 headers
        if include_pybind11:
            # If using setup_requires, this fails the first time - that's okay
            try:
                import pybind11
                pyinc = pybind11.get_include()
                if pyinc not in self.include_dirs:
                    self.include_dirs.append(pyinc)
            except ModuleNotFoundError:
                pass
        # Goes through the property setter, which may add std flags.
        self.cxx_std = cxx_std
        cflags = []
        if WIN:
            cflags += ["/EHsc", "/bigobj"]
        else:
            cflags += ["-fvisibility=hidden"]
            # Only strip debug info (-g0) when the user hasn't asked for any
            # -g* flag via CFLAGS/CPPFLAGS.
            env_cflags = os.environ.get("CFLAGS", "")
            env_cppflags = os.environ.get("CPPFLAGS", "")
            c_cpp_flags = shlex.split(env_cflags) + shlex.split(env_cppflags)
            if not any(opt.startswith("-g") for opt in c_cpp_flags):
                cflags += ["-g0"]
        self._add_cflags(cflags)
    @property
    def cxx_std(self) -> int:
        """
        The CXX standard level. If set, will add the required flags. If left at
        0, it will trigger an automatic search when pybind11's build_ext is
        used. If None, will have no effect. Besides just the flags, this may
        add a macos-min 10.9 or 10.14 flag if MACOSX_DEPLOYMENT_TARGET is
        unset.
        """
        return self._cxx_level
    @cxx_std.setter
    def cxx_std(self, level: int) -> None:
        # Setting twice appends flags twice, hence the warning.
        if self._cxx_level:
            warnings.warn(
                "You cannot safely change the cxx_level after setting it!", stacklevel=2
            )
        # MSVC 2015 Update 3 and later only have 14 (and later 17) modes, so
        # force a valid flag here.
        if WIN and level == 11:
            level = 14
        self._cxx_level = level
        if not level:
            return
        cflags = [STD_TMPL.format(level)]
        ldflags = []
        if MACOS and "MACOSX_DEPLOYMENT_TARGET" not in os.environ:
            # C++17 requires a higher min version of macOS. An earlier version
            # (10.12 or 10.13) can be set manually via environment variable if
            # you are careful in your feature usage, but 10.14 is the safest
            # setting for general use. However, never set higher than the
            # current macOS version!
            current_macos = tuple(int(x) for x in platform.mac_ver()[0].split(".")[:2])
            desired_macos = (10, 9) if level < 17 else (10, 14)
            macos_string = ".".join(str(x) for x in min(current_macos, desired_macos))
            macosx_min = f"-mmacosx-version-min={macos_string}"
            cflags += [macosx_min]
            ldflags += [macosx_min]
        self._add_cflags(cflags)
        self._add_ldflags(ldflags)
# Just in case someone clever tries to multithread
tmp_chdir_lock = threading.Lock()


@contextlib.contextmanager
def tmp_chdir() -> Iterator[str]:
    """Create a temporary directory, chdir into it, and clean it up on exit."""
    # Serialized so concurrent callers never fight over the process CWD.
    with tmp_chdir_lock:
        previous = os.getcwd()
        try:
            scratch = tempfile.mkdtemp()
            os.chdir(scratch)
            yield scratch
        finally:
            os.chdir(previous)
            shutil.rmtree(scratch)
# cf http://bugs.python.org/issue26689
def has_flag(compiler: Any, flag: str) -> bool:
    """
    Return whether *flag* is accepted by *compiler*, probed by compiling a
    trivial translation unit inside a throwaway directory.
    """
    with tmp_chdir():
        probe = Path("flagcheck.cpp")
        # Don't trigger -Wunused-parameter.
        probe.write_text("int main (int, char **) { return 0; }", encoding="utf-8")
        try:
            compiler.compile([str(probe)], extra_postargs=[flag])
        except distutils.errors.CompileError:
            return False
        return True
# Every call will cache the result
cpp_flag_cache = None


@lru_cache()
def auto_cpp_level(compiler: Any) -> Union[str, int]:
    """
    Return the max supported C++ std level (17, 14, or 11). Returns latest on Windows.
    """
    if WIN:
        return "latest"
    for candidate in (17, 14, 11):
        if has_flag(compiler, STD_TMPL.format(candidate)):
            return candidate
    msg = "Unsupported compiler -- at least C++11 support is needed!"
    raise RuntimeError(msg)
class build_ext(_build_ext):  # noqa: N801
    """
    Customized build_ext that allows an auto-search for the highest supported
    C++ level for Pybind11Extension. This is only needed for the auto-search
    for now, and is completely optional otherwise.
    """

    def build_extensions(self) -> None:
        """
        Build extensions, injecting C++ std for Pybind11Extension if needed.
        """
        # A _cxx_level of 0 means "not chosen yet" — fill it in from probing.
        for ext in self.extensions:
            if getattr(ext, "_cxx_level", None) == 0:
                ext.cxx_std = auto_cpp_level(self.compiler)
        super().build_extensions()
def intree_extensions(
    paths: Iterable[str], package_dir: Optional[Dict[str, str]] = None
) -> List[Pybind11Extension]:
    """
    Generate Pybind11Extensions from source files directly located in a Python
    source tree.
    ``package_dir`` behaves as in ``setuptools.setup``. If unset, the Python
    package root parent is determined as the first parent directory that does
    not contain an ``__init__.py`` file.
    """

    def _dotted_name(path: str, root: str) -> str:
        # "pkg/sub/mod.cpp" relative to root -> "pkg.sub.mod"
        stem, _ = os.path.splitext(os.path.relpath(path, root))
        return stem.replace(os.path.sep, ".")

    extensions = []
    if package_dir is None:
        for path in paths:
            # Walk up until we leave the package (no __init__.py).
            root, _ = os.path.split(path)
            while os.path.exists(os.path.join(root, "__init__.py")):
                root, _ = os.path.split(root)
            extensions.append(Pybind11Extension(_dotted_name(path, root), [path]))
        return extensions
    for path in paths:
        for prefix, parent in package_dir.items():
            if path.startswith(parent):
                qualified_name = _dotted_name(path, parent)
                if prefix:
                    qualified_name = prefix + "." + qualified_name
                extensions.append(Pybind11Extension(qualified_name, [path]))
                break
        else:
            msg = (
                f"path {path} is not a child of any of the directories listed "
                f"in 'package_dir' ({package_dir})"
            )
            raise ValueError(msg)
    return extensions
def naive_recompile(obj: str, src: str) -> bool:
    """
    This will recompile only if the source file changes. It does not check
    header files, so a more advanced function or Ccache is better if you have
    editable header files in your package.
    """
    obj_mtime = os.stat(obj).st_mtime
    src_mtime = os.stat(src).st_mtime
    return obj_mtime < src_mtime
def no_recompile(obj: str, src: str) -> bool:  # noqa: ARG001
    """
    This is the safest but slowest choice (and is the default) - will always
    recompile sources.

    Both arguments (object file path, source file path) are ignored; they
    exist only to match the ``needs_recompile`` callback signature.
    """
    # Fix: first parameter was misspelled "obg"; callers pass positionally,
    # so renaming it to "obj" is backward-compatible.
    return True
# TypeVar so install()/__enter__ can return the concrete subclass type.
S = TypeVar("S", bound="ParallelCompile")
# Signature of distutils.ccompiler.CCompiler.compile, which ParallelCompile
# monkey-patches below: (compiler, sources, output_dir, macros, include_dirs,
# debug, extra_preargs, extra_postargs, depends) -> list of object file names.
CCompilerMethod = Callable[
    [
        distutils.ccompiler.CCompiler,
        List[str],
        Optional[str],
        Optional[Union[Tuple[str], Tuple[str, Optional[str]]]],
        Optional[List[str]],
        bool,
        Optional[List[str]],
        Optional[List[str]],
        Optional[List[str]],
    ],
    List[str],
]
# Optional parallel compile utility
# inspired by: http://stackoverflow.com/questions/11013851/speeding-up-build-process-with-distutils
# and: https://github.com/tbenthompson/cppimport/blob/stable/cppimport/build_module.py
# and NumPy's parallel distutils module:
# https://github.com/numpy/numpy/blob/master/numpy/distutils/ccompiler.py
class ParallelCompile:
    """
    Make a parallel compile function. Inspired by
    numpy.distutils.ccompiler.CCompiler.compile and cppimport.
    This takes several arguments that allow you to customize the compile
    function created:
    envvar:
        Set an environment variable to control the compilation threads, like
        NPY_NUM_BUILD_JOBS
    default:
        0 will automatically multithread, or 1 will only multithread if the
        envvar is set.
    max:
        The limit for automatic multithreading if non-zero
    needs_recompile:
        A function of (obj, src) that returns True when recompile is needed. No
        effect in isolated mode; use ccache instead, see
        https://github.com/matplotlib/matplotlib/issues/1507/
    To use::
        ParallelCompile("NPY_NUM_BUILD_JOBS").install()
    or::
        with ParallelCompile("NPY_NUM_BUILD_JOBS"):
            setup(...)
    By default, this assumes all files need to be recompiled. A smarter
    function can be provided via needs_recompile. If the output has not yet
    been generated, the compile will always run, and this function is not
    called.
    """
    __slots__ = ("envvar", "default", "max", "_old", "needs_recompile")
    def __init__(
        self,
        envvar: Optional[str] = None,
        default: int = 0,
        max: int = 0,  # pylint: disable=redefined-builtin
        needs_recompile: Callable[[str, str], bool] = no_recompile,
    ) -> None:
        self.envvar = envvar
        self.default = default
        self.max = max
        self.needs_recompile = needs_recompile
        # Stack of previously-installed compile functions, restored by __exit__.
        self._old: List[CCompilerMethod] = []
    def function(self) -> CCompilerMethod:
        """
        Builds a function object usable as distutils.ccompiler.CCompiler.compile.
        """
        def compile_function(
            compiler: distutils.ccompiler.CCompiler,
            sources: List[str],
            output_dir: Optional[str] = None,
            macros: Optional[Union[Tuple[str], Tuple[str, Optional[str]]]] = None,
            include_dirs: Optional[List[str]] = None,
            debug: bool = False,
            extra_preargs: Optional[List[str]] = None,
            extra_postargs: Optional[List[str]] = None,
            depends: Optional[List[str]] = None,
        ) -> Any:
            # These lines are directly from distutils.ccompiler.CCompiler
            macros, objects, extra_postargs, pp_opts, build = compiler._setup_compile(  # type: ignore[attr-defined]
                output_dir, macros, include_dirs, sources, depends, extra_postargs
            )
            cc_args = compiler._get_cc_args(pp_opts, debug, extra_preargs)  # type: ignore[attr-defined]
            # The number of threads; start with default.
            threads = self.default
            # Determine the number of compilation threads, unless set by an environment variable.
            if self.envvar is not None:
                threads = int(os.environ.get(self.envvar, self.default))
            def _single_compile(obj: Any) -> None:
                try:
                    src, ext = build[obj]
                except KeyError:
                    # Object not in the build map: nothing to compile.
                    return
                # Skip up-to-date outputs according to the needs_recompile policy.
                if not os.path.exists(obj) or self.needs_recompile(obj, src):
                    compiler._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)  # type: ignore[attr-defined]
            try:
                # Importing .synchronize checks for platforms that have some multiprocessing
                # capabilities but lack semaphores, such as AWS Lambda and Android Termux.
                import multiprocessing.synchronize
                from multiprocessing.pool import ThreadPool
            except ImportError:
                threads = 1
            if threads == 0:
                # Auto mode: use the CPU count, capped by self.max when set.
                try:
                    threads = multiprocessing.cpu_count()
                    threads = self.max if self.max and self.max < threads else threads
                except NotImplementedError:
                    threads = 1
            if threads > 1:
                with ThreadPool(threads) as pool:
                    for _ in pool.imap_unordered(_single_compile, objects):
                        pass
            else:
                for ob in objects:
                    _single_compile(ob)
            return objects
        return compile_function
    def install(self: S) -> S:
        """
        Installs the compile function into distutils.ccompiler.CCompiler.compile.
        """
        distutils.ccompiler.CCompiler.compile = self.function()  # type: ignore[assignment]
        return self
    def __enter__(self: S) -> S:
        # Remember the current compile function so __exit__ can restore it.
        self._old.append(distutils.ccompiler.CCompiler.compile)
        return self.install()
    def __exit__(self, *args: Any) -> None:
        distutils.ccompiler.CCompiler.compile = self._old.pop()  # type: ignore[assignment]
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/setup.py | Python | #!/usr/bin/env python3
# Setup script for PyPI; use CMakeFile.txt to build extension modules
import contextlib
import os
import re
import shutil
import string
import subprocess
import sys
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Dict, Iterator, List, Union
import setuptools.command.sdist
DIR = Path(__file__).parent.absolute()
# Extracts (NAME, VALUE) pairs from "#define PYBIND11_VERSION_<NAME> <VALUE>".
VERSION_REGEX = re.compile(
    r"^\s*#\s*define\s+PYBIND11_VERSION_([A-Z]+)\s+(.*)$", re.MULTILINE
)
# Python-side version module and the C++ header holding the version defines.
VERSION_FILE = Path("pybind11/_version.py")
COMMON_FILE = Path("include/pybind11/detail/common.h")
def build_expected_version_hex(matches: Dict[str, str]) -> str:
    """Compute the expected PYBIND11_VERSION_HEX string from the parsed
    MAJOR/MINOR/PATCH defines.

    PATCH may be plain ("4") or carry a pre-release tag ("4.dev1", "4.b2").
    Raises RuntimeError when PATCH cannot be parsed.
    """
    major = int(matches["MAJOR"])
    minor = int(matches["MINOR"])
    patch_field = matches["PATCH"]
    parts = patch_field.split(".")
    patch = int(parts[0])
    level = "0"
    serial = None
    if len(parts) == 1:
        serial = 0
    elif len(parts) == 2:
        tail = parts[1]
        for candidate in ("a", "b", "c", "dev"):
            if tail.startswith(candidate):
                level = candidate
                serial = int(tail[len(candidate) :])
                break
    if serial is None:
        msg = f'Invalid PYBIND11_VERSION_PATCH: "{patch_field}"'
        raise RuntimeError(msg)
    # Layout mirrors PY_VERSION_HEX: MMmmppLS (level abbreviated to one char).
    hex_body = f"{major:02x}{minor:02x}{patch:02x}{level[:1]}{serial:x}"
    return f"0x{hex_body.upper()}"
# PYBIND11_GLOBAL_SDIST will build a different sdist, with the python-headers
# files, and the sys.prefix files (CMake and headers).
global_sdist = os.environ.get("PYBIND11_GLOBAL_SDIST", False)
setup_py = Path(
    "tools/setup_global.py.in" if global_sdist else "tools/setup_main.py.in"
)
extra_cmd = 'cmdclass["sdist"] = SDist\n'
# (destination, template) pairs rewritten into the release tree by SDist.
to_src = (
    (Path("pyproject.toml"), Path("tools/pyproject.toml")),
    (Path("setup.py"), setup_py),
)
# Read the listed version
loc: Dict[str, str] = {}
code = compile(VERSION_FILE.read_text(encoding="utf-8"), "pybind11/_version.py", "exec")
exec(code, loc)
version = loc["__version__"]
# Verify that the version matches the one in C++
matches = dict(VERSION_REGEX.findall(COMMON_FILE.read_text(encoding="utf8")))
cpp_version = "{MAJOR}.{MINOR}.{PATCH}".format(**matches)
if version != cpp_version:
    msg = f"Python version {version} does not match C++ version {cpp_version}!"
    raise RuntimeError(msg)
# Also cross-check the packed hex form against the individual components.
version_hex = matches.get("HEX", "MISSING")
exp_version_hex = build_expected_version_hex(matches)
if version_hex != exp_version_hex:
    msg = f"PYBIND11_VERSION_HEX {version_hex} does not match expected value {exp_version_hex}!"
    raise RuntimeError(msg)
# TODO: use literals & overload (typing extensions or Python 3.8)
def get_and_replace(
    filename: Path, binary: bool = False, **opts: str
) -> Union[bytes, str]:
    """Read *filename* and substitute ``$name`` template placeholders from
    *opts*.

    Returns bytes when *binary* is true, otherwise str.
    """
    if not binary:
        return string.Template(filename.read_text()).substitute(opts)
    raw = filename.read_bytes()
    rendered = string.Template(raw.decode()).substitute(opts)
    return rendered.encode()
# Use our input files instead when making the SDist (and anything that depends
# on it, like a wheel)
class SDist(setuptools.command.sdist.sdist):
    """sdist command that re-renders the templated files (setup.py,
    pyproject.toml) inside the release tree with the version filled in."""

    def make_release_tree(self, base_dir: str, files: List[str]) -> None:
        super().make_release_tree(base_dir, files)
        for destination_rel, template in to_src:
            rendered = get_and_replace(template, binary=True, version=version, extra_cmd="")
            destination = Path(base_dir) / destination_rel
            # The file is normally hard-linked into the release tree, so
            # unlink first to avoid writing through to the source tree!
            destination.unlink()
            destination.write_bytes(rendered)  # type: ignore[arg-type]
# Remove the CMake install directory when done
@contextlib.contextmanager
def remove_output(*sources: str) -> Iterator[None]:
    """Yield control, then delete each given directory tree on the way out.

    Cleanup runs even if the body raises.
    """
    try:
        yield
    finally:
        for directory in sources:
            shutil.rmtree(directory)
with remove_output("pybind11/include", "pybind11/share"):
    # Generate the files if they are not present.
    with TemporaryDirectory() as tmpdir:
        # Configure into a throwaway build dir, installing headers and CMake
        # config files under ./pybind11 so the sdist can ship them.
        cmd = ["cmake", "-S", ".", "-B", tmpdir] + [
            "-DCMAKE_INSTALL_PREFIX=pybind11",
            "-DBUILD_TESTING=OFF",
            "-DPYBIND11_NOPYTHON=ON",
            "-Dprefix_for_pc_file=${pcfiledir}/../../",
        ]
        if "CMAKE_ARGS" in os.environ:
            # Honor user-supplied CMAKE_ARGS, but never let it override the
            # install prefix chosen above.
            fcommand = [
                c
                for c in os.environ["CMAKE_ARGS"].split()
                if "DCMAKE_INSTALL_PREFIX" not in c
            ]
            cmd += fcommand
        subprocess.run(cmd, check=True, cwd=DIR, stdout=sys.stdout, stderr=sys.stderr)
        subprocess.run(
            ["cmake", "--install", tmpdir],
            check=True,
            cwd=DIR,
            stdout=sys.stdout,
            stderr=sys.stderr,
        )
    # Hand off to the templated setup script (setup_main/setup_global),
    # exposing SDist so the template's cmdclass assignment works.
    txt = get_and_replace(setup_py, version=version, extra_cmd=extra_cmd)
    code = compile(txt, setup_py, "exec")
    exec(code, {"SDist": SDist})
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/conftest.py | Python | """pytest configuration
Extends output capture as needed by pybind11: ignore constructors, optional unordered lines.
Adds docstring and exceptions message sanitizers.
"""
import contextlib
import difflib
import gc
import multiprocessing
import re
import sys
import textwrap
import traceback
import pytest
# Early diagnostic for failed imports
try:
import pybind11_tests
except Exception:
# pytest does not show the traceback without this.
traceback.print_exc()
raise
@pytest.fixture(scope="session", autouse=True)
def use_multiprocessing_forkserver_on_linux():
    """Session-wide: switch multiprocessing to 'forkserver' on Linux to avoid
    fork()-after-threads deadlocks in the test suite."""
    if sys.platform != "linux":
        # The default on Windows and macOS is "spawn": If it's not broken, don't fix it.
        return
    # Full background: https://github.com/pybind/pybind11/issues/4105#issuecomment-1301004592
    # In a nutshell: fork() after starting threads == flakiness in the form of deadlocks.
    # It is actually a well-known pitfall, unfortunately without guard rails.
    # "forkserver" is more performant than "spawn" (~9s vs ~13s for tests/test_gil_scoped.py,
    # visit the issuecomment link above for details).
    multiprocessing.set_start_method("forkserver")
# Matches Python-2-style long literals like "123L" (group 1 keeps the digit).
_long_marker = re.compile(r"([0-9])L")
# Matches hexadecimal addresses such as "0x7f12abcd".
_hexadecimal = re.compile(r"0x[0-9a-fA-F]+")
# Avoid collecting Python3 only files
collect_ignore = []
def _strip_and_dedent(s):
"""For triple-quote strings"""
return textwrap.dedent(s.lstrip("\n").rstrip())
def _split_and_sort(s):
    """Normalize then split into sorted lines, for order-insensitive output
    comparison."""
    normalized = _strip_and_dedent(s)
    return sorted(normalized.splitlines())
def _make_explanation(a, b):
"""Explanation for a failed assert -- the a and b arguments are List[str]"""
return ["--- actual / +++ expected"] + [
line.strip("\n") for line in difflib.ndiff(a, b)
]
class Output:
    """Wraps captured output; equality ignores the "###" lifetime-tracking
    lines emitted by constructor_stats.h and dedents the expected text."""

    def __init__(self, string):
        self.string = string
        self.explanation = []

    def __str__(self):
        return self.string

    def __eq__(self, other):
        # Drop constructor/destructor tracking lines before comparing.
        actual = [
            line
            for line in self.string.strip().splitlines()
            if not line.startswith("###")
        ]
        expected = _strip_and_dedent(other).splitlines()
        if actual != expected:
            self.explanation = _make_explanation(actual, expected)
            return False
        return True
class Unordered(Output):
    """Output variant whose equality ignores line ordering."""

    def __eq__(self, other):
        actual = _split_and_sort(self.string)
        expected = _split_and_sort(other)
        if actual != expected:
            self.explanation = _make_explanation(actual, expected)
            return False
        return True
class Capture:
    """Context-manager wrapper over a pytest capture fixture: records the
    stdout/stderr produced inside the ``with`` block and compares them via
    the Output/Unordered helpers."""

    def __init__(self, capfd):
        self.capfd = capfd
        self.out = ""
        self.err = ""

    def __enter__(self):
        # Drain anything captured before the block so it starts clean.
        self.capfd.readouterr()
        return self

    def __exit__(self, *args):
        self.out, self.err = self.capfd.readouterr()

    def __eq__(self, other):
        # By default compare captured stdout (ignoring "###" lines).
        a = Output(self.out)
        b = other
        if a == b:
            return True
        self.explanation = a.explanation
        return False

    def __str__(self):
        return self.out

    def __contains__(self, item):
        return item in self.out

    @property
    def unordered(self):
        # Order-insensitive view of captured stdout.
        return Unordered(self.out)

    @property
    def stderr(self):
        # Captured stderr as an Output (stdout is the default comparison).
        return Output(self.err)
@pytest.fixture()
def capture(capsys):
    """Extended `capsys` with context manager and custom equality operators"""
    # Usage: with capture: ...; assert capture == """expected output"""
    return Capture(capsys)
class SanitizedString:
    """Callable comparator: applies *sanitizer* to an object, then compares
    the result against dedented expected text, recording a diff explanation
    on mismatch."""

    def __init__(self, sanitizer):
        self.sanitizer = sanitizer
        self.string = ""
        self.explanation = []

    def __call__(self, thing):
        self.string = self.sanitizer(thing)
        return self

    def __eq__(self, other):
        actual = self.string
        expected = _strip_and_dedent(other)
        if actual != expected:
            self.explanation = _make_explanation(
                actual.splitlines(), expected.splitlines()
            )
            return False
        return True
def _sanitize_general(s):
    """Shared sanitization: strip surrounding whitespace, shorten the test
    module prefix to "m.", and drop Python-2-style "L" long suffixes."""
    s = s.strip()
    s = s.replace("pybind11_tests.", "m.")
    return _long_marker.sub(r"\1", s)
def _sanitize_docstring(thing):
    """Return the sanitized ``__doc__`` of *thing*."""
    s = thing.__doc__
    return _sanitize_general(s)
@pytest.fixture()
def doc():
    """Sanitize docstrings and add custom failure explanation"""
    # Usage: assert doc(obj) == """expected docstring"""
    return SanitizedString(_sanitize_docstring)
def _sanitize_message(thing):
    """Sanitize str(thing); additionally collapse hex addresses to ``0``."""
    s = str(thing)
    s = _sanitize_general(s)
    return _hexadecimal.sub("0", s)
@pytest.fixture()
def msg():
    """Sanitize messages and add custom failure explanation"""
    # Usage: assert msg(excinfo.value) == """expected message"""
    return SanitizedString(_sanitize_message)
def pytest_assertrepr_compare(op, left, right):  # noqa: ARG001
    """Hook: surface the diff explanation recorded by the Output/Capture
    comparison helpers as the assertion failure message."""
    # Only our helper objects carry an ``explanation`` attribute.
    return getattr(left, "explanation", None)
def gc_collect():
    """Run the garbage collector twice (needed when running
    reference counting tests with PyPy)"""
    for _ in range(2):
        gc.collect()
def pytest_configure():
    """Expose convenience helpers as attributes on the pytest namespace."""
    pytest.suppress = contextlib.suppress
    pytest.gc_collect = gc_collect
def pytest_report_header(config):
    """Add C++ compiler/ABI details to the pytest header (useful in CI logs)."""
    del config  # Unused.
    assert (
        pybind11_tests.compiler_info is not None
    ), "Please update pybind11_tests.cpp if this assert fails."
    return (
        "C++ Info:"
        f" {pybind11_tests.compiler_info}"
        f" {pybind11_tests.cpp_std}"
        f" {pybind11_tests.PYBIND11_INTERNALS_ID}"
        f" PYBIND11_SIMPLE_GIL_MANAGEMENT={pybind11_tests.PYBIND11_SIMPLE_GIL_MANAGEMENT}"
    )
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/constructor_stats.h | C/C++ Header | #pragma once
/*
tests/constructor_stats.h -- framework for printing and tracking object
instance lifetimes in example/test code.
Copyright (c) 2016 Jason Rhinelander <jason@imaginary.ca>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
This header provides a few useful tools for writing examples or tests that want to check and/or
display object instance lifetimes. It requires that you include this header and add the following
function calls to constructors:
class MyClass {
MyClass() { ...; print_default_created(this); }
~MyClass() { ...; print_destroyed(this); }
MyClass(const MyClass &c) { ...; print_copy_created(this); }
MyClass(MyClass &&c) { ...; print_move_created(this); }
MyClass(int a, int b) { ...; print_created(this, a, b); }
MyClass &operator=(const MyClass &c) { ...; print_copy_assigned(this); }
MyClass &operator=(MyClass &&c) { ...; print_move_assigned(this); }
...
}
You can find various examples of these in several of the existing testing .cpp files. (Of course
you don't need to add any of the above constructors/operators that you don't actually have, except
for the destructor).
Each of these will print an appropriate message such as:
### MyClass @ 0x2801910 created via default constructor
### MyClass @ 0x27fa780 created 100 200
### MyClass @ 0x2801910 destroyed
### MyClass @ 0x27fa780 destroyed
You can also include extra arguments (such as the 100, 200 in the output above, coming from the
value constructor) for all of the above methods which will be included in the output.
For testing, each of these also keeps track the created instances and allows you to check how many
of the various constructors have been invoked from the Python side via code such as:
from pybind11_tests import ConstructorStats
cstats = ConstructorStats.get(MyClass)
print(cstats.alive())
print(cstats.default_constructions)
Note that `.alive()` should usually be the first thing you call as it invokes Python's garbage
collector to actually destroy objects that aren't yet referenced.
For everything except copy and move constructors and destructors, any extra values given to the
print_...() function is stored in a class-specific values list which you can retrieve and inspect
from the ConstructorStats instance `.values()` method.
In some cases, when you need to track instances of a C++ class not registered with pybind11, you
need to add a function returning the ConstructorStats for the C++ class; this can be done with:
m.def("get_special_cstats", &ConstructorStats::get<SpecialClass>,
py::return_value_policy::reference)
Finally, you can suppress the output messages, but keep the constructor tracking (for
inspection/testing in python) by using the functions with `print_` replaced with `track_` (e.g.
`track_copy_created(this)`).
*/
#include "pybind11_tests.h"
#include <list>
#include <sstream>
#include <typeindex>
#include <unordered_map>
class ConstructorStats {
protected:
    std::unordered_map<void *, int> _instances; // Need a map rather than set because members can
                                                // shared address with parents
    std::list<std::string> _values; // Used to track values
                                    // (e.g. of value constructors)
public:
    int default_constructions = 0;
    int copy_constructions = 0;
    int move_constructions = 0;
    int copy_assignments = 0;
    int move_assignments = 0;
    // Each *_created helper bumps the per-address live count plus its counter.
    void copy_created(void *inst) {
        created(inst);
        copy_constructions++;
    }
    void move_created(void *inst) {
        created(inst);
        move_constructions++;
    }
    void default_created(void *inst) {
        created(inst);
        default_constructions++;
    }
    void created(void *inst) { ++_instances[inst]; }
    void destroyed(void *inst) {
        // A negative count means destroyed() without a matching created().
        if (--_instances[inst] < 0) {
            throw std::runtime_error("cstats.destroyed() called with unknown "
                                     "instance; potential double-destruction "
                                     "or a missing cstats.created()");
        }
    }
    static void gc() {
        // Force garbage collection to ensure any pending destructors are invoked:
#if defined(PYPY_VERSION)
        // On PyPy the collector must be driven from Python code (twice, to
        // flush deferred finalizers).
        PyObject *globals = PyEval_GetGlobals();
        PyObject *result = PyRun_String("import gc\n"
                                        "for i in range(2):\n"
                                        "    gc.collect()\n",
                                        Py_file_input,
                                        globals,
                                        globals);
        if (result == nullptr)
            throw py::error_already_set();
        Py_DECREF(result);
#else
        py::module_::import("gc").attr("collect")();
#endif
    }
    int alive() {
        // Collect first so pending destructors run before counting.
        gc();
        int total = 0;
        for (const auto &p : _instances) {
            if (p.second > 0) {
                total += p.second;
            }
        }
        return total;
    }
    void value() {} // Recursion terminator
    // Takes one or more values, converts them to strings, then stores them.
    template <typename T, typename... Tmore>
    void value(const T &v, Tmore &&...args) {
        std::ostringstream oss;
        oss << v;
        _values.push_back(oss.str());
        value(std::forward<Tmore>(args)...);
    }
    // Move out stored values
    py::list values() {
        py::list l;
        for (const auto &v : _values) {
            l.append(py::cast(v));
        }
        _values.clear();
        return l;
    }
    // Gets constructor stats from a C++ type index
    static ConstructorStats &get(std::type_index type) {
        // Single global registry; entries are lazily default-constructed.
        static std::unordered_map<std::type_index, ConstructorStats> all_cstats;
        return all_cstats[type];
    }
    // Gets constructor stats from a C++ type
    template <typename T>
    static ConstructorStats &get() {
#if defined(PYPY_VERSION)
        gc();
#endif
        return get(typeid(T));
    }
    // Gets constructor stats from a Python class
    static ConstructorStats &get(py::object class_) {
        auto &internals = py::detail::get_internals();
        const std::type_index *t1 = nullptr, *t2 = nullptr;
        try {
            // Find the C++ type(s) registered for this Python class; there can
            // be two when a trampoline (alias) class is involved.
            auto *type_info
                = internals.registered_types_py.at((PyTypeObject *) class_.ptr()).at(0);
            for (auto &p : internals.registered_types_cpp) {
                if (p.second == type_info) {
                    if (t1) {
                        t2 = &p.first;
                        break;
                    }
                    t1 = &p.first;
                }
            }
        } catch (const std::out_of_range &) {
        }
        if (!t1) {
            throw std::runtime_error("Unknown class passed to ConstructorStats::get()");
        }
        auto &cs1 = get(*t1);
        // If we have both a t1 and t2 match, one is probably the trampoline class; return
        // whichever has more constructions (typically one or the other will be 0)
        if (t2) {
            auto &cs2 = get(*t2);
            int cs1_total = cs1.default_constructions + cs1.copy_constructions
                            + cs1.move_constructions + (int) cs1._values.size();
            int cs2_total = cs2.default_constructions + cs2.copy_constructions
                            + cs2.move_constructions + (int) cs2._values.size();
            if (cs2_total > cs1_total) {
                return cs2;
            }
        }
        return cs1;
    }
};
// To track construction/destruction, you need to call these methods from the various
// constructors/operators. The ones that take extra values record the given values in the
// constructor stats values for later inspection.
template <class T>
void track_copy_created(T *inst) {
    ConstructorStats::get<T>().copy_created(inst);
}
template <class T>
void track_move_created(T *inst) {
    ConstructorStats::get<T>().move_created(inst);
}
template <class T, typename... Values>
void track_copy_assigned(T *, Values &&...values) {
    auto &cst = ConstructorStats::get<T>();
    cst.copy_assignments++;
    cst.value(std::forward<Values>(values)...);
}
template <class T, typename... Values>
void track_move_assigned(T *, Values &&...values) {
    auto &cst = ConstructorStats::get<T>();
    cst.move_assignments++;
    cst.value(std::forward<Values>(values)...);
}
template <class T, typename... Values>
void track_default_created(T *inst, Values &&...values) {
    auto &cst = ConstructorStats::get<T>();
    cst.default_created(inst);
    cst.value(std::forward<Values>(values)...);
}
template <class T, typename... Values>
void track_created(T *inst, Values &&...values) {
    auto &cst = ConstructorStats::get<T>();
    cst.created(inst);
    cst.value(std::forward<Values>(values)...);
}
// NOTE(review): the Values pack below is unused (destroyed() records no
// values); it appears to be kept for signature symmetry with the other
// track_* helpers — confirm before removing.
template <class T, typename... Values>
void track_destroyed(T *inst) {
    ConstructorStats::get<T>().destroyed(inst);
}
template <class T, typename... Values>
void track_values(T *, Values &&...values) {
    ConstructorStats::get<T>().value(std::forward<Values>(values)...);
}
/// Don't cast pointers to Python, print them as strings
inline const char *format_ptrs(const char *p) { return p; }
// Generic pointers are rendered as hex strings so the output stays printable.
template <typename T>
py::str format_ptrs(T *p) {
    return "{:#x}"_s.format(reinterpret_cast<std::uintptr_t>(p));
}
// Non-pointer values pass through unchanged (perfect forwarding).
template <typename T>
auto format_ptrs(T &&x) -> decltype(std::forward<T>(x)) {
    return std::forward<T>(x);
}
// Emits one "### <Type> @ <addr> <action> ..." line via py::print; the "###"
// prefix is what the Python-side Output comparator filters out.
template <class T, typename... Output>
void print_constr_details(T *inst, const std::string &action, Output &&...output) {
    py::print("###",
              py::type_id<T>(),
              "@",
              format_ptrs(inst),
              action,
              format_ptrs(std::forward<Output>(output))...);
}
// Verbose versions of the above:
// (each prints a "###" line and then records via the matching track_* helper)
template <class T, typename... Values>
void print_copy_created(T *inst,
                        Values &&...values) { // NB: this prints, but doesn't store, given values
    print_constr_details(inst, "created via copy constructor", values...);
    track_copy_created(inst);
}
template <class T, typename... Values>
void print_move_created(T *inst,
                        Values &&...values) { // NB: this prints, but doesn't store, given values
    print_constr_details(inst, "created via move constructor", values...);
    track_move_created(inst);
}
template <class T, typename... Values>
void print_copy_assigned(T *inst, Values &&...values) {
    print_constr_details(inst, "assigned via copy assignment", values...);
    track_copy_assigned(inst, values...);
}
template <class T, typename... Values>
void print_move_assigned(T *inst, Values &&...values) {
    print_constr_details(inst, "assigned via move assignment", values...);
    track_move_assigned(inst, values...);
}
template <class T, typename... Values>
void print_default_created(T *inst, Values &&...values) {
    print_constr_details(inst, "created via default constructor", values...);
    track_default_created(inst, values...);
}
template <class T, typename... Values>
void print_created(T *inst, Values &&...values) {
    print_constr_details(inst, "created", values...);
    track_created(inst, values...);
}
template <class T, typename... Values>
void print_destroyed(T *inst, Values &&...values) { // Prints but doesn't store given values
    print_constr_details(inst, "destroyed", values...);
    track_destroyed(inst);
}
template <class T, typename... Values>
void print_values(T *inst, Values &&...values) {
    print_constr_details(inst, ":", values...);
    track_values(inst, values...);
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/cross_module_gil_utils.cpp | C++ | /*
tests/cross_module_gil_utils.cpp -- tools for acquiring GIL from a different module
Copyright (c) 2019 Google LLC
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#if defined(PYBIND11_INTERNALS_VERSION)
# undef PYBIND11_INTERNALS_VERSION
#endif
#define PYBIND11_INTERNALS_VERSION 21814642 // Ensure this module has its own `internals` instance.
#include <pybind11/pybind11.h>
#include <cstdint>
#include <string>
#include <thread>
// This file mimics a DSO that makes pybind11 calls but does not define a
// PYBIND11_MODULE. The purpose is to test that such a DSO can create a
// py::gil_scoped_acquire when the running thread is in a GIL-released state.
//
// Note that we define a Python module here for convenience, but in general
// this need not be the case. The typical scenario would be a DSO that implements
// shared logic used internally by multiple pybind11 modules.
namespace {
namespace py = pybind11;
// Minimal helper: acquire (and on scope exit release) the GIL once.
void gil_acquire() { py::gil_scoped_acquire gil; }
// Runs an acquire/release sequence selected by the low four bits of the
// argument; returns this module's internals ID so the caller can compare it.
std::string gil_multi_acquire_release(unsigned bits) {
    if ((bits & 0x1u) != 0u) {
        py::gil_scoped_acquire gil;
    }
    if ((bits & 0x2u) != 0u) {
        py::gil_scoped_release gil;
    }
    if ((bits & 0x4u) != 0u) {
        py::gil_scoped_acquire gil;
    }
    if ((bits & 0x8u) != 0u) {
        py::gil_scoped_release gil;
    }
    return PYBIND11_INTERNALS_ID;
}
// RAII GIL helpers built directly on the raw CPython API (no pybind11).
struct CustomAutoGIL {
    CustomAutoGIL() : gstate(PyGILState_Ensure()) {}
    ~CustomAutoGIL() { PyGILState_Release(gstate); }
    PyGILState_STATE gstate;
};
struct CustomAutoNoGIL {
    CustomAutoNoGIL() : save(PyEval_SaveThread()) {}
    ~CustomAutoNoGIL() { PyEval_RestoreThread(save); }
    PyThreadState *save;
};
// Nested acquire/acquire/release on the current thread.
template <typename Acquire, typename Release>
void gil_acquire_inner() {
    Acquire acquire_outer;
    Acquire acquire_inner;
    Release release;
}
// Same sequence, plus the inner sequence again on a freshly started thread.
template <typename Acquire, typename Release>
void gil_acquire_nested() {
    Acquire acquire_outer;
    Acquire acquire_inner;
    Release release;
    auto thread = std::thread(&gil_acquire_inner<Acquire, Release>);
    thread.join();
}
constexpr char kModuleName[] = "cross_module_gil_utils";
struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT, kModuleName, nullptr, 0, nullptr, nullptr, nullptr, nullptr, nullptr};
} // namespace
// Exposes each helper's address as an integer module attribute, so tests can
// invoke it through a raw function pointer across module boundaries.
#define ADD_FUNCTION(Name, ...)                                                                   \
    PyModule_AddObject(m, Name, PyLong_FromVoidPtr(reinterpret_cast<void *>(&__VA_ARGS__)));
extern "C" PYBIND11_EXPORT PyObject *PyInit_cross_module_gil_utils() {
    PyObject *m = PyModule_Create(&moduledef);
    if (m != nullptr) {
        static_assert(sizeof(&gil_acquire) == sizeof(void *),
                      "Function pointer must have the same size as void*");
        ADD_FUNCTION("gil_acquire_funcaddr", gil_acquire)
        ADD_FUNCTION("gil_multi_acquire_release_funcaddr", gil_multi_acquire_release)
        ADD_FUNCTION("gil_acquire_inner_custom_funcaddr",
                     gil_acquire_inner<CustomAutoGIL, CustomAutoNoGIL>)
        ADD_FUNCTION("gil_acquire_nested_custom_funcaddr",
                     gil_acquire_nested<CustomAutoGIL, CustomAutoNoGIL>)
        ADD_FUNCTION("gil_acquire_inner_pybind11_funcaddr",
                     gil_acquire_inner<py::gil_scoped_acquire, py::gil_scoped_release>)
        ADD_FUNCTION("gil_acquire_nested_pybind11_funcaddr",
                     gil_acquire_nested<py::gil_scoped_acquire, py::gil_scoped_release>)
    }
    return m;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/cross_module_interleaved_error_already_set.cpp | C++ | /*
Copyright (c) 2022 Google LLC
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include <pybind11/pybind11.h>
// This file mimics a DSO that makes pybind11 calls but does not define a PYBIND11_MODULE,
// so that the first call of cross_module_error_already_set() triggers the first call of
// pybind11::detail::get_internals().
namespace {
namespace py = pybind11;
// Reproduces the scenario where a second Python error is raised while the
// error_already_set holding the first error is being destroyed.
void interleaved_error_already_set() {
    PyErr_SetString(PyExc_RuntimeError, "1st error.");
    try {
        throw py::error_already_set();
    } catch (const py::error_already_set &) {
        // The 2nd error could be conditional in a real application.
        PyErr_SetString(PyExc_RuntimeError, "2nd error.");
    } // Here the 1st error is destroyed before the 2nd error is fetched.
    // The error_already_set dtor triggers a pybind11::detail::get_internals()
    // call via pybind11::gil_scoped_acquire.
    if (PyErr_Occurred()) {
        throw py::error_already_set();
    }
}
constexpr char kModuleName[] = "cross_module_interleaved_error_already_set";
struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT, kModuleName, nullptr, 0, nullptr, nullptr, nullptr, nullptr, nullptr};
} // namespace
extern "C" PYBIND11_EXPORT PyObject *PyInit_cross_module_interleaved_error_already_set() {
    PyObject *m = PyModule_Create(&moduledef);
    if (m != nullptr) {
        // Export the helper's raw address so tests can call it via a function
        // pointer without a link-time dependency on this DSO.
        static_assert(sizeof(&interleaved_error_already_set) == sizeof(void *),
                      "Function pointer must have the same size as void *");
        PyModule_AddObject(
            m,
            "funcaddr",
            PyLong_FromVoidPtr(reinterpret_cast<void *>(&interleaved_error_already_set)));
    }
    return m;
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/eigen_tensor_avoid_stl_array.cpp | C++ | /*
tests/eigen_tensor.cpp -- automatic conversion of Eigen Tensor
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#ifndef EIGEN_AVOID_STL_ARRAY
#    define EIGEN_AVOID_STL_ARRAY
#endif
#include "test_eigen_tensor.inl"
// Same tensor test suite, rebuilt with EIGEN_AVOID_STL_ARRAY defined to cover
// Eigen's alternative (non-std::array) dimension representation.
PYBIND11_MODULE(eigen_tensor_avoid_stl_array, m) { eigen_tensor_test::test_module(m); }
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/env.py | Python | import platform
import sys
import pytest
# Platform flags used throughout the tests to skip/xfail per operating system.
LINUX = sys.platform.startswith("linux")
MACOS = sys.platform.startswith("darwin")
WIN = sys.platform.startswith("win32") or sys.platform.startswith("cygwin")
# Interpreter-implementation flags (some tests depend on CPython/PyPy details).
CPYTHON = platform.python_implementation() == "CPython"
PYPY = platform.python_implementation() == "PyPy"
def deprecated_call():
    """
    pytest.deprecated_call() seems broken in pytest<3.9.x; concretely, it
    doesn't work on CPython 3.8.0 with pytest==3.3.2 on Ubuntu 18.04 (#2922).
    This is a narrowed reimplementation of the following PR :(
    https://github.com/pytest-dev/pytest/pull/4104
    """
    # TODO: Remove this when testing requires pytest>=3.9.
    version_pieces = pytest.__version__.split(".")
    major_minor = (int(version_pieces[0]), int(version_pieces[1]))
    if major_minor >= (3, 9):
        return pytest.deprecated_call()
    # Fallback for old pytest: accept either deprecation warning category.
    return pytest.warns((DeprecationWarning, PendingDeprecationWarning))
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/extra_python_package/test_files.py | Python | import contextlib
import os
import string
import subprocess
import sys
import tarfile
import zipfile
# These tests must be run explicitly
# They require CMake 3.15+ (--install)
DIR = os.path.abspath(os.path.dirname(__file__))
MAIN_DIR = os.path.dirname(os.path.dirname(DIR))
PKGCONFIG = """\
prefix=${{pcfiledir}}/../../
includedir=${{prefix}}/include
Name: pybind11
Description: Seamless operability between C++11 and Python
Version: {VERSION}
Cflags: -I${{includedir}}
"""
main_headers = {
"include/pybind11/attr.h",
"include/pybind11/buffer_info.h",
"include/pybind11/cast.h",
"include/pybind11/chrono.h",
"include/pybind11/common.h",
"include/pybind11/complex.h",
"include/pybind11/eigen.h",
"include/pybind11/embed.h",
"include/pybind11/eval.h",
"include/pybind11/functional.h",
"include/pybind11/gil.h",
"include/pybind11/iostream.h",
"include/pybind11/numpy.h",
"include/pybind11/operators.h",
"include/pybind11/options.h",
"include/pybind11/pybind11.h",
"include/pybind11/pytypes.h",
"include/pybind11/stl.h",
"include/pybind11/stl_bind.h",
"include/pybind11/type_caster_pyobject_ptr.h",
}
detail_headers = {
"include/pybind11/detail/class.h",
"include/pybind11/detail/common.h",
"include/pybind11/detail/descr.h",
"include/pybind11/detail/init.h",
"include/pybind11/detail/internals.h",
"include/pybind11/detail/type_caster_base.h",
"include/pybind11/detail/typeid.h",
}
eigen_headers = {
"include/pybind11/eigen/common.h",
"include/pybind11/eigen/matrix.h",
"include/pybind11/eigen/tensor.h",
}
stl_headers = {
"include/pybind11/stl/filesystem.h",
}
cmake_files = {
"share/cmake/pybind11/FindPythonLibsNew.cmake",
"share/cmake/pybind11/pybind11Common.cmake",
"share/cmake/pybind11/pybind11Config.cmake",
"share/cmake/pybind11/pybind11ConfigVersion.cmake",
"share/cmake/pybind11/pybind11NewTools.cmake",
"share/cmake/pybind11/pybind11Targets.cmake",
"share/cmake/pybind11/pybind11Tools.cmake",
}
pkgconfig_files = {
"share/pkgconfig/pybind11.pc",
}
py_files = {
"__init__.py",
"__main__.py",
"_version.py",
"commands.py",
"py.typed",
"setup_helpers.py",
}
headers = main_headers | detail_headers | eigen_headers | stl_headers
src_files = headers | cmake_files | pkgconfig_files
all_files = src_files | py_files
sdist_files = {
"pybind11",
"pybind11/include",
"pybind11/include/pybind11",
"pybind11/include/pybind11/detail",
"pybind11/include/pybind11/eigen",
"pybind11/include/pybind11/stl",
"pybind11/share",
"pybind11/share/cmake",
"pybind11/share/cmake/pybind11",
"pybind11/share/pkgconfig",
"pyproject.toml",
"setup.cfg",
"setup.py",
"LICENSE",
"MANIFEST.in",
"README.rst",
"PKG-INFO",
"SECURITY.md",
}
local_sdist_files = {
".egg-info",
".egg-info/PKG-INFO",
".egg-info/SOURCES.txt",
".egg-info/dependency_links.txt",
".egg-info/not-zip-safe",
".egg-info/top_level.txt",
}
def read_tz_file(tar: tarfile.TarFile, name: str) -> bytes:
start = tar.getnames()[0] + "/"
inner_file = tar.extractfile(tar.getmember(f"{start}{name}"))
assert inner_file
with contextlib.closing(inner_file) as f:
return f.read()
def normalize_line_endings(value: bytes) -> bytes:
return value.replace(os.linesep.encode("utf-8"), b"\n")
def test_build_sdist(monkeypatch, tmpdir):
monkeypatch.chdir(MAIN_DIR)
subprocess.run(
[sys.executable, "-m", "build", "--sdist", f"--outdir={tmpdir}"], check=True
)
(sdist,) = tmpdir.visit("*.tar.gz")
with tarfile.open(str(sdist), "r:gz") as tar:
start = tar.getnames()[0] + "/"
version = start[9:-1]
simpler = {n.split("/", 1)[-1] for n in tar.getnames()[1:]}
setup_py = read_tz_file(tar, "setup.py")
pyproject_toml = read_tz_file(tar, "pyproject.toml")
pkgconfig = read_tz_file(tar, "pybind11/share/pkgconfig/pybind11.pc")
cmake_cfg = read_tz_file(
tar, "pybind11/share/cmake/pybind11/pybind11Config.cmake"
)
assert (
'set(pybind11_INCLUDE_DIR "${PACKAGE_PREFIX_DIR}/include")'
in cmake_cfg.decode("utf-8")
)
files = {f"pybind11/{n}" for n in all_files}
files |= sdist_files
files |= {f"pybind11{n}" for n in local_sdist_files}
files.add("pybind11.egg-info/entry_points.txt")
files.add("pybind11.egg-info/requires.txt")
assert simpler == files
with open(os.path.join(MAIN_DIR, "tools", "setup_main.py.in"), "rb") as f:
contents = (
string.Template(f.read().decode("utf-8"))
.substitute(version=version, extra_cmd="")
.encode("utf-8")
)
assert setup_py == contents
with open(os.path.join(MAIN_DIR, "tools", "pyproject.toml"), "rb") as f:
contents = f.read()
assert pyproject_toml == contents
simple_version = ".".join(version.split(".")[:3])
pkgconfig_expected = PKGCONFIG.format(VERSION=simple_version).encode("utf-8")
assert normalize_line_endings(pkgconfig) == pkgconfig_expected
def test_build_global_dist(monkeypatch, tmpdir):
monkeypatch.chdir(MAIN_DIR)
monkeypatch.setenv("PYBIND11_GLOBAL_SDIST", "1")
subprocess.run(
[sys.executable, "-m", "build", "--sdist", "--outdir", str(tmpdir)], check=True
)
(sdist,) = tmpdir.visit("*.tar.gz")
with tarfile.open(str(sdist), "r:gz") as tar:
start = tar.getnames()[0] + "/"
version = start[16:-1]
simpler = {n.split("/", 1)[-1] for n in tar.getnames()[1:]}
setup_py = read_tz_file(tar, "setup.py")
pyproject_toml = read_tz_file(tar, "pyproject.toml")
pkgconfig = read_tz_file(tar, "pybind11/share/pkgconfig/pybind11.pc")
cmake_cfg = read_tz_file(
tar, "pybind11/share/cmake/pybind11/pybind11Config.cmake"
)
assert (
'set(pybind11_INCLUDE_DIR "${PACKAGE_PREFIX_DIR}/include")'
in cmake_cfg.decode("utf-8")
)
files = {f"pybind11/{n}" for n in all_files}
files |= sdist_files
files |= {f"pybind11_global{n}" for n in local_sdist_files}
assert simpler == files
with open(os.path.join(MAIN_DIR, "tools", "setup_global.py.in"), "rb") as f:
contents = (
string.Template(f.read().decode())
.substitute(version=version, extra_cmd="")
.encode("utf-8")
)
assert setup_py == contents
with open(os.path.join(MAIN_DIR, "tools", "pyproject.toml"), "rb") as f:
contents = f.read()
assert pyproject_toml == contents
simple_version = ".".join(version.split(".")[:3])
pkgconfig_expected = PKGCONFIG.format(VERSION=simple_version).encode("utf-8")
assert normalize_line_endings(pkgconfig) == pkgconfig_expected
def tests_build_wheel(monkeypatch, tmpdir):
monkeypatch.chdir(MAIN_DIR)
subprocess.run(
[sys.executable, "-m", "pip", "wheel", ".", "-w", str(tmpdir)], check=True
)
(wheel,) = tmpdir.visit("*.whl")
files = {f"pybind11/{n}" for n in all_files}
files |= {
"dist-info/LICENSE",
"dist-info/METADATA",
"dist-info/RECORD",
"dist-info/WHEEL",
"dist-info/entry_points.txt",
"dist-info/top_level.txt",
}
with zipfile.ZipFile(str(wheel)) as z:
names = z.namelist()
trimmed = {n for n in names if "dist-info" not in n}
trimmed |= {f"dist-info/{n.split('/', 1)[-1]}" for n in names if "dist-info" in n}
assert files == trimmed
def tests_build_global_wheel(monkeypatch, tmpdir):
monkeypatch.chdir(MAIN_DIR)
monkeypatch.setenv("PYBIND11_GLOBAL_SDIST", "1")
subprocess.run(
[sys.executable, "-m", "pip", "wheel", ".", "-w", str(tmpdir)], check=True
)
(wheel,) = tmpdir.visit("*.whl")
files = {f"data/data/{n}" for n in src_files}
files |= {f"data/headers/{n[8:]}" for n in headers}
files |= {
"dist-info/LICENSE",
"dist-info/METADATA",
"dist-info/WHEEL",
"dist-info/top_level.txt",
"dist-info/RECORD",
}
with zipfile.ZipFile(str(wheel)) as z:
names = z.namelist()
beginning = names[0].split("/", 1)[0].rsplit(".", 1)[0]
trimmed = {n[len(beginning) + 1 :] for n in names}
assert files == trimmed
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/extra_setuptools/test_setuphelper.py | Python | import os
import subprocess
import sys
from textwrap import dedent
import pytest
DIR = os.path.abspath(os.path.dirname(__file__))
MAIN_DIR = os.path.dirname(os.path.dirname(DIR))
WIN = sys.platform.startswith("win32") or sys.platform.startswith("cygwin")
@pytest.mark.parametrize("parallel", [False, True])
@pytest.mark.parametrize("std", [11, 0])
def test_simple_setup_py(monkeypatch, tmpdir, parallel, std):
monkeypatch.chdir(tmpdir)
monkeypatch.syspath_prepend(MAIN_DIR)
(tmpdir / "setup.py").write_text(
dedent(
f"""\
import sys
sys.path.append({MAIN_DIR!r})
from setuptools import setup, Extension
from pybind11.setup_helpers import build_ext, Pybind11Extension
std = {std}
ext_modules = [
Pybind11Extension(
"simple_setup",
sorted(["main.cpp"]),
cxx_std=std,
),
]
cmdclass = dict()
if std == 0:
cmdclass["build_ext"] = build_ext
parallel = {parallel}
if parallel:
from pybind11.setup_helpers import ParallelCompile
ParallelCompile().install()
setup(
name="simple_setup_package",
cmdclass=cmdclass,
ext_modules=ext_modules,
)
"""
),
encoding="ascii",
)
(tmpdir / "main.cpp").write_text(
dedent(
"""\
#include <pybind11/pybind11.h>
int f(int x) {
return x * 3;
}
PYBIND11_MODULE(simple_setup, m) {
m.def("f", &f);
}
"""
),
encoding="ascii",
)
out = subprocess.check_output(
[sys.executable, "setup.py", "build_ext", "--inplace"],
)
if not WIN:
assert b"-g0" in out
out = subprocess.check_output(
[sys.executable, "setup.py", "build_ext", "--inplace", "--force"],
env=dict(os.environ, CFLAGS="-g"),
)
if not WIN:
assert b"-g0" not in out
# Debug helper printout, normally hidden
print(out)
for item in tmpdir.listdir():
print(item.basename)
assert (
len([f for f in tmpdir.listdir() if f.basename.startswith("simple_setup")]) == 1
)
assert len(list(tmpdir.listdir())) == 4 # two files + output + build_dir
(tmpdir / "test.py").write_text(
dedent(
"""\
import simple_setup
assert simple_setup.f(3) == 9
"""
),
encoding="ascii",
)
subprocess.check_call(
[sys.executable, "test.py"], stdout=sys.stdout, stderr=sys.stderr
)
def test_intree_extensions(monkeypatch, tmpdir):
monkeypatch.syspath_prepend(MAIN_DIR)
from pybind11.setup_helpers import intree_extensions
monkeypatch.chdir(tmpdir)
root = tmpdir
root.ensure_dir()
subdir = root / "dir"
subdir.ensure_dir()
src = subdir / "ext.cpp"
src.ensure()
relpath = src.relto(tmpdir)
(ext,) = intree_extensions([relpath])
assert ext.name == "ext"
subdir.ensure("__init__.py")
(ext,) = intree_extensions([relpath])
assert ext.name == "dir.ext"
def test_intree_extensions_package_dir(monkeypatch, tmpdir):
monkeypatch.syspath_prepend(MAIN_DIR)
from pybind11.setup_helpers import intree_extensions
monkeypatch.chdir(tmpdir)
root = tmpdir / "src"
root.ensure_dir()
subdir = root / "dir"
subdir.ensure_dir()
src = subdir / "ext.cpp"
src.ensure()
(ext,) = intree_extensions([src.relto(tmpdir)], package_dir={"": "src"})
assert ext.name == "dir.ext"
(ext,) = intree_extensions([src.relto(tmpdir)], package_dir={"foo": "src"})
assert ext.name == "foo.dir.ext"
subdir.ensure("__init__.py")
(ext,) = intree_extensions([src.relto(tmpdir)], package_dir={"": "src"})
assert ext.name == "dir.ext"
(ext,) = intree_extensions([src.relto(tmpdir)], package_dir={"foo": "src"})
assert ext.name == "foo.dir.ext"
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/local_bindings.h | C/C++ Header | #pragma once
#include "pybind11_tests.h"
#include <utility>
/// Simple class used to test py::local:
template <int>
class LocalBase {
public:
explicit LocalBase(int i) : i(i) {}
int i = -1;
};
/// Registered with py::module_local in both main and secondary modules:
using LocalType = LocalBase<0>;
/// Registered without py::module_local in both modules:
using NonLocalType = LocalBase<1>;
/// A second non-local type (for stl_bind tests):
using NonLocal2 = LocalBase<2>;
/// Tests within-module, different-compilation-unit local definition conflict:
using LocalExternal = LocalBase<3>;
/// Mixed: registered local first, then global
using MixedLocalGlobal = LocalBase<4>;
/// Mixed: global first, then local
using MixedGlobalLocal = LocalBase<5>;
/// Registered with py::module_local only in the secondary module:
using ExternalType1 = LocalBase<6>;
using ExternalType2 = LocalBase<7>;
using LocalVec = std::vector<LocalType>;
using LocalVec2 = std::vector<NonLocal2>;
using LocalMap = std::unordered_map<std::string, LocalType>;
using NonLocalVec = std::vector<NonLocalType>;
using NonLocalVec2 = std::vector<NonLocal2>;
using NonLocalMap = std::unordered_map<std::string, NonLocalType>;
using NonLocalMap2 = std::unordered_map<std::string, uint8_t>;
// Exception that will be caught via the module local translator.
class LocalException : public std::exception {
public:
explicit LocalException(const char *m) : message{m} {}
const char *what() const noexcept override { return message.c_str(); }
private:
std::string message = "";
};
// Exception that will be registered with register_local_exception_translator
class LocalSimpleException : public std::exception {
public:
explicit LocalSimpleException(const char *m) : message{m} {}
const char *what() const noexcept override { return message.c_str(); }
private:
std::string message = "";
};
PYBIND11_MAKE_OPAQUE(LocalVec);
PYBIND11_MAKE_OPAQUE(LocalVec2);
PYBIND11_MAKE_OPAQUE(LocalMap);
PYBIND11_MAKE_OPAQUE(NonLocalVec);
// PYBIND11_MAKE_OPAQUE(NonLocalVec2); // same type as LocalVec2
PYBIND11_MAKE_OPAQUE(NonLocalMap);
PYBIND11_MAKE_OPAQUE(NonLocalMap2);
// Simple bindings (used with the above):
template <typename T, int Adjust = 0, typename... Args>
py::class_<T> bind_local(Args &&...args) {
return py::class_<T>(std::forward<Args>(args)...).def(py::init<int>()).def("get", [](T &i) {
return i.i + Adjust;
});
};
// Simulate a foreign library base class (to match the example in the docs):
namespace pets {
class Pet {
public:
explicit Pet(std::string name) : name_(std::move(name)) {}
std::string name_;
const std::string &name() const { return name_; }
};
} // namespace pets
struct MixGL {
int i;
explicit MixGL(int i) : i{i} {}
};
struct MixGL2 {
int i;
explicit MixGL2(int i) : i{i} {}
};
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/object.h | C/C++ Header | #if !defined(__OBJECT_H)
# define __OBJECT_H
# include "constructor_stats.h"
# include <atomic>
/// Reference counted object base class
class Object {
public:
/// Default constructor
Object() { print_default_created(this); }
/// Copy constructor
Object(const Object &) : m_refCount(0) { print_copy_created(this); }
/// Return the current reference count
int getRefCount() const { return m_refCount; };
/// Increase the object's reference count by one
void incRef() const { ++m_refCount; }
/** \brief Decrease the reference count of
* the object and possibly deallocate it.
*
* The object will automatically be deallocated once
* the reference count reaches zero.
*/
void decRef(bool dealloc = true) const {
--m_refCount;
if (m_refCount == 0 && dealloc) {
delete this;
} else if (m_refCount < 0) {
throw std::runtime_error("Internal error: reference count < 0!");
}
}
virtual std::string toString() const = 0;
protected:
/** \brief Virtual protected deconstructor.
* (Will only be called by \ref ref)
*/
virtual ~Object() { print_destroyed(this); }
private:
mutable std::atomic<int> m_refCount{0};
};
// Tag class used to track constructions of ref objects. When we track constructors, below, we
// track and print out the actual class (e.g. ref<MyObject>), and *also* add a fake tracker for
// ref_tag. This lets us check that the total number of ref<Anything> constructors/destructors is
// correct without having to check each individual ref<Whatever> type individually.
class ref_tag {};
/**
* \brief Reference counting helper
*
* The \a ref refeference template is a simple wrapper to store a
* pointer to an object. It takes care of increasing and decreasing
* the reference count of the object. When the last reference goes
* out of scope, the associated object will be deallocated.
*
* \ingroup libcore
*/
template <typename T>
class ref {
public:
/// Create a nullptr reference
ref() : m_ptr(nullptr) {
print_default_created(this);
track_default_created((ref_tag *) this);
}
/// Construct a reference from a pointer
explicit ref(T *ptr) : m_ptr(ptr) {
if (m_ptr) {
((Object *) m_ptr)->incRef();
}
print_created(this, "from pointer", m_ptr);
track_created((ref_tag *) this, "from pointer");
}
/// Copy constructor
ref(const ref &r) : m_ptr(r.m_ptr) {
if (m_ptr) {
((Object *) m_ptr)->incRef();
}
print_copy_created(this, "with pointer", m_ptr);
track_copy_created((ref_tag *) this);
}
/// Move constructor
ref(ref &&r) noexcept : m_ptr(r.m_ptr) {
r.m_ptr = nullptr;
print_move_created(this, "with pointer", m_ptr);
track_move_created((ref_tag *) this);
}
/// Destroy this reference
~ref() {
if (m_ptr) {
((Object *) m_ptr)->decRef();
}
print_destroyed(this);
track_destroyed((ref_tag *) this);
}
/// Move another reference into the current one
ref &operator=(ref &&r) noexcept {
print_move_assigned(this, "pointer", r.m_ptr);
track_move_assigned((ref_tag *) this);
if (*this == r) {
return *this;
}
if (m_ptr) {
((Object *) m_ptr)->decRef();
}
m_ptr = r.m_ptr;
r.m_ptr = nullptr;
return *this;
}
/// Overwrite this reference with another reference
ref &operator=(const ref &r) {
if (this == &r) {
return *this;
}
print_copy_assigned(this, "pointer", r.m_ptr);
track_copy_assigned((ref_tag *) this);
if (m_ptr == r.m_ptr) {
return *this;
}
if (m_ptr) {
((Object *) m_ptr)->decRef();
}
m_ptr = r.m_ptr;
if (m_ptr) {
((Object *) m_ptr)->incRef();
}
return *this;
}
/// Overwrite this reference with a pointer to another object
ref &operator=(T *ptr) {
print_values(this, "assigned pointer");
track_values((ref_tag *) this, "assigned pointer");
if (m_ptr == ptr) {
return *this;
}
if (m_ptr) {
((Object *) m_ptr)->decRef();
}
m_ptr = ptr;
if (m_ptr) {
((Object *) m_ptr)->incRef();
}
return *this;
}
/// Compare this reference with another reference
bool operator==(const ref &r) const { return m_ptr == r.m_ptr; }
/// Compare this reference with another reference
bool operator!=(const ref &r) const { return m_ptr != r.m_ptr; }
/// Compare this reference with a pointer
bool operator==(const T *ptr) const { return m_ptr == ptr; }
/// Compare this reference with a pointer
bool operator!=(const T *ptr) const { return m_ptr != ptr; }
/// Access the object referenced by this reference
T *operator->() { return m_ptr; }
/// Access the object referenced by this reference
const T *operator->() const { return m_ptr; }
/// Return a C++ reference to the referenced object
T &operator*() { return *m_ptr; }
/// Return a const C++ reference to the referenced object
const T &operator*() const { return *m_ptr; }
/// Return a pointer to the referenced object
explicit operator T *() { return m_ptr; }
/// Return a const pointer to the referenced object
T *get_ptr() { return m_ptr; }
/// Return a pointer to the referenced object
const T *get_ptr() const { return m_ptr; }
private:
T *m_ptr;
};
#endif /* __OBJECT_H */
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/pybind11_cross_module_tests.cpp | C++ | /*
tests/pybind11_cross_module_tests.cpp -- contains tests that require multiple modules
Copyright (c) 2017 Jason Rhinelander <jason@imaginary.ca>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include <pybind11/stl_bind.h>
#include "local_bindings.h"
#include "pybind11_tests.h"
#include "test_exceptions.h"
#include <numeric>
#include <utility>
PYBIND11_MODULE(pybind11_cross_module_tests, m) {
m.doc() = "pybind11 cross-module test module";
// test_local_bindings.py tests:
//
// Definitions here are tested by importing both this module and the
// relevant pybind11_tests submodule from a test_whatever.py
// test_load_external
bind_local<ExternalType1>(m, "ExternalType1", py::module_local());
bind_local<ExternalType2>(m, "ExternalType2", py::module_local());
// test_exceptions.py
py::register_local_exception<LocalSimpleException>(m, "LocalSimpleException");
m.def("raise_runtime_error", []() {
PyErr_SetString(PyExc_RuntimeError, "My runtime error");
throw py::error_already_set();
});
m.def("raise_value_error", []() {
PyErr_SetString(PyExc_ValueError, "My value error");
throw py::error_already_set();
});
m.def("throw_pybind_value_error", []() { throw py::value_error("pybind11 value error"); });
m.def("throw_pybind_type_error", []() { throw py::type_error("pybind11 type error"); });
m.def("throw_stop_iteration", []() { throw py::stop_iteration(); });
m.def("throw_local_error", []() { throw LocalException("just local"); });
m.def("throw_local_simple_error", []() { throw LocalSimpleException("external mod"); });
py::register_exception_translator([](std::exception_ptr p) {
try {
if (p) {
std::rethrow_exception(p);
}
} catch (const shared_exception &e) {
PyErr_SetString(PyExc_KeyError, e.what());
}
});
// translate the local exception into a key error but only in this module
py::register_local_exception_translator([](std::exception_ptr p) {
try {
if (p) {
std::rethrow_exception(p);
}
} catch (const LocalException &e) {
PyErr_SetString(PyExc_KeyError, e.what());
}
});
// test_local_bindings.py
// Local to both:
bind_local<LocalType, 1>(m, "LocalType", py::module_local()).def("get2", [](LocalType &t) {
return t.i + 2;
});
// Can only be called with our python type:
m.def("local_value", [](LocalType &l) { return l.i; });
// test_nonlocal_failure
// This registration will fail (global registration when LocalFail is already registered
// globally in the main test module):
m.def("register_nonlocal", [m]() { bind_local<NonLocalType, 0>(m, "NonLocalType"); });
// test_stl_bind_local
// stl_bind.h binders defaults to py::module_local if the types are local or converting:
py::bind_vector<LocalVec>(m, "LocalVec");
py::bind_map<LocalMap>(m, "LocalMap");
// test_stl_bind_global
// and global if the type (or one of the types, for the map) is global (so these will fail,
// assuming pybind11_tests is already loaded):
m.def("register_nonlocal_vec", [m]() { py::bind_vector<NonLocalVec>(m, "NonLocalVec"); });
m.def("register_nonlocal_map", [m]() { py::bind_map<NonLocalMap>(m, "NonLocalMap"); });
// The default can, however, be overridden to global using `py::module_local()` or
// `py::module_local(false)`.
// Explicitly made local:
py::bind_vector<NonLocalVec2>(m, "NonLocalVec2", py::module_local());
// Explicitly made global (and so will fail to bind):
m.def("register_nonlocal_map2",
[m]() { py::bind_map<NonLocalMap2>(m, "NonLocalMap2", py::module_local(false)); });
// test_mixed_local_global
// We try this both with the global type registered first and vice versa (the order shouldn't
// matter).
m.def("register_mixed_global_local",
[m]() { bind_local<MixedGlobalLocal, 200>(m, "MixedGlobalLocal", py::module_local()); });
m.def("register_mixed_local_global", [m]() {
bind_local<MixedLocalGlobal, 2000>(m, "MixedLocalGlobal", py::module_local(false));
});
m.def("get_mixed_gl", [](int i) { return MixedGlobalLocal(i); });
m.def("get_mixed_lg", [](int i) { return MixedLocalGlobal(i); });
// test_internal_locals_differ
m.def("local_cpp_types_addr",
[]() { return (uintptr_t) &py::detail::get_local_internals().registered_types_cpp; });
// test_stl_caster_vs_stl_bind
py::bind_vector<std::vector<int>>(m, "VectorInt");
m.def("load_vector_via_binding",
[](std::vector<int> &v) { return std::accumulate(v.begin(), v.end(), 0); });
// test_cross_module_calls
m.def("return_self", [](LocalVec *v) { return v; });
m.def("return_copy", [](const LocalVec &v) { return LocalVec(v); });
class Dog : public pets::Pet {
public:
explicit Dog(std::string name) : Pet(std::move(name)) {}
};
py::class_<pets::Pet>(m, "Pet", py::module_local()).def("name", &pets::Pet::name);
// Binding for local extending class:
py::class_<Dog, pets::Pet>(m, "Dog").def(py::init<std::string>());
m.def("pet_name", [](pets::Pet &p) { return p.name(); });
py::class_<MixGL>(m, "MixGL", py::module_local()).def(py::init<int>());
m.def("get_gl_value", [](MixGL &o) { return o.i + 100; });
py::class_<MixGL2>(m, "MixGL2", py::module_local()).def(py::init<int>());
// test_vector_bool
// We can't test both stl.h and stl_bind.h conversions of `std::vector<bool>` within
// the same module (it would be an ODR violation). Therefore `bind_vector` of `bool`
// is defined here and tested in `test_stl_binders.py`.
py::bind_vector<std::vector<bool>>(m, "VectorBool");
// test_missing_header_message
// The main module already includes stl.h, but we need to test the error message
// which appears when this header is missing.
m.def("missing_header_arg", [](const std::vector<float> &) {});
m.def("missing_header_return", []() { return std::vector<float>(); });
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/pybind11_tests.cpp | C++ | /*
tests/pybind11_tests.cpp -- pybind example plugin
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include "pybind11_tests.h"
#include "constructor_stats.h"
#include <functional>
#include <list>
/*
For testing purposes, we define a static global variable here in a function that each individual
test .cpp calls with its initialization lambda. It's convenient here because we can just not
compile some test files to disable/ignore some of the test code.
It is NOT recommended as a way to use pybind11 in practice, however: the initialization order will
be essentially random, which is okay for our test scripts (there are no dependencies between the
individual pybind11 test .cpp files), but most likely not what you want when using pybind11
productively.
Instead, see the "How can I reduce the build time?" question in the "Frequently asked questions"
section of the documentation for good practice on splitting binding code over multiple files.
*/
std::list<std::function<void(py::module_ &)>> &initializers() {
static std::list<std::function<void(py::module_ &)>> inits;
return inits;
}
test_initializer::test_initializer(Initializer init) { initializers().emplace_back(init); }
test_initializer::test_initializer(const char *submodule_name, Initializer init) {
initializers().emplace_back([=](py::module_ &parent) {
auto m = parent.def_submodule(submodule_name);
init(m);
});
}
void bind_ConstructorStats(py::module_ &m) {
py::class_<ConstructorStats>(m, "ConstructorStats")
.def("alive", &ConstructorStats::alive)
.def("values", &ConstructorStats::values)
.def_readwrite("default_constructions", &ConstructorStats::default_constructions)
.def_readwrite("copy_assignments", &ConstructorStats::copy_assignments)
.def_readwrite("move_assignments", &ConstructorStats::move_assignments)
.def_readwrite("copy_constructions", &ConstructorStats::copy_constructions)
.def_readwrite("move_constructions", &ConstructorStats::move_constructions)
.def_static("get",
(ConstructorStats & (*) (py::object)) & ConstructorStats::get,
py::return_value_policy::reference_internal)
// Not exactly ConstructorStats, but related: expose the internal pybind number of
// registered instances to allow instance cleanup checks (invokes a GC first)
.def_static("detail_reg_inst", []() {
ConstructorStats::gc();
return py::detail::get_internals().registered_instances.size();
});
}
const char *cpp_std() {
return
#if defined(PYBIND11_CPP20)
"C++20";
#elif defined(PYBIND11_CPP17)
"C++17";
#elif defined(PYBIND11_CPP14)
"C++14";
#else
"C++11";
#endif
}
PYBIND11_MODULE(pybind11_tests, m) {
m.doc() = "pybind11 test module";
// Intentionally kept minimal to not create a maintenance chore
// ("just enough" to be conclusive).
#if defined(_MSC_FULL_VER)
m.attr("compiler_info") = "MSVC " PYBIND11_TOSTRING(_MSC_FULL_VER);
#elif defined(__VERSION__)
m.attr("compiler_info") = __VERSION__;
#else
m.attr("compiler_info") = py::none();
#endif
m.attr("cpp_std") = cpp_std();
m.attr("PYBIND11_INTERNALS_ID") = PYBIND11_INTERNALS_ID;
m.attr("PYBIND11_SIMPLE_GIL_MANAGEMENT") =
#if defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
true;
#else
false;
#endif
bind_ConstructorStats(m);
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
m.attr("detailed_error_messages_enabled") = true;
#else
m.attr("detailed_error_messages_enabled") = false;
#endif
py::class_<UserType>(m, "UserType", "A `py::class_` type for testing")
.def(py::init<>())
.def(py::init<int>())
.def("get_value", &UserType::value, "Get value using a method")
.def("set_value", &UserType::set, "Set value using a method")
.def_property("value", &UserType::value, &UserType::set, "Get/set value using a property")
.def("__repr__", [](const UserType &u) { return "UserType({})"_s.format(u.value()); });
py::class_<IncType, UserType>(m, "IncType")
.def(py::init<>())
.def(py::init<int>())
.def("__repr__", [](const IncType &u) { return "IncType({})"_s.format(u.value()); });
for (const auto &initializer : initializers()) {
initializer(m);
}
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/pybind11_tests.h | C/C++ Header | #pragma once
#include <pybind11/eval.h>
#include <pybind11/pybind11.h>
namespace py = pybind11;
using namespace pybind11::literals;
class test_initializer {
using Initializer = void (*)(py::module_ &);
public:
explicit test_initializer(Initializer init);
test_initializer(const char *submodule_name, Initializer init);
};
#define TEST_SUBMODULE(name, variable) \
void test_submodule_##name(py::module_ &); \
test_initializer name(#name, test_submodule_##name); \
void test_submodule_##name(py::module_ &(variable))
/// Dummy type which is not exported anywhere -- something to trigger a conversion error
struct UnregisteredType {};
/// A user-defined type which is exported and can be used by any test
class UserType {
public:
UserType() = default;
explicit UserType(int i) : i(i) {}
int value() const { return i; }
void set(int set) { i = set; }
private:
int i = -1;
};
/// Like UserType, but increments `value` on copy for quick reference vs. copy tests
class IncType : public UserType {
public:
using UserType::UserType;
IncType() = default;
IncType(const IncType &other) : IncType(other.value() + 1) {}
IncType(IncType &&) = delete;
IncType &operator=(const IncType &) = delete;
IncType &operator=(IncType &&) = delete;
};
/// A simple union for basic testing
union IntFloat {
int i;
float f;
};
/// Custom cast-only type that casts to a string "rvalue" or "lvalue" depending on the cast
/// context. Used to test recursive casters (e.g. std::tuple, stl containers).
struct RValueCaster {};
PYBIND11_NAMESPACE_BEGIN(pybind11)
PYBIND11_NAMESPACE_BEGIN(detail)
template <>
class type_caster<RValueCaster> {
public:
PYBIND11_TYPE_CASTER(RValueCaster, const_name("RValueCaster"));
static handle cast(RValueCaster &&, return_value_policy, handle) {
return py::str("rvalue").release();
}
static handle cast(const RValueCaster &, return_value_policy, handle) {
return py::str("lvalue").release();
}
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(pybind11)
template <typename F>
void ignoreOldStyleInitWarnings(F &&body) {
py::exec(R"(
message = "pybind11-bound class '.+' is using an old-style placement-new '(?:__init__|__setstate__)' which has been deprecated"
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=message, category=FutureWarning)
body()
)",
py::dict(py::arg("body") = py::cpp_function(body)));
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/test_async.cpp | C++ | /*
tests/test_async.cpp -- __await__ support
Copyright (c) 2019 Google Inc.
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include "pybind11_tests.h"
// Bindings for __await__ support tests: one type without the protocol and one
// whose __await__ resolves immediately to 5 via an asyncio future.
TEST_SUBMODULE(async_module, m) {
    // Lacks __await__, so awaiting an instance raises TypeError in Python.
    struct DoesNotSupportAsync {};
    py::class_<DoesNotSupportAsync>(m, "DoesNotSupportAsync").def(py::init<>());
    struct SupportsAsync {};
    py::class_<SupportsAsync>(m, "SupportsAsync")
        .def(py::init<>())
        .def("__await__", [](const SupportsAsync &self) -> py::object {
            static_cast<void>(self); // unused; silences -Wunused warnings
            // Create a future on the current event loop, resolve it to 5, and
            // delegate to the future's own __await__ iterator.
            py::object loop = py::module_::import("asyncio.events").attr("get_event_loop")();
            py::object f = loop.attr("create_future")();
            f.attr("set_result")(5);
            return f.attr("__await__")();
        });
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/test_async.py | Python | import pytest
asyncio = pytest.importorskip("asyncio")
m = pytest.importorskip("pybind11_tests.async_module")
@pytest.fixture()
def event_loop():
    """Provide a fresh asyncio event loop per test; close it afterwards."""
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()
async def get_await_result(x):
    """Await *x* and return the awaited result."""
    return await x
def test_await(event_loop):
    # SupportsAsync.__await__ resolves to 5 (set in the C++ binding).
    assert event_loop.run_until_complete(get_await_result(m.SupportsAsync())) == 5
def test_await_missing(event_loop):
    # Awaiting an object without __await__ raises TypeError.
    with pytest.raises(TypeError):
        event_loop.run_until_complete(get_await_result(m.DoesNotSupportAsync()))
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/test_buffers.cpp | C++ | /*
tests/test_buffers.cpp -- supporting Pythons' buffer protocol
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include <pybind11/complex.h>
#include <pybind11/stl.h>
#include "constructor_stats.h"
#include "pybind11_tests.h"
TEST_SUBMODULE(buffers, m) {
m.attr("long_double_and_double_have_same_size") = (sizeof(long double) == sizeof(double));
m.def("format_descriptor_format_buffer_info_equiv",
[](const std::string &cpp_name, const py::buffer &buffer) {
// https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables
static auto *format_table = new std::map<std::string, std::string>;
static auto *equiv_table
= new std::map<std::string, bool (py::buffer_info::*)() const>;
if (format_table->empty()) {
#define PYBIND11_ASSIGN_HELPER(...) \
(*format_table)[#__VA_ARGS__] = py::format_descriptor<__VA_ARGS__>::format(); \
(*equiv_table)[#__VA_ARGS__] = &py::buffer_info::item_type_is_equivalent_to<__VA_ARGS__>;
PYBIND11_ASSIGN_HELPER(PyObject *)
PYBIND11_ASSIGN_HELPER(bool)
PYBIND11_ASSIGN_HELPER(std::int8_t)
PYBIND11_ASSIGN_HELPER(std::uint8_t)
PYBIND11_ASSIGN_HELPER(std::int16_t)
PYBIND11_ASSIGN_HELPER(std::uint16_t)
PYBIND11_ASSIGN_HELPER(std::int32_t)
PYBIND11_ASSIGN_HELPER(std::uint32_t)
PYBIND11_ASSIGN_HELPER(std::int64_t)
PYBIND11_ASSIGN_HELPER(std::uint64_t)
PYBIND11_ASSIGN_HELPER(float)
PYBIND11_ASSIGN_HELPER(double)
PYBIND11_ASSIGN_HELPER(long double)
PYBIND11_ASSIGN_HELPER(std::complex<float>)
PYBIND11_ASSIGN_HELPER(std::complex<double>)
PYBIND11_ASSIGN_HELPER(std::complex<long double>)
#undef PYBIND11_ASSIGN_HELPER
}
return std::pair<std::string, bool>(
(*format_table)[cpp_name], (buffer.request().*((*equiv_table)[cpp_name]))());
});
    // test_from_python / test_to_python:
    // Minimal dense row-major float matrix with full rule-of-five semantics.
    // Every construction/copy/move/destruction is reported to ConstructorStats
    // so the Python tests can count object lifetimes exactly.
    class Matrix {
    public:
        Matrix(py::ssize_t rows, py::ssize_t cols) : m_rows(rows), m_cols(cols) {
            print_created(this, std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix");
            // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
            m_data = new float[(size_t) (rows * cols)];
            memset(m_data, 0, sizeof(float) * (size_t) (rows * cols));
        }
        Matrix(const Matrix &s) : m_rows(s.m_rows), m_cols(s.m_cols) {
            print_copy_created(this,
                               std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix");
            // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
            m_data = new float[(size_t) (m_rows * m_cols)];
            memcpy(m_data, s.m_data, sizeof(float) * (size_t) (m_rows * m_cols));
        }
        // Move leaves the source empty (0x0, null data) so its destructor is a no-op.
        Matrix(Matrix &&s) noexcept : m_rows(s.m_rows), m_cols(s.m_cols), m_data(s.m_data) {
            print_move_created(this);
            s.m_rows = 0;
            s.m_cols = 0;
            s.m_data = nullptr;
        }
        ~Matrix() {
            print_destroyed(this,
                            std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix");
            delete[] m_data;
        }
        Matrix &operator=(const Matrix &s) {
            // Self-assignment guard: freeing m_data first would read freed memory.
            if (this == &s) {
                return *this;
            }
            print_copy_assigned(this,
                                std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix");
            delete[] m_data;
            m_rows = s.m_rows;
            m_cols = s.m_cols;
            m_data = new float[(size_t) (m_rows * m_cols)];
            memcpy(m_data, s.m_data, sizeof(float) * (size_t) (m_rows * m_cols));
            return *this;
        }
        Matrix &operator=(Matrix &&s) noexcept {
            print_move_assigned(this,
                                std::to_string(m_rows) + "x" + std::to_string(m_cols) + " matrix");
            if (&s != this) {
                delete[] m_data;
                m_rows = s.m_rows;
                m_cols = s.m_cols;
                m_data = s.m_data;
                s.m_rows = 0;
                s.m_cols = 0;
                s.m_data = nullptr;
            }
            return *this;
        }
        // Row-major element access: element (i, j) lives at index i * cols + j.
        float operator()(py::ssize_t i, py::ssize_t j) const {
            return m_data[(size_t) (i * m_cols + j)];
        }
        float &operator()(py::ssize_t i, py::ssize_t j) {
            return m_data[(size_t) (i * m_cols + j)];
        }
        float *data() { return m_data; }
        py::ssize_t rows() const { return m_rows; }
        py::ssize_t cols() const { return m_cols; }
    private:
        py::ssize_t m_rows;
        py::ssize_t m_cols;
        float *m_data; // owned; released in the destructor
    };
py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
.def(py::init<py::ssize_t, py::ssize_t>())
/// Construct from a buffer
.def(py::init([](const py::buffer &b) {
py::buffer_info info = b.request();
if (info.format != py::format_descriptor<float>::format() || info.ndim != 2) {
throw std::runtime_error("Incompatible buffer format!");
}
auto *v = new Matrix(info.shape[0], info.shape[1]);
memcpy(v->data(), info.ptr, sizeof(float) * (size_t) (v->rows() * v->cols()));
return v;
}))
.def("rows", &Matrix::rows)
.def("cols", &Matrix::cols)
/// Bare bones interface
.def("__getitem__",
[](const Matrix &m, std::pair<py::ssize_t, py::ssize_t> i) {
if (i.first >= m.rows() || i.second >= m.cols()) {
throw py::index_error();
}
return m(i.first, i.second);
})
.def("__setitem__",
[](Matrix &m, std::pair<py::ssize_t, py::ssize_t> i, float v) {
if (i.first >= m.rows() || i.second >= m.cols()) {
throw py::index_error();
}
m(i.first, i.second) = v;
})
/// Provide buffer access
.def_buffer([](Matrix &m) -> py::buffer_info {
return py::buffer_info(
m.data(), /* Pointer to buffer */
{m.rows(), m.cols()}, /* Buffer dimensions */
{sizeof(float) * size_t(m.cols()), /* Strides (in bytes) for each index */
sizeof(float)});
});
// test_inherited_protocol
class SquareMatrix : public Matrix {
public:
explicit SquareMatrix(py::ssize_t n) : Matrix(n, n) {}
};
// Derived classes inherit the buffer protocol and the buffer access function
py::class_<SquareMatrix, Matrix>(m, "SquareMatrix").def(py::init<py::ssize_t>());
// test_pointer_to_member_fn
// Tests that passing a pointer to member to the base class works in
// the derived class.
struct Buffer {
int32_t value = 0;
py::buffer_info get_buffer_info() {
return py::buffer_info(
&value, sizeof(value), py::format_descriptor<int32_t>::format(), 1);
}
};
py::class_<Buffer>(m, "Buffer", py::buffer_protocol())
.def(py::init<>())
.def_readwrite("value", &Buffer::value)
.def_buffer(&Buffer::get_buffer_info);
class ConstBuffer {
std::unique_ptr<int32_t> value;
public:
int32_t get_value() const { return *value; }
void set_value(int32_t v) { *value = v; }
py::buffer_info get_buffer_info() const {
return py::buffer_info(
value.get(), sizeof(*value), py::format_descriptor<int32_t>::format(), 1);
}
ConstBuffer() : value(new int32_t{0}) {}
};
py::class_<ConstBuffer>(m, "ConstBuffer", py::buffer_protocol())
.def(py::init<>())
.def_property("value", &ConstBuffer::get_value, &ConstBuffer::set_value)
.def_buffer(&ConstBuffer::get_buffer_info);
struct DerivedBuffer : public Buffer {};
py::class_<DerivedBuffer>(m, "DerivedBuffer", py::buffer_protocol())
.def(py::init<>())
.def_readwrite("value", (int32_t DerivedBuffer::*) &DerivedBuffer::value)
.def_buffer(&DerivedBuffer::get_buffer_info);
struct BufferReadOnly {
const uint8_t value = 0;
explicit BufferReadOnly(uint8_t value) : value(value) {}
py::buffer_info get_buffer_info() { return py::buffer_info(&value, 1); }
};
py::class_<BufferReadOnly>(m, "BufferReadOnly", py::buffer_protocol())
.def(py::init<uint8_t>())
.def_buffer(&BufferReadOnly::get_buffer_info);
struct BufferReadOnlySelect {
uint8_t value = 0;
bool readonly = false;
py::buffer_info get_buffer_info() { return py::buffer_info(&value, 1, readonly); }
};
py::class_<BufferReadOnlySelect>(m, "BufferReadOnlySelect", py::buffer_protocol())
.def(py::init<>())
.def_readwrite("value", &BufferReadOnlySelect::value)
.def_readwrite("readonly", &BufferReadOnlySelect::readonly)
.def_buffer(&BufferReadOnlySelect::get_buffer_info);
// Expose buffer_info for testing.
py::class_<py::buffer_info>(m, "buffer_info")
.def(py::init<>())
.def_readonly("itemsize", &py::buffer_info::itemsize)
.def_readonly("size", &py::buffer_info::size)
.def_readonly("format", &py::buffer_info::format)
.def_readonly("ndim", &py::buffer_info::ndim)
.def_readonly("shape", &py::buffer_info::shape)
.def_readonly("strides", &py::buffer_info::strides)
.def_readonly("readonly", &py::buffer_info::readonly)
.def("__repr__", [](py::handle self) {
return py::str("itemsize={0.itemsize!r}, size={0.size!r}, format={0.format!r}, "
"ndim={0.ndim!r}, shape={0.shape!r}, strides={0.strides!r}, "
"readonly={0.readonly!r}")
.format(self);
});
m.def("get_buffer_info", [](const py::buffer &buffer) { return buffer.request(); });
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/test_buffers.py | Python | import ctypes
import io
import struct
import pytest
import env
from pybind11_tests import ConstructorStats
from pybind11_tests import buffers as m
np = pytest.importorskip("numpy")
if m.long_double_and_double_have_same_size:
    # Determined by the compiler used to build the pybind11 tests
    # (e.g. MSVC gets here, but MinGW might not).
    np_float128 = None
    np_complex256 = None
else:
    # Determined by the compiler used to build numpy (e.g. MinGW).
    # getattr with the name repeated as default yields the dtype if numpy has
    # it, or the *string* name otherwise (detected and skipped in the test).
    np_float128 = getattr(np, *["float128"] * 2)
    np_complex256 = getattr(np, *["complex256"] * 2)
# (C++ type name, struct-syntax format string, numpy dtype) triples used to
# cross-check py::format_descriptor against numpy buffer formats.
CPP_NAME_FORMAT_NP_DTYPE_TABLE = [
    ("PyObject *", "O", object),
    ("bool", "?", np.bool_),
    ("std::int8_t", "b", np.int8),
    ("std::uint8_t", "B", np.uint8),
    ("std::int16_t", "h", np.int16),
    ("std::uint16_t", "H", np.uint16),
    ("std::int32_t", "i", np.int32),
    ("std::uint32_t", "I", np.uint32),
    ("std::int64_t", "q", np.int64),
    ("std::uint64_t", "Q", np.uint64),
    ("float", "f", np.float32),
    ("double", "d", np.float64),
    ("long double", "g", np_float128),
    ("std::complex<float>", "Zf", np.complex64),
    ("std::complex<double>", "Zd", np.complex128),
    ("std::complex<long double>", "Zg", np_complex256),
]
# Projections of the table above, dropping entries whose dtype is unavailable.
CPP_NAME_FORMAT_TABLE = [
    (cpp_name, format)
    for cpp_name, format, np_dtype in CPP_NAME_FORMAT_NP_DTYPE_TABLE
    if np_dtype is not None
]
CPP_NAME_NP_DTYPE_TABLE = [
    (cpp_name, np_dtype) for cpp_name, _, np_dtype in CPP_NAME_FORMAT_NP_DTYPE_TABLE
]
@pytest.mark.parametrize(("cpp_name", "np_dtype"), CPP_NAME_NP_DTYPE_TABLE)
def test_format_descriptor_format_buffer_info_equiv(cpp_name, np_dtype):
    """Cross-check format strings and buffer_info type equivalence.

    For every C++ type in the table, the reported format must equal the
    expected struct-syntax format, and item_type_is_equivalent_to must be
    True only for the numpy dtype matching that exact C++ type.
    """
    if np_dtype is None:
        pytest.skip(
            f"cpp_name=`{cpp_name}`: `long double` and `double` have same size."
        )
    if isinstance(np_dtype, str):
        # getattr fell back to the name string: numpy lacks this dtype here.
        pytest.skip(f"np.{np_dtype} does not exist.")
    np_array = np.array([], dtype=np_dtype)
    for other_cpp_name, expected_format in CPP_NAME_FORMAT_TABLE:
        format, np_array_is_matching = m.format_descriptor_format_buffer_info_equiv(
            other_cpp_name, np_array
        )
        assert format == expected_format
        if other_cpp_name == cpp_name:
            assert np_array_is_matching
        else:
            assert not np_array_is_matching
def test_from_python():
    """Construct a Matrix from a numpy buffer; verify contents and lifetimes."""
    # A 1D array does not match the required 2D float32 buffer format.
    with pytest.raises(RuntimeError) as excinfo:
        m.Matrix(np.array([1, 2, 3]))  # trying to assign a 1D array
    assert str(excinfo.value) == "Incompatible buffer format!"
    m3 = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
    m4 = m.Matrix(m3)
    for i in range(m4.rows()):
        for j in range(m4.cols()):
            assert m3[i, j] == m4[i, j]
    # The buffer constructor copies data into one new Matrix; no Matrix-level
    # copies or moves occur.
    cstats = ConstructorStats.get(m.Matrix)
    assert cstats.alive() == 1
    del m3, m4
    assert cstats.alive() == 0
    assert cstats.values() == ["2x3 matrix"]
    assert cstats.copy_constructions == 0
    # assert cstats.move_constructions >= 0 # Don't invoke any
    assert cstats.copy_assignments == 0
    assert cstats.move_assignments == 0
# https://foss.heptapod.net/pypy/pypy/-/issues/2444
# TODO: fix on recent PyPy
@pytest.mark.xfail(
    env.PYPY, reason="PyPy 7.3.7 doesn't clear this anymore", strict=False
)
def test_to_python():
    """Expose a Matrix to Python via the buffer protocol and verify views."""
    mat = m.Matrix(5, 4)
    assert memoryview(mat).shape == (5, 4)
    assert mat[2, 3] == 0
    mat[2, 3] = 4.0
    mat[3, 2] = 7.0
    assert mat[2, 3] == 4
    assert mat[3, 2] == 7
    # Raw buffer layout is row-major float32: byte offset = (i * cols + j) * 4.
    assert struct.unpack_from("f", mat, (3 * 4 + 2) * 4) == (7,)
    assert struct.unpack_from("f", mat, (2 * 4 + 3) * 4) == (4,)
    mat2 = np.array(mat, copy=False)
    assert mat2.shape == (5, 4)
    assert abs(mat2).sum() == 11
    assert mat2[2, 3] == 4
    assert mat2[3, 2] == 7
    mat2[2, 3] = 5
    assert mat2[2, 3] == 5
    cstats = ConstructorStats.get(m.Matrix)
    assert cstats.alive() == 1
    del mat
    pytest.gc_collect()
    # mat2 shares the buffer, so the underlying Matrix must stay alive.
    assert cstats.alive() == 1
    del mat2  # holds a mat reference
    pytest.gc_collect()
    assert cstats.alive() == 0
    assert cstats.values() == ["5x4 matrix"]
    assert cstats.copy_constructions == 0
    # assert cstats.move_constructions >= 0 # Don't invoke any
    assert cstats.copy_assignments == 0
    assert cstats.move_assignments == 0
def test_inherited_protocol():
    """SquareMatrix is derived from Matrix and inherits the buffer protocol"""
    side = 5
    square = m.SquareMatrix(side)
    expected_shape = (side, side)
    assert memoryview(square).shape == expected_shape
    assert np.asarray(square).shape == expected_shape
def test_pointer_to_member_fn():
    """Buffer access bound via pointer-to-member works for all three classes."""
    expected = 0x12345678
    for buffer_cls in (m.Buffer, m.ConstBuffer, m.DerivedBuffer):
        obj = buffer_cls()
        obj.value = expected
        (unpacked,) = struct.unpack("i", bytearray(obj))
        assert unpacked == expected
def test_readonly_buffer():
    """A read-only buffer is viewable but rejects writes with TypeError."""
    view = memoryview(m.BufferReadOnly(0x64))
    assert view.readonly
    assert view[0] == 0x64
    with pytest.raises(TypeError):
        view[0] = 0
def test_selective_readonly_buffer():
    """BufferReadOnlySelect toggles buffer writability via its `readonly` flag."""
    buf = m.BufferReadOnlySelect()
    # Writable by default: memoryview assignment and readinto both succeed.
    memoryview(buf)[0] = 0x64
    assert buf.value == 0x64
    io.BytesIO(b"A").readinto(buf)
    assert buf.value == ord(b"A")
    buf.readonly = True
    # After flipping the flag, every write path raises TypeError.
    with pytest.raises(TypeError):
        memoryview(buf)[0] = 0
    with pytest.raises(TypeError):
        io.BytesIO(b"1").readinto(buf)
def test_ctypes_array_1d():
    """buffer_info correctly describes one-dimensional ctypes arrays."""
    arrays = (
        (ctypes.c_char * 10)(),
        (ctypes.c_int * 15)(),
        (ctypes.c_long * 7)(),
    )
    for arr in arrays:
        info = m.get_buffer_info(arr)
        length = len(arr)
        item = ctypes.sizeof(arr._type_)
        assert info.itemsize == item
        assert info.size == length
        assert info.ndim == 1
        assert info.shape == [length]
        assert info.strides == [item]
        assert not info.readonly
def test_ctypes_array_2d():
    """buffer_info correctly describes two-dimensional ctypes arrays."""
    arrays = (
        ((ctypes.c_char * 10) * 4)(),
        ((ctypes.c_int * 15) * 3)(),
        ((ctypes.c_long * 7) * 2)(),
    )
    for arr in arrays:
        info = m.get_buffer_info(arr)
        rows, cols = len(arr), len(arr[0])
        item = ctypes.sizeof(arr[0]._type_)
        assert info.itemsize == item
        assert info.size == rows * cols
        assert info.ndim == 2
        assert info.shape == [rows, cols]
        assert info.strides == [item * cols, item]
        assert not info.readonly
def test_ctypes_from_buffer():
    """ctypes arrays built from Python buffers mirror the source geometry."""
    test_pystr = b"0123456789"
    for pyarray in (test_pystr, bytearray(test_pystr)):
        pyinfo = m.get_buffer_info(pyarray)
        # bytes is read-only, so only from_buffer_copy is legal; bytearray is
        # writable and can share memory via from_buffer.
        if pyinfo.readonly:
            cbytes = (ctypes.c_char * len(pyarray)).from_buffer_copy(pyarray)
            cinfo = m.get_buffer_info(cbytes)
        else:
            cbytes = (ctypes.c_char * len(pyarray)).from_buffer(pyarray)
            cinfo = m.get_buffer_info(cbytes)
        assert cinfo.size == pyinfo.size
        assert cinfo.ndim == pyinfo.ndim
        assert cinfo.shape == pyinfo.shape
        assert cinfo.strides == pyinfo.strides
        # The resulting ctypes array is writable either way.
        assert not cinfo.readonly
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/test_builtin_casters.cpp | C++ | /*
tests/test_builtin_casters.cpp -- Casters available without any additional headers
Copyright (c) 2017 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include <pybind11/complex.h>
#include "pybind11_tests.h"
// Tiny POD whose `tag` records which type_caster conversion operator produced
// it (see the type_caster<ConstRefCasted> specialization in this file).
struct ConstRefCasted {
    int tag;
};
PYBIND11_NAMESPACE_BEGIN(pybind11)
PYBIND11_NAMESPACE_BEGIN(detail)
// Caster that ignores its Python input and fabricates a ConstRefCasted whose
// `tag` identifies which conversion operator (value category / constness)
// pybind11's cast_op selected. Exercised by the takes_* bindings below.
template <>
class type_caster<ConstRefCasted> {
public:
    static constexpr auto name = const_name<ConstRefCasted>();
    // Input is unimportant, a new value will always be constructed based on the
    // cast operator.
    bool load(handle, bool) { return true; }
    explicit operator ConstRefCasted &&() {
        value = {1}; // tag 1: non-const rvalue reference
        // NOLINTNEXTLINE(performance-move-const-arg)
        return std::move(value);
    }
    explicit operator ConstRefCasted &() {
        value = {2}; // tag 2: non-const lvalue reference
        return value;
    }
    explicit operator ConstRefCasted *() {
        value = {3}; // tag 3: non-const pointer
        return &value;
    }
    explicit operator const ConstRefCasted &() {
        value = {4}; // tag 4: const lvalue reference
        return value;
    }
    explicit operator const ConstRefCasted *() {
        value = {5}; // tag 5: const pointer
        return &value;
    }
    // custom cast_op to explicitly propagate types to the conversion operators.
    template <typename T_>
    using cast_op_type =
        /// const
        conditional_t<
            std::is_same<remove_reference_t<T_>, const ConstRefCasted *>::value,
            const ConstRefCasted *,
            conditional_t<
                std::is_same<T_, const ConstRefCasted &>::value,
                const ConstRefCasted &,
                /// non-const
                conditional_t<std::is_same<remove_reference_t<T_>, ConstRefCasted *>::value,
                              ConstRefCasted *,
                              conditional_t<std::is_same<T_, ConstRefCasted &>::value,
                                            ConstRefCasted &,
                                            /* else */ ConstRefCasted &&>>>>;
private:
    ConstRefCasted value = {0};
};
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(pybind11)
TEST_SUBMODULE(builtin_casters, m) {
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(4127)
// test_simple_string
m.def("string_roundtrip", [](const char *s) { return s; });
// test_unicode_conversion
// Some test characters in utf16 and utf32 encodings. The last one (the 𝐀) contains a null
// byte
char32_t a32 = 0x61 /*a*/, z32 = 0x7a /*z*/, ib32 = 0x203d /*‽*/, cake32 = 0x1f382 /*🎂*/,
mathbfA32 = 0x1d400 /*𝐀*/;
char16_t b16 = 0x62 /*b*/, z16 = 0x7a, ib16 = 0x203d, cake16_1 = 0xd83c, cake16_2 = 0xdf82,
mathbfA16_1 = 0xd835, mathbfA16_2 = 0xdc00;
std::wstring wstr;
wstr.push_back(0x61); // a
wstr.push_back(0x2e18); // ⸘
if (sizeof(wchar_t) == 2) {
wstr.push_back(mathbfA16_1);
wstr.push_back(mathbfA16_2);
} // 𝐀, utf16
else {
wstr.push_back((wchar_t) mathbfA32);
} // 𝐀, utf32
wstr.push_back(0x7a); // z
m.def("good_utf8_string", []() {
return std::string((const char *) u8"Say utf8\u203d \U0001f382 \U0001d400");
}); // Say utf8‽ 🎂 𝐀
m.def("good_utf16_string", [=]() {
return std::u16string({b16, ib16, cake16_1, cake16_2, mathbfA16_1, mathbfA16_2, z16});
}); // b‽🎂𝐀z
m.def("good_utf32_string", [=]() {
return std::u32string({a32, mathbfA32, cake32, ib32, z32});
}); // a𝐀🎂‽z
m.def("good_wchar_string", [=]() { return wstr; }); // a‽𝐀z
m.def("bad_utf8_string", []() {
return std::string("abc\xd0"
"def");
});
m.def("bad_utf16_string", [=]() { return std::u16string({b16, char16_t(0xd800), z16}); });
// Under Python 2.7, invalid unicode UTF-32 characters didn't appear to trigger
// UnicodeDecodeError
m.def("bad_utf32_string", [=]() { return std::u32string({a32, char32_t(0xd800), z32}); });
if (sizeof(wchar_t) == 2) {
m.def("bad_wchar_string", [=]() {
return std::wstring({wchar_t(0x61), wchar_t(0xd800)});
});
}
m.def("u8_Z", []() -> char { return 'Z'; });
m.def("u8_eacute", []() -> char { return '\xe9'; });
m.def("u16_ibang", [=]() -> char16_t { return ib16; });
m.def("u32_mathbfA", [=]() -> char32_t { return mathbfA32; });
m.def("wchar_heart", []() -> wchar_t { return 0x2665; });
// test_single_char_arguments
m.attr("wchar_size") = py::cast(sizeof(wchar_t));
m.def("ord_char", [](char c) -> int { return static_cast<unsigned char>(c); });
m.def("ord_char_lv", [](char &c) -> int { return static_cast<unsigned char>(c); });
m.def("ord_char16", [](char16_t c) -> uint16_t { return c; });
m.def("ord_char16_lv", [](char16_t &c) -> uint16_t { return c; });
m.def("ord_char32", [](char32_t c) -> uint32_t { return c; });
m.def("ord_wchar", [](wchar_t c) -> int { return c; });
// test_bytes_to_string
m.def("strlen", [](char *s) { return strlen(s); });
m.def("string_length", [](const std::string &s) { return s.length(); });
#ifdef PYBIND11_HAS_U8STRING
m.attr("has_u8string") = true;
m.def("good_utf8_u8string", []() {
return std::u8string(u8"Say utf8\u203d \U0001f382 \U0001d400");
}); // Say utf8‽ 🎂 𝐀
m.def("bad_utf8_u8string", []() {
return std::u8string((const char8_t *) "abc\xd0"
"def");
});
m.def("u8_char8_Z", []() -> char8_t { return u8'Z'; });
// test_single_char_arguments
m.def("ord_char8", [](char8_t c) -> int { return static_cast<unsigned char>(c); });
m.def("ord_char8_lv", [](char8_t &c) -> int { return static_cast<unsigned char>(c); });
#endif
// test_string_view
#ifdef PYBIND11_HAS_STRING_VIEW
m.attr("has_string_view") = true;
m.def("string_view_print", [](std::string_view s) { py::print(s, s.size()); });
m.def("string_view16_print", [](std::u16string_view s) { py::print(s, s.size()); });
m.def("string_view32_print", [](std::u32string_view s) { py::print(s, s.size()); });
m.def("string_view_chars", [](std::string_view s) {
py::list l;
for (auto c : s) {
l.append((std::uint8_t) c);
}
return l;
});
m.def("string_view16_chars", [](std::u16string_view s) {
py::list l;
for (auto c : s) {
l.append((int) c);
}
return l;
});
m.def("string_view32_chars", [](std::u32string_view s) {
py::list l;
for (auto c : s) {
l.append((int) c);
}
return l;
});
m.def("string_view_return",
[]() { return std::string_view((const char *) u8"utf8 secret \U0001f382"); });
m.def("string_view16_return",
[]() { return std::u16string_view(u"utf16 secret \U0001f382"); });
m.def("string_view32_return",
[]() { return std::u32string_view(U"utf32 secret \U0001f382"); });
// The inner lambdas here are to also test implicit conversion
using namespace std::literals;
m.def("string_view_bytes",
[]() { return [](py::bytes b) { return b; }("abc \x80\x80 def"sv); });
m.def("string_view_str",
[]() { return [](py::str s) { return s; }("abc \342\200\275 def"sv); });
m.def("string_view_from_bytes",
[](const py::bytes &b) { return [](std::string_view s) { return s; }(b); });
m.def("string_view_memoryview", []() {
static constexpr auto val = "Have some \360\237\216\202"sv;
return py::memoryview::from_memory(val);
});
# ifdef PYBIND11_HAS_U8STRING
m.def("string_view8_print", [](std::u8string_view s) { py::print(s, s.size()); });
m.def("string_view8_chars", [](std::u8string_view s) {
py::list l;
for (auto c : s)
l.append((std::uint8_t) c);
return l;
});
m.def("string_view8_return", []() { return std::u8string_view(u8"utf8 secret \U0001f382"); });
m.def("string_view8_str", []() { return py::str{std::u8string_view{u8"abc ‽ def"}}; });
# endif
struct TypeWithBothOperatorStringAndStringView {
// NOLINTNEXTLINE(google-explicit-constructor)
operator std::string() const { return "success"; }
// NOLINTNEXTLINE(google-explicit-constructor)
operator std::string_view() const { return "failure"; }
};
m.def("bytes_from_type_with_both_operator_string_and_string_view",
[]() { return py::bytes(TypeWithBothOperatorStringAndStringView()); });
m.def("str_from_type_with_both_operator_string_and_string_view",
[]() { return py::str(TypeWithBothOperatorStringAndStringView()); });
#endif
// test_integer_casting
m.def("i32_str", [](std::int32_t v) { return std::to_string(v); });
m.def("u32_str", [](std::uint32_t v) { return std::to_string(v); });
m.def("i64_str", [](std::int64_t v) { return std::to_string(v); });
m.def("u64_str", [](std::uint64_t v) { return std::to_string(v); });
// test_int_convert
m.def("int_passthrough", [](int arg) { return arg; });
m.def(
"int_passthrough_noconvert", [](int arg) { return arg; }, py::arg{}.noconvert());
// test_tuple
m.def(
"pair_passthrough",
[](const std::pair<bool, std::string> &input) {
return std::make_pair(input.second, input.first);
},
"Return a pair in reversed order");
m.def(
"tuple_passthrough",
[](std::tuple<bool, std::string, int> input) {
return std::make_tuple(std::get<2>(input), std::get<1>(input), std::get<0>(input));
},
"Return a triple in reversed order");
m.def("empty_tuple", []() { return std::tuple<>(); });
static std::pair<RValueCaster, RValueCaster> lvpair;
static std::tuple<RValueCaster, RValueCaster, RValueCaster> lvtuple;
static std::pair<RValueCaster, std::tuple<RValueCaster, std::pair<RValueCaster, RValueCaster>>>
lvnested;
m.def("rvalue_pair", []() { return std::make_pair(RValueCaster{}, RValueCaster{}); });
m.def("lvalue_pair", []() -> const decltype(lvpair) & { return lvpair; });
m.def("rvalue_tuple",
[]() { return std::make_tuple(RValueCaster{}, RValueCaster{}, RValueCaster{}); });
m.def("lvalue_tuple", []() -> const decltype(lvtuple) & { return lvtuple; });
m.def("rvalue_nested", []() {
return std::make_pair(
RValueCaster{},
std::make_tuple(RValueCaster{}, std::make_pair(RValueCaster{}, RValueCaster{})));
});
m.def("lvalue_nested", []() -> const decltype(lvnested) & { return lvnested; });
m.def(
"int_string_pair",
[]() {
// Using no-destructor idiom to side-step warnings from overzealous compilers.
static auto *int_string_pair = new std::pair<int, std::string>{2, "items"};
return int_string_pair;
},
py::return_value_policy::reference);
// test_builtins_cast_return_none
m.def("return_none_string", []() -> std::string * { return nullptr; });
m.def("return_none_char", []() -> const char * { return nullptr; });
m.def("return_none_bool", []() -> bool * { return nullptr; });
m.def("return_none_int", []() -> int * { return nullptr; });
m.def("return_none_float", []() -> float * { return nullptr; });
m.def("return_none_pair", []() -> std::pair<int, int> * { return nullptr; });
// test_none_deferred
m.def("defer_none_cstring", [](char *) { return false; });
m.def("defer_none_cstring", [](const py::none &) { return true; });
m.def("defer_none_custom", [](UserType *) { return false; });
m.def("defer_none_custom", [](const py::none &) { return true; });
m.def("nodefer_none_void", [](void *) { return true; });
m.def("nodefer_none_void", [](const py::none &) { return false; });
// test_void_caster
m.def("load_nullptr_t", [](std::nullptr_t) {}); // not useful, but it should still compile
m.def("cast_nullptr_t", []() { return std::nullptr_t{}; });
// [workaround(intel)] ICC 20/21 breaks with py::arg().stuff, using py::arg{}.stuff works.
// test_bool_caster
m.def("bool_passthrough", [](bool arg) { return arg; });
m.def(
"bool_passthrough_noconvert", [](bool arg) { return arg; }, py::arg{}.noconvert());
// TODO: This should be disabled and fixed in future Intel compilers
#if !defined(__INTEL_COMPILER)
// Test "bool_passthrough_noconvert" again, but using () instead of {} to construct py::arg
// When compiled with the Intel compiler, this results in segmentation faults when importing
// the module. Tested with icc (ICC) 2021.1 Beta 20200827, this should be tested again when
// a newer version of icc is available.
m.def(
"bool_passthrough_noconvert2", [](bool arg) { return arg; }, py::arg().noconvert());
#endif
// test_reference_wrapper
m.def("refwrap_builtin", [](std::reference_wrapper<int> p) { return 10 * p.get(); });
m.def("refwrap_usertype", [](std::reference_wrapper<UserType> p) { return p.get().value(); });
m.def("refwrap_usertype_const",
[](std::reference_wrapper<const UserType> p) { return p.get().value(); });
m.def("refwrap_lvalue", []() -> std::reference_wrapper<UserType> {
static UserType x(1);
return std::ref(x);
});
m.def("refwrap_lvalue_const", []() -> std::reference_wrapper<const UserType> {
static UserType x(1);
return std::cref(x);
});
// Not currently supported (std::pair caster has return-by-value cast operator);
// triggers static_assert failure.
// m.def("refwrap_pair", [](std::reference_wrapper<std::pair<int, int>>) { });
m.def(
"refwrap_list",
[](bool copy) {
static IncType x1(1), x2(2);
py::list l;
for (const auto &f : {std::ref(x1), std::ref(x2)}) {
l.append(py::cast(
f, copy ? py::return_value_policy::copy : py::return_value_policy::reference));
}
return l;
},
"copy"_a);
m.def("refwrap_iiw", [](const IncType &w) { return w.value(); });
m.def("refwrap_call_iiw", [](IncType &w, const py::function &f) {
py::list l;
l.append(f(std::ref(w)));
l.append(f(std::cref(w)));
IncType x(w.value());
l.append(f(std::ref(x)));
IncType y(w.value());
auto r3 = std::ref(y);
l.append(f(r3));
return l;
});
// test_complex
m.def("complex_cast", [](float x) { return "{}"_s.format(x); });
m.def("complex_cast",
[](std::complex<float> x) { return "({}, {})"_s.format(x.real(), x.imag()); });
// test int vs. long (Python 2)
m.def("int_cast", []() { return (int) 42; });
m.def("long_cast", []() { return (long) 42; });
m.def("longlong_cast", []() { return ULLONG_MAX; });
/// test void* cast operator
m.def("test_void_caster", []() -> bool {
void *v = (void *) 0xabcd;
py::object o = py::cast(v);
return py::cast<void *>(o) == v;
});
// Tests const/non-const propagation in cast_op.
m.def("takes", [](ConstRefCasted x) { return x.tag; });
m.def("takes_move", [](ConstRefCasted &&x) { return x.tag; });
m.def("takes_ptr", [](ConstRefCasted *x) { return x->tag; });
m.def("takes_ref", [](ConstRefCasted &x) { return x.tag; });
m.def("takes_ref_wrap", [](std::reference_wrapper<ConstRefCasted> x) { return x.get().tag; });
m.def("takes_const_ptr", [](const ConstRefCasted *x) { return x->tag; });
m.def("takes_const_ref", [](const ConstRefCasted &x) { return x.tag; });
m.def("takes_const_ref_wrap",
[](std::reference_wrapper<const ConstRefCasted> x) { return x.get().tag; });
PYBIND11_WARNING_POP
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/test_builtin_casters.py | Python | import sys
import pytest
import env
from pybind11_tests import IncType, UserType
from pybind11_tests import builtin_casters as m
def test_simple_string():
    # A const char * argument/return round-trips unchanged through pybind11.
    assert m.string_roundtrip("const char *") == "const char *"
def test_unicode_conversion():
    """Tests unicode conversion and error reporting."""
    # Valid strings round-trip from each C++ encoding to Python str.
    assert m.good_utf8_string() == "Say utf8‽ 🎂 𝐀"
    assert m.good_utf16_string() == "b‽🎂𝐀z"
    assert m.good_utf32_string() == "a𝐀🎂‽z"
    assert m.good_wchar_string() == "a⸘𝐀z"
    if hasattr(m, "has_u8string"):
        assert m.good_utf8_u8string() == "Say utf8‽ 🎂 𝐀"
    # Invalid byte sequences / lone surrogates raise UnicodeDecodeError.
    with pytest.raises(UnicodeDecodeError):
        m.bad_utf8_string()
    with pytest.raises(UnicodeDecodeError):
        m.bad_utf16_string()
    # These are provided only if they actually fail (they don't when 32-bit)
    if hasattr(m, "bad_utf32_string"):
        with pytest.raises(UnicodeDecodeError):
            m.bad_utf32_string()
    if hasattr(m, "bad_wchar_string"):
        with pytest.raises(UnicodeDecodeError):
            m.bad_wchar_string()
    if hasattr(m, "has_u8string"):
        with pytest.raises(UnicodeDecodeError):
            m.bad_utf8_u8string()
    # Single character returns decode to one-character Python strings.
    assert m.u8_Z() == "Z"
    assert m.u8_eacute() == "é"
    assert m.u16_ibang() == "‽"
    assert m.u32_mathbfA() == "𝐀"
    assert m.wchar_heart() == "♥"
    if hasattr(m, "has_u8string"):
        assert m.u8_char8_Z() == "Z"
def test_single_char_arguments():
    """Tests failures for passing invalid inputs to char-accepting functions"""
    def toobig_message(r):
        return f"Character code point not in range({r:#x})"
    toolong_message = "Expected a character, but multi-character string found"
    # char: single byte; code points above 0xFF are rejected.
    assert m.ord_char("a") == 0x61 # simple ASCII
    assert m.ord_char_lv("b") == 0x62
    assert (
        m.ord_char("é") == 0xE9
    ) # requires 2 bytes in utf-8, but can be stuffed in a char
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char("Ā") == 0x100 # requires 2 bytes, doesn't fit in a char
    assert str(excinfo.value) == toobig_message(0x100)
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char("ab")
    assert str(excinfo.value) == toolong_message
    # char16_t: a single UTF-16 code unit; surrogate pairs are rejected.
    assert m.ord_char16("a") == 0x61
    assert m.ord_char16("é") == 0xE9
    assert m.ord_char16_lv("ê") == 0xEA
    assert m.ord_char16("Ā") == 0x100
    assert m.ord_char16("‽") == 0x203D
    assert m.ord_char16("♥") == 0x2665
    assert m.ord_char16_lv("♡") == 0x2661
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char16("🎂") == 0x1F382 # requires surrogate pair
    assert str(excinfo.value) == toobig_message(0x10000)
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char16("aa")
    assert str(excinfo.value) == toolong_message
    # char32_t: accepts the full Unicode range, including astral-plane chars.
    assert m.ord_char32("a") == 0x61
    assert m.ord_char32("é") == 0xE9
    assert m.ord_char32("Ā") == 0x100
    assert m.ord_char32("‽") == 0x203D
    assert m.ord_char32("♥") == 0x2665
    assert m.ord_char32("🎂") == 0x1F382
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_char32("aa")
    assert str(excinfo.value) == toolong_message
    # wchar_t: width is platform-dependent (m.wchar_size is 2 or 4).
    assert m.ord_wchar("a") == 0x61
    assert m.ord_wchar("é") == 0xE9
    assert m.ord_wchar("Ā") == 0x100
    assert m.ord_wchar("‽") == 0x203D
    assert m.ord_wchar("♥") == 0x2665
    if m.wchar_size == 2:
        with pytest.raises(ValueError) as excinfo:
            assert m.ord_wchar("🎂") == 0x1F382 # requires surrogate pair
        assert str(excinfo.value) == toobig_message(0x10000)
    else:
        assert m.ord_wchar("🎂") == 0x1F382
    with pytest.raises(ValueError) as excinfo:
        assert m.ord_wchar("aa")
    assert str(excinfo.value) == toolong_message
    # char8_t (when available): same one-byte limits as plain char.
    if hasattr(m, "has_u8string"):
        assert m.ord_char8("a") == 0x61 # simple ASCII
        assert m.ord_char8_lv("b") == 0x62
        assert (
            m.ord_char8("é") == 0xE9
        ) # requires 2 bytes in utf-8, but can be stuffed in a char
        with pytest.raises(ValueError) as excinfo:
            assert m.ord_char8("Ā") == 0x100 # requires 2 bytes, doesn't fit in a char
        assert str(excinfo.value) == toobig_message(0x100)
        with pytest.raises(ValueError) as excinfo:
            assert m.ord_char8("ab")
        assert str(excinfo.value) == toolong_message
def test_bytes_to_string():
    """Tests the ability to pass bytes to C++ string-accepting functions. Note that this is
    one-way: the only way to return bytes to Python is via the pybind11::bytes class."""
    # Issue #816
    assert m.strlen(b"hi") == 2
    assert m.string_length(b"world") == 5
    # std::string length counts embedded NULs; C strlen stops at the first one.
    assert m.string_length(b"a\x00b") == 3
    assert m.strlen(b"a\x00b") == 1 # C-string limitation
    # passing in a utf8 encoded string should work
    assert m.string_length("💩".encode()) == 4
def test_bytearray_to_string():
    """Tests the ability to pass bytearray to C++ string-accepting functions"""
    # C strlen sees the bytearray's contents as a NUL-terminated C string.
    assert m.strlen(bytearray(b"bytearray")) == 9
    # std::string-based length counts every byte, including multi-byte UTF-8
    # sequences and bytes above 0x7F.
    length_cases = (
        (bytearray(b"Hi"), 2),
        (bytearray(), 0),
        (bytearray("🦜", "utf-8", "strict"), 4),
        (bytearray(b"\x80"), 1),
    )
    for payload, expected_len in length_cases:
        assert m.string_length(payload) == expected_len
@pytest.mark.skipif(not hasattr(m, "has_string_view"), reason="no <string_view>")
def test_string_view(capture):
    """Tests support for C++17 string_view arguments and return values"""
    # The *_chars helpers expose the raw code units of each encoding width.
    assert m.string_view_chars("Hi") == [72, 105]
    assert m.string_view_chars("Hi 🎂") == [72, 105, 32, 0xF0, 0x9F, 0x8E, 0x82]
    assert m.string_view16_chars("Hi 🎂") == [72, 105, 32, 0xD83C, 0xDF82]
    assert m.string_view32_chars("Hi 🎂") == [72, 105, 32, 127874]
    if hasattr(m, "has_u8string"):
        assert m.string_view8_chars("Hi") == [72, 105]
        assert m.string_view8_chars("Hi 🎂") == [72, 105, 32, 0xF0, 0x9F, 0x8E, 0x82]
    assert m.string_view_return() == "utf8 secret 🎂"
    assert m.string_view16_return() == "utf16 secret 🎂"
    assert m.string_view32_return() == "utf32 secret 🎂"
    if hasattr(m, "has_u8string"):
        assert m.string_view8_return() == "utf8 secret 🎂"
    # The *_print helpers emit the string followed by its length in code units
    # of the view's encoding (hence different numbers per width below).
    with capture:
        m.string_view_print("Hi")
        m.string_view_print("utf8 🎂")
        m.string_view16_print("utf16 🎂")
        m.string_view32_print("utf32 🎂")
    assert (
        capture
        == """
        Hi 2
        utf8 🎂 9
        utf16 🎂 8
        utf32 🎂 7
    """
    )
    if hasattr(m, "has_u8string"):
        with capture:
            m.string_view8_print("Hi")
            m.string_view8_print("utf8 🎂")
        assert (
            capture
            == """
        Hi 2
        utf8 🎂 9
    """
        )
    with capture:
        m.string_view_print("Hi, ascii")
        m.string_view_print("Hi, utf8 🎂")
        m.string_view16_print("Hi, utf16 🎂")
        m.string_view32_print("Hi, utf32 🎂")
    assert (
        capture
        == """
        Hi, ascii 9
        Hi, utf8 🎂 13
        Hi, utf16 🎂 12
        Hi, utf32 🎂 11
    """
    )
    if hasattr(m, "has_u8string"):
        with capture:
            m.string_view8_print("Hi, ascii")
            m.string_view8_print("Hi, utf8 🎂")
        assert (
            capture
            == """
        Hi, ascii 9
        Hi, utf8 🎂 13
    """
        )
    # Conversions between string_view and bytes / str / memoryview.
    assert m.string_view_bytes() == b"abc \x80\x80 def"
    assert m.string_view_str() == "abc ‽ def"
    assert m.string_view_from_bytes("abc ‽ def".encode()) == "abc ‽ def"
    if hasattr(m, "has_u8string"):
        assert m.string_view8_str() == "abc ‽ def"
    assert m.string_view_memoryview() == "Have some 🎂".encode()
    assert m.bytes_from_type_with_both_operator_string_and_string_view() == b"success"
    assert m.str_from_type_with_both_operator_string_and_string_view() == "success"
def test_integer_casting():
    """Issue #929 - out-of-range integer values shouldn't be accepted"""
    # In-range values round-trip through every integer width.
    assert m.i32_str(-1) == "-1"
    assert m.i64_str(-1) == "-1"
    assert m.i32_str(2000000000) == "2000000000"
    assert m.u32_str(2000000000) == "2000000000"
    assert m.i64_str(-999999999999) == "-999999999999"
    assert m.u64_str(999999999999) == "999999999999"
    # Out-of-range values must raise TypeError rather than silently wrap:
    # negatives for unsigned types, and values past the 32-bit signed bounds.
    out_of_range_calls = [
        (m.u32_str, -1),
        (m.u64_str, -1),
        (m.i32_str, -3000000000),
        (m.i32_str, 3000000000),
    ]
    for func, bad_value in out_of_range_calls:
        with pytest.raises(TypeError) as excinfo:
            func(bad_value)
        assert "incompatible function arguments" in str(excinfo.value)
def test_int_convert():
    """Conversion to C++ int via __int__ and __index__, with/without noconvert."""
    class Int:
        def __int__(self):
            return 42
    class NotInt:
        pass
    class Float:
        def __float__(self):
            return 41.99999
    class Index:
        def __index__(self):
            return 42
    class IntAndIndex:
        def __int__(self):
            return 42
        def __index__(self):
            return 0
    class RaisingTypeErrorOnIndex:
        def __index__(self):
            raise TypeError
        def __int__(self):
            return 42
    class RaisingValueErrorOnIndex:
        def __index__(self):
            raise ValueError
        def __int__(self):
            return 42
    convert, noconvert = m.int_passthrough, m.int_passthrough_noconvert
    def requires_conversion(v):
        pytest.raises(TypeError, noconvert, v)
    def cant_convert(v):
        pytest.raises(TypeError, convert, v)
    # Plain ints pass either way; floats never convert implicitly.
    assert convert(7) == 7
    assert noconvert(7) == 7
    cant_convert(3.14159)
    # TODO: Avoid DeprecationWarning in `PyLong_AsLong` (and similar)
    # TODO: PyPy 3.8 does not behave like CPython 3.8 here yet (7.3.7)
    if (3, 8) <= sys.version_info < (3, 10) and env.CPYTHON:
        with env.deprecated_call():
            assert convert(Int()) == 42
    else:
        assert convert(Int()) == 42
    requires_conversion(Int())
    cant_convert(NotInt())
    cant_convert(Float())
    # Before Python 3.8, `PyLong_AsLong` does not pick up on `obj.__index__`,
    # but pybind11 "backports" this behavior.
    assert convert(Index()) == 42
    assert noconvert(Index()) == 42
    # __index__ takes precedence over __int__ when both exist.
    assert convert(IntAndIndex()) == 0 # Fishy; `int(DoubleThought)` == 42
    assert noconvert(IntAndIndex()) == 0
    # A raising __index__ falls back to __int__ only in convert mode.
    assert convert(RaisingTypeErrorOnIndex()) == 42
    requires_conversion(RaisingTypeErrorOnIndex())
    assert convert(RaisingValueErrorOnIndex()) == 42
    requires_conversion(RaisingValueErrorOnIndex())
def test_numpy_int_convert():
    """NumPy integer scalars convert to C++ int; float32 only converts implicitly."""
    np = pytest.importorskip("numpy")
    convert, noconvert = m.int_passthrough, m.int_passthrough_noconvert
    def require_implicit(v):
        pytest.raises(TypeError, noconvert, v)
    # `np.intc` is an alias that corresponds to a C++ `int`
    assert convert(np.intc(42)) == 42
    assert noconvert(np.intc(42)) == 42
    # The implicit conversion from np.float32 is undesirable but currently accepted.
    # TODO: Avoid DeprecationWarning in `PyLong_AsLong` (and similar)
    # TODO: PyPy 3.8 does not behave like CPython 3.8 here yet (7.3.7)
    # https://github.com/pybind/pybind11/issues/3408
    if (3, 8) <= sys.version_info < (3, 10) and env.CPYTHON:
        with env.deprecated_call():
            assert convert(np.float32(3.14159)) == 3
    else:
        assert convert(np.float32(3.14159)) == 3
    require_implicit(np.float32(3.14159))
def test_tuple(doc):
    """std::pair <-> tuple & std::tuple <-> tuple"""
    assert m.pair_passthrough((True, "test")) == ("test", True)
    assert m.tuple_passthrough((True, "test", 5)) == (5, "test", True)
    # Any sequence can be cast to a std::pair or std::tuple
    assert m.pair_passthrough([True, "test"]) == ("test", True)
    assert m.tuple_passthrough([True, "test", 5]) == (5, "test", True)
    assert m.empty_tuple() == ()
    # The generated signatures advertise the element types of each tuple.
    assert (
        doc(m.pair_passthrough)
        == """
        pair_passthrough(arg0: Tuple[bool, str]) -> Tuple[str, bool]
        Return a pair in reversed order
    """
    )
    assert (
        doc(m.tuple_passthrough)
        == """
        tuple_passthrough(arg0: Tuple[bool, str, int]) -> Tuple[int, str, bool]
        Return a triple in reversed order
    """
    )
    # lvalue/rvalue and nested pairs/tuples all convert element-wise.
    assert m.rvalue_pair() == ("rvalue", "rvalue")
    assert m.lvalue_pair() == ("lvalue", "lvalue")
    assert m.rvalue_tuple() == ("rvalue", "rvalue", "rvalue")
    assert m.lvalue_tuple() == ("lvalue", "lvalue", "lvalue")
    assert m.rvalue_nested() == ("rvalue", ("rvalue", ("rvalue", "rvalue")))
    assert m.lvalue_nested() == ("lvalue", ("lvalue", ("lvalue", "lvalue")))
    assert m.int_string_pair() == (2, "items")
def test_builtins_cast_return_none():
    """Casters produced with PYBIND11_TYPE_CASTER() should convert nullptr to None"""
    # Each helper returns a null pointer of a different builtin type; all of
    # them must surface in Python as None rather than crashing or raising.
    nullptr_returners = (
        m.return_none_string,
        m.return_none_char,
        m.return_none_bool,
        m.return_none_int,
        m.return_none_float,
        m.return_none_pair,
    )
    for returner in nullptr_returners:
        assert returner() is None
def test_none_deferred():
    """None passed as various argument types should defer to other overloads"""
    # The defer_none_* helpers return True only when the None-accepting
    # overload was ultimately selected.
    assert not m.defer_none_cstring("abc")
    assert m.defer_none_cstring(None)
    assert not m.defer_none_custom(UserType())
    assert m.defer_none_custom(None)
    assert m.nodefer_none_void(None)
def test_void_caster():
    """std::nullptr_t converts to and from None in both directions."""
    assert m.load_nullptr_t(None) is None
    assert m.cast_nullptr_t() is None
def test_reference_wrapper():
    """std::reference_wrapper for builtin and user types"""
    assert m.refwrap_builtin(42) == 420
    assert m.refwrap_usertype(UserType(42)) == 42
    assert m.refwrap_usertype_const(UserType(42)) == 42
    # reference_wrapper cannot be null, so None must be rejected.
    with pytest.raises(TypeError) as excinfo:
        m.refwrap_builtin(None)
    assert "incompatible function arguments" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.refwrap_usertype(None)
    assert "incompatible function arguments" in str(excinfo.value)
    assert m.refwrap_lvalue().value == 1
    assert m.refwrap_lvalue_const().value == 1
    # copy=True returns fresh objects on every call (distinct identities);
    # copy=False returns wrappers around the same underlying objects.
    a1 = m.refwrap_list(copy=True)
    a2 = m.refwrap_list(copy=True)
    assert [x.value for x in a1] == [2, 3]
    assert [x.value for x in a2] == [2, 3]
    assert a1[0] is not a2[0]
    assert a1[1] is not a2[1]
    b1 = m.refwrap_list(copy=False)
    b2 = m.refwrap_list(copy=False)
    assert [x.value for x in b1] == [1, 2]
    assert [x.value for x in b2] == [1, 2]
    assert b1[0] is b2[0]
    assert b1[1] is b2[1]
    assert m.refwrap_iiw(IncType(5)) == 5
    assert m.refwrap_call_iiw(IncType(10), m.refwrap_iiw) == [10, 10, 10, 10]
def test_complex_cast():
    """std::complex casts"""
    # A Python real converts to std::complex; the string form comes from C++.
    assert m.complex_cast(1) == "1.0"
    assert m.complex_cast(2j) == "(0.0, 2.0)"
def test_bool_caster():
    """Test bool caster implicit conversions."""
    convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert
    def require_implicit(v):
        pytest.raises(TypeError, noconvert, v)
    def cant_convert(v):
        pytest.raises(TypeError, convert, v)
    # straight up bool
    assert convert(True) is True
    assert convert(False) is False
    assert noconvert(True) is True
    assert noconvert(False) is False
    # None requires implicit conversion
    require_implicit(None)
    assert convert(None) is False
    class A:
        def __init__(self, x):
            self.x = x
        # __nonzero__ is the Python 2 spelling; __bool__ is what CPython 3 uses.
        def __nonzero__(self):
            return self.x
        def __bool__(self):
            return self.x
    class B:
        pass
    # Arbitrary objects are not accepted
    cant_convert(object())
    cant_convert(B())
    # Objects with __nonzero__ / __bool__ defined can be converted
    require_implicit(A(True))
    assert convert(A(True)) is True
    assert convert(A(False)) is False
def test_numpy_bool():
    """np.bool_ scalars convert directly; arrays are rejected."""
    np = pytest.importorskip("numpy")
    convert, noconvert = m.bool_passthrough, m.bool_passthrough_noconvert
    def cant_convert(v):
        pytest.raises(TypeError, convert, v)
    # np.bool_ is not considered implicit
    assert convert(np.bool_(True)) is True
    assert convert(np.bool_(False)) is False
    assert noconvert(np.bool_(True)) is True
    assert noconvert(np.bool_(False)) is False
    # A multi-element array never converts to a scalar bool.
    cant_convert(np.zeros(2, dtype="int"))
def test_int_long():
    """C++ int, long, and long long all arrive in Python as plain int."""
    for cast_fn in (m.int_cast, m.long_cast, m.longlong_cast):
        assert isinstance(cast_fn(), int)
def test_void_caster_2():
    """The C++ side runs the actual void-caster checks and returns True on success."""
    assert m.test_void_caster()
def test_const_ref_caster():
    """Verifies that const-ref is propagated through type_caster cast_op.
    The returned ConstRefCasted type is a minimal type that is constructed to
    reference the casting mode used.
    """
    x = False
    # Per the asserts below, the tag identifies which cast_op overload fired:
    # 1 = by value/move, 2 = lvalue ref, 3 = pointer, 4 = const ref,
    # 5 = const pointer (reference_wrapper follows the ref variants).
    assert m.takes(x) == 1
    assert m.takes_move(x) == 1
    assert m.takes_ptr(x) == 3
    assert m.takes_ref(x) == 2
    assert m.takes_ref_wrap(x) == 2
    assert m.takes_const_ptr(x) == 5
    assert m.takes_const_ref(x) == 4
    assert m.takes_const_ref_wrap(x) == 4
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/extern/otk-pyoptix/optix/pybind11/tests/test_call_policies.cpp | C++ | /*
tests/test_call_policies.cpp -- keep_alive and call_guard
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
All rights reserved. Use of this source code is governed by a
BSD-style license that can be found in the LICENSE file.
*/
#include "pybind11_tests.h"
// RAII guard used to test py::call_guard: `enabled` is true exactly while an
// instance is alive, so report_status() observes whether the guard wrapped
// the call.
struct CustomGuard {
    static bool enabled;
    CustomGuard() { enabled = true; }
    ~CustomGuard() { enabled = false; }
    // Returns "guarded" only while a CustomGuard instance is in scope.
    static const char *report_status() { return enabled ? "guarded" : "unguarded"; }
};
bool CustomGuard::enabled = false;
// Guard whose constructor copies CustomGuard::enabled: it reports "guarded"
// only when constructed while a CustomGuard is already active, which lets
// tests verify the construction order of multiple call guards.
struct DependentGuard {
    static bool enabled;
    DependentGuard() { enabled = CustomGuard::enabled; }
    ~DependentGuard() { enabled = false; }
    static const char *report_status() { return enabled ? "guarded" : "unguarded"; }
};
bool DependentGuard::enabled = false;
TEST_SUBMODULE(call_policies, m) {
    // Parent/Child are used in:
    // test_keep_alive_argument, test_keep_alive_return_value, test_alive_gc_derived,
    // test_alive_gc_multi_derived, test_return_none, test_keep_alive_constructor
    // Both classes print on construction/destruction so the Python tests can
    // observe object lifetimes via captured output.
    class Child {
    public:
        Child() { py::print("Allocating child."); }
        Child(const Child &) = default;
        Child(Child &&) = default;
        ~Child() { py::print("Releasing child."); }
    };
    py::class_<Child>(m, "Child").def(py::init<>());
    class Parent {
    public:
        Parent() { py::print("Allocating parent."); }
        Parent(const Parent &parent) = default;
        ~Parent() { py::print("Releasing parent."); }
        void addChild(Child *) {}
        Child *returnChild() { return new Child(); }
        Child *returnNullChild() { return nullptr; }
        static Child *staticFunction(Parent *) { return new Child(); }
    };
    // keep_alive<Nurse, Patient>: the Patient argument is kept alive at least
    // as long as the Nurse (index 0 = return value, 1 = implicit self).
    py::class_<Parent>(m, "Parent")
        .def(py::init<>())
        .def(py::init([](Child *) { return new Parent(); }), py::keep_alive<1, 2>())
        .def("addChild", &Parent::addChild)
        .def("addChildKeepAlive", &Parent::addChild, py::keep_alive<1, 2>())
        .def("returnChild", &Parent::returnChild)
        .def("returnChildKeepAlive", &Parent::returnChild, py::keep_alive<1, 0>())
        .def("returnNullChildKeepAliveChild", &Parent::returnNullChild, py::keep_alive<1, 0>())
        .def("returnNullChildKeepAliveParent", &Parent::returnNullChild, py::keep_alive<0, 1>())
        .def_static("staticFunction", &Parent::staticFunction, py::keep_alive<1, 0>());
    m.def(
        "free_function", [](Parent *, Child *) {}, py::keep_alive<1, 2>());
    // keep_alive referencing a nonexistent argument index (there are none here).
    m.def(
        "invalid_arg_index", [] {}, py::keep_alive<0, 1>());
#if !defined(PYPY_VERSION)
    // test_alive_gc
    class ParentGC : public Parent {
    public:
        using Parent::Parent;
    };
    py::class_<ParentGC, Parent>(m, "ParentGC", py::dynamic_attr()).def(py::init<>());
#endif
    // test_call_guard
    m.def("unguarded_call", &CustomGuard::report_status);
    m.def("guarded_call", &CustomGuard::report_status, py::call_guard<CustomGuard>());
    // Guards are constructed left-to-right, so DependentGuard only reports
    // "guarded" when CustomGuard precedes it in the call_guard list.
    m.def(
        "multiple_guards_correct_order",
        []() {
            return CustomGuard::report_status() + std::string(" & ")
                   + DependentGuard::report_status();
        },
        py::call_guard<CustomGuard, DependentGuard>());
    m.def(
        "multiple_guards_wrong_order",
        []() {
            return DependentGuard::report_status() + std::string(" & ")
                   + CustomGuard::report_status();
        },
        py::call_guard<DependentGuard, CustomGuard>());
#if defined(WITH_THREAD) && !defined(PYPY_VERSION)
    // `py::call_guard<py::gil_scoped_release>()` should work in PyPy as well,
    // but it's unclear how to test it without `PyGILState_GetThisThreadState`.
    auto report_gil_status = []() {
        auto is_gil_held = false;
        if (auto *tstate = py::detail::get_thread_state_unchecked()) {
            is_gil_held = (tstate == PyGILState_GetThisThreadState());
        }
        return is_gil_held ? "GIL held" : "GIL released";
    };
    m.def("with_gil", report_gil_status);
    m.def("without_gil", report_gil_status, py::call_guard<py::gil_scoped_release>());
#endif
}
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.