sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
Genesis-Embodied-AI/Genesis:genesis/utils/usd/usd_stage.py | from typing import List, Set
import genesis as gs
from pxr import Sdf, Usd, UsdPhysics
from .usd_context import (
UsdContext,
extract_links_referenced_by_joints,
find_joints_in_range,
find_rigid_bodies_in_range,
)
from .usd_utils import UnionFind
def _find_common_ancestor_path(paths: List[str]) -> str:
"""
Find the common ancestor path of a list of prim paths.
Parameters
----------
paths : List[str]
List of prim paths.
Returns
-------
str
The common ancestor path (longest common prefix).
"""
if not paths:
return "/"
# Split paths into components, filtering out empty strings (from leading slash)
path_components = [[comp for comp in path.split("/") if comp] for path in paths]
# Find the minimum length
min_len = min(len(components) for components in path_components)
# Find common prefix
common_components = []
for i in range(min_len):
if all(components[i] == path_components[0][i] for components in path_components):
common_components.append(path_components[0][i])
else:
break
if not common_components:
return "/"
return "/" + "/".join(common_components)
def _find_connected_components(stage: Usd.Stage, all_joints: List[Usd.Prim]) -> List[tuple[List[Usd.Prim], Set[str]]]:
    """
    Find connected components in the joint graph using union-find (disjoint set) algorithm.

    Links (rigid-body prims referenced by joints) are the graph nodes and joints are
    the edges; each resulting component corresponds to one articulation.

    Parameters
    ----------
    stage : Usd.Stage
        The USD stage.
    all_joints : List[Usd.Prim]
        List of all joint prims in the stage.

    Returns
    -------
    List[tuple[List[Usd.Prim], Set[str]]]
        List of (joints, links) tuples for each connected component. Components
        that ended up with no joints are filtered out.
    """
    # Union-Find data structure
    uf = UnionFind()
    # Build joint-to-links mapping and union connected links
    # Only include paths that are actually rigid bodies (have RigidBodyAPI or CollisionAPI)
    joint_to_links: dict[Usd.Prim, tuple[str | None, str | None]] = {}
    all_link_paths: Set[str] = set()

    def is_rigid_body(path: str) -> bool:
        """Check if a prim path is a rigid body."""
        prim = stage.GetPrimAtPath(path)
        if not prim.IsValid():
            return False
        return prim.HasAPI(UsdPhysics.RigidBodyAPI) or prim.HasAPI(UsdPhysics.CollisionAPI)

    for joint_prim in all_joints:
        joint = UsdPhysics.Joint(joint_prim)
        # body0/body1 relationships may have no targets (e.g. a joint anchored to the world).
        body0_targets = joint.GetBody0Rel().GetTargets()
        body1_targets = joint.GetBody1Rel().GetTargets()
        body0_path = str(body0_targets[0]) if body0_targets else None
        body1_path = str(body1_targets[0]) if body1_targets else None
        joint_to_links[joint_prim] = (body0_path, body1_path)
        # Union connected links (only if they are rigid bodies)
        # If a joint connects a non-rigid-body to a rigid body, we still include the rigid body
        if body0_path and body1_path:
            body0_is_rigid = is_rigid_body(body0_path)
            body1_is_rigid = is_rigid_body(body1_path)
            if body0_is_rigid and body1_is_rigid:
                # Both are rigid bodies - union them
                all_link_paths.add(body0_path)
                all_link_paths.add(body1_path)
                uf.union(body0_path, body1_path)
            elif body0_is_rigid:
                # Only body0 is rigid - add it as a standalone link
                all_link_paths.add(body0_path)
                uf.find(body0_path)  # Ensure it's in the union-find structure
            elif body1_is_rigid:
                # Only body1 is rigid - add it as a standalone link
                all_link_paths.add(body1_path)
                uf.find(body1_path)  # Ensure it's in the union-find structure
            # If neither is rigid, skip this joint (it doesn't connect rigid bodies)
        elif body0_path and is_rigid_body(body0_path):
            all_link_paths.add(body0_path)
            uf.find(body0_path)  # Ensure it's in the union-find structure
        elif body1_path and is_rigid_body(body1_path):
            all_link_paths.add(body1_path)
            uf.find(body1_path)  # Ensure it's in the union-find structure

    # Group links and joints by connected component
    component_roots: dict[str, tuple[List[Usd.Prim], Set[str]]] = {}
    for link_path in all_link_paths:
        root = uf.find(link_path)
        if root not in component_roots:
            component_roots[root] = ([], set())
        component_roots[root][1].add(link_path)

    # Add joints to their components
    # Only add joints that connect at least one rigid body
    for joint_prim, (body0_path, body1_path) in joint_to_links.items():
        # Add joint to component based on which body is a rigid body.
        # body0 takes precedence; when both bodies are rigid they were unioned
        # above and share a root, so either branch yields the same component.
        if body0_path and body0_path in all_link_paths:
            root = uf.find(body0_path)
            if root in component_roots and joint_prim not in component_roots[root][0]:
                component_roots[root][0].append(joint_prim)
        elif body1_path and body1_path in all_link_paths:
            root = uf.find(body1_path)
            if root in component_roots and joint_prim not in component_roots[root][0]:
                component_roots[root][0].append(joint_prim)

    # Return only components that have joints
    return [(joints, links) for joints, links in component_roots.values() if joints]
def parse_usd_stage(morph: gs.morphs.USD) -> List[gs.morphs.USD]:
    """
    Parse a USD stage and extract all rigid entities (articulations and rigid bodies) as separate USD morphs.

    This function uses a graph-based approach to identify connected components:
    - Joints are edges, links (rigid bodies referenced by joints) are nodes
    - Each connected component becomes one articulation entity
    - Pure rigid bodies (not referenced by any joint) become separate entities

    Joint prims are not stored on morphs; they are deduced dynamically when each
    entity is parsed via parse_usd_rigid_entity.

    Parameters
    ----------
    morph : gs.morphs.USD
        The USD morph containing the stage to parse. The morph must have a valid `usd_ctx`
        attribute that provides access to the USD context.

    Returns
    -------
    morphs: List[gs.morphs.USD]
        A list of USD morphs, one for each rigid entity found in the stage. Each morph is
        a copy of the input morph with its `prim_path` set to the topmost common ancestor
        of all links in the component. The list is guaranteed to be non-empty (raises
        an exception if no entities are found).
    """
    context: UsdContext = morph.usd_ctx
    usd_stage: Usd.Stage = context.stage

    # Find all joints in the stage
    all_joints = find_joints_in_range(Usd.PrimRange(usd_stage.GetPseudoRoot()))
    # Find all rigid bodies in the stage (prune children when rigid body is found)
    all_rigid_bodies = find_rigid_bodies_in_range(Usd.PrimRange(usd_stage.GetPseudoRoot()))
    # Extract links referenced by joints (only rigid bodies)
    links_referenced_by_joints = extract_links_referenced_by_joints(usd_stage, all_joints, check_rigid_body=True)

    morphs: List[gs.morphs.USD] = []
    components: List[tuple[List[Usd.Prim], Set[str]]] = []

    # Process connected components (articulations)
    if all_joints:
        components = _find_connected_components(usd_stage, all_joints)
        for component_joints, component_links in components:
            # Find topmost common ancestor of all links in this component
            link_paths = list(component_links)
            common_ancestor_path = _find_common_ancestor_path(link_paths)
            common_ancestor_prim = usd_stage.GetPrimAtPath(common_ancestor_path)
            assert common_ancestor_prim.IsValid(), f"Invalid common ancestor path: {common_ancestor_path}"
            # Create morph for this connected component
            entity_morph = morph.copy()
            entity_morph.prim_path = common_ancestor_path
            morphs.append(entity_morph)

    # Process pure rigid bodies (not referenced by joints)
    # Links referenced by joints are already part of articulations, so exclude them
    pure_rigid_bodies = all_rigid_bodies - links_referenced_by_joints
    for rigid_body_path in pure_rigid_bodies:
        entity_morph = morph.copy()
        entity_morph.prim_path = rigid_body_path
        morphs.append(entity_morph)

    if not morphs:
        gs.raise_exception("No entities found in USD stage.")

    num_articulations = len(components) if all_joints else 0
    gs.logger.debug(
        f"Found {len(morphs)} rigid entity(ies) from USD stage: {num_articulations} articulation(s), {len(pure_rigid_bodies)} pure rigid body(ies)."
    )
    return morphs
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/usd/usd_stage.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/utils/usd/usd_utils.py | import math
from typing import List, Tuple
import numpy as np
from pxr import Gf, Usd, UsdGeom
import genesis as gs
from genesis.utils import geom as gu
# Unit direction vector for each principal axis label.
AXES_VECTOR = {
    "X": np.array([1, 0, 0], dtype=np.float32),
    "Y": np.array([0, 1, 0], dtype=np.float32),
    "Z": np.array([0, 0, 1], dtype=np.float32),
}
# Homogeneous 4x4 rotation matrices mapping the +Z axis onto the named axis
# (identity for "Z"). NOTE(review): presumably used to re-orient Z-aligned
# geometry to a prim's declared axis — confirm at call sites.
AXES_T = {
    "X": np.array(
        [[0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 0.0], [-1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]], dtype=np.float32
    ),
    "Y": np.array(
        [[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, -1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]], dtype=np.float32
    ),
    "Z": np.eye(4, dtype=np.float32),
}
def usd_pos_to_numpy(usd_pos: Gf.Vec3f | None) -> np.ndarray:
    """
    Convert a USD position value to a numpy array.

    Parameters
    ----------
    usd_pos : Gf.Vec3f | None
        USD position attribute value; None stands for "unset".

    Returns
    -------
    np.ndarray
        The position as a float32 numpy array, or the zero position when unset.
    """
    return gu.zero_pos() if usd_pos is None else np.asarray(usd_pos, dtype=np.float32)
def usd_quat_to_numpy(usd_quat: Gf.Quatf | None) -> np.ndarray:
    """
    Convert a USD quaternion to a (w, x, y, z) numpy array.

    Parameters
    ----------
    usd_quat : Gf.Quatf | None
        USD quaternion attribute value; None stands for "unset".

    Returns
    -------
    np.ndarray
        The quaternion as a numpy array, or the identity quaternion when unset.
    """
    if usd_quat is None:
        return gu.identity_quat()
    # USD stores real + imaginary separately; Genesis uses scalar-first layout.
    real = usd_quat.GetReal()
    imag = usd_quat.GetImaginary()
    return np.asarray([real, *imag], dtype=np.float32)
def usd_center_of_mass_to_numpy(usd_pos: Gf.Vec3f) -> np.ndarray | None:
    """
    Convert a USD center-of-mass value to a numpy array, filtering the sentinel default.

    The USD Physics MassAPI declares centerOfMass with default (-inf, -inf, -inf),
    a sentinel meaning "compute from geometry". That sentinel maps to None here so
    downstream code can recompute it.

    Parameters
    ----------
    usd_pos : Gf.Vec3f
        USD center of mass attribute value.

    Returns
    -------
    np.ndarray | None
        The center of mass as a numpy array, or None for the sentinel default.
    """
    pos = usd_pos_to_numpy(usd_pos)
    # All components negative infinity -> schema default sentinel.
    is_sentinel = np.all(np.isinf(pos) & (pos < 0))
    return None if is_sentinel else pos
def usd_principal_axes_to_numpy(usd_quat: Gf.Quatf) -> np.ndarray | None:
    """
    Convert a USD principal-axes quaternion to a numpy array, filtering the invalid default.

    The USD Physics MassAPI declares principalAxes with default (0, 0, 0, 0), which
    is not a valid quaternion (identity would be (1, 0, 0, 0)) and signals that the
    axes should be computed from geometry; that default maps to None here.

    Parameters
    ----------
    usd_quat : Gf.Quatf
        USD principal axes quaternion attribute value.

    Returns
    -------
    np.ndarray | None
        The quaternion as a numpy array, or None for the all-zero default.
    """
    quat = usd_quat_to_numpy(usd_quat)
    # An (approximately) all-zero quaternion is the schema default, not a rotation.
    return None if np.allclose(quat, [0, 0, 0, 0]) else quat
def usd_inertia_to_numpy(inertia: Gf.Vec3f) -> np.ndarray | None:
    """
    Convert a USD diagonal inertia to a 3x3 diagonal matrix, filtering default/invalid values.

    Delegates validation (all-zero default, non-finite entries) to
    usd_diagonal_inertia_to_numpy and wraps the result in a diagonal matrix.

    Parameters
    ----------
    inertia : Gf.Vec3f
        USD diagonal inertia attribute value.

    Returns
    -------
    np.ndarray | None
        A 3x3 diagonal inertia matrix, or None if the value is the ignored
        default or invalid.
    """
    diagonal = usd_diagonal_inertia_to_numpy(inertia)
    return None if diagonal is None else np.diag(diagonal)
def usd_diagonal_inertia_to_numpy(usd_pos: Gf.Vec3f) -> np.ndarray | None:
    """
    Convert a USD diagonal inertia to a numpy array, filtering default/invalid values.

    The USD Physics MassAPI declares diagonalInertia with default (0, 0, 0),
    meaning "ignore / compute from geometry"; that default, as well as any
    non-finite entries, maps to None.

    Parameters
    ----------
    usd_pos : Gf.Vec3f
        USD diagonal inertia attribute value.

    Returns
    -------
    np.ndarray | None
        The diagonal inertia as a numpy array, or None if the value is the
        ignored default or invalid.
    """
    values = usd_pos_to_numpy(usd_pos)
    # (0, 0, 0) is the schema default meaning "ignored".
    if np.allclose(values, 0):
        return None
    # Reject any non-finite component (inf/nan).
    if not all(math.isfinite(component) for component in values):
        return None
    return values
def usd_mass_to_float(usd_mass: float) -> float | None:
"""
Convert USD mass to float, handling default and invalid values.
The USD Physics MassAPI defines mass with default value 0, which is valid but
means the mass should be ignored/computed from geometry. This function returns
None for the default ignored value or invalid values (non-positive, inf, or nan).
Parameters
----------
usd_mass : float
USD mass attribute value.
Returns
-------
float | None
Valid mass value, or None if default ignored or invalid.
"""
# Default is 0 which means ignored
if usd_mass == 0:
return None
# Check for invalid values (non-finite)
if not math.isfinite(usd_mass):
return None
return float(usd_mass)
def usd_attr_array_to_numpy(attr: Usd.Attribute, dtype: np.dtype, return_none: bool = False) -> np.ndarray | None:
    """Read a USD attribute into a numpy array; yield an empty array (or None) when it has no value."""
    if not attr.HasValue():
        return None if return_none else np.empty(0, dtype=dtype)
    return np.array(attr.Get(), dtype=dtype)
def usd_primvar_array_to_numpy(
    primvar: UsdGeom.Primvar, dtype: np.dtype, return_none: bool = False
) -> np.ndarray | None:
    """Read a USD primvar (flattened over indices) into a numpy array; empty array (or None) when unset."""
    if not (primvar.IsDefined() and primvar.HasValue()):
        return None if return_none else np.empty(0, dtype=dtype)
    return np.array(primvar.ComputeFlattened(), dtype=dtype)
def extract_scale(T: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """
    Split a 4x4 transform into a rigid part and a scale part via right polar decomposition.

    Returns (Q, S) where Q is a 4x4 matrix holding the pure rotation and the original
    translation, and S is the right scale factor from gu.polar. Raises via
    gs.raise_exception when the rotation has non-positive determinant.
    """
    R, S = gu.polar(T[:3, :3], pure_rotation=True, side="right")
    determinant = np.linalg.det(R)
    if determinant <= 0:
        gs.raise_exception(f"Negative determinant of rotation matrix detected. Got {np.linalg.det(R)}.")
    Q = np.eye(4, dtype=T.dtype)
    Q[:3, :3] = R
    Q[:3, 3] = T[:3, 3]
    return Q, S
def get_attr_value_by_candidates(prim: Usd.Prim, candidates: List[str], attr_name: str, default_value: float):
    """Return the first present attribute value among the candidate names, else default_value (logged)."""
    for candidate_name in candidates:
        candidate_value = prim.GetAttribute(candidate_name).Get()
        if candidate_value is not None:
            return candidate_value
    # None of the candidate attributes carried a value; fall back and record it.
    gs.logger.debug(
        f"No matching attribute `{attr_name}` found in {prim.GetPath()} "
        f"given candidates: {candidates}. Using default value: {default_value}."
    )
    return default_value
class UnionFind:
    """
    Disjoint-set (union-find) structure over string keys.

    Uses path compression in `find` and union by rank in `union` for
    near-constant amortized operations. Unknown elements are lazily
    registered as singleton sets on first access.
    """

    def __init__(self):
        """Create an empty structure with no registered elements."""
        # Maps each element to its parent; roots point to themselves.
        self.parent: dict[str, str] = {}
        # Upper bound on subtree height, used to keep trees shallow.
        self.rank: dict[str, int] = {}

    def find(self, x: str) -> str:
        """
        Return the root representative of x, registering x if unseen.

        Parameters
        ----------
        x : str
            Element to find root for.

        Returns
        -------
        str
            Root element of x's set.
        """
        parent = self.parent
        if x not in parent:
            # Lazily register x as its own singleton set.
            parent[x] = x
            self.rank[x] = 0
            return x
        root = parent[x]
        if root != x:
            root = self.find(root)
            parent[x] = root  # Path compression
        return root

    def union(self, x: str, y: str):
        """
        Merge the sets containing x and y (no-op if already merged).

        Parameters
        ----------
        x : str
            Element in first set.
        y : str
            Element in second set.
        """
        root_x, root_y = self.find(x), self.find(y)
        if root_x == root_y:
            return
        rank = self.rank
        # Union by rank: make the higher-rank root the parent; on ties,
        # root_x wins and its rank grows by one.
        if rank[root_x] < rank[root_y]:
            root_x, root_y = root_y, root_x
        self.parent[root_y] = root_x
        if rank[root_x] == rank[root_y]:
            rank[root_x] += 1
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/usd/usd_utils.py",
"license": "Apache License 2.0",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Genesis-Embodied-AI/Genesis:examples/hibernation.py | """
Hibernation Performance Example
This example demonstrates the performance benefit of hibernation in Genesis rigid body simulation.
Hibernation allows stationary rigid bodies to "sleep", skipping physics computations for objects
that have settled, which significantly improves simulation performance.
The scenario creates many boxes that fall and settle on a ground plane. Once settled, hibernated
objects require minimal computation, while non-hibernated simulations continue computing physics
for all objects every step.
Usage:
python examples/hibernation.py # Run performance comparison
python examples/hibernation.py -v # With visualization (slower due to rendering)
python examples/hibernation.py -n 50 # Use 50 boxes instead of default 20
"""
import argparse
import time
import genesis as gs
def run_simulation(use_hibernation: bool, n_boxes: int, n_steps: int, show_viewer: bool) -> float:
    """Build the box-drop scene, let it settle, then time n_steps and return the elapsed seconds."""
    scene = gs.Scene(
        rigid_options=gs.options.RigidOptions(
            use_contact_island=True,
            use_hibernation=use_hibernation,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(0, -3, 2),
            camera_lookat=(0, 0, 0.5),
            camera_up=(0, 0, 1),
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(gs.morphs.Plane())

    # Lay the boxes out on a square-ish grid with slightly staggered drop heights.
    spacing = 0.25
    box_size = 0.1
    cols = int(n_boxes**0.5) + 1
    for idx in range(n_boxes):
        drop_x = (idx % cols - cols / 2) * spacing
        drop_y = (idx // cols - cols / 2) * spacing
        drop_z = 0.5 + (idx % 3) * 0.2  # Stagger heights slightly
        scene.add_entity(
            gs.morphs.Box(pos=(drop_x, drop_y, drop_z), size=(box_size, box_size, box_size)),
        )
    scene.build()

    # Untimed warm-up: let the boxes fall and come to rest first.
    for _ in range(200):
        scene.step()

    # Timed phase: measure performance after objects have settled.
    t_start = time.perf_counter()
    for _ in range(n_steps):
        scene.step()
    total = time.perf_counter() - t_start
    scene.destroy()
    return total
def main():
    """Parse CLI options, run the comparison with hibernation off then on, and report the speedup."""
    parser = argparse.ArgumentParser(description="Hibernation performance comparison")
    parser.add_argument("-v", "--vis", action="store_true", default=False, help="Enable visualization")
    parser.add_argument("-c", "--cpu", action="store_true", default=False, help="Use CPU backend")
    parser.add_argument("-n", "--n-boxes", type=int, default=20, help="Number of boxes (default: 20)")
    parser.add_argument("-s", "--steps", type=int, default=500, help="Number of timed steps (default: 500)")
    cli = parser.parse_args()

    gs.init(backend=gs.cpu if cli.cpu else gs.gpu, performance_mode=True)

    separator = "=" * 70
    print(separator)
    print("Hibernation Performance Comparison")
    print(separator)
    print(f"Configuration: {cli.n_boxes} boxes, {cli.steps} timed steps")
    print(f"Backend: {'CPU' if cli.cpu else 'GPU'}")
    print()

    # Baseline run: every body is simulated on every step.
    print("Running simulation WITHOUT hibernation...")
    time_without = run_simulation(
        use_hibernation=False,
        n_boxes=cli.n_boxes,
        n_steps=cli.steps,
        show_viewer=cli.vis,
    )
    print(f" Time: {time_without:.3f}s ({cli.steps / time_without:.1f} steps/sec)")

    # Comparison run: settled bodies are allowed to sleep.
    print("Running simulation WITH hibernation...")
    time_with = run_simulation(
        use_hibernation=True,
        n_boxes=cli.n_boxes,
        n_steps=cli.steps,
        show_viewer=cli.vis,
    )
    print(f" Time: {time_with:.3f}s ({cli.steps / time_with:.1f} steps/sec)")

    print()
    print(separator)
    print("Results")
    print(separator)
    # Guard against a zero-duration timed phase.
    speedup = time_without / time_with if time_with > 0 else float("inf")
    print(f"Without hibernation: {time_without:.3f}s")
    print(f"With hibernation: {time_with:.3f}s")
    print(f"Speedup: {speedup:.2f}x faster with hibernation")
    print()
    print("Note: Hibernation benefit increases with more settled objects and longer simulations.")
    print(" The speedup comes from skipping physics computations for sleeping objects.")


if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/hibernation.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:tests/test_ipc.py | import math
from contextlib import nullcontext
from itertools import permutations
from typing import TYPE_CHECKING, cast, Any
import numpy as np
import pytest
try:
import uipc
except ImportError:
pytest.skip("IPC Coupler is not supported because 'uipc' module is not available.", allow_module_level=True)
from uipc import builtin
from uipc.backend import SceneVisitor
from uipc.geometry import SimplicialComplexSlot, apply_transform, merge
import genesis as gs
import genesis.utils.geom as gu
from genesis.utils.misc import tensor_to_array, qd_to_numpy, geometric_mean, harmonic_mean
from .conftest import TOL_SINGLE
from .utils import assert_allclose, get_hf_dataset
if TYPE_CHECKING:
from genesis.engine.couplers import IPCCoupler
def collect_ipc_geometry_entries(scene):
    """Yield (solver_type, env_idx, idx, geometry) for every tagged simplicial complex in the IPC scene."""
    visitor = SceneVisitor(scene.sim.coupler._ipc_scene)
    for slot in visitor.geometries():
        if not isinstance(slot, SimplicialComplexSlot):
            continue
        geometry = slot.geometry()
        meta = geometry.meta()
        solver_attr = meta.find("solver_type")
        if solver_attr is None:
            # Geometry not tagged with a solver type; ignore it.
            continue
        (solver_type,) = solver_attr.view()
        assert solver_type in ("rigid", "fem", "cloth")
        (env_idx,) = map(int, meta.find("env_idx").view())
        # Rigid geometries are keyed by link index, deformables by entity index.
        idx_key = "link_idx" if solver_type == "rigid" else "entity_idx"
        (idx,) = map(int, meta.find(idx_key).view())
        yield (solver_type, env_idx, idx, geometry)
def find_ipc_geometries(scene, *, solver_type, idx=None, env_idx=None):
    """Return all IPC geometries matching solver_type and, when given, idx and env_idx."""
    return [
        geometry
        for entry_type, entry_env, entry_idx, geometry in collect_ipc_geometry_entries(scene)
        if entry_type == solver_type
        and (idx is None or idx == entry_idx)
        and (env_idx is None or env_idx == entry_env)
    ]
def get_ipc_merged_geometry(scene, *, solver_type, idx, env_idx):
    """Return the single matching IPC geometry, merged across instances when instanced."""
    (geometry,) = find_ipc_geometries(scene, solver_type=solver_type, idx=idx, env_idx=env_idx)
    if geometry.instances().size() >= 1:
        geometry = merge(apply_transform(geometry))
    return geometry
def get_ipc_positions(scene, *, solver_type, idx, envs_idx):
    """Stack per-environment vertex positions of one IPC geometry along a leading env axis."""
    assert envs_idx
    per_env = [
        get_ipc_merged_geometry(scene, solver_type=solver_type, idx=idx, env_idx=env_idx)
        .positions()
        .view()
        .squeeze(axis=-1)
        for env_idx in envs_idx
    ]
    return np.stack(per_env, axis=0)
def get_ipc_rigid_links_idx(scene, env_idx):
    """Return the link indices of all rigid geometries registered in IPC for one environment."""
    return [
        entry_idx
        for entry_type, entry_env, entry_idx, _geometry in collect_ipc_geometry_entries(scene)
        if entry_type == "rigid" and entry_env == env_idx
    ]
@pytest.mark.parametrize("enable_rigid_rigid_contact", [False, True])
def test_contact_pair_friction_resistance(enable_rigid_rigid_contact):
    """
    Check the IPC contact tabular for every ordered entity pair: friction is the
    geometric mean and resistance the harmonic mean of the per-entity values
    (entity resistance falls back to the coupler-level default when unset), and
    a pair is disabled exactly when both members are non-plane rigid bodies and
    rigid-rigid contact is turned off.
    """
    from genesis.engine.entities import RigidEntity, FEMEntity

    scene = gs.Scene(
        coupler_options=gs.options.IPCCouplerOptions(
            contact_resistance=36.0,
            enable_rigid_rigid_contact=enable_rigid_rigid_contact,
        ),
        show_viewer=False,
    )
    plane = scene.add_entity(
        gs.morphs.Plane(),
        material=gs.materials.Rigid(
            coup_type="ipc_only",
        ),
    )
    rigid_a = scene.add_entity(
        gs.morphs.Box(
            pos=(0.0, 0.0, 0.12),
            size=(0.05, 0.05, 0.05),
        ),
        material=gs.materials.Rigid(
            coup_type="ipc_only",
            coup_friction=0.25,
            contact_resistance=9.0,
        ),
    )
    rigid_b = scene.add_entity(
        gs.morphs.Box(
            pos=(0.2, 0.0, 0.12),
            size=(0.05, 0.05, 0.05),
        ),
        material=gs.materials.Rigid(
            coup_type="ipc_only",
            coup_friction=0.64,
            contact_resistance=16.0,
        ),
    )
    rigid_c = scene.add_entity(
        gs.morphs.Box(
            pos=(-0.2, 0.0, 0.12),
            size=(0.05, 0.05, 0.05),
        ),
        material=gs.materials.Rigid(
            coup_type="ipc_only",
            coup_friction=0.16,
            # None exercises the fallback to the coupler-level contact_resistance.
            contact_resistance=None,
        ),
    )
    fem = scene.add_entity(
        morph=gs.morphs.Box(
            pos=(0.4, 0.0, 0.12),
            size=(0.05, 0.05, 0.05),
        ),
        material=gs.materials.FEM.Elastic(
            E=5e4,
            nu=0.35,
            rho=1000.0,
            friction_mu=0.49,
            contact_resistance=25.0,
        ),
    )
    scene.build()
    assert scene.sim is not None
    coupler = cast("IPCCoupler", scene.sim.coupler)
    tab = coupler._ipc_scene.contact_tabular()
    # Iterate every ordered pair so the check covers both argument orders.
    for entities in permutations((plane, rigid_a, rigid_b, rigid_c, fem), 2):
        elems_idx = []
        frictions = []
        resistances = []
        for entity in entities:
            if isinstance(entity, RigidEntity):
                # The ground plane has its own contact-element registry.
                if entity is plane:
                    elem = coupler._ipc_ground_contacts[entity]
                else:
                    elem = coupler._ipc_abd_contacts[entity]
                friction = entity.material.coup_friction
            else:
                assert isinstance(entity, FEMEntity)
                elem = coupler._ipc_fem_contacts[entity]
                friction = entity.material.friction_mu
            # Entity-level resistance overrides the coupler default when set.
            resistance = entity.material.contact_resistance or coupler.options.contact_resistance
            elems_idx.append(elem.id())
            frictions.append(friction)
            resistances.append(resistance)
        model = tab.at(*elems_idx)
        assert model.friction_rate() == pytest.approx(geometric_mean(*frictions))
        assert model.resistance() == pytest.approx(harmonic_mean(*resistances))
        # XOR: disabled iff both entities are non-plane rigid bodies while
        # rigid-rigid contact is off; enabled in every other combination.
        assert model.is_enabled() ^ (
            all(isinstance(entity, RigidEntity) and entity is not plane for entity in entities)
            and not enable_rigid_rigid_contact
        )
@pytest.mark.parametrize("n_envs", [0, 2])
def test_rigid_ground_sliding(n_envs, show_viewer):
    """
    Cubes with increasing friction coefficients slide on a plane under tilted
    gravity; cubes with higher friction must end up strictly less far along +x.
    """
    # Gravity has a +x component, driving the cubes sideways while pressing them down.
    GRAVITY = np.array([5.0, 0.0, -10.0], dtype=gs.np_float)

    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=0.01,
            gravity=GRAVITY,
        ),
        coupler_options=gs.options.IPCCouplerOptions(
            contact_d_hat=0.01,
            enable_rigid_rigid_contact=False,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(3.5, 2.0, 1.5),
            camera_lookat=(1.0, -0.5, 0.0),
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(
        gs.morphs.Plane(),
        material=gs.materials.Rigid(
            coup_type="ipc_only",
            coup_friction=0.25,
        ),
    )
    cubes = []
    # One cube per friction coefficient, spread along y so they do not touch each other.
    for y, mu in ((-0.4, 0.0), (-0.2, 0.01), (0.0, 0.04), (0.2, 0.09), (0.4, 0.16)):
        cube = scene.add_entity(
            gs.morphs.Box(
                pos=(0.0, y, 0.12),
                size=(0.08, 0.08, 0.08),
            ),
            material=gs.materials.Rigid(
                coup_type="ipc_only",
                coup_friction=mu,
            ),
        )
        cubes.append(cube)
    scene.build(n_envs=n_envs)

    initial_positions = np.stack([tensor_to_array(cube.get_pos()) for cube in cubes], axis=-2)
    for _ in range(100):
        scene.step()
    final_positions = np.stack([tensor_to_array(cube.get_pos()) for cube in cubes], axis=-2)

    # Coarse non-penetration sanity check
    assert (final_positions[..., 2] > 0.0).all()
    # Distance from ground should be friction-independent
    assert_allclose(np.diff(final_positions[..., 2], axis=-1), 0.0, tol=TOL_SINGLE)
    # No y-axis driving force: lateral drift should be minimal
    assert_allclose(initial_positions[..., 1], final_positions[..., 1], tol=TOL_SINGLE)
    # All cubes should move along +x under tilted gravity.
    assert ((final_positions[..., 0] - initial_positions[..., 0]) > 0.5).all()
    # Lower coup_friction should slide farther, so x should strictly decrease as mu increases.
    assert (np.diff(final_positions[..., ::-1, 0], axis=-1) > 0.2).all()
@pytest.mark.parametrize("n_envs", [0, 2])
def test_ipc_rigid_ground_clearance(n_envs, show_viewer):
    """
    Frictionless cubes resting on a plane: cubes configured with larger contact
    resistance must settle with larger ground clearance (less compression of the
    contact gap) at equilibrium.
    """
    GRAVITY = np.array([0.0, 0.0, -9.8], dtype=gs.np_float)

    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=0.005,
            gravity=GRAVITY,
        ),
        coupler_options=gs.options.IPCCouplerOptions(
            contact_d_hat=0.01,
            contact_resistance=1e6,
            enable_rigid_rigid_contact=False,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(1.5, 0.0, 0.1),
            camera_lookat=(0.0, 0.0, 0.0),
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(
        gs.morphs.Plane(),
        material=gs.materials.Rigid(
            coup_type="ipc_only",
        ),
    )
    cubes = []
    # One cube per contact resistance, spread along y so they do not interact.
    for y, resistance in ((-0.4, 1e2), (-0.2, 1e3), (0.0, 1e4), (0.2, 1e5), (0.4, 1e6)):
        cube = scene.add_entity(
            gs.morphs.Box(
                pos=(0.0, y, 0.05),
                size=(0.08, 0.08, 0.08),
            ),
            material=gs.materials.Rigid(
                coup_type="ipc_only",
                coup_friction=0.0,
                contact_resistance=resistance,
            ),
        )
        cubes.append(cube)
    scene.build(n_envs=n_envs)

    initial_positions = np.stack([tensor_to_array(cube.get_pos()) for cube in cubes], axis=-2)
    dist = []
    # Let the cubes settle first (untimed), then sample the lowest vertex height each step.
    for _ in range(70):
        scene.step()
    for _ in range(20):
        scene.step()
        dist.append(np.stack([tensor_to_array(cube.get_verts())[..., 2].min(axis=-1) for cube in cubes], axis=-1))
    dist = np.stack(dist, axis=-1)
    final_positions = np.stack([tensor_to_array(cube.get_pos()) for cube in cubes], axis=-2)

    # No lateral driving force in x/y; drift should stay small.
    assert_allclose(initial_positions[..., :2], final_positions[..., :2], atol=TOL_SINGLE)
    # Make sure that it reaches equilibrium
    assert_allclose(dist[..., -1], dist[..., -2], tol=TOL_SINGLE)
    # Larger contact resistance should produce larger ground clearance (less penetration/compression).
    assert (np.diff(dist, axis=-2) > TOL_SINGLE).all()
@pytest.mark.required
def test_needs_coup():
    """Entities built with needs_coup=False must be excluded from IPC coupling entirely."""
    scene = gs.Scene(
        coupler_options=gs.options.IPCCouplerOptions(),
        show_viewer=False,
    )
    # Neither the ground plane nor the box opts into coupling.
    scene.add_entity(gs.morphs.Plane(), material=gs.materials.Rigid(needs_coup=False))
    scene.add_entity(
        morph=gs.morphs.Box(size=(0.1, 0.1, 0.1), pos=(0, 0, 0.5)),
        material=gs.materials.Rigid(needs_coup=False),
    )
    scene.build()
    coupler = scene.sim.coupler
    # The coupler must have registered no entity and no rigid coupling at all.
    assert coupler._coup_type_by_entity == {}
    assert not coupler.has_any_rigid_coupling
@pytest.mark.required
def test_link_filter_strict():
    """Verify that IPC link filter controls which links are actually added to IPC."""
    from genesis.engine.entities import RigidEntity

    scene = gs.Scene(
        coupler_options=gs.options.IPCCouplerOptions(
            enable_rigid_rigid_contact=False,
            two_way_coupling=True,
        ),
        show_viewer=False,
    )
    robot = scene.add_entity(
        morph=gs.morphs.URDF(
            file="urdf/simple/two_cube_revolute.urdf",
            pos=(0, 0, 0.2),
            fixed=True,
        ),
        material=gs.materials.Rigid(
            coup_type="two_way_soft_constraint",
            # Only the "moving" link is opted in to IPC coupling.
            coup_links=("moving",),
        ),
    )
    assert isinstance(robot, RigidEntity)
    scene.build()
    assert scene.sim is not None
    coupler = cast("IPCCoupler", scene.sim.coupler)

    base_link = robot.get_link("base")
    moving_link = robot.get_link("moving")
    # The coupler must track exactly the filtered link set for this entity.
    assert robot in coupler._coup_links
    assert coupler._coup_links[robot] == {moving_link}
    # Only the filtered link may appear among the IPC rigid geometries.
    ipc_links_idx = get_ipc_rigid_links_idx(scene, env_idx=0)
    assert moving_link.idx in ipc_links_idx
    assert base_link.idx not in ipc_links_idx
    # Likewise for the per-link ABD slot registry.
    assert moving_link in coupler._abd_slots_by_link
    assert base_link not in coupler._abd_slots_by_link
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
@pytest.mark.parametrize(
    "coup_type, fixed",
    [("two_way_soft_constraint", True), ("two_way_soft_constraint", False), ("external_articulation", True)],
)
@pytest.mark.parametrize("joint_type", ["revolute", "prismatic"])
def test_single_joint(n_envs, coup_type, joint_type, fixed, show_viewer):
    """Drive a single-joint robot along a sinusoidal PD target under IPC coupling.

    Checks that (a) the joint tracks the target signal (correlation, relative
    shape, and peak-to-peak amplitude), (b) the GS and IPC transforms of the
    moving link stay close throughout the rollout, and (c) the robot either
    stays in place (fixed base) or comes close to the ground without sinking
    through it (free base).
    """
    from genesis.engine.entities import RigidEntity
    DT = 0.01
    GRAVITY = np.array([0.0, 0.0, -9.8], dtype=gs.np_float)
    POS = (0, 0, 0.5)
    FREQ = 1.0
    # Smaller target amplitude for prismatic joints (meters instead of radians).
    SCALE = 0.5 if joint_type == "revolute" else 0.1
    CONTACT_MARGIN = 0.01
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=DT,
            gravity=GRAVITY,
        ),
        rigid_options=gs.options.RigidOptions(
            enable_collision=False,
        ),
        coupler_options=gs.options.IPCCouplerOptions(
            contact_d_hat=CONTACT_MARGIN,
            constraint_strength_translation=1,
            constraint_strength_rotation=1,
            enable_rigid_rigid_contact=False,
            newton_tolerance=1e-2,
            newton_translation_tolerance=1e-2,
            linear_system_tolerance=1e-3,
            newton_semi_implicit_enable=False,
            two_way_coupling=True,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(1.0, 1.0, 0.8),
            camera_lookat=(0.0, 0.0, 0.3),
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(
        gs.morphs.Plane(),
        material=gs.materials.Rigid(
            coup_type="ipc_only",
            coup_friction=0.5,
        ),
    )
    robot = scene.add_entity(
        morph=gs.morphs.URDF(
            file=f"urdf/simple/two_cube_{joint_type}.urdf",
            pos=POS,
            fixed=fixed,
        ),
        material=gs.materials.Rigid(
            coup_type=coup_type,
        ),
    )
    assert isinstance(robot, RigidEntity)
    scene.build(n_envs=n_envs)
    assert scene.sim is not None
    coupler = cast("IPCCoupler", scene.sim.coupler)
    envs_idx = range(max(scene.n_envs, 1))
    # PD gains for the single actuated DoF.
    robot.set_dofs_kp(500.0, dofs_idx_local=-1)
    robot.set_dofs_kv(50.0, dofs_idx_local=-1)
    moving_link = robot.get_link("moving")
    ipc_links_idx = get_ipc_rigid_links_idx(scene, env_idx=0)
    assert moving_link.idx in ipc_links_idx
    assert moving_link in coupler._abd_slots_by_link
    if coup_type == "two_way_soft_constraint":
        assert moving_link in coupler._abd_data_by_link
    elif coup_type == "external_articulation":
        # One articulation slot per environment (at least one when n_envs == 0).
        art_data = coupler._articulation_data_by_entity[robot]
        assert len(art_data.articulation_slots) == max(scene.n_envs, 1)
        if fixed:
            assert not coupler._abd_data_by_link
    dist_min = np.array(float("inf"))
    cur_dof_pos_history, target_dof_pos_history = [], []
    gs_transform_history, ipc_transform_history = [], []
    # Simulate one full period of the target signal (FREQ = 1 Hz -> 1 s).
    for _ in range(int(1 / (DT * FREQ))):
        # Apply sinusoidal target position
        target_dof_pos = SCALE * np.sin((2 * math.pi * FREQ) * scene.sim.cur_t)
        target_dof_vel = SCALE * (2 * math.pi * FREQ) * np.cos((2 * math.pi * FREQ) * scene.sim.cur_t)
        robot.control_dofs_position_velocity(target_dof_pos, target_dof_vel, dofs_idx_local=-1)
        # Store the current and target position / velocity
        cur_dof_pos = tensor_to_array(robot.get_dofs_position(dofs_idx_local=-1)[..., 0])
        cur_dof_pos_history.append(cur_dof_pos)
        target_dof_pos_history.append(target_dof_pos)
        # Make sure the robot never went through the ground
        if not fixed:
            robot_verts = tensor_to_array(robot.get_verts())
            dist_min = np.minimum(dist_min, robot_verts[..., 2].min(axis=-1))
            # FIXME: For some reason it actually can...
            assert (dist_min > -0.1).all()
        scene.step()
        if coup_type == "two_way_soft_constraint" or not fixed:
            # Per-step GS-vs-IPC transform consistency check for the moving link.
            for env_idx in envs_idx:
                abd_data = coupler._abd_data_by_link[moving_link][env_idx]
                gs_transform = coupler._abd_transforms_by_link[moving_link][env_idx]
                ipc_transform = abd_data.transform
                # FIXME: Why the tolerance is must so large if no fixed ?!
                assert_allclose(gs_transform[:3, 3], ipc_transform[:3, 3], atol=TOL_SINGLE if fixed else 0.2)
                assert_allclose(
                    gu.R_to_xyz(gs_transform[:3, :3] @ ipc_transform[:3, :3].T), 0.0, atol=1e-4 if fixed else 0.3
                )
                gs_transform_history.append(gs_transform)
                ipc_transform_history.append(ipc_transform)
    cur_dof_pos_history = np.stack(cur_dof_pos_history, axis=-1)
    target_dof_pos_history = np.stack(target_dof_pos_history, axis=-1)
    # Tracking quality: near-perfect correlation with the commanded signal.
    for env_idx in envs_idx if scene.n_envs > 0 else (slice(None),):
        corr = np.corrcoef(cur_dof_pos_history[env_idx], target_dof_pos_history)[0, 1]
        assert corr > 1.0 - 5e-3
    assert_allclose(
        cur_dof_pos_history - cur_dof_pos_history[..., [0]],
        target_dof_pos_history - target_dof_pos_history[..., [0]],
        tol=0.03,
    )
    # Peak-to-peak amplitude of the tracked signal must match 2 * SCALE.
    assert_allclose(np.ptp(cur_dof_pos_history, axis=-1), 2 * SCALE, tol=0.05)
    if gs_transform_history:
        gs_pos_history, gs_quat_history = gu.T_to_trans_quat(np.stack(gs_transform_history, axis=0))
        ipc_pos_history, ipc_quat_history = gu.T_to_trans_quat(np.stack(ipc_transform_history, axis=0))
        pos_err_history = np.linalg.norm(ipc_pos_history - gs_pos_history, axis=-1)
        rot_err_history = np.linalg.norm(
            gu.quat_to_rotvec(gu.transform_quat_by_quat(gs.inv_quat(gs_quat_history), ipc_quat_history)), axis=-1
        )
        # 90th-percentile bound on GS-vs-IPC transform mismatch over the rollout.
        assert (np.percentile(pos_err_history, 90, axis=0) < 1e-2).all()
        assert (np.percentile(rot_err_history, 90, axis=0) < 5e-2).all()
    # Make sure the robot bounced on the ground or stayed in place
    if fixed:
        assert_allclose(robot.get_pos(), POS, atol=TOL_SINGLE)
    else:
        assert (dist_min < 1.5 * CONTACT_MARGIN).all()
@pytest.mark.required
@pytest.mark.parametrize("coup_type", ["two_way_soft_constraint", "external_articulation"])
@pytest.mark.parametrize("merge_fixed_links", [True, False])
def test_find_target_links(coup_type, merge_fixed_links, show_viewer):
    """Test that find_target_link_for_fixed_merge correctly groups ABD bodies."""
    from genesis.engine.entities import RigidEntity
    from genesis.engine.couplers.ipc_coupler.utils import find_target_link_for_fixed_merge
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(dt=0.01, gravity=(0, 0, -9.8)),
        rigid_options=gs.options.RigidOptions(enable_collision=False),
        coupler_options=gs.options.IPCCouplerOptions(
            constraint_strength_translation=1,
            constraint_strength_rotation=1,
            enable_rigid_rigid_contact=False,
            newton_tolerance=1e-2,
            newton_translation_tolerance=1e-2,
            two_way_coupling=True,
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(
        gs.morphs.Plane(),
        material=gs.materials.Rigid(coup_type="ipc_only", coup_friction=0.5),
    )
    arm = scene.add_entity(
        morph=gs.morphs.URDF(
            file="urdf/panda_bullet/panda_nohand.urdf",
            pos=(0, 0, 1),
            fixed=True,
            merge_fixed_links=merge_fixed_links,
        ),
        material=gs.materials.Rigid(coup_type=coup_type),
    )
    assert isinstance(arm, RigidEntity)
    scene.build()
    assert scene.sim is not None
    ipc = cast("IPCCoupler", scene.sim.coupler)
    # panda_nohand ends with a fixed joint (joint8: link7 -> attachment). With
    # merge_fixed_links=True, attachment is folded into link7 at load time;
    # with merge_fixed_links=False it survives as a separate link but IPC must
    # still group the pair into a single ABD body.
    wrist = arm.get_link("link7")
    assert wrist in ipc._abd_slots_by_link
    if not merge_fixed_links:
        tail = arm.get_link("attachment")
        # The fixed-merge resolver must point the extra link back at link7...
        assert find_target_link_for_fixed_merge(tail) == wrist
        # ...and only the target link owns a slot entry.
        assert tail not in ipc._abd_slots_by_link
    if coup_type == "external_articulation":
        articulation = ipc._articulation_data_by_entity[arm]
        assert len(articulation.articulation_slots) == 1
        # The seven revolute joints are exported; the trailing fixed joint is skipped.
        assert len(articulation.joints_child_link) == 7
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
@pytest.mark.parametrize("constraint_strength", [1, 100])
def test_apply_forces_base_link(n_envs, constraint_strength, show_viewer):
    """A free rigid box must track a sinusoidal vertical PD target through IPC."""
    from genesis.engine.entities import RigidEntity
    DT = 0.002
    FREQ = 2.0
    SCALE = 0.1
    GRAVITY = np.array([0.0, 0.0, -9.8], dtype=gs.np_float)
    POS = (0.5, 0.0, 0.0)
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=DT,
            gravity=GRAVITY,
        ),
        coupler_options=gs.options.IPCCouplerOptions(
            constraint_strength_translation=constraint_strength,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(0.5, -0.5, 0.3),
            camera_lookat=(0.25, 0.0, 0.0),
        ),
        show_viewer=show_viewer,
    )
    box = scene.add_entity(
        gs.morphs.Box(size=(0.05, 0.05, 0.05), pos=POS),
        material=gs.materials.Rigid(coup_type="two_way_soft_constraint"),
    )
    assert isinstance(box, RigidEntity)
    scene.build(n_envs=n_envs)
    assert scene.sim is not None
    # Stiff PD controller on the box's free-floating DoFs.
    box.set_dofs_kp(50000.0)
    box.set_dofs_kv(500.0)
    omega = 2 * math.pi * FREQ
    actual_trace, target_trace = [], []
    for _ in range(int(1 / (DT * FREQ))):
        t = scene.sim.cur_t
        target_z = SCALE * math.sin(omega * t)
        target_vz = SCALE * omega * math.cos(omega * t)
        box.control_dofs_position_velocity(target_z, target_vz, dofs_idx_local=2)
        scene.step()
        target_trace.append(target_z)
        actual_trace.append(tensor_to_array(box.get_pos()[..., 2]))
    z_actual = np.array(actual_trace)
    z_target = np.array(target_trace)
    # Broadcast the 1-D target over the env axis in batched mode.
    if z_actual.ndim > 1:
        z_target = z_target[:, np.newaxis]
    assert_allclose(z_actual, z_target, atol=0.005)
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
def test_objects_freefall(n_envs, show_viewer):
    """Cloth, rigid box and FEM sphere in freefall follow the Euler gravity scheme in IPC.

    At every step the per-vertex positions read back from IPC must match the
    semi-implicit Euler prediction (v_{n+1} = v_n + g*dt, x_{n+1} = x_n + v_{n+1}*dt),
    and the IPC and GS centroids must agree at the end.
    """
    from genesis.engine.entities import RigidEntity, FEMEntity
    DT = 0.002
    GRAVITY = np.array([0.0, 0.0, -9.8], dtype=gs.np_float)
    NUM_STEPS = 30
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=DT,
            gravity=GRAVITY,
        ),
        coupler_options=gs.options.IPCCouplerOptions(
            contact_d_hat=0.01,
            enable_rigid_rigid_contact=False,
            two_way_coupling=True,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(2.2, 3.2, 1.5),
            camera_lookat=(0.0, 0.0, 1.1),
        ),
        show_viewer=show_viewer,
    )
    asset_path = get_hf_dataset(pattern="IPC/grid20x20.obj")
    cloth = scene.add_entity(
        morph=gs.morphs.Mesh(
            file=f"{asset_path}/IPC/grid20x20.obj",
            scale=1.5,
            pos=(0.0, 0.0, 1.5),
            euler=(0, 0, 0),
        ),
        material=gs.materials.FEM.Cloth(
            E=1e5,
            nu=0.499,
            rho=200,
            thickness=0.001,
            bending_stiffness=50.0,
        ),
        surface=gs.surfaces.Plastic(
            color=(0.3, 0.5, 0.8, 1.0),
        ),
    )
    box = scene.add_entity(
        morph=gs.morphs.Box(
            size=(0.2, 0.2, 0.2),
            pos=(0.0, 0.0, 0.6),
        ),
        material=gs.materials.Rigid(
            rho=500.0,
            coup_type="ipc_only",
        ),
        surface=gs.surfaces.Plastic(
            color=(0.8, 0.3, 0.2, 0.8),
        ),
    )
    assert isinstance(box, RigidEntity)
    sphere = scene.add_entity(
        morph=gs.morphs.Sphere(
            radius=0.08,
            pos=(0.5, 0.0, 0.1),
        ),
        material=gs.materials.FEM.Elastic(
            E=1.0e5,
            nu=0.3,
            rho=1000.0,
            model="stable_neohookean",
        ),
        surface=gs.surfaces.Plastic(
            color=(0.2, 0.8, 0.3, 0.8),
        ),
    )
    scene.build(n_envs=n_envs)
    assert scene.sim is not None
    coupler = cast("IPCCoupler", scene.sim.coupler)
    envs_idx = range(max(scene.n_envs, 1))
    ipc_links_idx = get_ipc_rigid_links_idx(scene, env_idx=0)
    assert box.base_link_idx in ipc_links_idx
    assert box.base_link in coupler._abd_slots_by_link
    # Verify that geometries are present in IPC for each environment
    cloth_entity_idx = scene.sim.fem_solver.entities.index(cloth)
    box_entity_idx = scene.sim.rigid_solver.entities.index(box)
    sphere_entity_idx = scene.sim.fem_solver.entities.index(sphere)
    # Per-object lookup arguments for the IPC geometry query helpers.
    objs_kwargs = {
        obj: dict(solver_type=solver_type, idx=idx)
        for obj, solver_type, idx in (
            (cloth, "cloth", cloth_entity_idx),
            (box, "rigid", box_entity_idx),
            (sphere, "fem", sphere_entity_idx),
        )
    }
    for obj_kwargs in objs_kwargs.values():
        for env_idx in envs_idx:
            assert len(find_ipc_geometries(scene, **obj_kwargs, env_idx=env_idx)) == 1
    # Get initial state
    p_0 = {obj: get_ipc_positions(scene, **obj_kwargs, envs_idx=envs_idx) for obj, obj_kwargs in objs_kwargs.items()}
    v_0 = {obj: np.zeros_like(p_0[obj]) for obj in objs_kwargs.keys()}
    # Run simulation and validate dynamics equations at each step
    p_prev, v_prev = p_0.copy(), v_0.copy()
    for _i in range(NUM_STEPS):
        # Move forward in time
        scene.step()
        for obj, obj_kwargs in objs_kwargs.items():
            # Get new position
            p_i = get_ipc_positions(scene, **obj_kwargs, envs_idx=envs_idx)
            # Estimate velocity by finite difference: v_{n+1} = (x_{n+1} - x_n) / DT
            v_i = (p_i - p_prev[obj]) / DT
            # Compute estimated position and velocity
            expected_v = v_prev[obj] + GRAVITY * DT
            expected_p = p_prev[obj] + expected_v * DT
            # Update for next iteration
            p_prev[obj], v_prev[obj] = p_i, v_i
            # FIXME: This test does not pass for sphere entity...
            if obj is sphere:
                continue
            # Validate displacement and velocity assuming Euler scheme
            assert_allclose(v_i, expected_v, atol=1e-3)
            assert_allclose(p_i, expected_p, tol=TOL_SINGLE)
    for obj in objs_kwargs.keys():
        # Validate centroid consistency
        assert isinstance(obj, (RigidEntity, FEMEntity))
        ipc_centroid = p_prev[obj].mean(axis=-2)
        gs_centroid = obj.get_state().pos.mean(axis=-2)
        assert_allclose(ipc_centroid, gs_centroid, atol=TOL_SINGLE)
        # Validate centroidal total displacement: 0.5 * GRAVITY * t * (t + DT)
        # FEM entities (cloth) deform during freefall, causing small centroid drift — use looser tolerance.
        p_delta = p_prev[obj] - p_0[obj]
        expected_displacement = 0.5 * GRAVITY * NUM_STEPS * (NUM_STEPS + 1) * DT**2
        assert_allclose(p_delta.mean(axis=-2), expected_displacement, tol=2e-3 if isinstance(obj, FEMEntity) else 1e-3)
        # FIXME: This test does not pass for sphere entity...
        if obj is sphere:
            continue
        # Validate vertex-based total displacement
        assert_allclose(p_delta, expected_displacement, tol=TOL_SINGLE)
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
def test_objects_colliding(n_envs, show_viewer):
    """Drop a cloth next to a rigid box and an FEM sphere and verify resting contact.

    After settling, every object must touch the ground within the contact
    margin without penetrating it, stay near its initial horizontal position,
    reach steady state, and the cloth must drape on top of the other objects.
    """
    DT = 0.02
    CONTACT_MARGIN = 0.01
    GRAVITY = np.array([0.0, 0.0, -9.8], dtype=gs.np_float)
    NUM_STEPS = 90
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=DT,
            gravity=GRAVITY,
        ),
        coupler_options=gs.options.IPCCouplerOptions(
            contact_d_hat=CONTACT_MARGIN,
            enable_rigid_rigid_contact=False,
            two_way_coupling=True,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(2.0, 2.0, 0.1),
            camera_lookat=(0.0, 0.0, 0.1),
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(
        gs.morphs.Plane(),
        material=gs.materials.Rigid(
            coup_type="ipc_only",
            coup_friction=0.5,
        ),
    )
    asset_path = get_hf_dataset(pattern="IPC/grid20x20.obj")
    cloth = scene.add_entity(
        morph=gs.morphs.Mesh(
            file=f"{asset_path}/IPC/grid20x20.obj",
            scale=1.5,
            pos=(0.0, 0.0, 0.2),
            euler=(90, 0, 0),
        ),
        material=gs.materials.FEM.Cloth(
            E=1e5,
            nu=0.499,
            rho=200,
            thickness=0.001,
            bending_stiffness=50.0,
        ),
        surface=gs.surfaces.Plastic(
            color=(0.3, 0.5, 0.8, 1.0),
        ),
    )
    box = scene.add_entity(
        morph=gs.morphs.Box(
            size=(0.1, 0.1, 0.1),
            pos=(-0.25, 0.0, 0.1),
        ),
        material=gs.materials.Rigid(
            rho=500.0,
            coup_friction=0.3,
            coup_type="ipc_only",
        ),
        surface=gs.surfaces.Plastic(
            color=(0.8, 0.3, 0.2, 0.8),
        ),
    )
    sphere = scene.add_entity(
        morph=gs.morphs.Sphere(
            radius=0.08,
            pos=(0.25, 0.0, 0.1),
        ),
        material=gs.materials.FEM.Elastic(
            E=1.0e3,
            nu=0.3,
            rho=1000.0,
            friction_mu=0.3,
            model="stable_neohookean",
        ),
        surface=gs.surfaces.Plastic(
            color=(0.2, 0.8, 0.3, 0.8),
        ),
    )
    scene.build(n_envs=n_envs)
    assert scene.sim is not None
    envs_idx = range(max(scene.n_envs, 1))
    # Run simulation and validate dynamics equations at each step
    objs_kwargs = {
        obj: dict(solver_type=solver_type, idx=idx)
        for obj, solver_type, idx in (
            (cloth, "cloth", scene.sim.fem_solver.entities.index(cloth)),
            (box, "rigid", scene.sim.rigid_solver.entities.index(box)),
            (sphere, "fem", scene.sim.fem_solver.entities.index(sphere)),
        )
    }
    # Record per-step IPC vertex positions for every object.
    p_history = {obj: [] for obj in objs_kwargs.keys()}
    for _i in range(NUM_STEPS):
        scene.step()
        for obj, obj_kwargs in objs_kwargs.items():
            p_i = get_ipc_positions(scene, **obj_kwargs, envs_idx=envs_idx)
            p_history[obj].append(p_i)
    cloth_p_history = np.stack(p_history[cloth], axis=-3)
    for obj in objs_kwargs.keys():
        obj_p_history = np.stack(p_history[obj], axis=-3)
        # Make sure that all vertices are laying on the ground
        assert (obj_p_history[..., 2] < 1.5 * CONTACT_MARGIN).any()
        assert (obj_p_history[..., 2] > 0.0).all()
        # Check that the objects did not fly away (10cm)
        obj_delta_history = np.linalg.norm((obj_p_history - obj_p_history[..., [0], :, :])[..., :2], axis=-1)
        assert_allclose(obj_delta_history, 0.0, atol=0.1)
        # Make sure that all objects reached steady state
        obj_disp_history = np.linalg.norm(np.diff(obj_p_history[..., -10:, :, :], axis=-3), axis=-1)
        assert_allclose(obj_disp_history, 0.0, tol=5e-3)
        # Make sure that the cloth is laying on all objects (at least one vertex above the others)
        if obj is cloth:
            continue
        assert (obj_p_history[..., 2].max(axis=-1) < cloth_p_history[..., 2].max(axis=-1)).all()
@pytest.mark.required
@pytest.mark.parametrize("coup_type", ["two_way_soft_constraint", "external_articulation"])
def test_robot_grasp_fem(coup_type, show_viewer):
    """Verify FEM add/retrieve and that robot lift raises FEM more than 20cm.

    A Franka arm (coupled via the parametrized mode) pinches an elastic FEM
    cube and lifts it. Checks IPC link registration, GS/IPC FEM position
    consistency, and that the cube ends at least 0.2 m above its start height.
    """
    from genesis.engine.entities import RigidEntity, FEMEntity
    DT = 0.01
    GRAVITY = np.array([0.0, 0.0, -9.8], dtype=gs.np_float)
    BOX_POS = (0.65, 0.0, 0.03)
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=DT,
            gravity=GRAVITY,
        ),
        coupler_options=gs.options.IPCCouplerOptions(
            constraint_strength_translation=10.0,
            constraint_strength_rotation=10.0,
            newton_translation_tolerance=10.0,
            enable_rigid_rigid_contact=False,
            enable_rigid_ground_contact=False,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(2.0, 1.0, 1.0),
            camera_lookat=(0.3, 0.0, 0.5),
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(
        gs.morphs.Plane(),
        material=gs.materials.Rigid(
            coup_type="ipc_only",
            coup_friction=0.8,
        ),
    )
    material_kwargs: dict[str, Any] = dict(
        coup_friction=0.8,
        coup_type=coup_type,
    )
    if coup_type == "two_way_soft_constraint":
        # Restrict IPC coupling to the gripper fingers in soft-constraint mode.
        material_kwargs["coup_links"] = ("left_finger", "right_finger")
    franka = scene.add_entity(
        gs.morphs.MJCF(
            file="xml/franka_emika_panda/panda_non_overlap.xml",
        ),
        material=gs.materials.Rigid(**material_kwargs),
    )
    assert isinstance(franka, RigidEntity)
    box = scene.add_entity(
        morph=gs.morphs.Box(
            pos=BOX_POS,
            size=(0.05, 0.05, 0.05),
        ),
        material=gs.materials.FEM.Elastic(
            E=5.0e4,
            nu=0.45,
            rho=1000.0,
            friction_mu=0.5,
            model="stable_neohookean",
        ),
        surface=gs.surfaces.Plastic(
            color=(0.2, 0.8, 0.2, 0.5),
        ),
    )
    assert isinstance(box, FEMEntity)
    scene.build()
    assert scene.sim is not None
    coupler = cast("IPCCoupler", scene.sim.coupler)
    envs_idx = range(max(scene.n_envs, 1))
    motors_dof, fingers_dof = slice(0, 7), slice(7, 9)
    # end_effector = franka.get_link("hand")
    franka.set_dofs_kp([4500.0, 4500.0, 3500.0, 3500.0, 2000.0, 2000.0, 2000.0, 500.0, 500.0])
    box_entity_idx = scene.sim.fem_solver.entities.index(box)
    assert len(find_ipc_geometries(scene, solver_type="fem", idx=box_entity_idx, env_idx=0)) == 1
    franka_finger_links = {franka.get_link(name) for name in ("left_finger", "right_finger")}
    franka_finger_links_idx = {link.idx for link in franka_finger_links}
    ipc_links_idx = get_ipc_rigid_links_idx(scene, env_idx=0)
    assert franka_finger_links_idx.issubset(ipc_links_idx)
    # `_abd_slots_by_link` is keyed by Link objects, so iterate the links directly.
    for link in franka_finger_links:
        assert link in coupler._abd_slots_by_link
    franka_links_idx = {link.idx for link in franka.links}
    franka_ipc_links_idx = franka_links_idx.intersection(ipc_links_idx)
    if coup_type == "two_way_soft_constraint":
        # Only the listed finger links may be coupled.
        assert coupler._coup_links.get(franka) == franka_finger_links
        assert franka_ipc_links_idx == franka_finger_links_idx
    else:
        assert franka_finger_links_idx.issubset(franka_ipc_links_idx)
    # GS and IPC must agree on the FEM cube's initial vertex positions.
    ipc_positions_0 = get_ipc_positions(scene, solver_type="fem", idx=box_entity_idx, envs_idx=envs_idx)
    gs_positions_0 = tensor_to_array(box.get_state().pos)
    assert_allclose(ipc_positions_0, gs_positions_0, atol=TOL_SINGLE)
    gs_centroid_0 = gs_positions_0.mean(axis=1)
    assert_allclose(gs_centroid_0, BOX_POS, atol=1e-4)
    def run_stage(target_qpos, finger_pos, duration):
        # Hold a fixed arm/finger PD target for `duration` seconds of simulation.
        franka.control_dofs_position(target_qpos[motors_dof], motors_dof)
        franka.control_dofs_position(finger_pos, fingers_dof)
        for _ in range(int(duration / DT)):
            scene.step()
    # Setting initial configuration is not supported by coupling mode "external_articulation"
    # qpos = franka.inverse_kinematics(link=end_effector, pos=[0.65, 0.0, 0.4], quat=[0.0, 1.0, 0.0, 0.0])
    qpos = [-0.9482, 0.6910, 1.2114, -1.6619, -0.6739, 1.8685, 1.1844, 0.0112, 0.0096]
    with pytest.raises(gs.GenesisException) if coup_type == "external_articulation" else nullcontext():
        franka.set_dofs_position(qpos)
    franka.control_dofs_position(qpos)
    if coup_type == "external_articulation":
        run_stage(qpos, finger_pos=0.04, duration=2.0)
    # Lower the grapper half way to grasping position
    # qpos = franka.inverse_kinematics(link=end_effector, pos=[0.65, 0.0, 0.25], quat=[0.0, 1.0, 0.0, 0.0])
    qpos = [-0.8757, 0.8824, 1.0523, -1.7619, -0.8831, 2.0903, 1.2924, 0.0400, 0.0400]
    run_stage(qpos, finger_pos=0.04, duration=1.0)
    # Reach grasping position
    # qpos = franka.inverse_kinematics(link=end_effector, pos=[0.65, 0.0, 0.135], quat=[0.0, 1.0, 0.0, 0.0])
    qpos = [-0.7711, 1.0502, 0.8850, -1.7182, -1.0210, 2.2350, 1.3489, 0.0400, 0.0400]
    run_stage(qpos, finger_pos=0.04, duration=0.5)
    # Grasp the cube
    run_stage(qpos, finger_pos=0.0, duration=0.1)
    # Lift the cube
    # qpos = franka.inverse_kinematics(link=end_effector, pos=[0.65, 0.0, 0.4], quat=[0.0, 1.0, 0.0, 0.0])
    qpos = [-0.9488, 0.6916, 1.2123, -1.6627, -0.6750, 1.8683, 1.1855, 0.0301, 0.0319]
    run_stage(qpos, finger_pos=0.0, duration=0.5)
    ipc_positions_f = get_ipc_positions(scene, solver_type="fem", idx=box_entity_idx, envs_idx=envs_idx)
    gs_positions_f = tensor_to_array(box.get_state().pos)
    assert_allclose(ipc_positions_f, gs_positions_f, atol=TOL_SINGLE)
    # Every cube vertex must have been lifted by at least 20 cm...
    assert (gs_positions_f[..., 2] - gs_positions_0[..., 2] >= 0.2).all()
    # ...and some cube vertex must sit above the bottom of the gripper finger.
    finger_aabb = tensor_to_array(franka.get_link("right_finger").get_AABB())
    assert (gs_positions_f[..., 2] - finger_aabb[..., 0, 2] > 0).any()
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
def test_momentum_conservation(n_envs, show_viewer):
    """A rigid cube strikes a frictionless FEM blob; total linear momentum is conserved.

    Gravity is disabled. Tracks the rigid momentum (mass * link COM velocity)
    plus the FEM momentum (per-vertex mass * finite-difference velocity) over
    the rollout, checks no interpenetration, that both bodies bounce, and that
    the total stays at the cube's initial momentum.
    """
    from genesis.engine.entities import RigidEntity
    DT = 0.001
    DURATION = 0.30
    CONTACT_MARGIN = 0.01
    VELOCITY = np.array([4.0, 0.0, 0.0], dtype=gs.np_float)
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=DT,
            gravity=(0.0, 0.0, 0.0),
        ),
        coupler_options=gs.options.IPCCouplerOptions(
            contact_d_hat=CONTACT_MARGIN,
            constraint_strength_translation=1,
            constraint_strength_rotation=1,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(0.5, 1.3, 0.6),
            camera_lookat=(0.2, 0.0, 0.3),
        ),
        show_viewer=show_viewer,
    )
    blob = scene.add_entity(
        morph=gs.morphs.Sphere(
            pos=(0.3, 0.0, 0.4),
            radius=0.1,
        ),
        material=gs.materials.FEM.Elastic(
            E=1.0e5,
            nu=0.45,
            rho=1000.0,
            model="stable_neohookean",
            friction_mu=0.0,
        ),
    )
    rigid_cube = scene.add_entity(
        morph=gs.morphs.Box(
            pos=(0.0, 0.0, 0.4),
            size=(0.1, 0.1, 0.1),
            euler=(0, 0, 0),
        ),
        material=gs.materials.Rigid(
            rho=1000,
            coup_type="two_way_soft_constraint",
        ),
        surface=gs.surfaces.Plastic(
            color=(0.8, 0.2, 0.2, 0.8),
        ),
    )
    assert isinstance(rigid_cube, RigidEntity)
    scene.build(n_envs=n_envs)
    assert scene.sim is not None
    coupler = cast("IPCCoupler", scene.sim.coupler)
    # Launch the cube toward the blob along +x.
    rigid_cube.set_dofs_velocity((*VELOCITY, 0.0, 0.0, 0.0))
    fem_entity_idx = scene.sim.fem_solver.entities.index(blob)
    assert len(find_ipc_geometries(scene, solver_type="fem", idx=fem_entity_idx, env_idx=0)) == 1
    rigid_link = rigid_cube.base_link
    ipc_links_idx = get_ipc_rigid_links_idx(scene, env_idx=0)
    assert rigid_link.idx in ipc_links_idx
    assert rigid_link in coupler._abd_slots_by_link
    cube_mass = rigid_cube.get_mass()
    # Read actual FEM mass from IPC geometry (mesh mass != analytical sphere mass due to tet discretization).
    blob_radius = blob.morph.radius
    blob_rho = blob.material.rho
    blob_analytical_mass = (4.0 / 3.0) * np.pi * blob_radius**3 * blob_rho
    (fem_raw_geo,) = find_ipc_geometries(scene, solver_type="fem", idx=fem_entity_idx, env_idx=0)
    fem_mass_density = fem_raw_geo.meta().find(builtin.mass_density).view().item()
    fem_merged_geo = get_ipc_merged_geometry(scene, solver_type="fem", idx=fem_entity_idx, env_idx=0)
    fem_vertex_volumes = fem_merged_geo.vertices().find(builtin.volume).view().reshape(-1)
    blob_mass = float(np.sum(fem_vertex_volumes) * fem_mass_density)
    assert_allclose(blob_mass, blob_analytical_mass, rtol=0.01)
    total_p_history = []
    momentum_0 = VELOCITY * cube_mass
    dist_min = np.array(float("inf"))
    fem_positions_prev = None  # FEM initial velocity is zero
    for step in range(int(DURATION / DT)):
        # Rigid momentum from the link COM velocity.
        cube_vel = tensor_to_array(rigid_cube.get_links_vel(links_idx_local=0, ref="link_com")[..., 0, :])
        rigid_linear_momentum = cube_mass * cube_vel
        fem_proc_geo = get_ipc_merged_geometry(scene, solver_type="fem", idx=fem_entity_idx, env_idx=0)
        fem_positions = fem_proc_geo.positions().view().squeeze(axis=-1)
        # FEM vertex velocities by finite difference (zero on the first step).
        if fem_positions_prev is not None:
            fem_velocities = (fem_positions - fem_positions_prev) / DT
        else:
            fem_velocities = np.zeros_like(fem_positions)
        fem_positions_prev = fem_positions
        # Make sure that rigid and fem are not penetrating each other
        fem_aabb_min, fem_aabb_max = fem_positions.min(axis=-2), fem_positions.max(axis=-2)
        rigid_aabb = tensor_to_array(rigid_cube.get_AABB())
        rigid_aabb_min, rigid_aabb_max = rigid_aabb[..., 0, :], rigid_aabb[..., 1, :]
        overlap = np.minimum(fem_aabb_max, rigid_aabb_max) - np.maximum(rigid_aabb_min, fem_aabb_min)
        dist_min = np.minimum(dist_min, -overlap.min(axis=-1))
        assert (dist_min > 0.0).all()
        volume_attr = fem_proc_geo.vertices().find(builtin.volume)
        fem_vertex_masses = volume_attr.view().reshape(-1) * fem_mass_density
        assert_allclose(np.sum(fem_vertex_masses), blob_mass, tol=TOL_SINGLE)
        fem_linear_momentum = np.sum(fem_vertex_masses[:, np.newaxis] * fem_velocities, axis=0)
        # Before collision: FEM should have zero momentum, rigid should carry all momentum.
        if step < int(DURATION / 10 / DT):
            assert_allclose(fem_linear_momentum, 0.0, atol=TOL_SINGLE)
            assert_allclose(rigid_linear_momentum, momentum_0, tol=TOL_SINGLE)
        total_linear_momentum = rigid_linear_momentum + fem_linear_momentum
        total_p_history.append(total_linear_momentum)
        scene.step()
    # Make sure the objects bounced on each other
    assert (dist_min < 1.5 * CONTACT_MARGIN).all()
    assert (cube_vel[..., 0] < -0.5).all()
    assert (fem_velocities[..., 0].mean(axis=-1) > 0.5).all()
    # Check total momentum conservation.
    # NOTE : The tet mesh's contact-facing vertices (x < -0.05) have a z-mean of -0.00138 due to TetGen's asymmetric
    # Steiner point insertion, causing an asymmetric contact force distribution during the x-direction collision.
    # This z-bias produces a net -z impulse, resulting in the observed z-momentum leak.
    assert_allclose(total_p_history, momentum_0, tol=0.001)
@pytest.mark.required
@pytest.mark.parametrize("enable_rigid_ground_contact", [True, False])
@pytest.mark.parametrize("coup_type", ["ipc_only", "two_way_soft_constraint"])
def test_collision_delegation_ipc_vs_rigid(coup_type, enable_rigid_ground_contact):
    """Verify collision pair delegation between IPC and rigid solver based on coup_type and ground contact."""
    from genesis.engine.entities import RigidEntity
    scene = gs.Scene(
        rigid_options=gs.options.RigidOptions(
            enable_self_collision=True,
        ),
        coupler_options=gs.options.IPCCouplerOptions(
            enable_rigid_ground_contact=enable_rigid_ground_contact,
        ),
        show_viewer=False,
    )
    plane = scene.add_entity(gs.morphs.Plane(), material=gs.materials.Rigid(needs_coup=False))
    assert isinstance(plane, RigidEntity)
    # Non-IPC box — always handled by rigid solver
    box = scene.add_entity(
        gs.morphs.Box(
            size=(0.05, 0.05, 0.05),
            pos=(1.0, 0.0, 0.2),
        ),
        material=gs.materials.Rigid(needs_coup=False),
    )
    assert isinstance(box, RigidEntity)
    if coup_type == "two_way_soft_constraint":
        entity = scene.add_entity(
            gs.morphs.MJCF(
                file="xml/franka_emika_panda/panda_non_overlap.xml",
            ),
            material=gs.materials.Rigid(
                coup_type="two_way_soft_constraint",
                coup_links=("left_finger", "right_finger"),
            ),
        )
        assert isinstance(entity, RigidEntity)
        # Only the geoms of the coupled finger links are excluded from the rigid solver.
        ipc_excluded_geoms = {geom.idx for name in entity.material.coup_links for geom in entity.get_link(name).geoms}
    else:
        # An articulated entity is rejected in "ipc_only" mode...
        with pytest.raises(gs.GenesisException):
            entity = scene.add_entity(
                gs.morphs.URDF(
                    file="urdf/go2/urdf/go2.urdf",
                    pos=(0.0, 0.0, 1.0),
                ),
                material=gs.materials.Rigid(
                    coup_type="ipc_only",
                ),
            )
        # ...so use a single free box instead.
        entity = scene.add_entity(
            morph=gs.morphs.Box(
                size=(0.2, 0.2, 0.2),
                pos=(0.0, 0.0, 0.6),
            ),
            material=gs.materials.Rigid(
                coup_type="ipc_only",
            ),
        )
        assert isinstance(entity, RigidEntity)
        ipc_excluded_geoms = {geom.idx for geom in entity.geoms}
    scene.build()
    assert scene.sim is not None
    assert scene.sim.rigid_solver.collider is not None
    # pair_idx[a, b] >= 0: pair handled by the rigid solver; == -1: excluded (left to IPC).
    pair_idx = scene.sim.rigid_solver.collider._collision_pair_idx
    # Collect geom indices for entities that should retain rigid solver pairs
    rigid_kept_geoms = {geom.idx for geom in entity.geoms} - ipc_excluded_geoms
    ground_geoms = {plane.geoms[0].idx}
    box_geoms = {box.geoms[0].idx}
    # Non-IPC box always has rigid solver ground pairs
    assert any(pair_idx[min(a, b), max(a, b)] >= 0 for a in box_geoms for b in ground_geoms)
    # Pairs between IPC-excluded geoms must have no rigid solver pairs (handled by IPC)
    for i_ga in ipc_excluded_geoms:
        for i_gb in ipc_excluded_geoms:
            if i_ga < i_gb:
                assert pair_idx[i_ga, i_gb] == -1
    if coup_type == "two_way_soft_constraint":
        # Mixed pairs (IPC-excluded ↔ non-IPC) must be kept in rigid solver
        for i_ga in ipc_excluded_geoms:
            for i_gb in box_geoms:
                a, b = min(i_ga, i_gb), max(i_ga, i_gb)
                assert pair_idx[a, b] >= 0
        # IPC-excluded geom ↔ ground must be kept in rigid solver (ground is not IPC-excluded)
        for i_ga in ipc_excluded_geoms:
            for i_gb in ground_geoms:
                a, b = min(i_ga, i_gb), max(i_ga, i_gb)
                assert pair_idx[a, b] >= 0
    else:
        # ipc_only: ALL pairs involving the entity are excluded (IPC fully controls pose)
        for i_ga in ipc_excluded_geoms:
            for i_gb in box_geoms:
                a, b = min(i_ga, i_gb), max(i_ga, i_gb)
                assert pair_idx[a, b] == -1
            for i_gb in ground_geoms:
                a, b = min(i_ga, i_gb), max(i_ga, i_gb)
                assert pair_idx[a, b] == -1
    # Non-excluded rigid geoms (if any) keep rigid solver ground and self-collision pairs
    if rigid_kept_geoms:
        assert any(pair_idx[min(a, b), max(a, b)] >= 0 for a in rigid_kept_geoms for b in ground_geoms)
        assert any(pair_idx[min(a, b), max(a, b)] >= 0 for a in rigid_kept_geoms for b in rigid_kept_geoms if a < b)
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
def test_cloth_corner_drag(n_envs, show_viewer):
    """Drag a cloth by one corner under gravity using a sandwich grip of two boxes.
    Verify that FEM vertices near the gripped corner follow the imposed trajectory,
    while the rest of the cloth hangs freely under gravity.
    """
    from genesis.engine.entities import FEMEntity
    DT = 0.01
    CLOTH_HALF = 0.5
    BOX_SIZE = 0.05
    GAP = 0.005
    SCALE = 0.5
    FREQ = 0.7
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=DT,
            gravity=(0.0, 0.0, -9.8),
        ),
        coupler_options=gs.options.IPCCouplerOptions(
            contact_enable=True,
            enable_rigid_rigid_contact=True,
            contact_d_hat=GAP,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(2.0 - CLOTH_HALF, -0.5, 1.0 - CLOTH_HALF),
            camera_lookat=(-CLOTH_HALF, 0.0, -CLOTH_HALF),
        ),
        show_viewer=show_viewer,
    )
    asset_path = get_hf_dataset(pattern="IPC/grid20x20.obj")
    cloth = scene.add_entity(
        morph=gs.morphs.Mesh(
            file=f"{asset_path}/IPC/grid20x20.obj",
            scale=2 * CLOTH_HALF,
            pos=(-CLOTH_HALF, 0.0, -CLOTH_HALF),
        ),
        material=gs.materials.FEM.Cloth(
            E=1e4,
            nu=0.3,
            rho=200.0,
            thickness=0.001,
            bending_stiffness=None,
            friction_mu=0.8,
        ),
    )
    assert isinstance(cloth, FEMEntity)
    # Sandwich grip at one corner
    boxes = []
    for z_sign in (+1, -1):
        box = scene.add_entity(
            gs.morphs.Box(
                size=(BOX_SIZE, BOX_SIZE, BOX_SIZE),
                pos=(-BOX_SIZE, z_sign * (0.5 * BOX_SIZE + GAP), -BOX_SIZE),
            ),
            material=gs.materials.Rigid(
                coup_type="two_way_soft_constraint",
                coup_friction=0.8,
            ),
            surface=gs.surfaces.Plastic(
                color=(1.0, 0.0, 0.0, 1.0) if z_sign > 0 else (0.0, 1.0, 0.0, 1.0),
            ),
        )
        boxes.append(box)
    scene.build(n_envs=n_envs)
    assert scene.sim is not None
    # Close gap, hold position during settling
    for box in boxes:
        box.set_dofs_kp(2000.0)
        box.set_dofs_kv(400.0)
        init_dof = tensor_to_array(box.get_dofs_position())
        # Command the boxes to y = 0 so they pinch the cloth between them.
        init_dof[..., 1] = 0.0
        box.control_dofs_position(init_dof)
    # Find corner vertices: closest to the gripped corner
    cloth_positions = tensor_to_array(cloth.get_state().pos)
    corner_idx = np.argmin(np.linalg.norm(cloth_positions, axis=-1), axis=-1)
    # Settle: let cloth conform to grip
    for _ in range(40):
        scene.step()
    # Make sure that the cloth did not fall
    cloth_pos = cloth.get_state().pos[range(scene.sim._B), corner_idx]
    assert_allclose(cloth_pos, 0.0, tol=5e-3)
    # Drag phase
    for i in range(int(1.0 / (DT * FREQ))):
        theta = (2.0 * np.pi * FREQ) * (i * scene.sim.dt)
        # Circular trajectory of radius SCALE starting at the origin, in the plane
        # spanned by (1, 0, 1)/sqrt(2) and the y axis; (dx, dy, dz) is its time derivative.
        x = SCALE / math.sqrt(2.0) * (np.cos(theta) - 1.0)
        dx = -SCALE * math.sqrt(2.0) * np.pi * FREQ * np.sin(theta)
        y = SCALE / math.sqrt(2.0) * np.sin(theta)
        dy = SCALE * math.sqrt(2.0) * np.pi * FREQ * np.cos(theta)
        z = SCALE / math.sqrt(2.0) * (np.cos(theta) - 1.0)
        dz = -SCALE * math.sqrt(2.0) * np.pi * FREQ * np.sin(theta)
        for box in boxes:
            box.control_dofs_position_velocity(
                (x - BOX_SIZE, y, z - BOX_SIZE), (dx, dy, dz), dofs_idx_local=slice(0, 3)
            )
        scene.step()
        # The gripped corner must track the commanded trajectory at every step.
        assert_allclose(cloth.get_state().pos[range(scene.sim._B), corner_idx], (x, y, z), tol=0.01)
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
@pytest.mark.parametrize("E, nu, strech_scale", [(1e4, 0.3, 1.0), (5e4, 0.49, 0.3)])
def test_cloth_uniform_biaxial_stretching(E, nu, strech_scale, n_envs, show_viewer):
    """Stretch a square cloth uniformly via position-controlled boxes at corners. Verify stretch physics."""
    # NOTE(review): "strech_scale" is presumably a typo for "stretch_scale"; renaming it would
    # change the pytest parametrize ids (and any -k selectors), so it is only flagged here.
    CLOTH_HALF = 0.5  # half side-length of the square cloth
    BOX_SIZE = 0.05  # edge length of each gripper box
    GAP = 0.005  # initial clearance between each box and the cloth plane
    THICKNESS = 0.001  # cloth shell thickness
    STRETCH_RATIO_1 = 1.0 + strech_scale * 0.15  # moderate stretch: the grip is expected to hold
    STRETCH_RATIO_2 = 1.4  # aggressive stretch: the grip is expected to slip
    PULL_DISTANCE = 0.03  # Radial displacement per corner (NOTE(review): appears unused in this test)
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=0.01,
            gravity=(0.0, 0.0, 0.0),
        ),
        coupler_options=gs.options.IPCCouplerOptions(
            contact_enable=True,
            enable_rigid_rigid_contact=True,
            contact_d_hat=GAP,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(0.0, -2.0, 1.0),
            camera_lookat=(0.0, 0.0, 0.0),
        ),
        show_viewer=show_viewer,
    )
    asset_path = get_hf_dataset(pattern="IPC/grid20x20.obj")
    cloth = scene.add_entity(
        morph=gs.morphs.Mesh(
            file=f"{asset_path}/IPC/grid20x20.obj",
            scale=2 * CLOTH_HALF,
            pos=(0.0, 0.0, 0.0),
            euler=(90, 0, 0),
        ),
        material=gs.materials.FEM.Cloth(
            E=E,
            nu=nu,
            rho=200.0,
            thickness=THICKNESS,
            bending_stiffness=None,
            friction_mu=0.8,
        ),
    )
    # 8 boxes: 2 per corner (sandwich grip above/below cloth)
    boxes = []
    for x_sign, y_sign in ((-1, -1), (-1, 1), (1, -1), (1, 1)):
        for z_sign in (+1, -1):
            box = scene.add_entity(
                gs.morphs.Box(
                    size=(BOX_SIZE, BOX_SIZE, BOX_SIZE),
                    pos=(
                        x_sign * (CLOTH_HALF - BOX_SIZE),
                        y_sign * (CLOTH_HALF - BOX_SIZE),
                        z_sign * (0.5 * BOX_SIZE + GAP),
                    ),
                ),
                material=gs.materials.Rigid(
                    coup_type="two_way_soft_constraint",
                    coup_friction=0.8,
                ),
                surface=gs.surfaces.Plastic(
                    color=np.random.rand(3),
                ),
            )
            boxes.append(box)
    scene.build(n_envs=n_envs)
    # Configure PD: position-controlled outward pull on x,y; hold z + rotation
    for box in boxes:
        box.set_dofs_kp(2000.0)
        box.set_dofs_kv(500.0)
        init_dof = tensor_to_array(box.get_dofs_position())
        # Close the vertical gap so each box pair clamps the cloth.
        init_dof[..., 2] = 0.0
        box.control_dofs_position(init_dof)
    # Wait for steady state
    cloth_positions_0 = tensor_to_array(cloth.get_state().pos)
    for _ in range(20):
        scene.step()
    cloth_positions_f = tensor_to_array(cloth.get_state().pos)
    # With zero gravity, the clamped cloth should barely move during settling.
    assert_allclose(cloth_positions_f, cloth_positions_0, atol=0.005)
    assert_allclose(cloth_positions_f[..., 2], cloth_positions_0[..., 2], tol=5e-3)
    # Stretch: phase one
    for box in boxes:
        init_dof = tensor_to_array(box.get_dofs_position())
        init_dof[..., :2] *= STRETCH_RATIO_1
        box.control_dofs_position(init_dof)
    for _ in range(80):
        scene.step()
    cloth_positions_f = tensor_to_array(cloth.get_state().pos)
    # Some cloth vertex must still sit under each box, i.e. the grip held.
    for box in boxes:
        init_dof = tensor_to_array(box.get_dofs_position())
        dist_vertices = np.linalg.norm(cloth_positions_f[..., :2] - init_dof[..., None, :2], axis=-1).min(axis=-1)
        assert_allclose(dist_vertices, 0.0, atol=0.02)
    assert_allclose(cloth_positions_f[..., 2], cloth_positions_0[..., 2], tol=5e-3)
    # Extract X/Y forces while making sure observed forces are consistent
    box_forces_xy = []
    applied_forces = qd_to_numpy(scene.rigid_solver.dofs_state.qf_applied, None, transpose=True)
    for box in boxes:
        dofs_idx = slice(box.dof_start, box.dof_end)
        box_forces = applied_forces[..., dofs_idx]
        # No applied force on the rotational DOFs, and |Fx| == |Fy| by symmetry of the pull.
        assert_allclose(box_forces[..., 3:], 0.0, tol=0.02)
        assert_allclose(np.abs(box_forces[..., 0]), np.abs(box_forces[..., 1]), tol=0.02)
        box_forces_xy.append(box_forces[..., :2])
    # Check that deformation is roughly symmetric (sanity check)
    grid = cloth_positions_f.reshape((-1, 20, 20, 3))
    grid_flipped_x = np.flip(grid, axis=-3)
    assert_allclose(grid[..., 0], grid_flipped_x[..., 0], atol=0.01)
    assert_allclose(grid[..., 1], -grid_flipped_x[..., 1], atol=0.01)
    grid_flipped_y = np.flip(grid, axis=-2)
    assert_allclose(grid[..., 0], -grid_flipped_y[..., 0], atol=0.01)
    assert_allclose(grid[..., 1], grid_flipped_y[..., 1], atol=0.01)
    # Check that deformation is consistent with applied forces based on material properties.
    # Each corner bears the load from half the reference edge length (by symmetry,
    # 2 corners per edge). Use reference length since stress is in reference config.
    strain_GL = 0.5 * (STRETCH_RATIO_1**2 - 1.0)  # Green–Lagrange strain
    expected_stress = E * strain_GL / (1.0 - nu)  # Equal biaxial plane stress (2nd Piola–Kirchhoff)
    expected_force_per_box = expected_stress * THICKNESS * CLOTH_HALF
    # FIXME: The estimated force is not very accurate. Is it possible to do better?
    assert_allclose(np.abs(box_forces_xy), expected_force_per_box, tol=1e4 / E)
    # Stretch: phase two
    for box in boxes:
        init_dof = tensor_to_array(box.get_dofs_position())
        init_dof[..., :2] *= STRETCH_RATIO_2
        box.control_dofs_position(init_dof)
    for _ in range(50):
        scene.step()
    # Lost grip
    cloth_positions_f = tensor_to_array(cloth.get_state().pos)
    cloth_aabb_min, cloth_aabb_max = cloth_positions_f.min(axis=-2), cloth_positions_f.max(axis=-2)
    cloth_aabb_extent = cloth_aabb_max - cloth_aabb_min
    # The cloth must have slipped out: its footprint stays below the phase-one
    # stretch target and it has crumpled slightly out of plane.
    assert (cloth_aabb_extent[..., :2] < STRETCH_RATIO_1 * (2.0 * CLOTH_HALF)).all()
    assert ((0.001 < cloth_aabb_extent[..., 2]) & (cloth_aabb_extent[..., 2] < 0.2)).all()
@pytest.mark.required
def test_coup_collision_links():
    """Verify that coup_collision_links positive filter correctly limits IPC collision to named links."""
    from genesis.engine.entities import RigidEntity

    # Minimal scene with a single two-link robot whose IPC collision
    # participation is restricted to the "moving" link only.
    sim_scene = gs.Scene(
        coupler_options=gs.options.IPCCouplerOptions(
            enable_rigid_rigid_contact=False,
            two_way_coupling=True,
        ),
        show_viewer=False,
    )
    arm = sim_scene.add_entity(
        morph=gs.morphs.URDF(
            file="urdf/simple/two_cube_revolute.urdf",
            pos=(0, 0, 0.2),
            fixed=True,
        ),
        material=gs.materials.Rigid(
            coup_type="two_way_soft_constraint",
            coup_collision_links=("moving",),
        ),
    )
    assert isinstance(arm, RigidEntity)
    sim_scene.build()
    assert sim_scene.sim is not None

    # The coupler records, per entity, which links had IPC collision disabled.
    ipc_coupler = cast("IPCCoupler", sim_scene.sim.coupler)
    link_settings = ipc_coupler._coupling_collision_settings[arm]
    disabled_link = arm.get_link("base")
    allowed_link = arm.get_link("moving")
    # "base" is absent from coup_collision_links so it must be explicitly
    # disabled; "moving" must not appear in the disabled dict at all.
    assert disabled_link in link_settings
    assert link_settings[disabled_link] is False
    assert allowed_link not in link_settings
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_ipc.py",
"license": "Apache License 2.0",
"lines": 1393,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:.github/workflows/scripts/alarm.py | """
This script runs from alarm.yml
Terminology/variable names:
- benchmark suite results: the results of running all benchmark tests once, for a specific code base
- the code base could be conceptually:
- the current code under test
- some past revision of the code, described by a git commit hash
- there are actually multiple benchmark test suites, identified by a suite_id
- in this script, we are only interested in the rigid benchmark suite
- metric: the string name of something we are measuring, such as 'runtime_fps'
- configuration parameter: something we vary/control, such as batch_size, or env
- config_params_str: a string like "backend=cpu-n_envs=64", which specifies specific configuration
parameters, in string format
- note that, two config_params_str might represent the same configuration, but be different strings,
      because ordering of configuration parameters might be different
- config_params_fdict: a frozen dict that represents a specific set of configuration parameters
- by comparison with config_str, two identical config_params_fdict's always represent the same configuration
- note that config_params_fdict's are hashable
- (fdict is an abbreviation for 'frozendict')
- config_param_names: ordered list of the config parameter names, that we have almost certainly derived
from a config_params_fdict, by simply returning the ordered list of keys (though we may have merged
such a list over multiple config_params_fdict's)
- we are prefixing with 'config' to make explicit that this does not include the names of metrics
- 'pipeline format':
a string having format like:
"solver=PBD | backend=cpu | n_envs=128 | compile_time=2.52 | runtime_fps=990.0 | realtime_factor=49.5"
"""
import argparse
import csv
import dataclasses
import json
import math
import os
import statistics
import sys
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable, Iterable
from frozendict import frozendict
from wandb.apis.public import Run
import wandb
def config_params_str_to_fdict(config_params_str: str) -> frozendict[str, str]:
    """
    Parse a config_params_str of the form:

        solver=PBD-backend=cpu-n_envs=128

    into a frozen dict mapping parameter names to their values.
    Values are kept as strings; no numeric conversion is performed.
    Tokens without an "=" (and empty tokens) are silently skipped.
    """
    pairs: dict[str, str] = {}
    for raw_token in config_params_str.split("-") if config_params_str else ():
        raw_token = raw_token.strip()
        if raw_token and "=" in raw_token:
            name, value = raw_token.split("=", 1)
            pairs[name.strip()] = value.strip()
    return frozendict(pairs)
def merge_string_tuples(tuples: tuple[tuple[str, ...], ...]) -> tuple[str, ...]:
    """
    Merge tuples of strings into a single tuple of strings which:

    - preserves the relative order of keys within each tuple
    - gives precedence to later tuples when conflicts arise

    Returns an empty tuple when ``tuples`` is empty (the original raised
    IndexError on ``tuples[-1]`` in that case).
    """
    if not tuples:
        return ()
    # Seed with the last tuple so its ordering wins for shared keys.
    merged_keys = list(tuples[-1])
    merged_keys_set = set(merged_keys)
    for tuple_ in tuples[:-1]:
        for key in tuple_:
            if key not in merged_keys_set:
                merged_keys.append(key)
                merged_keys_set.add(key)
    return tuple(merged_keys)
class SortKey:
    """
    Sort-key callable ordering dicts by the configured parameter names.

    Only keys listed in config_param_names take part in the ordering (so the
    values of metrics are ignored during sorting). For each parameter name the
    key tuple is (False, value) when the dict has that parameter and
    (True, None) otherwise; since False sorts before True, dicts missing a
    parameter are placed after dicts that have it, and ties fall through to
    the next parameter name.
    """

    def __init__(self, config_param_names: Iterable[str]) -> None:
        self.config_param_names = config_param_names

    def __call__(self, d: frozendict[str, Any]) -> list[tuple[int, int | float | None]]:
        """Return the list of (is_missing, value) tuples used for comparison."""
        return [((value := d.get(name)) is None, value) for name in self.config_param_names]
def parse_results_file(
    results_file_path: Path, metric_keys: Iterable[str]
) -> dict[frozendict[str, str], dict[str, float]]:
    """
    Parse a results file whose lines are in pipeline format, e.g.:

        solver=PBD | backend=cpu | n_envs=128 | compile_time=2.52 | runtime_fps=990.0 | realtime_factor=49.5

    Returns a dict mapping each row's configuration (a frozendict of every
    key/value pair on the line EXCEPT the metric keys) to a dict of
    {metric name: measured float value} for the metrics in ``metric_keys``.
    """
    # easy to accidentally send a string instead of a tuple
    assert isinstance(metric_keys, tuple)
    results: dict[frozendict[str, str], dict[str, int | float]] = {}
    for line in results_file_path.read_text().splitlines():
        # Split the pipe-delimited row into stripped "key=value" fields.
        fields: dict[str, str] = {}
        for field in line.split("|"):
            if "=" not in field:
                continue
            name, value = (part.strip() for part in field.split("=", 1))
            fields[name] = value
        # Move the measured metrics out of the field dict, coercing to float.
        # Note: the field is removed even when the float conversion fails,
        # matching the original pop-then-convert order.
        metrics: dict[str, float | int] = {}
        for metric_key in metric_keys:
            if metric_key not in fields:
                continue
            raw_value = fields.pop(metric_key)
            try:
                metrics[metric_key] = float(raw_value)
            except (ValueError, TypeError):
                pass
        results[frozendict(fields)] = metrics
    return results
def fmt_num(v, is_int: bool):
    """
    Render a numeric value for display:
    - NaN => "NaN"
    - is_int => integer with thousands separators
    - otherwise => float with 2 decimal places
    """
    if v != v:  # NaN is the only value not equal to itself
        return "NaN"
    if is_int:
        return f"{int(v):,}"
    return f"{v:.2f}"
class WandbParser:
    """
    Strategy interface: extract benchmark records from one W&B run.

    Subclasses provide the W&B project name and the logic that merges a run's
    (config, summary) into the per-commit records dictionary.
    """

    @property
    def project(self):
        """Name of the W&B project this parser reads runs from."""
        raise NotImplementedError

    def __call__(
        self,
        benchmark_under_test: "BenchmarkRunUnderTest",
        records_by_commit_hash: dict[str, dict[frozendict[str, str], dict[str, int | float]]],
        config,
        summary,
        commit_hash: str,
    ) -> None:
        """Merge the records found in this run into records_by_commit_hash."""
        raise NotImplementedError
class WandbParserNewFormat(WandbParser):
    """Parses the current summary layout where keys look like "<metric>-<k=v-k=v-...>"."""

    @property
    def project(self):
        return "genesis-benchmarks-2"

    def __call__(
        self,
        benchmark_under_test: "BenchmarkRunUnderTest",
        records_by_commit_hash: dict[str, dict[frozendict[str, str], dict[str, int | float]]],
        config,
        summary,
        commit_hash: str,
    ) -> None:
        # Every non-internal summary key encodes a metric name followed by the
        # dash-joined configuration parameters it was measured under.
        for summary_key, metric_value in summary.items():
            if summary_key.startswith("_"):
                continue
            metric_name, _, config_params_str = summary_key.partition("-")
            config_params_fdict = config_params_str_to_fdict(config_params_str)
            records_by_commit_hash[commit_hash][config_params_fdict][metric_name] = metric_value
class BenchmarkRunUnderTest:
    """
    Benchmark results for the code currently under test, loaded from text
    files whose rows are in pipeline format:

        | foo=123 | bar=456 | ...
    """

    def __init__(self, artifacts_dir: Path, metric_keys: Iterable[str], filename_glob: str) -> None:
        """
        metric_keys: names of the measured values, such as runtime_fps
        filename_glob: glob used to locate the data files for the benchmark
            run under test below artifacts_dir
        """
        self.result_file_paths = list(artifacts_dir.rglob(filename_glob))
        # fail fast when no current benchmark data is present at all
        assert self.result_file_paths
        self.metric_keys = metric_keys
        # maps each config_param_fdict to a dict of {metric name: value}
        self.results: dict[frozendict[str, str], dict[str, float]] = {}
        # NOTE(review): the loop variable doubles as an instance attribute in
        # the original code; preserved here so behavior is unchanged.
        for self.result_file_path in self.result_file_paths:
            self.results |= parse_results_file(self.result_file_path, self.metric_keys)
        # the full set of configurations; a historical run must cover all of
        # them to count as 'complete' (older runs may lack newer benchmarks)
        self.all_config_param_fdicts = frozenset(self.results)
        assert self.all_config_param_fdicts
        # ordered list of the configuration parameter names across all results
        self.config_param_names = merge_string_tuples(tuple(tuple(kv) for kv in self.results))
class Alarm:
    """
    Compares the benchmark run under test against recent baseline runs fetched
    from W&B, emits a markdown check body plus per-metric CSV files, and flags
    regressions / alerts.
    """

    def __init__(self, args: argparse.Namespace) -> None:
        """Store CLI options: revision limits, metric names/tolerances, and I/O paths."""
        self.max_valid_revisions = args.max_valid_revisions
        self.max_fetch_revisions = args.max_fetch_revisions
        # let's just define these metric names in one place
        self.metric_compile_time = "compile_time"
        self.metric_runtime_fps = "runtime_fps"
        self.metric_realtime_factor = "realtime_factor"
        self.metric_max_mem_mb = "max_mem_mb"
        # per-metric tolerance in percent; realtime_factor is reported but not gated
        self.metrics_tol = {
            self.metric_runtime_fps: args.runtime_fps_regression_tolerance_pct,
            self.metric_compile_time: args.compile_time_regression_tolerance_pct,
            self.metric_max_mem_mb: args.mem_regression_tolerance_pct,
        }
        self.speed_artifacts_dir = Path(args.speed_artifacts_dir).expanduser().resolve()
        self.mem_artifacts_dir = Path(args.mem_artifacts_dir).expanduser().resolve()
        self.check_body_path = Path(args.check_body_path).expanduser()
        # one CSV output file per gated metric
        self.csv_out_file_by_metric_name = {
            self.metric_runtime_fps: Path(args.csv_runtime_fps_path).expanduser().resolve(),
            self.metric_compile_time: Path(args.csv_compile_time_path).expanduser().resolve(),
            self.metric_max_mem_mb: Path(args.csv_mem_path).expanduser().resolve(),
        }
        self.speed_metric_keys = (
            self.metric_compile_time,
            self.metric_runtime_fps,
            self.metric_realtime_factor,
        )
        self.mem_metric_keys = (self.metric_max_mem_mb,)  # note: make sure this stays a tuple
        self.dev_skip_speed = args.dev_skip_speed
        self.dev_allow_all_branches = args.dev_allow_all_branches
        # WANDB_API_KEY is read implicitly by the wandb client; fail fast if missing
        assert "WANDB_API_KEY" in os.environ
        self.wandb_entity = os.environ["WANDB_ENTITY"]

    def fetch_wandb_data(
        self,
        benchmark_under_test: BenchmarkRunUnderTest,
        run_name_prefix: str | None,
        wandb_parser: WandbParser,
    ) -> dict[str, dict[frozendict[str, str], dict[str, float | int]]]:
        """
        Fetch historical benchmark records from W&B, newest runs first.

        Returns a dict mapping commit hash -> {config_params_fdict -> {metric: value}}.
        Collection stops once max_valid_revisions complete record sets exist, or
        after max_fetch_revisions distinct commits have been inspected.
        """
        api = wandb.Api()
        runs_iter: Iterable[Run] = api.runs(f"{self.wandb_entity}/{wandb_parser.project}", order="-created_at")
        commit_hashes = set()
        records_by_commit_hash: dict[str, dict[frozendict[str, str], dict[str, float | int]]] = defaultdict(
            lambda: defaultdict(dict)
        )
        for i, run in enumerate(runs_iter):
            # only consider runs produced by the matching upload job (speed- / mem-)
            if run_name_prefix and not run.name.startswith(run_name_prefix):
                continue
            # Abort if still not complete after checking enough runs.
            # This would happen if a new benchmark has been added, and not enough past data is available yet.
            if len(commit_hashes) == self.max_fetch_revisions:
                break
            # Early return if enough complete records have been collected
            complete_records = [
                benchmark_under_test.all_config_param_fdicts.issubset(record.keys())
                for record in records_by_commit_hash.values()
            ]
            if sum(complete_records) == self.max_valid_revisions:
                break
            # Load config and summary, with support of legacy runs
            summary: dict[str, Any]
            try:
                config, summary = run.config, run.summary  # type: ignore
            except Exception as e:
                print(e)
                continue
            # legacy runs stored config/summary as raw JSON strings
            if isinstance(config, str):
                config = {k: v["value"] for k, v in json.loads(config).items() if not k.startswith("_")}
            if isinstance(summary._json_dict, str):  # type: ignore
                summary = json.loads(summary._json_dict)  # type: ignore
            # Extract revision commit and branch (format: "<commit>@<branch>")
            try:
                commit_hash, branch = config["revision"].split("@", 1)
                commit_hashes.add(commit_hash)
            except ValueError:
                # Ignore this run if the revision has been corrupted for some unknown reason
                continue
            # Ignore runs associated with a commit that is not part of the official repository
            if not branch.startswith("Genesis-Embodied-AI/") and not self.dev_allow_all_branches:
                continue
            # Skip runs that did not finish for some reason
            if run.state != "finished":
                continue
            # Do not store new records if the desired number of revisions is already reached
            if len(records_by_commit_hash) == self.max_valid_revisions and commit_hash not in records_by_commit_hash:
                continue
            wandb_parser(
                benchmark_under_test=benchmark_under_test,
                records_by_commit_hash=records_by_commit_hash,
                config=config,
                summary=summary,
                commit_hash=commit_hash,
            )
        return records_by_commit_hash

    def build_table(
        self,
        config_param_names: tuple[str, ...],
        alias: str,
        metric: str,
        benchmark_run_under_test: BenchmarkRunUnderTest,
        records_by_commit_hash: dict[str, Any],
        sign: int,
    ) -> tuple[list[str], bool, bool]:
        """
        Build the markdown table (plus baseline commit list) for one metric,
        mirroring the same rows into the metric's CSV file.

        sign is +1 when larger values are better (FPS) and -1 when smaller
        values are better (compile time, memory).

        Returns (markdown_lines, regression_found, alert_found).
        """
        # together these rows contain the text of the markdown table
        markdown_rows = []
        rows = []
        alert_found, reg_found = False, False
        # the labels in the header row of the table
        header_cells = (
            "status",
            *config_param_names,
            f"current {alias}",
            f"baseline {alias} [last (mean ± std)] (*1)",
            f"Δ {alias} (*2)",
        )
        header = "| " + " | ".join(header_cells) + " |"
        align = "|:------:|" + "|".join([":---" for _ in config_param_names]) + "|---:|---:|---:|"
        row_data = {}
        for config_params_fdict in sorted(
            benchmark_run_under_test.results.keys(), key=SortKey(config_param_names=config_param_names)
        ):
            value_cur = benchmark_run_under_test.results[config_params_fdict][metric]
            is_int = isinstance(value_cur, int) or value_cur.is_integer()
            value_repr = fmt_num(value_cur, is_int)
            params_repr = [config_params_fdict.get(k, "-") for k in config_param_names]
            row_data = {
                **dict(zip(config_param_names, params_repr)),
                "current": value_cur,
                "baseline_last": None,
                "baseline_mean": None,
                "baseline_min": None,
                "baseline_max": None,
                "status": None,
            }
            # historical values for this configuration, newest commit first
            values_prev = [
                record[config_params_fdict][metric]
                for record in records_by_commit_hash.values()
                if config_params_fdict in record
            ]
            if values_prev:
                value_last = values_prev[0]
                value_ref = statistics.fmean(values_prev)
                # relative change vs the most recent baseline commit, in percent
                delta = (value_cur - value_last) / value_last * 100.0
                row_data["baseline_last"] = int(value_last) if is_int else float(value_last)
                stats_repr = f"{fmt_num(value_last, is_int)}"
                delta_repr = f"{delta:+.1f}%"
                if len(values_prev) >= self.max_valid_revisions:
                    row_data["baseline_mean"] = int(value_ref) if is_int else float(value_ref)
                    row_data["baseline_min"] = int(min(values_prev)) if is_int else float(min(values_prev))
                    row_data["baseline_max"] = int(max(values_prev)) if is_int else float(max(values_prev))
                    # 95% confidence interval of the baseline mean (NaN for a single sample)
                    value_ci95 = (
                        statistics.stdev(values_prev) / math.sqrt(len(values_prev)) * 1.96
                        if len(values_prev) > 1
                        else math.nan
                    )
                    stats_repr += f" ({fmt_num(value_ref, is_int)} ± {fmt_num(value_ci95, is_int)})"
                    # after sign normalization, a drop beyond tolerance is a regression
                    # and a rise beyond tolerance is a (suspicious) improvement alert
                    if sign * delta < -self.metrics_tol[metric]:
                        row_data["status"] = "regression"
                        delta_repr = f"**{delta_repr}**"
                        picto = "🔴"
                        reg_found = True
                    elif sign * delta > self.metrics_tol[metric]:
                        row_data["status"] = "alert"
                        delta_repr = f"**{delta_repr}**"
                        picto = "⚠️"
                        alert_found = True
                    else:
                        row_data["status"] = "ok"
                        picto = "✅"
                else:
                    # not enough baseline revisions yet to gate this configuration
                    row_data["status"] = "n/a"
                    picto = "ℹ️"
            else:
                picto, stats_repr, delta_repr = "ℹ️", "---", "---"
            markdown_rows.append("| " + " | ".join((picto, *params_repr, value_repr, stats_repr, delta_repr)) + " |")
            rows.append(row_data)
        blist = [f"- Commit {i}: {sha}" for i, sha in enumerate(records_by_commit_hash.keys(), 1)]
        baseline_block = ["**Baselines considered:** " + f"**{len(records_by_commit_hash)}** commits"] + blist
        # mirror the table rows into the CSV file (fieldnames come from the last row built)
        with self.csv_out_file_by_metric_name[metric].open("w", newline="", encoding="utf-8") as f:
            w = csv.DictWriter(f, fieldnames=row_data.keys())
            w.writeheader()
            for rec in rows:
                w.writerow(rec)
        return [header, align] + markdown_rows + [""] + baseline_block, reg_found, alert_found
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--speed-artifacts-dir", type=str, required=True)
    parser.add_argument("--mem-artifacts-dir", type=str, required=True)
    parser.add_argument(
        "--max-valid-revisions",
        type=int,
        default=10,
        help="limits how many git commits are used to build the baseline statistics",
    )
    parser.add_argument("--max-fetch-revisions", type=int, default=10)
    parser.add_argument("--runtime-fps-regression-tolerance-pct", type=float, default=10)
    parser.add_argument("--compile-time-regression-tolerance-pct", type=float, default=10)
    parser.add_argument("--mem-regression-tolerance-pct", type=float, default=10)
    parser.add_argument("--check-body-path", type=str, required=True)
    parser.add_argument("--csv-runtime-fps-path", type=str, required=True)
    parser.add_argument("--csv-compile-time-path", type=str, required=True)
    parser.add_argument("--csv-mem-path", type=str, required=True)
    parser.add_argument("--exit-code-regression", type=int, default=42)
    parser.add_argument("--exit-code-alert", type=int, default=43)
    parser.add_argument("--dev-skip-speed", action="store_true")
    parser.add_argument("--dev-allow-all-branches", action="store_true")
    args = parser.parse_args()

    alarm = Alarm(args=args)

    # Parse the benchmark results of the code under test from the artifact files.
    results_under_test_speed = BenchmarkRunUnderTest(
        artifacts_dir=alarm.speed_artifacts_dir, metric_keys=alarm.speed_metric_keys, filename_glob="speed_test*.txt"
    )
    results_under_test_mem = BenchmarkRunUnderTest(
        artifacts_dir=alarm.mem_artifacts_dir, metric_keys=alarm.mem_metric_keys, filename_glob="mem_test*.txt"
    )

    # Fetch baseline records from W&B (the speed fetch can be skipped for dev runs).
    speed_records_by_commit_hash = {}
    if not alarm.dev_skip_speed:
        speed_records_by_commit_hash = alarm.fetch_wandb_data(
            benchmark_under_test=results_under_test_speed,
            run_name_prefix="speed-",
            wandb_parser=WandbParserNewFormat(),
        )
    mem_records_by_commit_hash = alarm.fetch_wandb_data(
        benchmark_under_test=results_under_test_mem, run_name_prefix="mem-", wandb_parser=WandbParserNewFormat()
    )

    # Build one markdown table per gated metric, accumulating regression/alert flags.
    # (A duplicated initialization of reg_found/alert_found was removed here.)
    table_by_metric_name: dict[str, list[str]] = {}
    reg_found, alert_found = False, False
    for metric, alias, sign, results_under_test_, records_by_commit_hash_ in (
        (alarm.metric_runtime_fps, "FPS", 1, results_under_test_speed, speed_records_by_commit_hash),
        (alarm.metric_compile_time, "compile", -1, results_under_test_speed, speed_records_by_commit_hash),
        (alarm.metric_max_mem_mb, "memory", -1, results_under_test_mem, mem_records_by_commit_hash),
    ):
        (table_by_metric_name[metric], reg_found_, alert_found_) = alarm.build_table(
            config_param_names=results_under_test_.config_param_names,
            alias=alias,
            metric=metric,
            sign=sign,
            benchmark_run_under_test=results_under_test_,
            records_by_commit_hash=records_by_commit_hash_,
        )
        reg_found |= reg_found_
        alert_found |= alert_found_

    # Assemble the check body markdown and write it out.
    thr_repr = ", ".join(
        f"{alias} ± {alarm.metrics_tol[metric]:.0f}%"
        for metric, alias in (
            (alarm.metric_runtime_fps, "runtime"),
            (alarm.metric_compile_time, "compile"),
            (alarm.metric_max_mem_mb, "mem"),
        )
    )
    check_body = "\n".join(
        [
            f"Thresholds: {thr_repr}",
            "",
            "### Runtime FPS",
            *table_by_metric_name[alarm.metric_runtime_fps],
            "",
            "### Compile Time",
            *table_by_metric_name[alarm.metric_compile_time],
            "",
            "### Memory usage",
            *table_by_metric_name[alarm.metric_max_mem_mb],
            "",
            f"- (*1) last: last commit on main, mean/std: stats over the last {alarm.max_valid_revisions} commits if available.",
            "- (*2) Δ: relative difference between PR and last commit on main, i.e. (PR - main) / main * 100%.",
        ]
    )
    alarm.check_body_path.write_text(check_body + "\n", encoding="utf-8")

    # Exit codes signal the CI workflow: regression takes precedence over alert.
    if reg_found:
        sys.exit(int(args.exit_code_regression))
    if alert_found:
        sys.exit(int(args.exit_code_alert))
    sys.exit(0)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": ".github/workflows/scripts/alarm.py",
"license": "Apache License 2.0",
"lines": 477,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:tests/upload_benchmarks_table_to_wandb.py | """
Upload benchmark results to Weights & Biases.
This script parses benchmark results files (memory or performance) generated
by monitor_test_mem.py or similar monitoring tools and uploads them to W&B.
Memory example:
env=franka | constraint_solver=None | gjk_collision=True | batch_size=30000 | backend=cuda | dtype=field | max_mem_mb=123
env=anymal | constraint_solver=Newton | gjk_collision=False | batch_size=0 | backend=cpu | dtype=ndarray | max_mem_mb=234
Performance example:
env=franka | batch_size=30000 | dtype=field | backend=cuda | compile_time=68.4 | runtime_fps=20067534.0 | realtime_factor=200675.3
    env=anymal | constraint_solver=Newton | gjk_collision=False | batch_size=0 | backend=cpu | dtype=ndarray | compile_time=3.2 | runtime_fps=1355.0 | realtime_factor=3322
... and check uploads to https://wandb.ai/genesis-ai-company/genesis-benchmarks-mem/table
"""
import argparse
import wandb
import os
import sys
from pathlib import Path
from utils import get_git_commit_info, pprint_oneline
def upload_results_to_wandb(
    run_prefix: str | None, results_file_path: str, project_name: str, metric_names=None
) -> None:
    """
    Parse results file in pipe-delimited format and upload to W&B.

    Args:
        run_prefix: Optional prefix prepended to the W&B run name.
        results_file_path: Path to the results file
        project_name: W&B project name (e.g., "genesis-benchmarks-mem" or "genesis-benchmarks-perf")
        metric_names: List of metric field names to log. If None, logs all non-parameter fields.
    """
    revision, _ = get_git_commit_info()
    print(f"Uploading results to W&B project '{project_name}' for revision: {revision}")
    uploaded_count = 0
    skipped_count = 0
    # Initialize a single run for all benchmark results
    name = f"{revision[:12]}"
    if run_prefix:
        name = f"{run_prefix}-{name}"
    run = wandb.init(
        project=project_name,
        name=name,
        config={
            "revision": revision,
        },
        settings=wandb.Settings(
            x_disable_stats=True,
            console="off",
        ),
    )
    with open(results_file_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            # Parse pipe-delimited format: key=value | key=value | ...
            # NOTE(review): the delimiter here is the literal " \t| " (space, tab, pipe, space),
            # which differs from the plain "|" split used by the downstream alarm script —
            # confirm the producer writes exactly this separator.
            params = {}
            for part in line.split(" \t| "):
                if "=" in part:
                    k, v = part.split("=", 1)
                    params[k.strip()] = v.strip()
            if not params:
                skipped_count += 1
                continue
            # Extract metrics based on specified metric_names or all remaining fields
            if metric_names:
                metrics = {k: float(params.pop(k)) for k in metric_names if k in params}
            else:
                # Extract all numeric fields as metrics (non-parameters)
                metrics = {}
                for k in list(params.keys()):
                    try:
                        metrics[k] = float(params[k])
                    except ValueError:
                        # non-numeric => treat as a configuration parameter
                        continue
                    del params[k]
            if not metrics:
                skipped_count += 1
                continue
            # Sort params for consistent benchmark ID ordering
            sorted_params = dict(sorted(params.items()))
            # Create benchmark ID matching alarm.yml format
            benchmark_id_suffix = pprint_oneline(sorted_params, delimiter="-")
            for metric_name, metric_value in metrics.items():
                benchmark_id = f"{metric_name}-{benchmark_id_suffix}"
                print(f"📊 Uploading {benchmark_id}: {metric_value}")
                run.log({benchmark_id: metric_value})
            # counts processed result lines, not individual metric uploads
            uploaded_count += 1
    run.finish()
    print(f"\n✅ Upload complete: {uploaded_count} results processed, {skipped_count} skipped")
if __name__ == "__main__":
    # CLI wrapper around upload_results_to_wandb.
    cli = argparse.ArgumentParser(description="Upload benchmark results to W&B")
    cli.add_argument("--in-file", required=True, help="Path to results file")
    cli.add_argument(
        "--project", required=True, help="W&B project name (e.g., genesis-benchmarks-mem or genesis-benchmarks-perf)"
    )
    cli.add_argument("--run-prefix", help="Added at start of W&B run name, if provided")
    cli.add_argument(
        "--metrics",
        nargs="+",
        default=None,
        help="Metric field names to upload (e.g. max_mem_mb compile_time runtime_fps). If not specified, all numeric fields are uploaded.",
    )
    parsed = cli.parse_args()

    upload_results_to_wandb(
        run_prefix=parsed.run_prefix,
        results_file_path=parsed.in_file,
        project_name=parsed.project,
        metric_names=parsed.metrics,
    )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/upload_benchmarks_table_to_wandb.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/abd/accessor.py | """
Rigid solver control, getter, and setter kernel functions.
This module contains Quadrants kernel functions for controlling rigid body simulations,
including state getters/setters, link position/quaternion manipulation, DOF control,
and drone-specific operations.
These functions are used by the RigidSolver class to interface with the Quadrants
data structures for rigid body dynamics simulation.
"""
import quadrants as qd
import genesis as gs
import genesis.utils.geom as gu
import genesis.utils.array_class as array_class
from .misc import func_apply_link_external_force, func_apply_link_external_torque
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_get_kinematic_state(
    qpos: qd.types.ndarray(),
    vel: qd.types.ndarray(),
    links_pos: qd.types.ndarray(),
    links_quat: qd.types.ndarray(),
    i_pos_shift: qd.types.ndarray(),
    links_state: array_class.LinksState,
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """Copy the solver's kinematic state into the caller-provided output arrays.

    Reads generalized coordinates (rigid_global_info.qpos), DOF velocities
    (dofs_state.vel), and per-link pose (links_state.pos / quat) plus
    links_state.i_pos_shift into batch-major ndarrays.

    Note the transposed index order: solver fields are indexed [i, i_b]
    while the output arrays are indexed [i_b, i].
    """
    # Sizes are derived from the output array shapes (batch dim first).
    n_qs = qpos.shape[1]
    n_dofs = vel.shape[1]
    n_links = links_pos.shape[1]
    _B = qpos.shape[0]
    # Generalized coordinates.
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_q, i_b in qd.ndrange(n_qs, _B):
        qpos[i_b, i_q] = rigid_global_info.qpos[i_q, i_b]
    # DOF velocities.
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_d, i_b in qd.ndrange(n_dofs, _B):
        vel[i_b, i_d] = dofs_state.vel[i_d, i_b]
    # Per-link position (3 components), position shift (3), and quaternion (4).
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l, i_b in qd.ndrange(n_links, _B):
        for j in qd.static(range(3)):
            links_pos[i_b, i_l, j] = links_state.pos[i_l, i_b][j]
            i_pos_shift[i_b, i_l, j] = links_state.i_pos_shift[i_l, i_b][j]
        for j in qd.static(range(4)):
            links_quat[i_b, i_l, j] = links_state.quat[i_l, i_b][j]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_kinematic_state(
    envs_idx: qd.types.ndarray(),
    qpos: qd.types.ndarray(),
    dofs_vel: qd.types.ndarray(),
    links_pos: qd.types.ndarray(),
    links_quat: qd.types.ndarray(),
    i_pos_shift: qd.types.ndarray(),
    links_state: array_class.LinksState,
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """Write caller-provided kinematic state into the solver fields.

    Only the environments listed in envs_idx are touched. Note that the input
    arrays are indexed with the global environment index (envs_idx[i_b_]) on
    their first axis, not with the compacted loop index i_b_, so they must be
    sized for the full batch.
    """
    # Sizes come from the input array shapes; _B is the number of selected envs.
    n_qs = qpos.shape[1]
    n_dofs = dofs_vel.shape[1]
    n_links = links_pos.shape[1]
    _B = envs_idx.shape[0]
    # Generalized coordinates.
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_q, i_b_ in qd.ndrange(n_qs, _B):
        rigid_global_info.qpos[i_q, envs_idx[i_b_]] = qpos[envs_idx[i_b_], i_q]
    # DOF velocities.
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_d, i_b_ in qd.ndrange(n_dofs, _B):
        dofs_state.vel[i_d, envs_idx[i_b_]] = dofs_vel[envs_idx[i_b_], i_d]
    # Per-link position (3 components), position shift (3), and quaternion (4).
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l, i_b_ in qd.ndrange(n_links, _B):
        for j in qd.static(range(3)):
            links_state.pos[i_l, envs_idx[i_b_]][j] = links_pos[envs_idx[i_b_], i_l, j]
            links_state.i_pos_shift[i_l, envs_idx[i_b_]][j] = i_pos_shift[envs_idx[i_b_], i_l, j]
        for j in qd.static(range(4)):
            links_state.quat[i_l, envs_idx[i_b_]][j] = links_quat[envs_idx[i_b_], i_l, j]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_get_state(
    qpos: qd.types.ndarray(),
    vel: qd.types.ndarray(),
    acc: qd.types.ndarray(),
    links_pos: qd.types.ndarray(),
    links_quat: qd.types.ndarray(),
    i_pos_shift: qd.types.ndarray(),
    mass_shift: qd.types.ndarray(),
    friction_ratio: qd.types.ndarray(),
    links_state: array_class.LinksState,
    dofs_state: array_class.DofsState,
    geoms_state: array_class.GeomsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Copy the full simulation state (qpos, DOF velocities/accelerations, link poses,
    COM/mass shifts, and geom friction ratios) into the batch-first output arrays.

    Superset of ``kernel_get_kinematic_state``: additionally exports DOF accelerations,
    link mass shifts and per-geom friction ratios.
    """
    n_qs = qpos.shape[1]
    n_dofs = vel.shape[1]
    n_links = links_pos.shape[1]
    n_geoms = friction_ratio.shape[1]
    _B = qpos.shape[0]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_q, i_b in qd.ndrange(n_qs, _B):
        qpos[i_b, i_q] = rigid_global_info.qpos[i_q, i_b]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_d, i_b in qd.ndrange(n_dofs, _B):
        vel[i_b, i_d] = dofs_state.vel[i_d, i_b]
        acc[i_b, i_d] = dofs_state.acc[i_d, i_b]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l, i_b in qd.ndrange(n_links, _B):
        for j in qd.static(range(3)):
            links_pos[i_b, i_l, j] = links_state.pos[i_l, i_b][j]
            i_pos_shift[i_b, i_l, j] = links_state.i_pos_shift[i_l, i_b][j]
        for j in qd.static(range(4)):
            links_quat[i_b, i_l, j] = links_state.quat[i_l, i_b][j]
        mass_shift[i_b, i_l] = links_state.mass_shift[i_l, i_b]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    # NOTE: despite its name, i_l iterates over geoms in this last loop.
    for i_l, i_b in qd.ndrange(n_geoms, _B):
        friction_ratio[i_b, i_l] = geoms_state.friction_ratio[i_l, i_b]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_state(
    envs_idx: qd.types.ndarray(),
    qpos: qd.types.ndarray(),
    dofs_vel: qd.types.ndarray(),
    dofs_acc: qd.types.ndarray(),
    links_pos: qd.types.ndarray(),
    links_quat: qd.types.ndarray(),
    i_pos_shift: qd.types.ndarray(),
    mass_shift: qd.types.ndarray(),
    friction_ratio: qd.types.ndarray(),
    links_state: array_class.LinksState,
    dofs_state: array_class.DofsState,
    geoms_state: array_class.GeomsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Restore the full simulation state for the selected environments.

    Besides writing the given state, this also resets the controller of every DOF to
    zero-force FORCE mode and clears any externally applied link wrenches, so a restored
    state starts from a clean control/actuation slate.

    NOTE(review): input arrays are indexed by the *global* environment index
    (``envs_idx[i_b_]``), i.e. assumed full-batch sized — confirm against callers.
    """
    n_qs = qpos.shape[1]
    n_dofs = dofs_vel.shape[1]
    n_links = links_pos.shape[1]
    n_geoms = friction_ratio.shape[1]
    _B = envs_idx.shape[0]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_q, i_b_ in qd.ndrange(n_qs, _B):
        rigid_global_info.qpos[i_q, envs_idx[i_b_]] = qpos[envs_idx[i_b_], i_q]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_d, i_b_ in qd.ndrange(n_dofs, _B):
        dofs_state.vel[i_d, envs_idx[i_b_]] = dofs_vel[envs_idx[i_b_], i_d]
        dofs_state.acc[i_d, envs_idx[i_b_]] = dofs_acc[envs_idx[i_b_], i_d]
        # Reset actuation: zero commanded force, FORCE control mode.
        dofs_state.ctrl_force[i_d, envs_idx[i_b_]] = gs.qd_float(0.0)
        dofs_state.ctrl_mode[i_d, envs_idx[i_b_]] = gs.CTRL_MODE.FORCE
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l, i_b_ in qd.ndrange(n_links, _B):
        for j in qd.static(range(3)):
            links_state.pos[i_l, envs_idx[i_b_]][j] = links_pos[envs_idx[i_b_], i_l, j]
            links_state.i_pos_shift[i_l, envs_idx[i_b_]][j] = i_pos_shift[envs_idx[i_b_], i_l, j]
            # Clear externally applied wrenches (linear and angular parts).
            links_state.cfrc_applied_vel[i_l, envs_idx[i_b_]][j] = gs.qd_float(0.0)
            links_state.cfrc_applied_ang[i_l, envs_idx[i_b_]][j] = gs.qd_float(0.0)
        for j in qd.static(range(4)):
            links_state.quat[i_l, envs_idx[i_b_]][j] = links_quat[envs_idx[i_b_], i_l, j]
        links_state.mass_shift[i_l, envs_idx[i_b_]] = mass_shift[envs_idx[i_b_], i_l]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l, i_b_ in qd.ndrange(n_geoms, _B):
        geoms_state.friction_ratio[i_l, envs_idx[i_b_]] = friction_ratio[envs_idx[i_b_], i_l]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_get_state_grad(
    qpos_grad: qd.types.ndarray(),
    vel_grad: qd.types.ndarray(),
    links_pos_grad: qd.types.ndarray(),
    links_quat_grad: qd.types.ndarray(),
    links_state: array_class.LinksState,
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Backward counterpart of the state getters: accumulate the incoming output gradients
    (batch-first layout) into the solver's gradient fields (item-first layout).

    Accumulation uses qd.atomic_add so gradients add safely on top of whatever is
    already stored, rather than overwriting it.
    """
    n_qs = qpos_grad.shape[1]
    n_dofs = vel_grad.shape[1]
    n_links = links_pos_grad.shape[1]
    _B = qpos_grad.shape[0]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_q, i_b in qd.ndrange(n_qs, _B):
        qd.atomic_add(rigid_global_info.qpos.grad[i_q, i_b], qpos_grad[i_b, i_q])
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_d, i_b in qd.ndrange(n_dofs, _B):
        qd.atomic_add(dofs_state.vel.grad[i_d, i_b], vel_grad[i_b, i_d])
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l, i_b in qd.ndrange(n_links, _B):
        for j in qd.static(range(3)):
            qd.atomic_add(links_state.pos.grad[i_l, i_b][j], links_pos_grad[i_b, i_l, j])
        for j in qd.static(range(4)):
            qd.atomic_add(links_state.quat.grad[i_l, i_b][j], links_quat_grad[i_b, i_l, j])
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_links_pos(
    relative: qd.i32,
    pos: qd.types.ndarray(),
    links_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    links_info: array_class.LinksInfo,
    links_state: array_class.LinksState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Set the position of the selected links.

    Fixed root links (no parent, fixed) have no qpos entries, so their world pose is
    written directly into links_state; any other link is repositioned through the first
    three entries of its qpos segment (assumes those hold the translation of a floating
    base — TODO confirm this is only called for such links).

    If ``relative`` is true, the given position is an offset: it is added to the link's
    reference pose (links_info.pos) or to the reference configuration (qpos0).
    """
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l_, i_b_ in qd.ndrange(links_idx.shape[0], envs_idx.shape[0]):
        i_b = envs_idx[i_b_]
        i_l = links_idx[i_l_]
        # links_info may or may not be batched per environment (compile-time switch).
        I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
        if links_info.parent_idx[I_l] == -1 and links_info.is_fixed[I_l]:
            # Fixed base link: write the pose directly (no qpos entry exists).
            for j in qd.static(range(3)):
                links_state.pos[i_l, i_b][j] = pos[i_b_, i_l_, j]
            if relative:
                for j in qd.static(range(3)):
                    links_state.pos[i_l, i_b][j] = links_state.pos[i_l, i_b][j] + links_info.pos[I_l][j]
        else:
            # Jointed link: write the translation part of its qpos segment.
            q_start = links_info.q_start[I_l]
            for j in qd.static(range(3)):
                rigid_global_info.qpos[q_start + j, i_b] = pos[i_b_, i_l_, j]
            if relative:
                for j in qd.static(range(3)):
                    rigid_global_info.qpos[q_start + j, i_b] = (
                        rigid_global_info.qpos[q_start + j, i_b] + rigid_global_info.qpos0[q_start + j, i_b]
                    )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_wake_up_entities_by_links(
    links_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    links_info: array_class.LinksInfo,
    links_state: array_class.LinksState,
    entities_state: array_class.EntitiesState,
    entities_info: array_class.EntitiesInfo,
    dofs_state: array_class.DofsState,
    geoms_state: array_class.GeomsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """Wake up entities that own the specified links by setting their hibernated flags to False."""
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l_, i_b_ in qd.ndrange(links_idx.shape[0], envs_idx.shape[0]):
        i_b = envs_idx[i_b_]
        i_l = links_idx[i_l_]
        I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
        i_e = links_info.entity_idx[I_l]
        # Wake up the entity and all its components
        # The hibernated check also guards against waking the same entity twice when
        # several selected links belong to it (only the first iteration sees True).
        if entities_state.hibernated[i_e, i_b]:
            entities_state.hibernated[i_e, i_b] = False
            # Add entity to awake_entities list
            # atomic_add returns the previous counter value, i.e. this entity's slot.
            n_awake = qd.atomic_add(rigid_global_info.n_awake_entities[i_b], 1)
            rigid_global_info.awake_entities[n_awake, i_b] = i_e
            # Wake up all links of this entity and add to awake_links
            for i_l2 in range(entities_info.link_start[i_e], entities_info.link_end[i_e]):
                links_state.hibernated[i_l2, i_b] = False
                n_awake_links = qd.atomic_add(rigid_global_info.n_awake_links[i_b], 1)
                rigid_global_info.awake_links[n_awake_links, i_b] = i_l2
            # Wake up all DOFs of this entity and add to awake_dofs
            for i_d in range(entities_info.dof_start[i_e], entities_info.dof_end[i_e]):
                dofs_state.hibernated[i_d, i_b] = False
                n_awake_dofs = qd.atomic_add(rigid_global_info.n_awake_dofs[i_b], 1)
                rigid_global_info.awake_dofs[n_awake_dofs, i_b] = i_d
            # Wake up all geoms of this entity
            for i_g in range(entities_info.geom_start[i_e], entities_info.geom_end[i_e]):
                geoms_state.hibernated[i_g, i_b] = False
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_links_pos_grad(
    relative: qd.i32,
    pos_grad: qd.types.ndarray(),
    links_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    links_info: array_class.LinksInfo,
    links_state: array_class.LinksState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Backward counterpart of kernel_set_links_pos: fetch the gradient of the written
    position into ``pos_grad`` and zero it in the solver.

    ``relative`` is intentionally unused here: in the forward pass the relative offset
    only adds constant terms (reference pose / qpos0), which do not change the gradient.
    """
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l_, i_b_ in qd.ndrange(links_idx.shape[0], envs_idx.shape[0]):
        i_b = envs_idx[i_b_]
        i_l = links_idx[i_l_]
        I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
        if links_info.parent_idx[I_l] == -1 and links_info.is_fixed[I_l]:
            # Fixed base link: the forward pass wrote links_state.pos directly.
            for j in qd.static(range(3)):
                pos_grad[i_b_, i_l_, j] = links_state.pos.grad[i_l, i_b][j]
                links_state.pos.grad[i_l, i_b][j] = 0.0
        else:
            # Jointed link: the forward pass wrote the qpos translation entries.
            q_start = links_info.q_start[I_l]
            for j in qd.static(range(3)):
                pos_grad[i_b_, i_l_, j] = rigid_global_info.qpos.grad[q_start + j, i_b]
                rigid_global_info.qpos.grad[q_start + j, i_b] = 0.0
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_links_quat(
    relative: qd.i32,
    quat: qd.types.ndarray(),
    links_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    links_info: array_class.LinksInfo,
    links_state: array_class.LinksState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Set the orientation (quaternion) of the selected links.

    Fixed root links are written directly into links_state; other links go through the
    quaternion part of their qpos segment (entries q_start+3 .. q_start+6, which assumes
    a floating-base layout [x, y, z, qw, qx, qy, qz] — TODO confirm callers only select
    such links).

    If ``relative`` is true, the given quaternion is composed with the reference
    orientation (links_info.quat or the qpos0 quaternion) instead of replacing it.
    """
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l_, i_b_ in qd.ndrange(links_idx.shape[0], envs_idx.shape[0]):
        i_b = envs_idx[i_b_]
        i_l = links_idx[i_l_]
        I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
        if relative:
            quat_ = qd.Vector(
                [
                    quat[i_b_, i_l_, 0],
                    quat[i_b_, i_l_, 1],
                    quat[i_b_, i_l_, 2],
                    quat[i_b_, i_l_, 3],
                ],
                dt=gs.qd_float,
            )
            if links_info.parent_idx[I_l] == -1 and links_info.is_fixed[I_l]:
                # Fixed base link: compose with the link's reference orientation.
                links_state.quat[i_l, i_b] = gu.qd_transform_quat_by_quat(links_info.quat[I_l], quat_)
            else:
                # Jointed link: compose with the reference configuration's quaternion.
                q_start = links_info.q_start[I_l]
                quat0 = qd.Vector(
                    [
                        rigid_global_info.qpos0[q_start + 3, i_b],
                        rigid_global_info.qpos0[q_start + 4, i_b],
                        rigid_global_info.qpos0[q_start + 5, i_b],
                        rigid_global_info.qpos0[q_start + 6, i_b],
                    ],
                    dt=gs.qd_float,
                )
                quat_ = gu.qd_transform_quat_by_quat(quat0, quat_)
                for j in qd.static(range(4)):
                    rigid_global_info.qpos[q_start + j + 3, i_b] = quat_[j]
        else:
            if links_info.parent_idx[I_l] == -1 and links_info.is_fixed[I_l]:
                # Fixed base link: overwrite the stored orientation directly.
                for j in qd.static(range(4)):
                    links_state.quat[i_l, i_b][j] = quat[i_b_, i_l_, j]
            else:
                # Jointed link: overwrite the quaternion entries of its qpos segment.
                q_start = links_info.q_start[I_l]
                for j in qd.static(range(4)):
                    rigid_global_info.qpos[q_start + j + 3, i_b] = quat[i_b_, i_l_, j]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_links_quat_grad(
    relative: qd.i32,
    quat_grad: qd.types.ndarray(),
    links_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    links_info: array_class.LinksInfo,
    links_state: array_class.LinksState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Backward counterpart of kernel_set_links_quat: fetch the gradient of the written
    quaternion into ``quat_grad`` and zero it in the solver.

    NOTE(review): ``relative`` is unused — unlike the translation case, the relative
    forward path composes quaternions (a multiplicative, not additive, operation), so
    this backward appears to only cover the non-relative path. Confirm callers.
    """
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l_, i_b_ in qd.ndrange(links_idx.shape[0], envs_idx.shape[0]):
        i_b = envs_idx[i_b_]
        i_l = links_idx[i_l_]
        I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
        if links_info.parent_idx[I_l] == -1 and links_info.is_fixed[I_l]:
            # Fixed base link: the forward pass wrote links_state.quat directly.
            for j in qd.static(range(4)):
                quat_grad[i_b_, i_l_, j] = links_state.quat.grad[i_l, i_b][j]
                links_state.quat.grad[i_l, i_b][j] = 0.0
        else:
            # Jointed link: the forward pass wrote the qpos quaternion entries.
            q_start = links_info.q_start[I_l]
            for j in qd.static(range(4)):
                quat_grad[i_b_, i_l_, j] = rigid_global_info.qpos.grad[q_start + j + 3, i_b]
                rigid_global_info.qpos.grad[q_start + j + 3, i_b] = 0.0
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_links_mass_shift(
    mass: qd.types.ndarray(),
    links_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    links_state: array_class.LinksState,
    static_rigid_sim_config: qd.template(),
):
    """Write the per-link mass shift for the selected links and environments."""
    n_links_sel = links_idx.shape[0]
    n_envs_sel = envs_idx.shape[0]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l_, i_b_ in qd.ndrange(n_links_sel, n_envs_sel):
        i_l = links_idx[i_l_]
        i_b = envs_idx[i_b_]
        links_state.mass_shift[i_l, i_b] = mass[i_b_, i_l_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_links_COM_shift(
    com: qd.types.ndarray(),
    links_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    links_state: array_class.LinksState,
    static_rigid_sim_config: qd.template(),
):
    """Write the per-link center-of-mass shift (3-vector) for the selected links."""
    n_links_sel = links_idx.shape[0]
    n_envs_sel = envs_idx.shape[0]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_l_, i_b_ in qd.ndrange(n_links_sel, n_envs_sel):
        i_l = links_idx[i_l_]
        i_b = envs_idx[i_b_]
        for j in qd.static(range(3)):
            links_state.i_pos_shift[i_l, i_b][j] = com[i_b_, i_l_, j]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_links_inertial_mass(
    inertial_mass: qd.types.ndarray(),
    links_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    links_info: array_class.LinksInfo,
    static_rigid_sim_config: qd.template(),
):
    """Overwrite the inertial mass of the selected links (batched or shared link info)."""
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    if qd.static(static_rigid_sim_config.batch_links_info):
        # Batched link info: one value per (link, env) pair.
        for i_l_, i_b_ in qd.ndrange(links_idx.shape[0], envs_idx.shape[0]):
            i_l = links_idx[i_l_]
            i_b = envs_idx[i_b_]
            links_info.inertial_mass[i_l, i_b] = inertial_mass[i_b_, i_l_]
    else:
        # Shared link info: one value per link across all environments.
        for i_l_ in range(links_idx.shape[0]):
            links_info.inertial_mass[links_idx[i_l_]] = inertial_mass[i_l_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_geoms_friction_ratio(
    friction_ratio: qd.types.ndarray(),
    geoms_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    geoms_state: array_class.GeomsState,
    static_rigid_sim_config: qd.template(),
):
    """Write the friction ratio of the selected geoms for the selected environments."""
    n_geoms_sel = geoms_idx.shape[0]
    n_envs_sel = envs_idx.shape[0]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_g_, i_b_ in qd.ndrange(n_geoms_sel, n_envs_sel):
        i_g = geoms_idx[i_g_]
        i_b = envs_idx[i_b_]
        geoms_state.friction_ratio[i_g, i_b] = friction_ratio[i_b_, i_g_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_qpos(
    qpos: qd.types.ndarray(),
    qs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """Write the selected generalized coordinates for the selected environments."""
    n_qs_sel = qs_idx.shape[0]
    n_envs_sel = envs_idx.shape[0]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_q_, i_b_ in qd.ndrange(n_qs_sel, n_envs_sel):
        i_q = qs_idx[i_q_]
        i_b = envs_idx[i_b_]
        rigid_global_info.qpos[i_q, i_b] = qpos[i_b_, i_q_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_global_sol_params(
    sol_params: qd.types.ndarray(),
    geoms_info: array_class.GeomsInfo,
    joints_info: array_class.JointsInfo,
    equalities_info: array_class.EqualitiesInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Broadcast a single set of 7 constraint-solver parameters to every geom, joint and
    equality constraint in the scene.
    """
    n_geoms = geoms_info.sol_params.shape[0]
    n_joints = joints_info.sol_params.shape[0]
    n_equalities = equalities_info.sol_params.shape[0]
    _B = equalities_info.sol_params.shape[1]
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    # Geom solver parameters are shared across environments (no batch dimension).
    for i_g in range(n_geoms):
        for j in qd.static(range(7)):
            geoms_info.sol_params[i_g][j] = sol_params[j]
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_j, i_b in qd.ndrange(n_joints, _B):
        # Joint info may or may not be batched per environment (compile-time switch).
        I_j = [i_j, i_b] if qd.static(static_rigid_sim_config.batch_joints_info) else i_j
        for j in qd.static(range(7)):
            joints_info.sol_params[I_j][j] = sol_params[j]
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_eq, i_b in qd.ndrange(n_equalities, _B):
        for j in qd.static(range(7)):
            equalities_info.sol_params[i_eq, i_b][j] = sol_params[j]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_sol_params(
    constraint_type: qd.template(),
    sol_params: qd.types.ndarray(),
    inputs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    geoms_info: array_class.GeomsInfo,
    joints_info: array_class.JointsInfo,
    equalities_info: array_class.EqualitiesInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Set the 7 constraint-solver parameters for a selection of constraints of one kind.

    ``constraint_type`` is a compile-time switch: 0 → geoms, 1 → joints, 2 → equality
    constraints. Geoms are never batched per environment; joints are batched only when
    ``batch_joints_info`` is set; equalities are always batched.
    """
    if qd.static(constraint_type == 0):  # geometries
        qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
        # envs_idx is ignored here: geom solver parameters are shared across envs.
        for i_g_ in range(inputs_idx.shape[0]):
            for j in qd.static(range(7)):
                geoms_info.sol_params[inputs_idx[i_g_]][j] = sol_params[i_g_, j]
    if qd.static(constraint_type == 1):  # joints
        qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
        if qd.static(static_rigid_sim_config.batch_joints_info):
            for i_j_, i_b_ in qd.ndrange(inputs_idx.shape[0], envs_idx.shape[0]):
                for j in qd.static(range(7)):
                    joints_info.sol_params[inputs_idx[i_j_], envs_idx[i_b_]][j] = sol_params[i_b_, i_j_, j]
        else:
            for i_j_ in range(inputs_idx.shape[0]):
                for j in qd.static(range(7)):
                    joints_info.sol_params[inputs_idx[i_j_]][j] = sol_params[i_j_, j]
    if qd.static(constraint_type == 2):  # equalities
        qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
        for i_eq_, i_b_ in qd.ndrange(inputs_idx.shape[0], envs_idx.shape[0]):
            for j in qd.static(range(7)):
                equalities_info.sol_params[inputs_idx[i_eq_], envs_idx[i_b_]][j] = sol_params[i_b_, i_eq_, j]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_kp(
    kp: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_info: array_class.DofsInfo,
    static_rigid_sim_config: qd.template(),
):
    """Write the positional control gain (kp) of the selected DOFs."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    if qd.static(static_rigid_sim_config.batch_dofs_info):
        # Batched DOF info: one gain per (dof, env) pair.
        for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
            i_d = dofs_idx[i_d_]
            i_b = envs_idx[i_b_]
            dofs_info.kp[i_d, i_b] = kp[i_b_, i_d_]
    else:
        # Shared DOF info: one gain per dof across all environments.
        for i_d_ in range(dofs_idx.shape[0]):
            dofs_info.kp[dofs_idx[i_d_]] = kp[i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_kv(
    kv: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_info: array_class.DofsInfo,
    static_rigid_sim_config: qd.template(),
):
    """Write the velocity control gain (kv) of the selected DOFs."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    if qd.static(static_rigid_sim_config.batch_dofs_info):
        # Batched DOF info: one gain per (dof, env) pair.
        for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
            i_d = dofs_idx[i_d_]
            i_b = envs_idx[i_b_]
            dofs_info.kv[i_d, i_b] = kv[i_b_, i_d_]
    else:
        # Shared DOF info: one gain per dof across all environments.
        for i_d_ in range(dofs_idx.shape[0]):
            dofs_info.kv[dofs_idx[i_d_]] = kv[i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_force_range(
    lower: qd.types.ndarray(),
    upper: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_info: array_class.DofsInfo,
    static_rigid_sim_config: qd.template(),
):
    """Write the actuation force range [lower, upper] of the selected DOFs."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    if qd.static(static_rigid_sim_config.batch_dofs_info):
        # Batched DOF info: one range per (dof, env) pair.
        for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
            i_d = dofs_idx[i_d_]
            i_b = envs_idx[i_b_]
            dofs_info.force_range[i_d, i_b][0] = lower[i_b_, i_d_]
            dofs_info.force_range[i_d, i_b][1] = upper[i_b_, i_d_]
    else:
        # Shared DOF info: one range per dof across all environments.
        for i_d_ in range(dofs_idx.shape[0]):
            i_d = dofs_idx[i_d_]
            dofs_info.force_range[i_d][0] = lower[i_d_]
            dofs_info.force_range[i_d][1] = upper[i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_stiffness(
    stiffness: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_info: array_class.DofsInfo,
    static_rigid_sim_config: qd.template(),
):
    """Write the passive stiffness of the selected DOFs."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    if qd.static(static_rigid_sim_config.batch_dofs_info):
        # Batched DOF info: one value per (dof, env) pair.
        for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
            i_d = dofs_idx[i_d_]
            i_b = envs_idx[i_b_]
            dofs_info.stiffness[i_d, i_b] = stiffness[i_b_, i_d_]
    else:
        # Shared DOF info: one value per dof across all environments.
        for i_d_ in range(dofs_idx.shape[0]):
            dofs_info.stiffness[dofs_idx[i_d_]] = stiffness[i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_armature(
    armature: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_info: array_class.DofsInfo,
    static_rigid_sim_config: qd.template(),
):
    """Write the armature (rotor inertia) of the selected DOFs."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    if qd.static(static_rigid_sim_config.batch_dofs_info):
        # Batched DOF info: one value per (dof, env) pair.
        for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
            i_d = dofs_idx[i_d_]
            i_b = envs_idx[i_b_]
            dofs_info.armature[i_d, i_b] = armature[i_b_, i_d_]
    else:
        # Shared DOF info: one value per dof across all environments.
        for i_d_ in range(dofs_idx.shape[0]):
            dofs_info.armature[dofs_idx[i_d_]] = armature[i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_damping(
    damping: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_info: array_class.DofsInfo,
    static_rigid_sim_config: qd.template(),
):
    """Write the passive damping of the selected DOFs."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    if qd.static(static_rigid_sim_config.batch_dofs_info):
        # Batched DOF info: one value per (dof, env) pair.
        for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
            i_d = dofs_idx[i_d_]
            i_b = envs_idx[i_b_]
            dofs_info.damping[i_d, i_b] = damping[i_b_, i_d_]
    else:
        # Shared DOF info: one value per dof across all environments.
        for i_d_ in range(dofs_idx.shape[0]):
            dofs_info.damping[dofs_idx[i_d_]] = damping[i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_frictionloss(
    frictionloss: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_info: array_class.DofsInfo,
    static_rigid_sim_config: qd.template(),
):
    """Write the dry friction loss of the selected DOFs."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    if qd.static(static_rigid_sim_config.batch_dofs_info):
        # Batched DOF info: one value per (dof, env) pair.
        for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
            i_d = dofs_idx[i_d_]
            i_b = envs_idx[i_b_]
            dofs_info.frictionloss[i_d, i_b] = frictionloss[i_b_, i_d_]
    else:
        # Shared DOF info: one value per dof across all environments.
        for i_d_ in range(dofs_idx.shape[0]):
            dofs_info.frictionloss[dofs_idx[i_d_]] = frictionloss[i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_limit(
    lower: qd.types.ndarray(),
    upper: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_info: array_class.DofsInfo,
    static_rigid_sim_config: qd.template(),
):
    """Write the position limits [lower, upper] of the selected DOFs."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    if qd.static(static_rigid_sim_config.batch_dofs_info):
        # Batched DOF info: one limit pair per (dof, env).
        for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
            i_d = dofs_idx[i_d_]
            i_b = envs_idx[i_b_]
            dofs_info.limit[i_d, i_b][0] = lower[i_b_, i_d_]
            dofs_info.limit[i_d, i_b][1] = upper[i_b_, i_d_]
    else:
        # Shared DOF info: one limit pair per dof across all environments.
        for i_d_ in range(dofs_idx.shape[0]):
            i_d = dofs_idx[i_d_]
            dofs_info.limit[i_d][0] = lower[i_d_]
            dofs_info.limit[i_d][1] = upper[i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_velocity(
    velocity: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_state: array_class.DofsState,
    static_rigid_sim_config: qd.template(),
):
    """Directly overwrite the velocity of the selected DOFs."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
        i_d = dofs_idx[i_d_]
        i_b = envs_idx[i_b_]
        dofs_state.vel[i_d, i_b] = velocity[i_b_, i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_velocity_grad(
    velocity_grad: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_state: array_class.DofsState,
    static_rigid_sim_config: qd.template(),
):
    """
    Backward counterpart of kernel_set_dofs_velocity: fetch the accumulated velocity
    gradients of the selected DOFs into ``velocity_grad`` and zero them in the solver.
    """
    # NOTE(review): this kernel serializes below PARA_LEVEL.PARTIAL whereas its forward
    # counterpart uses PARA_LEVEL.ALL — confirm this asymmetry is intentional.
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.PARTIAL)
    for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
        velocity_grad[i_b_, i_d_] = dofs_state.vel.grad[dofs_idx[i_d_], envs_idx[i_b_]]
        dofs_state.vel.grad[dofs_idx[i_d_], envs_idx[i_b_]] = 0.0
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_zero_velocity(
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_state: array_class.DofsState,
    static_rigid_sim_config: qd.template(),
):
    """Reset the velocity of the selected DOFs to zero."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
        i_d = dofs_idx[i_d_]
        i_b = envs_idx[i_b_]
        dofs_state.vel[i_d, i_b] = 0.0
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_dofs_position(
    position: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_state: array_class.DofsState,
    links_info: array_class.LinksInfo,
    joints_info: array_class.JointsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Set dofs_state.pos for the selected DOFs, then rebuild qpos for *all* entities from
    dofs_state.pos so that forward kinematics (which reads qpos) sees the update.

    The qpos reconstruction dispatches on the joint type of each link's first joint:
    FIXED contributes nothing; FREE maps 3 translation DOFs plus 3 rotation DOFs
    (converted to a quaternion via gu.qd_xyz_to_quat) to 7 qpos entries; SPHERICAL maps
    3 rotation DOFs to 4 quaternion entries; revolute/prismatic DOFs are offsets from
    the reference configuration qpos0.
    """
    n_entities = entities_info.link_start.shape[0]
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
        dofs_state.pos[dofs_idx[i_d_], envs_idx[i_b_]] = position[i_b_, i_d_]
    # Note that qpos must be updated, as dofs_state.pos is not used for actual IK.
    # TODO: Make this more efficient by only taking care of releavant qs/dofs.
    # (Top-level loops execute sequentially, so this loop sees the writes above.)
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_e, i_b_ in qd.ndrange(n_entities, envs_idx.shape[0]):
        i_b = envs_idx[i_b_]
        for i_l in range(entities_info.link_start[i_e], entities_info.link_end[i_e]):
            I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
            if links_info.n_dofs[I_l] == 0:
                continue
            dof_start = links_info.dof_start[I_l]
            q_start = links_info.q_start[I_l]
            i_j = links_info.joint_start[I_l]
            I_j = [i_j, i_b] if qd.static(static_rigid_sim_config.batch_joints_info) else i_j
            joint_type = joints_info.type[I_j]
            if joint_type == gs.JOINT_TYPE.FIXED:
                pass
            elif joint_type == gs.JOINT_TYPE.FREE:
                # DOFs 3..5 hold the rotation; convert to a quaternion for qpos.
                xyz = qd.Vector(
                    [
                        dofs_state.pos[0 + 3 + dof_start, i_b],
                        dofs_state.pos[1 + 3 + dof_start, i_b],
                        dofs_state.pos[2 + 3 + dof_start, i_b],
                    ],
                    dt=gs.qd_float,
                )
                quat = gu.qd_xyz_to_quat(xyz)
                for j in qd.static(range(3)):
                    rigid_global_info.qpos[j + q_start, i_b] = dofs_state.pos[j + dof_start, i_b]
                for j in qd.static(range(4)):
                    rigid_global_info.qpos[j + 3 + q_start, i_b] = quat[j]
            elif joint_type == gs.JOINT_TYPE.SPHERICAL:
                # All 3 DOFs are rotation; qpos stores the equivalent quaternion.
                xyz = qd.Vector(
                    [
                        dofs_state.pos[0 + dof_start, i_b],
                        dofs_state.pos[1 + dof_start, i_b],
                        dofs_state.pos[2 + dof_start, i_b],
                    ],
                    dt=gs.qd_float,
                )
                quat = gu.qd_xyz_to_quat(xyz)
                for i_q_ in qd.static(range(4)):
                    i_q = q_start + i_q_
                    rigid_global_info.qpos[i_q, i_b] = quat[i_q_]
            else:  # (gs.JOINT_TYPE.REVOLUTE, gs.JOINT_TYPE.PRISMATIC)
                # 1-DOF joints: dof positions are offsets from the reference qpos0.
                for i_d_ in range(links_info.dof_end[I_l] - dof_start):
                    i_q = q_start + i_d_
                    i_d = dof_start + i_d_
                    rigid_global_info.qpos[i_q, i_b] = rigid_global_info.qpos0[i_q, i_b] + dofs_state.pos[i_d, i_b]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_control_dofs_force(
    force: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_state: array_class.DofsState,
    static_rigid_sim_config: qd.template(),
):
    """Switch the selected DOFs to FORCE control and set the commanded force."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
        i_d = dofs_idx[i_d_]
        i_b = envs_idx[i_b_]
        dofs_state.ctrl_mode[i_d, i_b] = gs.CTRL_MODE.FORCE
        dofs_state.ctrl_force[i_d, i_b] = force[i_b_, i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_control_dofs_velocity(
    velocity: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_state: array_class.DofsState,
    static_rigid_sim_config: qd.template(),
):
    """Switch the selected DOFs to VELOCITY control and set the target velocity."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
        dofs_state.ctrl_mode[dofs_idx[i_d_], envs_idx[i_b_]] = gs.CTRL_MODE.VELOCITY
        dofs_state.ctrl_vel[dofs_idx[i_d_], envs_idx[i_b_]] = velocity[i_b_, i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_control_dofs_position(
    position: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_state: array_class.DofsState,
    static_rigid_sim_config: qd.template(),
):
    """Switch the selected DOFs to POSITION control with the given target and zero target velocity."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
        dofs_state.ctrl_mode[dofs_idx[i_d_], envs_idx[i_b_]] = gs.CTRL_MODE.POSITION
        dofs_state.ctrl_pos[dofs_idx[i_d_], envs_idx[i_b_]] = position[i_b_, i_d_]
        # Pure position control: no feed-forward velocity target.
        dofs_state.ctrl_vel[dofs_idx[i_d_], envs_idx[i_b_]] = 0.0
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_control_dofs_position_velocity(
    position: qd.types.ndarray(),
    velocity: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_state: array_class.DofsState,
    static_rigid_sim_config: qd.template(),
):
    """Switch the selected DOFs to POSITION control with both position and velocity targets."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.PARTIAL))
    for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
        dofs_state.ctrl_mode[dofs_idx[i_d_], envs_idx[i_b_]] = gs.CTRL_MODE.POSITION
        dofs_state.ctrl_pos[dofs_idx[i_d_], envs_idx[i_b_]] = position[i_b_, i_d_]
        dofs_state.ctrl_vel[dofs_idx[i_d_], envs_idx[i_b_]] = velocity[i_b_, i_d_]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_get_links_vel(
    tensor: qd.types.ndarray(),
    links_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    ref: qd.template(),
    links_state: array_class.LinksState,
    static_rigid_sim_config: qd.template(),
):
    """
    Fetch the linear velocity of the selected links, expressed at a reference point
    chosen by the compile-time ``ref`` switch: 1 → link's COM, 2 → link's origin,
    anything else → the COM position where cd_vel is stored, unshifted.
    """
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_l_, i_b_ in qd.ndrange(links_idx.shape[0], envs_idx.shape[0]):
        # This is the velocity in world coordinates expressed at global com-position
        vel = links_state.cd_vel[links_idx[i_l_], envs_idx[i_b_]]  # entity's CoM
        # Translate to get the velocity expressed at a different position if necessary link-position
        # (v_p = v + w x r, with r the offset from the stored reference point to p)
        if qd.static(ref == 1):  # link's CoM
            vel = vel + links_state.cd_ang[links_idx[i_l_], envs_idx[i_b_]].cross(
                links_state.i_pos[links_idx[i_l_], envs_idx[i_b_]]
            )
        if qd.static(ref == 2):  # link's origin
            vel = vel + links_state.cd_ang[links_idx[i_l_], envs_idx[i_b_]].cross(
                links_state.pos[links_idx[i_l_], envs_idx[i_b_]] - links_state.root_COM[links_idx[i_l_], envs_idx[i_b_]]
            )
        for j in qd.static(range(3)):
            tensor[i_b_, i_l_, j] = vel[j]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_get_links_acc(
    tensor: qd.types.ndarray(),
    links_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    links_state: array_class.LinksState,
    static_rigid_sim_config: qd.template(),
):
    """
    Fetch the classical linear acceleration of the selected links, expressed at each
    link's origin in world coordinates (converted from the solver's spatial
    accelerations, which are stored at the root COM).
    """
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_l_, i_b_ in qd.ndrange(links_idx.shape[0], envs_idx.shape[0]):
        i_l = links_idx[i_l_]
        i_b = envs_idx[i_b_]
        # Compute links spatial acceleration expressed at links origin in world coordinates
        cpos = links_state.pos[i_l, i_b] - links_state.root_COM[i_l, i_b]
        acc_ang = links_state.cacc_ang[i_l, i_b]
        acc_lin = links_state.cacc_lin[i_l, i_b] + acc_ang.cross(cpos)
        # Compute links classical linear acceleration expressed at links origin in world coordinates
        ang = links_state.cd_ang[i_l, i_b]
        vel = links_state.cd_vel[i_l, i_b] + ang.cross(cpos)
        acc_classic_lin = acc_lin + ang.cross(vel)
        for j in qd.static(range(3)):
            tensor[i_b_, i_l_, j] = acc_classic_lin[j]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_get_dofs_control_force(
    tensor: qd.types.ndarray(),
    dofs_idx: qd.types.ndarray(),
    envs_idx: qd.types.ndarray(),
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Evaluate the control force that the active controller of each selected DOF would
    currently apply, clamped to the DOF's force range.

    Replicates the controller law per control mode: FORCE → commanded force, VELOCITY →
    kv * velocity error, POSITION → kp * position error + kv * velocity error.
    """
    # we need to compute control force here because this won't be computed until the next actual simulation step
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_d_, i_b_ in qd.ndrange(dofs_idx.shape[0], envs_idx.shape[0]):
        i_d = dofs_idx[i_d_]
        i_b = envs_idx[i_b_]
        I_d = [i_d, i_b] if qd.static(static_rigid_sim_config.batch_dofs_info) else i_d
        force = gs.qd_float(0.0)
        if dofs_state.ctrl_mode[i_d, i_b] == gs.CTRL_MODE.FORCE:
            force = dofs_state.ctrl_force[i_d, i_b]
        elif dofs_state.ctrl_mode[i_d, i_b] == gs.CTRL_MODE.VELOCITY:
            force = dofs_info.kv[I_d] * (dofs_state.ctrl_vel[i_d, i_b] - dofs_state.vel[i_d, i_b])
        elif dofs_state.ctrl_mode[i_d, i_b] == gs.CTRL_MODE.POSITION:
            force = dofs_info.kp[I_d] * (dofs_state.ctrl_pos[i_d, i_b] - dofs_state.pos[i_d, i_b]) + dofs_info.kv[
                I_d
            ] * (dofs_state.ctrl_vel[i_d, i_b] - dofs_state.vel[i_d, i_b])
        # Saturate to the DOF's actuation limits, as the solver would.
        tensor[i_b_, i_d_] = qd.math.clamp(
            force,
            dofs_info.force_range[I_d][0],
            dofs_info.force_range[I_d][1],
        )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_drone_rpm(
    propellers_link_idx: qd.types.ndarray(),
    propellers_rpm: qd.types.ndarray(),
    propellers_spin: qd.types.ndarray(),
    KF: qd.float32,
    KM: qd.float32,
    invert: qd.i32,
    links_state: array_class.LinksState,
    static_rigid_sim_config: qd.template(),
):
    """
    Set the RPM of propellers of a drone entity.
    This method should only be called by drone entities.

    Uses the standard quadratic rotor model: thrust = KF * rpm^2 along z,
    yaw torque = KM * rpm^2 about z, signed by the propeller spin direction.
    A non-zero `invert` negates the torque.
    """
    n_propellers = propellers_link_idx.shape[0]
    _B = propellers_rpm.shape[0]
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_b in range(_B):
        for i_prop in range(n_propellers):
            i_l = propellers_link_idx[i_prop]
            force = qd.Vector([0.0, 0.0, propellers_rpm[i_b, i_prop] ** 2 * KF], dt=gs.qd_float)
            torque = qd.Vector(
                [0.0, 0.0, propellers_rpm[i_b, i_prop] ** 2 * KM * propellers_spin[i_prop]], dt=gs.qd_float
            )
            if invert:
                torque = -torque
            # NOTE(review): the two literal 1s select the reference frame / application
            # mode of the apply helpers — confirm against their signatures.
            func_apply_link_external_force(force, i_l, i_b, 1, 1, links_state)
            func_apply_link_external_torque(torque, i_l, i_b, 1, 1, links_state)
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_update_drone_propeller_vgeoms(
    propellers_vgeom_idxs: qd.types.ndarray(),
    propellers_revs: qd.types.ndarray(),
    propellers_spin: qd.types.ndarray(),
    vgeoms_state: array_class.VGeomsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Update the angle of the vgeom in the propellers of a drone entity.

    Each propeller's visual geometry is rotated about z by the angle swept in
    one substep: revs [rev/min] * spin * dt [s] * pi / 30 converts
    revolutions-per-minute into radians (2*pi/60 == pi/30).
    """
    EPS = rigid_global_info.EPS[None]
    n_propellers = propellers_vgeom_idxs.shape[0]
    _B = propellers_revs.shape[1]
    for i_pp, i_b in qd.ndrange(n_propellers, _B):
        i_vg = propellers_vgeom_idxs[i_pp]
        rad = (
            propellers_revs[i_pp, i_b] * propellers_spin[i_pp] * rigid_global_info.substep_dt[None] * qd.math.pi / 30.0
        )
        # Compose the incremental z-rotation with the current orientation.
        vgeoms_state.quat[i_vg, i_b] = gu.qd_transform_quat_by_quat(
            gu.qd_rotvec_to_quat(qd.Vector([0.0, 0.0, rad], dt=gs.qd_float), EPS),
            vgeoms_state.quat[i_vg, i_b],
        )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_geom_friction(geoms_idx: qd.i32, friction: qd.f32, geoms_info: array_class.GeomsInfo):
    """Set the friction coefficient of a single geometry (not batched per env)."""
    geoms_info.friction[geoms_idx] = friction
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_set_geoms_friction(
    friction: qd.types.ndarray(),
    geoms_idx: qd.types.ndarray(),
    geoms_info: array_class.GeomsInfo,
    static_rigid_sim_config: qd.template(),
):
    """Write one friction coefficient per selected geometry (friction[k] -> geoms_idx[k])."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for k in range(geoms_idx.shape[0]):
        i_g = geoms_idx[k]
        geoms_info.friction[i_g] = friction[k]
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/abd/accessor.py",
"license": "Apache License 2.0",
"lines": 889,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/abd/diff.py | """
Backward pass functions for the rigid body solver.
This module contains functions used during the backward pass (gradient computation)
of the rigid body simulation. These functions handle:
- Copying state between next and current time steps
- Saving and loading adjoint cache for gradient computation
- Preparing and beginning backward substeps
- Gradient validity checking
- Cartesian space copying for adjoint computation
- Acceleration copying and dq integration
These functions are extracted from the main rigid_solver module to improve
code organization and maintainability.
"""
import quadrants as qd
import genesis as gs
import genesis.utils.geom as gu
import genesis.utils.array_class as array_class
from .forward_kinematics import func_update_cartesian_space
@qd.func
def func_copy_next_to_curr(
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    errno: array_class.V_ANNOTATION,
):
    """
    Commit the "next" buffers (vel_next / qpos_next) into the current state.

    Per environment, the copy happens only if no next-step value is NaN;
    otherwise the environment keeps its previous state and the
    INVALID_ACC_NAN flag is OR-ed into its `errno` entry, so a single NaN
    cannot irreversibly corrupt the simulation state.
    """
    n_qs = rigid_global_info.qpos.shape[0]
    n_dofs = dofs_state.vel.shape[0]
    _B = dofs_state.vel.shape[1]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_b in range(_B):
        # Prevent nan propagation: validate the whole batch before copying anything.
        is_valid = True
        for i_d in range(n_dofs):
            e = dofs_state.vel_next[i_d, i_b]
            is_valid &= not qd.math.isnan(e)
        for i_q in range(n_qs):
            e = rigid_global_info.qpos_next[i_q, i_b]
            is_valid &= not qd.math.isnan(e)
        if is_valid:
            for i_d in range(n_dofs):
                dofs_state.vel[i_d, i_b] = dofs_state.vel_next[i_d, i_b]
            for i_q in range(n_qs):
                rigid_global_info.qpos[i_q, i_b] = rigid_global_info.qpos_next[i_q, i_b]
        else:
            errno[i_b] = errno[i_b] | array_class.ErrorCode.INVALID_ACC_NAN
@qd.func
def func_copy_next_to_curr_grad(
    f: qd.int32,
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    rigid_adjoint_cache: array_class.RigidAdjointCache,
    static_rigid_sim_config: qd.template(),
):
    """
    Backward counterpart of `func_copy_next_to_curr`.

    Moves the gradients accumulated on the current state (vel / qpos) onto
    the "next" buffers (vel_next / qpos_next), zeroes the current-state
    gradients, and restores the primal current state from the adjoint cache
    of substep `f`.
    """
    n_dofs = dofs_state.vel.shape[0]
    n_qs = rigid_global_info.qpos.shape[0]
    _B = dofs_state.vel.shape[1]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_d, i_b in qd.ndrange(n_dofs, _B):
        dofs_state.vel_next.grad[i_d, i_b] = dofs_state.vel.grad[i_d, i_b]
        dofs_state.vel.grad[i_d, i_b] = 0.0
        dofs_state.vel[i_d, i_b] = rigid_adjoint_cache.dofs_vel[f, i_d, i_b]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_q, i_b in qd.ndrange(n_qs, _B):
        rigid_global_info.qpos_next.grad[i_q, i_b] = rigid_global_info.qpos.grad[i_q, i_b]
        rigid_global_info.qpos.grad[i_q, i_b] = 0.0
        rigid_global_info.qpos[i_q, i_b] = rigid_adjoint_cache.qpos[f, i_q, i_b]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_save_adjoint_cache(
    f: qd.int32,
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    rigid_adjoint_cache: array_class.RigidAdjointCache,
    static_rigid_sim_config: qd.template(),
):
    """Kernel entry point: snapshot the state of substep `f` into the adjoint cache."""
    func_save_adjoint_cache(
        f=f,
        dofs_state=dofs_state,
        rigid_global_info=rigid_global_info,
        rigid_adjoint_cache=rigid_adjoint_cache,
        static_rigid_sim_config=static_rigid_sim_config,
    )
@qd.func
def func_save_adjoint_cache(
    f: qd.int32,
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    rigid_adjoint_cache: array_class.RigidAdjointCache,
    static_rigid_sim_config: qd.template(),
):
    """Snapshot dofs vel/acc and qpos of substep `f` into the adjoint cache."""
    n_dofs = dofs_state.vel.shape[0]
    n_qs = rigid_global_info.qpos.shape[0]
    _B = dofs_state.vel.shape[1]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_d, i_b in qd.ndrange(n_dofs, _B):
        rigid_adjoint_cache.dofs_vel[f, i_d, i_b] = dofs_state.vel[i_d, i_b]
        rigid_adjoint_cache.dofs_acc[f, i_d, i_b] = dofs_state.acc[i_d, i_b]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_q, i_b in qd.ndrange(n_qs, _B):
        rigid_adjoint_cache.qpos[f, i_q, i_b] = rigid_global_info.qpos[i_q, i_b]
@qd.func
def func_load_adjoint_cache(
    f: qd.int32,
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    rigid_adjoint_cache: array_class.RigidAdjointCache,
    static_rigid_sim_config: qd.template(),
):
    """Restore dofs vel/acc and qpos of substep `f` from the adjoint cache (inverse of save)."""
    n_dofs = dofs_state.vel.shape[0]
    n_qs = rigid_global_info.qpos.shape[0]
    _B = dofs_state.vel.shape[1]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_d, i_b in qd.ndrange(n_dofs, _B):
        dofs_state.vel[i_d, i_b] = rigid_adjoint_cache.dofs_vel[f, i_d, i_b]
        dofs_state.acc[i_d, i_b] = rigid_adjoint_cache.dofs_acc[f, i_d, i_b]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_q, i_b in qd.ndrange(n_qs, _B):
        rigid_global_info.qpos[i_q, i_b] = rigid_adjoint_cache.qpos[f, i_q, i_b]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_prepare_backward_substep(
    f: qd.int32,
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    dofs_state_adjoint_cache: array_class.DofsState,
    links_state_adjoint_cache: array_class.LinksState,
    joints_state_adjoint_cache: array_class.JointsState,
    geoms_state_adjoint_cache: array_class.GeomsState,
    rigid_adjoint_cache: array_class.RigidAdjointCache,
    static_rigid_sim_config: qd.template(),
):
    """
    Prepare the primal state needed to run the backward pass of substep `f`.

    Reloads vel/acc/qpos from the adjoint cache, then — when mujoco
    compatibility is disabled — recomputes the cartesian-space quantities
    and snapshots them into the dedicated *_adjoint_cache state structs,
    because later kernels overwrite them in that mode.
    """
    # Load the current state from adjoint cache
    func_load_adjoint_cache(
        f=f,
        dofs_state=dofs_state,
        rigid_global_info=rigid_global_info,
        rigid_adjoint_cache=rigid_adjoint_cache,
        static_rigid_sim_config=static_rigid_sim_config,
    )
    # If mujoco compatibility is disabled, update the cartesian space and save the results to adjoint cache. This is
    # because the cartesian space is overwritten later by other kernels if mujoco compatibility was disabled.
    if qd.static(not static_rigid_sim_config.enable_mujoco_compatibility):
        func_update_cartesian_space(
            links_state=links_state,
            links_info=links_info,
            joints_state=joints_state,
            joints_info=joints_info,
            dofs_state=dofs_state,
            dofs_info=dofs_info,
            geoms_state=geoms_state,
            geoms_info=geoms_info,
            entities_info=entities_info,
            rigid_global_info=rigid_global_info,
            static_rigid_sim_config=static_rigid_sim_config,
            force_update_fixed_geoms=False,
            is_backward=True,
        )
        # FIXME: Parameter pruning for ndarray is buggy for now and requires match variable and arg names.
        # Save results of [update_cartesian_space] to adjoint cache
        func_copy_cartesian_space(
            dofs_state=dofs_state,
            links_state=links_state,
            joints_state=joints_state,
            geoms_state=geoms_state,
            dofs_state_adjoint_cache=dofs_state_adjoint_cache,
            links_state_adjoint_cache=links_state_adjoint_cache,
            joints_state_adjoint_cache=joints_state_adjoint_cache,
            geoms_state_adjoint_cache=geoms_state_adjoint_cache,
            static_rigid_sim_config=static_rigid_sim_config,
        )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_begin_backward_substep(
    f: qd.int32,
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    dofs_state_adjoint_cache: array_class.DofsState,
    links_state_adjoint_cache: array_class.LinksState,
    joints_state_adjoint_cache: array_class.JointsState,
    geoms_state_adjoint_cache: array_class.GeomsState,
    rigid_adjoint_cache: array_class.RigidAdjointCache,
    static_rigid_sim_config: qd.template(),
) -> qd.i32:
    """
    Begin the backward pass of substep `f`.

    Returns 1 if the incoming gradients are NaN-free, 0 otherwise. When the
    gradients are valid, moves the current-state gradients onto the "next"
    buffers, restores the primal state from the adjoint cache, and — when
    mujoco compatibility is disabled — re-saves the cartesian-space
    quantities to the adjoint caches, since later kernels overwrite them in
    that mode. When invalid, all state is left untouched so the caller can
    abort gradient propagation.
    """
    is_grad_valid = func_is_grad_valid(
        rigid_global_info=rigid_global_info,
        dofs_state=dofs_state,
        static_rigid_sim_config=static_rigid_sim_config,
    )
    if is_grad_valid:
        func_copy_next_to_curr_grad(
            f=f,
            dofs_state=dofs_state,
            rigid_global_info=rigid_global_info,
            rigid_adjoint_cache=rigid_adjoint_cache,
            static_rigid_sim_config=static_rigid_sim_config,
        )
        # Resolve the compile-time template flag with qd.static, consistent with
        # kernel_prepare_backward_substep, so the dead branch is pruned at compile time.
        if qd.static(not static_rigid_sim_config.enable_mujoco_compatibility):
            # FIXME: Parameter pruning for ndarray is buggy for now and requires match variable and arg names.
            # Save results of [update_cartesian_space] to adjoint cache
            func_copy_cartesian_space(
                dofs_state=dofs_state,
                links_state=links_state,
                joints_state=joints_state,
                geoms_state=geoms_state,
                dofs_state_adjoint_cache=dofs_state_adjoint_cache,
                links_state_adjoint_cache=links_state_adjoint_cache,
                joints_state_adjoint_cache=joints_state_adjoint_cache,
                geoms_state_adjoint_cache=geoms_state_adjoint_cache,
                static_rigid_sim_config=static_rigid_sim_config,
            )
    return is_grad_valid
@qd.func
def func_is_grad_valid(
    rigid_global_info: array_class.RigidGlobalInfo,
    dofs_state: array_class.DofsState,
    static_rigid_sim_config: qd.template(),
):
    """
    Return True iff no NaN appears in the qpos and vel gradients.

    Scans every entry without early exit; the flag is combined as a plain
    reduction so the loops remain valid under the configured loop_config.
    """
    is_valid = True
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for I in qd.grouped(qd.ndrange(*rigid_global_info.qpos.shape)):
        if qd.math.isnan(rigid_global_info.qpos.grad[I]):
            is_valid = False
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for I in qd.grouped(qd.ndrange(*dofs_state.vel.shape)):
        if qd.math.isnan(dofs_state.vel.grad[I]):
            is_valid = False
    return is_valid
@qd.func
def func_copy_cartesian_space(
    dofs_state: array_class.DofsState,
    links_state: array_class.LinksState,
    joints_state: array_class.JointsState,
    geoms_state: array_class.GeomsState,
    dofs_state_adjoint_cache: array_class.DofsState,
    links_state_adjoint_cache: array_class.LinksState,
    joints_state_adjoint_cache: array_class.JointsState,
    geoms_state_adjoint_cache: array_class.GeomsState,
    static_rigid_sim_config: qd.template(),
):
    """
    Copy the cartesian-space fields of dofs/links/joints/geoms state into the
    corresponding adjoint-cache structs, field by field.
    """
    # Copy outputs of [kernel_update_cartesian_space] among [dofs, links, joints, geoms] states. This is used to restore
    # the outputs that were overwritten if we disabled mujoco compatibility for backward pass.
    # dofs state
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for I in qd.grouped(qd.ndrange(*dofs_state.pos.shape)):
        # pos, cdof_ang, cdof_vel, cdofvel_ang, cdofvel_vel, cdofd_ang, cdofd_vel
        dofs_state_adjoint_cache.pos[I] = dofs_state.pos[I]
        dofs_state_adjoint_cache.cdof_ang[I] = dofs_state.cdof_ang[I]
        dofs_state_adjoint_cache.cdof_vel[I] = dofs_state.cdof_vel[I]
        dofs_state_adjoint_cache.cdofvel_ang[I] = dofs_state.cdofvel_ang[I]
        dofs_state_adjoint_cache.cdofvel_vel[I] = dofs_state.cdofvel_vel[I]
        dofs_state_adjoint_cache.cdofd_ang[I] = dofs_state.cdofd_ang[I]
        dofs_state_adjoint_cache.cdofd_vel[I] = dofs_state.cdofd_vel[I]
    # links state
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for I in qd.grouped(qd.ndrange(*links_state.pos.shape)):
        # pos, quat, root_COM, mass_sum, i_pos, i_quat, cinr_inertial, cinr_pos, cinr_quat, cinr_mass, j_pos, j_quat,
        # cd_vel, cd_ang
        links_state_adjoint_cache.pos[I] = links_state.pos[I]
        links_state_adjoint_cache.quat[I] = links_state.quat[I]
        links_state_adjoint_cache.root_COM[I] = links_state.root_COM[I]
        links_state_adjoint_cache.mass_sum[I] = links_state.mass_sum[I]
        links_state_adjoint_cache.i_pos[I] = links_state.i_pos[I]
        links_state_adjoint_cache.i_quat[I] = links_state.i_quat[I]
        links_state_adjoint_cache.cinr_inertial[I] = links_state.cinr_inertial[I]
        links_state_adjoint_cache.cinr_pos[I] = links_state.cinr_pos[I]
        links_state_adjoint_cache.cinr_quat[I] = links_state.cinr_quat[I]
        links_state_adjoint_cache.cinr_mass[I] = links_state.cinr_mass[I]
        links_state_adjoint_cache.j_pos[I] = links_state.j_pos[I]
        links_state_adjoint_cache.j_quat[I] = links_state.j_quat[I]
        links_state_adjoint_cache.cd_vel[I] = links_state.cd_vel[I]
        links_state_adjoint_cache.cd_ang[I] = links_state.cd_ang[I]
    # joints state
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for I in qd.grouped(qd.ndrange(*joints_state.xanchor.shape)):
        # xanchor, xaxis
        joints_state_adjoint_cache.xanchor[I] = joints_state.xanchor[I]
        joints_state_adjoint_cache.xaxis[I] = joints_state.xaxis[I]
    # geoms state
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for I in qd.grouped(qd.ndrange(*geoms_state.pos.shape)):
        # pos, quat, verts_updated
        geoms_state_adjoint_cache.pos[I] = geoms_state.pos[I]
        geoms_state_adjoint_cache.quat[I] = geoms_state.quat[I]
        geoms_state_adjoint_cache.verts_updated[I] = geoms_state.verts_updated[I]
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_copy_acc(
    f: qd.int32,
    dofs_state: array_class.DofsState,
    rigid_adjoint_cache: array_class.RigidAdjointCache,
    static_rigid_sim_config: qd.template(),
):
    """Restore the DoF accelerations cached for substep `f` into the live state."""
    num_dofs = dofs_state.vel.shape[0]
    num_envs = dofs_state.vel.shape[1]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_dof, i_env in qd.ndrange(num_dofs, num_envs):
        dofs_state.acc[i_dof, i_env] = rigid_adjoint_cache.dofs_acc[f, i_dof, i_env]
@qd.func
def func_integrate_dq_entity(
    dq,
    i_e,
    i_b,
    respect_joint_limit,
    links_info: array_class.LinksInfo,
    joints_info: array_class.JointsInfo,
    dofs_info: array_class.DofsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Integrate a tangent-space displacement `dq` into the generalized
    coordinates `qpos` of entity `i_e` in environment `i_b`.

    FREE joints add the positional part directly and convert the rotational
    part (a world-frame rotation vector) to a quaternion before composing it
    with the current orientation. FIXED joints are skipped. All other joints
    add `dq` component-wise, optionally clamped to the DoF limits when
    `respect_joint_limit` is set.
    """
    EPS = rigid_global_info.EPS[None]
    for i_l in range(entities_info.link_start[i_e], entities_info.link_end[i_e]):
        I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
        if links_info.n_dofs[I_l] == 0:
            continue
        i_j = links_info.joint_start[I_l]
        I_j = [i_j, i_b] if qd.static(static_rigid_sim_config.batch_joints_info) else i_j
        joint_type = joints_info.type[I_j]
        q_start = links_info.q_start[I_l]
        dof_start = links_info.dof_start[I_l]
        # dq rows are indexed relative to the entity's first DoF.
        dq_start = links_info.dof_start[I_l] - entities_info.dof_start[i_e]
        if joint_type == gs.JOINT_TYPE.FREE:
            pos = qd.Vector(
                [
                    rigid_global_info.qpos[q_start, i_b],
                    rigid_global_info.qpos[q_start + 1, i_b],
                    rigid_global_info.qpos[q_start + 2, i_b],
                ]
            )
            dpos = qd.Vector([dq[dq_start, i_b], dq[dq_start + 1, i_b], dq[dq_start + 2, i_b]])
            pos = pos + dpos
            quat = qd.Vector(
                [
                    rigid_global_info.qpos[q_start + 3, i_b],
                    rigid_global_info.qpos[q_start + 4, i_b],
                    rigid_global_info.qpos[q_start + 5, i_b],
                    rigid_global_info.qpos[q_start + 6, i_b],
                ]
            )
            dquat = gu.qd_rotvec_to_quat(
                qd.Vector([dq[dq_start + 3, i_b], dq[dq_start + 4, i_b], dq[dq_start + 5, i_b]], dt=gs.qd_float), EPS
            )
            quat = gu.qd_transform_quat_by_quat(
                quat, dquat
            )  # Note that this order is different from integrating vel. Here dq is w.r.t. the world frame.
            for j in qd.static(range(3)):
                rigid_global_info.qpos[q_start + j, i_b] = pos[j]
            for j in qd.static(range(4)):
                rigid_global_info.qpos[q_start + j + 3, i_b] = quat[j]
        elif joint_type == gs.JOINT_TYPE.FIXED:
            pass
        else:
            for i_d_ in range(links_info.n_dofs[I_l]):
                rigid_global_info.qpos[q_start + i_d_, i_b] = (
                    rigid_global_info.qpos[q_start + i_d_, i_b] + dq[dq_start + i_d_, i_b]
                )
                if respect_joint_limit:
                    I_d = (
                        [dof_start + i_d_, i_b]
                        if qd.static(static_rigid_sim_config.batch_dofs_info)
                        else dof_start + i_d_
                    )
                    rigid_global_info.qpos[q_start + i_d_, i_b] = qd.math.clamp(
                        rigid_global_info.qpos[q_start + i_d_, i_b],
                        dofs_info.limit[I_d][0],
                        dofs_info.limit[I_d][1],
                    )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/abd/diff.py",
"license": "Apache License 2.0",
"lines": 373,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/abd/forward_kinematics.py | """
Forward kinematics, velocity propagation, and geometry updates for rigid body simulation.
This module contains Quadrants kernels and functions for:
- Forward kinematics computation (link and joint pose updates)
- Velocity propagation through kinematic chains
- Geometry pose and vertex updates
- Center of mass calculations
- AABB updates for collision detection
- Hibernation management for inactive entities
"""
import quadrants as qd
import genesis as gs
import genesis.utils.geom as gu
import genesis.utils.array_class as array_class
from .misc import (
func_check_index_range,
func_read_field_if,
func_write_field_if,
func_write_and_read_field_if,
func_atomic_add_if,
)
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_forward_kinematics_links_geoms(
    envs_idx: qd.types.ndarray(),
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    For each environment listed in `envs_idx`, refresh all cartesian-space
    quantities (link/joint/geom poses, including fixed geoms) and then
    propagate velocities through the kinematic chains.
    """
    for i_b_ in range(envs_idx.shape[0]):
        i_b = envs_idx[i_b_]
        func_update_cartesian_space_batch(
            i_b=i_b,
            links_state=links_state,
            links_info=links_info,
            joints_state=joints_state,
            joints_info=joints_info,
            dofs_state=dofs_state,
            dofs_info=dofs_info,
            geoms_info=geoms_info,
            geoms_state=geoms_state,
            entities_info=entities_info,
            rigid_global_info=rigid_global_info,
            static_rigid_sim_config=static_rigid_sim_config,
            force_update_fixed_geoms=True,
            is_backward=False,
        )
        func_forward_velocity_batch(
            i_b=i_b,
            entities_info=entities_info,
            links_info=links_info,
            links_state=links_state,
            joints_info=joints_info,
            dofs_state=dofs_state,
            rigid_global_info=rigid_global_info,
            static_rigid_sim_config=static_rigid_sim_config,
            is_backward=False,
        )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_masked_forward_kinematics_links_geoms(
    envs_mask: qd.types.ndarray(),
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Mask-driven variant of `kernel_forward_kinematics_links_geoms`: refresh
    cartesian space and propagate velocities for every environment `i_b`
    whose `envs_mask[i_b]` entry is truthy.
    """
    for i_b in range(envs_mask.shape[0]):
        if envs_mask[i_b]:
            func_update_cartesian_space_batch(
                i_b=i_b,
                links_state=links_state,
                links_info=links_info,
                joints_state=joints_state,
                joints_info=joints_info,
                dofs_state=dofs_state,
                dofs_info=dofs_info,
                geoms_info=geoms_info,
                geoms_state=geoms_state,
                entities_info=entities_info,
                rigid_global_info=rigid_global_info,
                static_rigid_sim_config=static_rigid_sim_config,
                force_update_fixed_geoms=True,
                is_backward=False,
            )
            func_forward_velocity_batch(
                i_b=i_b,
                entities_info=entities_info,
                links_info=links_info,
                links_state=links_state,
                joints_info=joints_info,
                dofs_state=dofs_state,
                rigid_global_info=rigid_global_info,
                static_rigid_sim_config=static_rigid_sim_config,
                is_backward=False,
            )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_forward_kinematics(
    envs_idx: qd.types.ndarray(),
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    For each environment listed in `envs_idx`, run forward kinematics,
    aggregate per-root COM quantities, and propagate velocities. Unlike
    `kernel_forward_kinematics_links_geoms`, geometry state is not updated
    here (no geoms arguments are taken).
    """
    for i_b_ in range(envs_idx.shape[0]):
        i_b = envs_idx[i_b_]
        func_forward_kinematics_batch(
            i_b=i_b,
            links_state=links_state,
            links_info=links_info,
            joints_state=joints_state,
            joints_info=joints_info,
            dofs_state=dofs_state,
            dofs_info=dofs_info,
            entities_info=entities_info,
            rigid_global_info=rigid_global_info,
            static_rigid_sim_config=static_rigid_sim_config,
            is_backward=False,
        )
        func_COM_links(
            i_b=i_b,
            links_state=links_state,
            links_info=links_info,
            joints_state=joints_state,
            joints_info=joints_info,
            dofs_state=dofs_state,
            dofs_info=dofs_info,
            entities_info=entities_info,
            rigid_global_info=rigid_global_info,
            static_rigid_sim_config=static_rigid_sim_config,
            is_backward=False,
        )
        func_forward_velocity_batch(
            i_b=i_b,
            entities_info=entities_info,
            links_info=links_info,
            links_state=links_state,
            joints_info=joints_info,
            dofs_state=dofs_state,
            rigid_global_info=rigid_global_info,
            static_rigid_sim_config=static_rigid_sim_config,
            is_backward=False,
        )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_masked_forward_kinematics(
    envs_mask: qd.types.ndarray(),
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Mask-driven variant of `kernel_forward_kinematics`: run FK, COM
    aggregation, and velocity propagation for every environment `i_b` whose
    `envs_mask[i_b]` entry is truthy.
    """
    for i_b in range(envs_mask.shape[0]):
        if envs_mask[i_b]:
            func_forward_kinematics_batch(
                i_b=i_b,
                links_state=links_state,
                links_info=links_info,
                joints_state=joints_state,
                joints_info=joints_info,
                dofs_state=dofs_state,
                dofs_info=dofs_info,
                entities_info=entities_info,
                rigid_global_info=rigid_global_info,
                static_rigid_sim_config=static_rigid_sim_config,
                is_backward=False,
            )
            func_COM_links(
                i_b=i_b,
                links_state=links_state,
                links_info=links_info,
                joints_state=joints_state,
                joints_info=joints_info,
                dofs_state=dofs_state,
                dofs_info=dofs_info,
                entities_info=entities_info,
                rigid_global_info=rigid_global_info,
                static_rigid_sim_config=static_rigid_sim_config,
                is_backward=False,
            )
            func_forward_velocity_batch(
                i_b=i_b,
                entities_info=entities_info,
                links_info=links_info,
                links_state=links_state,
                joints_info=joints_info,
                dofs_state=dofs_state,
                rigid_global_info=rigid_global_info,
                static_rigid_sim_config=static_rigid_sim_config,
                is_backward=False,
            )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_forward_velocity(
    envs_idx: qd.types.ndarray(),
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    is_backward: qd.template(),
):
    """Propagate link velocities for each environment listed in `envs_idx`."""
    for i_env_ in range(envs_idx.shape[0]):
        func_forward_velocity_batch(
            i_b=envs_idx[i_env_],
            links_state=links_state,
            links_info=links_info,
            joints_info=joints_info,
            dofs_state=dofs_state,
            entities_info=entities_info,
            rigid_global_info=rigid_global_info,
            static_rigid_sim_config=static_rigid_sim_config,
            is_backward=is_backward,
        )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_masked_forward_velocity(
    envs_mask: qd.types.ndarray(),
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    is_backward: qd.template(),
):
    """Propagate link velocities for every environment whose mask entry is truthy."""
    n_envs = envs_mask.shape[0]
    for i_env in range(n_envs):
        if envs_mask[i_env]:
            func_forward_velocity_batch(
                i_b=i_env,
                links_state=links_state,
                links_info=links_info,
                joints_info=joints_info,
                dofs_state=dofs_state,
                entities_info=entities_info,
                rigid_global_info=rigid_global_info,
                static_rigid_sim_config=static_rigid_sim_config,
                is_backward=is_backward,
            )
@qd.func
def func_COM_links(
    i_b,
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    is_backward: qd.template(),
):
    """
    Run per-entity COM aggregation (`func_COM_links_entity`) for every
    relevant entity of environment `i_b`.

    Loop-bound selection:
    - Forward pass: iterate the runtime entity count (awake entities when
      hibernation is enabled, all entities otherwise).
    - Backward pass: iterate a compile-time upper bound instead (presumably
      because the autodiff transform needs statically-known loop ranges —
      see the matching pattern in `func_COM_links_entity`); out-of-range
      indices are filtered by `func_check_index_range`.
    """
    BW = qd.static(is_backward)
    for i_e_ in (
        (
            range(rigid_global_info.n_awake_entities[i_b])
            if qd.static(static_rigid_sim_config.use_hibernation)
            else range(entities_info.n_links.shape[0])
        )
        if qd.static(not BW)
        else (
            qd.static(range(static_rigid_sim_config.max_n_awake_entities))
            if qd.static(static_rigid_sim_config.use_hibernation)
            else qd.static(range(static_rigid_sim_config.n_entities))
        )
    ):
        if func_check_index_range(
            i_e_, 0, rigid_global_info.n_awake_entities[i_b], static_rigid_sim_config.use_hibernation
        ):
            # With hibernation, i_e_ indexes the awake-entity list; otherwise it is the entity index itself.
            i_e = (
                rigid_global_info.awake_entities[i_e_, i_b]
                if qd.static(static_rigid_sim_config.use_hibernation)
                else i_e_
            )
            func_COM_links_entity(
                i_e,
                i_b,
                links_state,
                links_info,
                joints_state,
                joints_info,
                dofs_state,
                dofs_info,
                entities_info,
                rigid_global_info,
                static_rigid_sim_config,
                is_backward,
            )
@qd.func
def func_COM_links_entity(
    i_e,
    i_b,
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    is_backward: qd.template(),
):
    """
    Compute center-of-mass quantities for all links of entity `i_e` in env `i_b`.

    Multi-pass pipeline (each pass is a separate loop over the entity's links):
      1. zero the per-link COM accumulators,
      2. accumulate mass and mass-weighted inertial positions onto each
         kinematic root,
      3. finalize the per-root COM,
      4. broadcast the root COM to every link of that root,
      5. compute COM-frame composite inertia (cinr_*),
      6. chain joint frames to obtain per-link j_pos/j_quat,
      7. compute per-DOF motion subspace vectors (cdof_*) and cdofvel_*.

    In the backward pass all loops use static worst-case bounds and
    func_check_index_range masks the excess iterations.
    """
    EPS = rigid_global_info.EPS[None]
    BW = qd.static(is_backward)
    # Becomes static loop in backward pass, because we assume this loop is an inner loop
    # Pass 1: reset accumulators (root_COM_bw holds the running weighted sum).
    for i_l_ in (
        range(entities_info.link_start[i_e], entities_info.link_end[i_e])
        if qd.static(not BW)
        else qd.static(range(static_rigid_sim_config.max_n_links_per_entity))
    ):
        i_l = i_l_ if qd.static(not BW) else (i_l_ + entities_info.link_start[i_e])
        if func_check_index_range(i_l, entities_info.link_start[i_e], entities_info.link_end[i_e], BW):
            links_state.root_COM_bw[i_l, i_b].fill(0.0)
            links_state.mass_sum[i_l, i_b] = 0.0
    # Pass 2: bring each link's inertial frame to world coordinates and
    # accumulate mass / mass-weighted position onto the link's kinematic root.
    for i_l_ in (
        range(entities_info.link_start[i_e], entities_info.link_end[i_e])
        if qd.static(not BW)
        else qd.static(range(static_rigid_sim_config.max_n_links_per_entity))
    ):
        i_l = i_l_ if qd.static(not BW) else (i_l_ + entities_info.link_start[i_e])
        if func_check_index_range(i_l, entities_info.link_start[i_e], entities_info.link_end[i_e], BW):
            # Links info may be batched per environment or shared.
            I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
            mass = links_info.inertial_mass[I_l] + links_state.mass_shift[i_l, i_b]
            (
                links_state.i_pos_bw[i_l, i_b],
                links_state.i_quat[i_l, i_b],
            ) = gu.qd_transform_pos_quat_by_trans_quat(
                links_info.inertial_pos[I_l] + links_state.i_pos_shift[i_l, i_b],
                links_info.inertial_quat[I_l],
                links_state.pos[i_l, i_b],
                links_state.quat[i_l, i_b],
            )
            i_r = links_info.root_idx[I_l]
            links_state.mass_sum[i_r, i_b] = links_state.mass_sum[i_r, i_b] + mass
            qd.atomic_add(links_state.root_COM_bw[i_r, i_b], mass * links_state.i_pos_bw[i_l, i_b])
    # Pass 3: finalize the COM of each root link (weighted sum / total mass),
    # falling back to the root's inertial position for ~massless subtrees.
    for i_l_ in (
        range(entities_info.link_start[i_e], entities_info.link_end[i_e])
        if qd.static(not BW)
        else qd.static(range(static_rigid_sim_config.max_n_links_per_entity))
    ):
        i_l = i_l_ if qd.static(not BW) else (i_l_ + entities_info.link_start[i_e])
        if func_check_index_range(i_l, entities_info.link_start[i_e], entities_info.link_end[i_e], BW):
            I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
            i_r = links_info.root_idx[I_l]
            if i_l == i_r:
                mass_sum = links_state.mass_sum[i_l, i_b]
                if mass_sum > EPS:
                    links_state.root_COM[i_l, i_b] = links_state.root_COM_bw[i_l, i_b] / links_state.mass_sum[i_l, i_b]
                else:
                    links_state.root_COM[i_l, i_b] = links_state.i_pos_bw[i_r, i_b]
    # Pass 4: broadcast each root's COM to all links sharing that root.
    for i_l_ in (
        range(entities_info.link_start[i_e], entities_info.link_end[i_e])
        if qd.static(not BW)
        else qd.static(range(static_rigid_sim_config.max_n_links_per_entity))
    ):
        i_l = i_l_ if qd.static(not BW) else (i_l_ + entities_info.link_start[i_e])
        if func_check_index_range(i_l, entities_info.link_start[i_e], entities_info.link_end[i_e], BW):
            I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
            i_r = links_info.root_idx[I_l]
            links_state.root_COM[i_l, i_b] = links_state.root_COM[i_r, i_b]
    # Pass 5: express the inertial frame relative to the root COM and build the
    # COM-frame composite inertia (cinr_*).
    for i_l_ in (
        range(entities_info.link_start[i_e], entities_info.link_end[i_e])
        if qd.static(not BW)
        else qd.static(range(static_rigid_sim_config.max_n_links_per_entity))
    ):
        i_l = i_l_ if qd.static(not BW) else (i_l_ + entities_info.link_start[i_e])
        if func_check_index_range(i_l, entities_info.link_start[i_e], entities_info.link_end[i_e], BW):
            I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
            i_r = links_info.root_idx[I_l]
            links_state.i_pos[i_l, i_b] = links_state.i_pos_bw[i_l, i_b] - links_state.root_COM[i_l, i_b]
            i_inertial = links_info.inertial_i[I_l]
            i_mass = links_info.inertial_mass[I_l] + links_state.mass_shift[i_l, i_b]
            (
                links_state.cinr_inertial[i_l, i_b],
                links_state.cinr_pos[i_l, i_b],
                links_state.cinr_quat[i_l, i_b],
                links_state.cinr_mass[i_l, i_b],
            ) = gu.qd_transform_inertia_by_trans_quat(
                i_inertial,
                i_mass,
                links_state.i_pos[i_l, i_b],
                links_state.i_quat[i_l, i_b],
                rigid_global_info.EPS[None],
            )
    # Pass 6: chain the per-joint transforms (staged in j_pos_bw/j_quat_bw) to
    # obtain the link's joint frame j_pos/j_quat.
    for i_l_ in (
        range(entities_info.link_start[i_e], entities_info.link_end[i_e])
        if qd.static(not BW)
        else qd.static(range(static_rigid_sim_config.max_n_links_per_entity))
    ):
        i_l = i_l_ if qd.static(not BW) else (i_l_ + entities_info.link_start[i_e])
        if func_check_index_range(i_l, entities_info.link_start[i_e], entities_info.link_end[i_e], BW):
            I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
            if links_info.n_dofs[I_l] > 0:
                i_p = links_info.parent_idx[I_l]
                _i_j = links_info.joint_start[I_l]
                _I_j = [_i_j, i_b] if qd.static(static_rigid_sim_config.batch_joints_info) else _i_j
                joint_type = joints_info.type[_I_j]
                # Parent pose defaults to identity for base links (i_p == -1).
                p_pos = qd.Vector.zero(gs.qd_float, 3)
                p_quat = gu.qd_identity_quat()
                if i_p != -1:
                    p_pos = links_state.pos[i_p, i_b]
                    p_quat = links_state.quat[i_p, i_b]
                if joint_type == gs.JOINT_TYPE.FREE or (links_info.is_fixed[I_l] and i_p == -1):
                    # Free / fixed-base links: joint frame coincides with link frame.
                    links_state.j_pos[i_l, i_b] = links_state.pos[i_l, i_b]
                    links_state.j_quat[i_l, i_b] = links_state.quat[i_l, i_b]
                else:
                    (
                        links_state.j_pos_bw[i_l, 0, i_b],
                        links_state.j_quat_bw[i_l, 0, i_b],
                    ) = gu.qd_transform_pos_quat_by_trans_quat(links_info.pos[I_l], links_info.quat[I_l], p_pos, p_quat)
                    n_joints = links_info.joint_end[I_l] - links_info.joint_start[I_l]
                    for i_j_ in (
                        range(n_joints)
                        if qd.static(not BW)
                        else qd.static(range(static_rigid_sim_config.max_n_joints_per_link))
                    ):
                        i_j = i_j_ + links_info.joint_start[I_l]
                        # Forward pass reuses slot 0; backward pass keeps every
                        # intermediate for the gradient tape.
                        curr_i_j = 0 if qd.static(not BW) else i_j_
                        next_i_j = 0 if qd.static(not BW) else i_j_ + 1
                        if func_check_index_range(
                            i_j,
                            links_info.joint_start[I_l],
                            links_info.joint_end[I_l],
                            BW,
                        ):
                            I_j = [i_j, i_b] if qd.static(static_rigid_sim_config.batch_joints_info) else i_j
                            (
                                links_state.j_pos_bw[i_l, next_i_j, i_b],
                                links_state.j_quat_bw[i_l, next_i_j, i_b],
                            ) = gu.qd_transform_pos_quat_by_trans_quat(
                                joints_info.pos[I_j],
                                gu.qd_identity_quat(),
                                links_state.j_pos_bw[i_l, curr_i_j, i_b],
                                links_state.j_quat_bw[i_l, curr_i_j, i_b],
                            )
                    i_j_ = 0 if qd.static(not BW) else n_joints
                    links_state.j_pos[i_l, i_b] = links_state.j_pos_bw[i_l, i_j_, i_b]
                    links_state.j_quat[i_l, i_b] = links_state.j_quat_bw[i_l, i_j_, i_b]
    # Pass 7: per-DOF motion subspace vectors (cdof_ang/cdof_vel) for each joint
    # type, plus their velocity-weighted counterparts (cdofvel_*).
    for i_l_ in (
        range(entities_info.link_start[i_e], entities_info.link_end[i_e])
        if qd.static(not BW)
        else qd.static(range(static_rigid_sim_config.max_n_links_per_entity))
    ):
        i_l = i_l_ if qd.static(not BW) else (i_l_ + entities_info.link_start[i_e])
        if func_check_index_range(i_l, entities_info.link_start[i_e], entities_info.link_end[i_e], BW):
            I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
            if links_info.n_dofs[I_l] > 0:
                for i_j_ in (
                    range(links_info.joint_start[I_l], links_info.joint_end[I_l])
                    if qd.static(not BW)
                    else qd.static(range(static_rigid_sim_config.max_n_joints_per_link))
                ):
                    i_j = i_j_ if qd.static(not BW) else (i_j_ + links_info.joint_start[I_l])
                    if func_check_index_range(i_j, links_info.joint_start[I_l], links_info.joint_end[I_l], BW):
                        # Lever arm from the joint anchor to the root COM.
                        offset_pos = links_state.root_COM[i_l, i_b] - joints_state.xanchor[i_j, i_b]
                        I_j = [i_j, i_b] if qd.static(static_rigid_sim_config.batch_joints_info) else i_j
                        joint_type = joints_info.type[I_j]
                        dof_start = joints_info.dof_start[I_j]
                        if joint_type == gs.JOINT_TYPE.REVOLUTE:
                            dofs_state.cdof_ang[dof_start, i_b] = joints_state.xaxis[i_j, i_b]
                            dofs_state.cdof_vel[dof_start, i_b] = joints_state.xaxis[i_j, i_b].cross(offset_pos)
                        elif joint_type == gs.JOINT_TYPE.PRISMATIC:
                            dofs_state.cdof_ang[dof_start, i_b] = qd.Vector.zero(gs.qd_float, 3)
                            dofs_state.cdof_vel[dof_start, i_b] = joints_state.xaxis[i_j, i_b]
                        elif joint_type == gs.JOINT_TYPE.SPHERICAL:
                            xmat_T = gu.qd_quat_to_R(links_state.quat[i_l, i_b], EPS).transpose()
                            for i in qd.static(range(3)):
                                dofs_state.cdof_ang[i + dof_start, i_b] = xmat_T[i, :]
                                dofs_state.cdof_vel[i + dof_start, i_b] = xmat_T[i, :].cross(offset_pos)
                        elif joint_type == gs.JOINT_TYPE.FREE:
                            # First 3 DOFs: pure translation along world axes.
                            for i in qd.static(range(3)):
                                dofs_state.cdof_ang[i + dof_start, i_b] = qd.Vector.zero(gs.qd_float, 3)
                                dofs_state.cdof_vel[i + dof_start, i_b] = qd.Vector.zero(gs.qd_float, 3)
                                dofs_state.cdof_vel[i + dof_start, i_b][i] = 1.0
                            # Last 3 DOFs: rotation about the link's body axes.
                            xmat_T = gu.qd_quat_to_R(links_state.quat[i_l, i_b], EPS).transpose()
                            for i in qd.static(range(3)):
                                dofs_state.cdof_ang[i + dof_start + 3, i_b] = xmat_T[i, :]
                                dofs_state.cdof_vel[i + dof_start + 3, i_b] = xmat_T[i, :].cross(offset_pos)
                        for i_d_ in (
                            range(dof_start, joints_info.dof_end[I_j])
                            if qd.static(not BW)
                            else qd.static(range(static_rigid_sim_config.max_n_dofs_per_joint))
                        ):
                            i_d = i_d_ if qd.static(not BW) else (i_d_ + dof_start)
                            if func_check_index_range(i_d, dof_start, joints_info.dof_end[I_j], BW):
                                dofs_state.cdofvel_ang[i_d, i_b] = (
                                    dofs_state.cdof_ang[i_d, i_b] * dofs_state.vel[i_d, i_b]
                                )
                                dofs_state.cdofvel_vel[i_d, i_b] = (
                                    dofs_state.cdof_vel[i_d, i_b] * dofs_state.vel[i_d, i_b]
                                )
@qd.func
def func_forward_kinematics_entity(
    i_e,
    i_b,
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    is_backward: qd.template(),
):
    """
    Forward kinematics of entity `i_e` in environment `i_b`: compute each
    link's world pose (links_state.pos/quat) and joint anchors/axes
    (joints_state.xanchor/xaxis) from qpos, walking the links in storage order
    (parents are read from links_state, so parents must precede children).

    W/R/WR wrap conditional writes/reads of the *_bw scratch buffers
    (pos_bw/quat_bw): in the backward pass every per-joint intermediate is
    staged there for the gradient tape; in the forward pass slot 0 is reused.
    """
    BW = qd.static(is_backward)
    W = qd.static(func_write_field_if)
    R = qd.static(func_read_field_if)
    WR = qd.static(func_write_and_read_field_if)
    # Becomes static loop in backward pass, because we assume this loop is an inner loop
    for i_l_ in (
        range(entities_info.link_start[i_e], entities_info.link_end[i_e])
        if qd.static(not BW)
        else qd.static(range(static_rigid_sim_config.max_n_links_per_entity))
    ):
        i_l = gs.qd_int(i_l_ if qd.static(not BW) else (i_l_ + entities_info.link_start[i_e]))
        if func_check_index_range(i_l, entities_info.link_start[i_e], entities_info.link_end[i_e], BW):
            I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
            I_l0 = (i_l, 0, i_b)
            # Start from the link's local pose; compose with the parent pose if any.
            pos = W(links_state.pos_bw, I_l0, links_info.pos[I_l], BW)
            quat = W(links_state.quat_bw, I_l0, links_info.quat[I_l], BW)
            if links_info.parent_idx[I_l] != -1:
                parent_pos = links_state.pos[links_info.parent_idx[I_l], i_b]
                parent_quat = links_state.quat[links_info.parent_idx[I_l], i_b]
                pos_ = parent_pos + gu.qd_transform_by_quat(links_info.pos[I_l], parent_quat)
                quat_ = gu.qd_transform_quat_by_quat(links_info.quat[I_l], parent_quat)
                pos = W(links_state.pos_bw, I_l0, pos_, BW)
                quat = W(links_state.quat_bw, I_l0, quat_, BW)
            n_joints = links_info.joint_end[I_l] - links_info.joint_start[I_l]
            # Apply each joint of this link in sequence, threading the running
            # pose through the (curr_I -> next_I) scratch slots.
            for i_j_ in (
                range(n_joints)
                if qd.static(not BW)
                else qd.static(range(static_rigid_sim_config.max_n_joints_per_link))
            ):
                i_j = i_j_ + links_info.joint_start[I_l]
                curr_I = (i_l, 0 if qd.static(not BW) else i_j_, i_b)
                next_I = (i_l, 0 if qd.static(not BW) else i_j_ + 1, i_b)
                if func_check_index_range(i_j, links_info.joint_start[I_l], links_info.joint_end[I_l], BW):
                    I_j = [i_j, i_b] if qd.static(static_rigid_sim_config.batch_joints_info) else i_j
                    joint_type = joints_info.type[I_j]
                    q_start = joints_info.q_start[I_j]
                    dof_start = joints_info.dof_start[I_j]
                    I_d = [dof_start, i_b] if qd.static(static_rigid_sim_config.batch_dofs_info) else dof_start
                    # compute axis and anchor
                    if joint_type == gs.JOINT_TYPE.FREE:
                        # Free joint: anchor comes straight from the qpos translation.
                        joints_state.xanchor[i_j, i_b] = qd.Vector(
                            [
                                rigid_global_info.qpos[q_start, i_b],
                                rigid_global_info.qpos[q_start + 1, i_b],
                                rigid_global_info.qpos[q_start + 2, i_b],
                            ]
                        )
                        joints_state.xaxis[i_j, i_b] = qd.Vector([0.0, 0.0, 1.0])
                    elif joint_type == gs.JOINT_TYPE.FIXED:
                        pass
                    else:
                        axis = qd.Vector([0.0, 0.0, 1.0], dt=gs.qd_float)
                        if joint_type == gs.JOINT_TYPE.REVOLUTE:
                            axis = dofs_info.motion_ang[I_d]
                        elif joint_type == gs.JOINT_TYPE.PRISMATIC:
                            axis = dofs_info.motion_vel[I_d]
                        pos_ = R(links_state.pos_bw, curr_I, pos, BW)
                        quat_ = R(links_state.quat_bw, curr_I, quat, BW)
                        joints_state.xanchor[i_j, i_b] = gu.qd_transform_by_quat(joints_info.pos[I_j], quat_) + pos_
                        joints_state.xaxis[i_j, i_b] = gu.qd_transform_by_quat(axis, quat_)
                    # Advance the running pose by this joint's configuration.
                    if joint_type == gs.JOINT_TYPE.FREE:
                        pos_ = qd.Vector(
                            [
                                rigid_global_info.qpos[q_start, i_b],
                                rigid_global_info.qpos[q_start + 1, i_b],
                                rigid_global_info.qpos[q_start + 2, i_b],
                            ],
                            dt=gs.qd_float,
                        )
                        quat_ = qd.Vector(
                            [
                                rigid_global_info.qpos[q_start + 3, i_b],
                                rigid_global_info.qpos[q_start + 4, i_b],
                                rigid_global_info.qpos[q_start + 5, i_b],
                                rigid_global_info.qpos[q_start + 6, i_b],
                            ],
                            dt=gs.qd_float,
                        )
                        # Re-normalize the quaternion read from qpos.
                        quat_ = quat_ / quat_.norm()
                        pos = WR(links_state.pos_bw, next_I, pos_, BW)
                        quat = WR(links_state.quat_bw, next_I, quat_, BW)
                        xyz = gu.qd_quat_to_xyz(quat, rigid_global_info.EPS[None])
                        for j in qd.static(range(3)):
                            dofs_state.pos[dof_start + j, i_b] = pos[j]
                            dofs_state.pos[dof_start + 3 + j, i_b] = xyz[j]
                    elif joint_type == gs.JOINT_TYPE.FIXED:
                        pass
                    elif joint_type == gs.JOINT_TYPE.SPHERICAL:
                        qloc = qd.Vector(
                            [
                                rigid_global_info.qpos[q_start, i_b],
                                rigid_global_info.qpos[q_start + 1, i_b],
                                rigid_global_info.qpos[q_start + 2, i_b],
                                rigid_global_info.qpos[q_start + 3, i_b],
                            ],
                            dt=gs.qd_float,
                        )
                        xyz = gu.qd_quat_to_xyz(qloc, rigid_global_info.EPS[None])
                        for j in qd.static(range(3)):
                            dofs_state.pos[dof_start + j, i_b] = xyz[j]
                        quat_ = gu.qd_transform_quat_by_quat(qloc, R(links_state.quat_bw, curr_I, quat, BW))
                        quat = WR(links_state.quat_bw, next_I, quat_, BW)
                        # Keep the anchor fixed: recompute pos from the rotated joint offset.
                        pos_ = joints_state.xanchor[i_j, i_b] - gu.qd_transform_by_quat(joints_info.pos[I_j], quat)
                        pos = W(links_state.pos_bw, next_I, pos_, BW)
                    elif joint_type == gs.JOINT_TYPE.REVOLUTE:
                        axis = dofs_info.motion_ang[I_d]
                        # Joint angle is measured relative to the reference qpos0.
                        dofs_state.pos[dof_start, i_b] = (
                            rigid_global_info.qpos[q_start, i_b] - rigid_global_info.qpos0[q_start, i_b]
                        )
                        qloc = gu.qd_rotvec_to_quat(axis * dofs_state.pos[dof_start, i_b], rigid_global_info.EPS[None])
                        quat_ = gu.qd_transform_quat_by_quat(qloc, R(links_state.quat_bw, curr_I, quat, BW))
                        quat = WR(links_state.quat_bw, next_I, quat_, BW)
                        pos_ = joints_state.xanchor[i_j, i_b] - gu.qd_transform_by_quat(joints_info.pos[I_j], quat)
                        pos = W(links_state.pos_bw, next_I, pos_, BW)
                    else:  # joint_type == gs.JOINT_TYPE.PRISMATIC:
                        dofs_state.pos[dof_start, i_b] = (
                            rigid_global_info.qpos[q_start, i_b] - rigid_global_info.qpos0[q_start, i_b]
                        )
                        pos_ = (
                            R(links_state.pos_bw, curr_I, pos, BW)
                            + joints_state.xaxis[i_j, i_b] * dofs_state.pos[dof_start, i_b]
                        )
                        pos = W(links_state.pos_bw, next_I, pos_, BW)
            # Skip link pose update for fixed root links to let users manually overwrite them
            I_jf = (i_l, 0 if qd.static(not BW) else n_joints, i_b)
            if not (links_info.parent_idx[I_l] == -1 and links_info.is_fixed[I_l]):
                links_state.pos[i_l, i_b] = R(links_state.pos_bw, I_jf, pos, BW)
                links_state.quat[i_l, i_b] = R(links_state.quat_bw, I_jf, quat, BW)
@qd.func
def func_forward_kinematics_batch(
    i_b,
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    is_backward: qd.template(),
):
    """
    Run forward kinematics for every (awake) entity of environment `i_b`.

    Forward pass: dynamic range over the actual awake/entity count.
    Backward pass: static worst-case range (required for autodiff unrolling),
    with out-of-range iterations masked by func_check_index_range.
    """
    BW = qd.static(is_backward)
    for i_e_ in (
        (
            range(rigid_global_info.n_awake_entities[i_b])
            if qd.static(static_rigid_sim_config.use_hibernation)
            else range(entities_info.n_links.shape[0])
        )
        if qd.static(not BW)
        else (
            qd.static(range(static_rigid_sim_config.max_n_awake_entities))
            if qd.static(static_rigid_sim_config.use_hibernation)
            # Fix: this loop iterates entities, so the static bound must be the
            # entity count (consistent with func_COM_links), not
            # max_n_links_per_entity, which bounds links within one entity.
            else qd.static(range(static_rigid_sim_config.n_entities))
        )
    ):
        if func_check_index_range(
            i_e_, 0, rigid_global_info.n_awake_entities[i_b], static_rigid_sim_config.use_hibernation
        ):
            # With hibernation, `i_e_` indexes the awake-entity list.
            i_e = (
                rigid_global_info.awake_entities[i_e_, i_b]
                if qd.static(static_rigid_sim_config.use_hibernation)
                else i_e_
            )
            func_forward_kinematics_entity(
                i_e,
                i_b,
                links_state,
                links_info,
                joints_state,
                joints_info,
                dofs_state,
                dofs_info,
                entities_info,
                rigid_global_info,
                static_rigid_sim_config,
                is_backward,
            )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_forward_kinematics_entity(
    i_e: qd.int32,
    envs_idx: qd.types.ndarray(),
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """Forward-pass forward kinematics of entity `i_e` over the selected environments."""
    n_envs = envs_idx.shape[0]
    for idx in range(n_envs):
        func_forward_kinematics_entity(
            i_e=i_e,
            i_b=envs_idx[idx],
            links_state=links_state,
            links_info=links_info,
            joints_state=joints_state,
            joints_info=joints_info,
            dofs_state=dofs_state,
            dofs_info=dofs_info,
            entities_info=entities_info,
            rigid_global_info=rigid_global_info,
            static_rigid_sim_config=static_rigid_sim_config,
            is_backward=False,
        )
@qd.func
def func_update_geoms_entity(
    i_e,
    i_b,
    entities_info: array_class.EntitiesInfo,
    geoms_info: array_class.GeomsInfo,
    geoms_state: array_class.GeomsState,
    links_state: array_class.LinksState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    force_update_fixed_geoms: qd.template(),
    is_backward: qd.template(),
):
    """
    Refresh the world pose of every geom of entity `i_e` in environment `i_b`
    from its parent link pose.

    NOTE: only the geom pose is updated here — vertices and everything else
    are refreshed lazily (hence the `verts_updated` flag reset).
    """
    BW = qd.static(is_backward)
    # Forward pass: dynamic per-entity geom count.
    # Backward pass: static worst-case bound, masked by func_check_index_range.
    for offset in (
        qd.static(range(static_rigid_sim_config.max_n_geoms_per_entity))
        if qd.static(BW)
        else range(entities_info.n_geoms[i_e])
    ):
        i_g = entities_info.geom_start[i_e] + offset
        if func_check_index_range(i_g, entities_info.geom_start[i_e], entities_info.geom_end[i_e], BW):
            # Fixed geoms are skipped unless the caller forces the update.
            skip = geoms_info.is_fixed[i_g] and not force_update_fixed_geoms
            if not skip:
                i_l = geoms_info.link_idx[i_g]
                (
                    geoms_state.pos[i_g, i_b],
                    geoms_state.quat[i_g, i_b],
                ) = gu.qd_transform_pos_quat_by_trans_quat(
                    geoms_info.pos[i_g],
                    geoms_info.quat[i_g],
                    links_state.pos[i_l, i_b],
                    links_state.quat[i_l, i_b],
                )
                # Cached world-frame vertices are now stale for this geom/env.
                geoms_state.verts_updated[i_g, i_b] = False
@qd.func
def func_update_geoms_batch(
    i_b,
    entities_info: array_class.EntitiesInfo,
    geoms_info: array_class.GeomsInfo,
    geoms_state: array_class.GeomsState,
    links_state: array_class.LinksState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    force_update_fixed_geoms: qd.template(),
    is_backward: qd.template(),
):
    """
    Update geom poses for every (awake) entity of environment `i_b`.

    NOTE: this only updates geom poses, not vertices or anything else.
    Forward pass uses dynamic ranges; backward pass uses static worst-case
    ranges masked by func_check_index_range (required for autodiff unrolling).
    """
    BW = qd.static(is_backward)
    for i_e_ in (
        (
            # Dynamic inner loop for forward pass
            range(rigid_global_info.n_awake_entities[i_b])
            if qd.static(static_rigid_sim_config.use_hibernation)
            else range(entities_info.n_links.shape[0])
        )
        if qd.static(not BW)
        else (
            qd.static(range(static_rigid_sim_config.max_n_awake_entities))  # Static inner loop for backward pass
            if qd.static(static_rigid_sim_config.use_hibernation)
            # Fix: this loop iterates entities, so the static bound must be the
            # entity count (consistent with func_COM_links), not
            # max_n_links_per_entity, which bounds links within one entity.
            else qd.static(range(static_rigid_sim_config.n_entities))
        )
    ):
        if func_check_index_range(
            i_e_, 0, rigid_global_info.n_awake_entities[i_b], static_rigid_sim_config.use_hibernation
        ):
            # With hibernation, `i_e_` indexes the awake-entity list.
            i_e = (
                rigid_global_info.awake_entities[i_e_, i_b]
                if qd.static(static_rigid_sim_config.use_hibernation)
                else i_e_
            )
            func_update_geoms_entity(
                i_e,
                i_b,
                entities_info,
                geoms_info,
                geoms_state,
                links_state,
                rigid_global_info,
                static_rigid_sim_config,
                force_update_fixed_geoms,
                is_backward,
            )
@qd.func
def func_update_geoms(
    entities_info: array_class.EntitiesInfo,
    geoms_info: array_class.GeomsInfo,
    geoms_state: array_class.GeomsState,
    links_state: array_class.LinksState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    force_update_fixed_geoms: qd.template(),
    is_backward: qd.template(),
):
    """
    Update geom poses for all entities across all environments.

    With hibernation the entity loop must stay inside the per-environment
    call (awake sets differ per env); without it, entities and environments
    are iterated jointly for more parallelism.
    """
    # This loop must be the outermost loop to be differentiable
    if qd.static(static_rigid_sim_config.use_hibernation):
        # NOTE(review): unlike the branch below, `serialize` is not wrapped in
        # qd.static here — presumably equivalent since the config is a
        # template; confirm intent.
        qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
        for i_b in range(links_state.pos.shape[1]):
            func_update_geoms_batch(
                i_b,
                entities_info,
                geoms_info,
                geoms_state,
                links_state,
                rigid_global_info,
                static_rigid_sim_config,
                force_update_fixed_geoms,
                is_backward,
            )
    else:
        qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.PARTIAL))
        # Parallelize over (entity, environment) pairs.
        for i_e, i_b in qd.ndrange(entities_info.n_links.shape[0], links_state.pos.shape[1]):
            func_update_geoms_entity(
                i_e,
                i_b,
                entities_info,
                geoms_info,
                geoms_state,
                links_state,
                rigid_global_info,
                static_rigid_sim_config,
                force_update_fixed_geoms,
                is_backward,
            )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_update_geoms(
    envs_idx: qd.types.ndarray(),
    entities_info: array_class.EntitiesInfo,
    geoms_info: array_class.GeomsInfo,
    geoms_state: array_class.GeomsState,
    links_state: array_class.LinksState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    force_update_fixed_geoms: qd.template(),
):
    """Forward-pass geom pose refresh restricted to the selected environments."""
    n_envs = envs_idx.shape[0]
    for idx in range(n_envs):
        func_update_geoms_batch(
            i_b=envs_idx[idx],
            entities_info=entities_info,
            geoms_info=geoms_info,
            geoms_state=geoms_state,
            links_state=links_state,
            rigid_global_info=rigid_global_info,
            static_rigid_sim_config=static_rigid_sim_config,
            force_update_fixed_geoms=force_update_fixed_geoms,
            is_backward=False,
        )
@qd.func
def func_forward_velocity_entity(
    i_e,
    i_b,
    entities_info: array_class.EntitiesInfo,
    links_info: array_class.LinksInfo,
    links_state: array_class.LinksState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    is_backward: qd.template(),
):
    """
    Propagate spatial velocities (cd_vel/cd_ang) down the kinematic tree of
    entity `i_e` in env `i_b`, and compute the velocity-dependent derivative
    terms cdofd_ang/cdofd_vel for each DOF.

    W/R/A wrap conditional write/read/atomic-add on the *_bw scratch buffers:
    in the backward pass every per-joint intermediate velocity is staged there
    for the gradient tape; in the forward pass slot 0 is reused. Parents must
    precede children in storage order, since the parent's finalized cd_vel/
    cd_ang is read when initializing a child.
    """
    BW = qd.static(is_backward)
    W = qd.static(func_write_field_if)
    R = qd.static(func_read_field_if)
    A = qd.static(func_atomic_add_if)
    for i_l_ in (
        range(entities_info.link_start[i_e], entities_info.link_end[i_e])
        if qd.static(not BW)
        else qd.static(range(static_rigid_sim_config.max_n_links_per_entity))
    ):
        i_l = gs.qd_int(i_l_ if qd.static(not BW) else (i_l_ + entities_info.link_start[i_e]))
        if func_check_index_range(i_l, entities_info.link_start[i_e], entities_info.link_end[i_e], BW):
            I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
            n_joints = links_info.joint_end[I_l] - links_info.joint_start[I_l]
            I_j0 = (i_l, 0, i_b)
            # Start from zero, or inherit the parent's velocity if any.
            cvel_vel = W(links_state.cd_vel_bw, I_j0, qd.Vector.zero(gs.qd_float, 3), BW)
            cvel_ang = W(links_state.cd_ang_bw, I_j0, qd.Vector.zero(gs.qd_float, 3), BW)
            if links_info.parent_idx[I_l] != -1:
                cvel_vel = W(links_state.cd_vel_bw, I_j0, links_state.cd_vel[links_info.parent_idx[I_l], i_b], BW)
                cvel_ang = W(links_state.cd_ang_bw, I_j0, links_state.cd_ang[links_info.parent_idx[I_l], i_b], BW)
            for i_j_ in (
                range(n_joints)
                if qd.static(not BW)
                else qd.static(range(static_rigid_sim_config.max_n_joints_per_link))
            ):
                i_j = i_j_ + links_info.joint_start[I_l]
                if func_check_index_range(i_j, links_info.joint_start[I_l], links_info.joint_end[I_l], BW):
                    I_j = [i_j, i_b] if qd.static(static_rigid_sim_config.batch_joints_info) else i_j
                    joint_type = joints_info.type[I_j]
                    dof_start = joints_info.dof_start[I_j]
                    curr_I = (i_l, 0 if qd.static(not BW) else i_j_, i_b)
                    next_I = (i_l, 0 if qd.static(not BW) else i_j_ + 1, i_b)
                    if joint_type == gs.JOINT_TYPE.FREE:
                        # Translational DOFs (0..2): add their contribution first.
                        for i_3 in qd.static(range(3)):
                            _vel = dofs_state.cdof_vel[dof_start + i_3, i_b] * dofs_state.vel[dof_start + i_3, i_b]
                            _ang = dofs_state.cdof_ang[dof_start + i_3, i_b] * dofs_state.vel[dof_start + i_3, i_b]
                            cvel_vel = cvel_vel + A(links_state.cd_vel_bw, curr_I, _vel, BW)
                            cvel_ang = cvel_ang + A(links_state.cd_ang_bw, curr_I, _ang, BW)
                        # cdofd: zero for translational DOFs; for rotational DOFs
                        # (3..5) it is the motion cross product with the current
                        # accumulated velocity.
                        for i_3 in qd.static(range(3)):
                            (
                                dofs_state.cdofd_ang[dof_start + i_3, i_b],
                                dofs_state.cdofd_vel[dof_start + i_3, i_b],
                            ) = qd.Vector.zero(gs.qd_float, 3), qd.Vector.zero(gs.qd_float, 3)
                            (
                                dofs_state.cdofd_ang[dof_start + i_3 + 3, i_b],
                                dofs_state.cdofd_vel[dof_start + i_3 + 3, i_b],
                            ) = gu.motion_cross_motion(
                                R(links_state.cd_ang_bw, curr_I, cvel_ang, BW),
                                R(links_state.cd_vel_bw, curr_I, cvel_vel, BW),
                                dofs_state.cdof_ang[dof_start + i_3 + 3, i_b],
                                dofs_state.cdof_vel[dof_start + i_3 + 3, i_b],
                            )
                        # Backward pass: carry the staged velocity to the next slot.
                        if qd.static(BW):
                            links_state.cd_vel_bw[next_I] = links_state.cd_vel_bw[curr_I]
                            links_state.cd_ang_bw[next_I] = links_state.cd_ang_bw[curr_I]
                        # Rotational DOFs (3..5): add their contribution last.
                        for i_3 in qd.static(range(3)):
                            _vel = (
                                dofs_state.cdof_vel[dof_start + i_3 + 3, i_b] * dofs_state.vel[dof_start + i_3 + 3, i_b]
                            )
                            _ang = (
                                dofs_state.cdof_ang[dof_start + i_3 + 3, i_b] * dofs_state.vel[dof_start + i_3 + 3, i_b]
                            )
                            cvel_vel = cvel_vel + A(links_state.cd_vel_bw, next_I, _vel, BW)
                            cvel_ang = cvel_ang + A(links_state.cd_ang_bw, next_I, _ang, BW)
                    else:
                        # Generic joint: cdofd from the motion cross product, then
                        # accumulate each DOF's velocity contribution.
                        for i_d_ in (
                            range(dof_start, joints_info.dof_end[I_j])
                            if qd.static(not BW)
                            else qd.static(range(static_rigid_sim_config.max_n_dofs_per_joint))
                        ):
                            i_d = i_d_ if qd.static(not BW) else (i_d_ + dof_start)
                            if func_check_index_range(i_d, dof_start, joints_info.dof_end[I_j], BW):
                                dofs_state.cdofd_ang[i_d, i_b], dofs_state.cdofd_vel[i_d, i_b] = gu.motion_cross_motion(
                                    R(links_state.cd_ang_bw, curr_I, cvel_ang, BW),
                                    R(links_state.cd_vel_bw, curr_I, cvel_vel, BW),
                                    dofs_state.cdof_ang[i_d, i_b],
                                    dofs_state.cdof_vel[i_d, i_b],
                                )
                        if qd.static(BW):
                            links_state.cd_vel_bw[next_I] = links_state.cd_vel_bw[curr_I]
                            links_state.cd_ang_bw[next_I] = links_state.cd_ang_bw[curr_I]
                        for i_d_ in (
                            range(dof_start, joints_info.dof_end[I_j])
                            if qd.static(not BW)
                            else qd.static(range(static_rigid_sim_config.max_n_dofs_per_joint))
                        ):
                            i_d = i_d_ if qd.static(not BW) else (i_d_ + dof_start)
                            if func_check_index_range(i_d, dof_start, joints_info.dof_end[I_j], BW):
                                _vel = dofs_state.cdof_vel[i_d, i_b] * dofs_state.vel[i_d, i_b]
                                _ang = dofs_state.cdof_ang[i_d, i_b] * dofs_state.vel[i_d, i_b]
                                cvel_vel = cvel_vel + A(links_state.cd_vel_bw, next_I, _vel, BW)
                                cvel_ang = cvel_ang + A(links_state.cd_ang_bw, next_I, _ang, BW)
            # Finalize the link's composite velocity after all joints.
            I_jf = (i_l, 0 if qd.static(not BW) else n_joints, i_b)
            links_state.cd_vel[i_l, i_b] = R(links_state.cd_vel_bw, I_jf, cvel_vel, BW)
            links_state.cd_ang[i_l, i_b] = R(links_state.cd_ang_bw, I_jf, cvel_ang, BW)
@qd.func
def func_forward_velocity_batch(
    i_b,
    entities_info: array_class.EntitiesInfo,
    links_info: array_class.LinksInfo,
    links_state: array_class.LinksState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    is_backward: qd.template(),
):
    """
    Propagate link velocities for every (awake) entity of environment `i_b`.

    Forward pass uses dynamic ranges; backward pass uses static worst-case
    ranges masked by func_check_index_range (required for autodiff unrolling).
    """
    BW = qd.static(is_backward)
    for i_e_ in (
        (
            # Dynamic inner loop for forward pass
            range(rigid_global_info.n_awake_entities[i_b])
            if qd.static(static_rigid_sim_config.use_hibernation)
            else range(entities_info.n_links.shape[0])
        )
        if qd.static(not BW)
        else (
            qd.static(range(static_rigid_sim_config.max_n_awake_entities))  # Static inner loop for backward pass
            if qd.static(static_rigid_sim_config.use_hibernation)
            # Fix: this loop iterates entities, so the static bound must be the
            # entity count (consistent with func_COM_links), not
            # max_n_links_per_entity, which bounds links within one entity.
            else qd.static(range(static_rigid_sim_config.n_entities))
        )
    ):
        if func_check_index_range(
            i_e_, 0, rigid_global_info.n_awake_entities[i_b], static_rigid_sim_config.use_hibernation
        ):
            # With hibernation, `i_e_` indexes the awake-entity list.
            i_e = (
                rigid_global_info.awake_entities[i_e_, i_b]
                if qd.static(static_rigid_sim_config.use_hibernation)
                else i_e_
            )
            func_forward_velocity_entity(
                i_e=i_e,
                i_b=i_b,
                entities_info=entities_info,
                links_info=links_info,
                links_state=links_state,
                joints_info=joints_info,
                dofs_state=dofs_state,
                rigid_global_info=rigid_global_info,
                static_rigid_sim_config=static_rigid_sim_config,
                is_backward=is_backward,
            )
@qd.func
def func_forward_velocity(
    entities_info: array_class.EntitiesInfo,
    links_info: array_class.LinksInfo,
    links_state: array_class.LinksState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    is_backward: qd.template(),
):
    """
    Propagate link velocities for all entities across all environments.

    With hibernation the entity loop must stay inside the per-environment
    call (awake sets differ per env); without it, entities and environments
    are iterated jointly for more parallelism.
    """
    # This loop must be the outermost loop to be differentiable
    if qd.static(static_rigid_sim_config.use_hibernation):
        # NOTE(review): unlike the branch below, `serialize` is not wrapped in
        # qd.static here — presumably equivalent since the config is a
        # template; confirm intent.
        qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
        for i_b in range(links_state.pos.shape[1]):
            func_forward_velocity_batch(
                i_b,
                entities_info,
                links_info,
                links_state,
                joints_info,
                dofs_state,
                rigid_global_info,
                static_rigid_sim_config,
                is_backward,
            )
    else:
        qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.PARTIAL))
        # Parallelize over (entity, environment) pairs.
        for i_e, i_b in qd.ndrange(entities_info.n_links.shape[0], links_state.pos.shape[1]):
            func_forward_velocity_entity(
                i_e,
                i_b,
                entities_info,
                links_info,
                links_state,
                joints_info,
                dofs_state,
                rigid_global_info,
                static_rigid_sim_config,
                is_backward,
            )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_update_verts_for_geoms(
    geoms_idx: qd.types.ndarray(),
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    free_verts_state: array_class.VertsState,
    fixed_verts_state: array_class.VertsState,
    static_rigid_sim_config: qd.template(),
):
    """Refresh cached vertex positions of the selected geoms across all environments."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.PARTIAL))
    for k, i_b in qd.ndrange(geoms_idx.shape[0], geoms_state.verts_updated.shape[1]):
        func_update_verts_for_geom(
            geoms_idx[k], i_b, geoms_state, geoms_info, verts_info, free_verts_state, fixed_verts_state
        )
@qd.func
def func_update_verts_for_geom(
    i_g: qd.i32,
    i_b: qd.i32,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    free_verts_state: array_class.VertsState,
    fixed_verts_state: array_class.VertsState,
):
    """
    Lazily recompute world-frame vertex positions of geom `i_g` for env `i_b`.

    No-op when the cache is already valid. Fixed geoms share one un-batched
    vertex buffer, so refreshing them once validates the cache for every
    environment at once.
    """
    if not geoms_state.verts_updated[i_g, i_b]:
        v_lo = geoms_info.vert_start[i_g]
        v_hi = geoms_info.vert_end[i_g]
        g_pos = geoms_state.pos[i_g, i_b]
        g_quat = geoms_state.quat[i_g, i_b]
        # All vertices of a geom share the fixed/free property; probe the first.
        if not verts_info.is_fixed[v_lo]:
            # Free geom: vertices are stored per environment.
            for i_v in range(v_lo, v_hi):
                i_vs = verts_info.verts_state_idx[i_v]
                free_verts_state.pos[i_vs, i_b] = gu.qd_transform_by_trans_quat(
                    verts_info.init_pos[i_v], g_pos, g_quat
                )
            geoms_state.verts_updated[i_g, i_b] = True
        else:
            # Fixed geom: single shared vertex buffer for all environments.
            for i_v in range(v_lo, v_hi):
                i_vs = verts_info.verts_state_idx[i_v]
                fixed_verts_state.pos[i_vs] = gu.qd_transform_by_trans_quat(
                    verts_info.init_pos[i_v], g_pos, g_quat
                )
            for j_b in range(geoms_state.verts_updated.shape[1]):
                geoms_state.verts_updated[i_g, j_b] = True
@qd.func
def func_update_all_verts(
    geoms_info: array_class.GeomsInfo,
    geoms_state: array_class.GeomsState,
    verts_info: array_class.VertsInfo,
    free_verts_state: array_class.VertsState,
    fixed_verts_state: array_class.VertsState,
    static_rigid_sim_config: qd.template(),
):
    """Refresh cached vertex positions of every geom in every environment."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.PARTIAL))
    for i_g, i_b in qd.ndrange(geoms_state.pos.shape[0], geoms_state.pos.shape[1]):
        func_update_verts_for_geom(
            i_g, i_b, geoms_state, geoms_info, verts_info, free_verts_state, fixed_verts_state
        )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_update_all_verts(
    geoms_info: array_class.GeomsInfo,
    geoms_state: array_class.GeomsState,
    verts_info: array_class.VertsInfo,
    free_verts_state: array_class.VertsState,
    fixed_verts_state: array_class.VertsState,
    static_rigid_sim_config: qd.template(),
):
    """Kernel wrapper: refresh cached vertex positions for every geom and env."""
    func_update_all_verts(
        geoms_info=geoms_info,
        geoms_state=geoms_state,
        verts_info=verts_info,
        free_verts_state=free_verts_state,
        fixed_verts_state=fixed_verts_state,
        static_rigid_sim_config=static_rigid_sim_config,
    )
@qd.kernel
def kernel_update_geom_aabbs(
    geoms_state: array_class.GeomsState,
    geoms_init_AABB: array_class.GeomsInitAABB,
    static_rigid_sim_config: qd.template(),
):
    """Recompute each geom's world-frame AABB from its 8 precomputed local corners."""
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_g, i_b in qd.ndrange(geoms_state.pos.shape[0], geoms_state.pos.shape[1]):
        pos = geoms_state.pos[i_g, i_b]
        quat = geoms_state.quat[i_g, i_b]
        aabb_lo = gu.qd_vec3(qd.math.inf)
        aabb_hi = gu.qd_vec3(-qd.math.inf)
        # Transform the 8 resting-pose corners and take component-wise extrema.
        for i_c in qd.static(range(8)):
            corner = gu.qd_transform_by_trans_quat(geoms_init_AABB[i_g, i_c], pos, quat)
            aabb_lo = qd.min(aabb_lo, corner)
            aabb_hi = qd.max(aabb_hi, corner)
        geoms_state.aabb_min[i_g, i_b] = aabb_lo
        geoms_state.aabb_max[i_g, i_b] = aabb_hi
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_update_vgeoms(
    vgeoms_info: array_class.VGeomsInfo,
    vgeoms_state: array_class.VGeomsState,
    links_state: array_class.LinksState,
    static_rigid_sim_config: qd.template(),
):
    """
    Update world-frame poses of visual geoms from their parent link poses.

    Vgeoms are only for visualization purposes.
    """
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_vg, i_b in qd.ndrange(vgeoms_info.link_idx.shape[0], links_state.pos.shape[1]):
        i_l = vgeoms_info.link_idx[i_vg]
        (
            vgeoms_state.pos[i_vg, i_b],
            vgeoms_state.quat[i_vg, i_b],
        ) = gu.qd_transform_pos_quat_by_trans_quat(
            vgeoms_info.pos[i_vg],
            vgeoms_info.quat[i_vg],
            links_state.pos[i_l, i_b],
            links_state.quat[i_l, i_b],
        )
@qd.func
def func_hibernate__for_all_awake_islands_either_hiberanate_or_update_aabb_sort_buffer(
    dofs_state: array_class.DofsState,
    entities_state: array_class.EntitiesState,
    entities_info: array_class.EntitiesInfo,
    links_state: array_class.LinksState,
    geoms_state: array_class.GeomsState,
    collider_state: array_class.ColliderState,
    unused__rigid_global_info: array_class.RigidGlobalInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    contact_island_state: array_class.ContactIslandState,
    errno: array_class.V_ANNOTATION,
):
    """
    For every awake contact island, either hibernate the whole island or refresh
    its broad-phase sort buffer.

    An island hibernates only if every non-hibernated entity in it has all DOF
    accelerations/velocities within the hibernation thresholds. Otherwise the
    island stays awake and the x-axis AABB extents of its geoms are re-written
    into the collider's sweep-and-prune sort buffer. On an island whose entity
    references would overrun the entity_id buffer, an OVERFLOW error bit is set
    in ``errno`` for that batch and the island is skipped.
    """
    _B = entities_state.hibernated.shape[1]
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_b in range(_B):
        for island_idx in range(contact_island_state.n_islands[i_b]):
            was_island_hibernated = contact_island_state.island_hibernated[island_idx, i_b]
            if not was_island_hibernated:
                are_all_entities_okay_for_hibernation = True
                entity_ref_n = contact_island_state.island_entity.n[island_idx, i_b]
                entity_ref_start = contact_island_state.island_entity.start[island_idx, i_b]
                # Invariant check: ensure entity_id access won't exceed buffer
                if entity_ref_start + entity_ref_n > contact_island_state.entity_id.shape[0]:
                    errno[i_b] = errno[i_b] | array_class.ErrorCode.OVERFLOW_HIBERNATION_ISLANDS
                    continue
                # Pass 1: decide whether every entity of the island is slow enough to hibernate.
                for i_entity_ref_offset_ in range(entity_ref_n):
                    entity_ref = entity_ref_start + i_entity_ref_offset_
                    entity_idx = contact_island_state.entity_id[entity_ref, i_b]
                    # Hibernated entities already have zero dofs_state.acc/vel
                    is_entity_hibernated = entities_state.hibernated[entity_idx, i_b]
                    if is_entity_hibernated:
                        continue
                    for i_d in range(entities_info.dof_start[entity_idx], entities_info.dof_end[entity_idx]):
                        max_acc = rigid_global_info.hibernation_thresh_acc[None]
                        max_vel = rigid_global_info.hibernation_thresh_vel[None]
                        if qd.abs(dofs_state.acc[i_d, i_b]) > max_acc or qd.abs(dofs_state.vel[i_d, i_b]) > max_vel:
                            are_all_entities_okay_for_hibernation = False
                            break
                    if not are_all_entities_okay_for_hibernation:
                        break
                if not are_all_entities_okay_for_hibernation:
                    # update collider sort_buffer with aabb extents along x-axis
                    for i_entity_ref_offset_ in range(entity_ref_n):
                        entity_ref = entity_ref_start + i_entity_ref_offset_
                        entity_idx = contact_island_state.entity_id[entity_ref, i_b]
                        for i_g in range(entities_info.geom_start[entity_idx], entities_info.geom_end[entity_idx]):
                            min_idx, min_val = geoms_state.min_buffer_idx[i_g, i_b], geoms_state.aabb_min[i_g, i_b][0]
                            max_idx, max_val = geoms_state.max_buffer_idx[i_g, i_b], geoms_state.aabb_max[i_g, i_b][0]
                            collider_state.sort_buffer.value[min_idx, i_b] = min_val
                            collider_state.sort_buffer.value[max_idx, i_b] = max_val
                else:
                    # perform hibernation
                    # Guard: only process if there are entities in this island
                    if entity_ref_n > 0:
                        # Seed the chain with the island's LAST entity so the linked list wraps around.
                        prev_entity_ref = entity_ref_start + entity_ref_n - 1
                        prev_entity_idx = contact_island_state.entity_id[prev_entity_ref, i_b]
                        for i_entity_ref_offset_ in range(entity_ref_n):
                            entity_ref = entity_ref_start + i_entity_ref_offset_
                            entity_idx = contact_island_state.entity_id[entity_ref, i_b]
                            func_hibernate_entity_and_zero_dof_velocities(
                                entity_idx,
                                i_b,
                                entities_state=entities_state,
                                entities_info=entities_info,
                                dofs_state=dofs_state,
                                links_state=links_state,
                                geoms_state=geoms_state,
                            )
                            # store entities in the hibernated islands by daisy chaining them
                            contact_island_state.entity_idx_to_next_entity_idx_in_hibernated_island[
                                prev_entity_idx, i_b
                            ] = entity_idx
                            prev_entity_idx = entity_idx
@qd.func
def func_aggregate_awake_entities(
    entities_state: array_class.EntitiesState,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Rebuild the per-batch compact index lists of awake entities, links and DOFs.

    Counters are reset in a first pass; then every non-hibernated entity with at
    least one DOF atomically reserves contiguous slots in the awake_entities /
    awake_dofs / awake_links arrays and fills them in. Because slots are claimed
    with atomic_add, the ordering of entries within the awake lists is
    non-deterministic when the loop runs in parallel.
    """
    n_entities = entities_state.hibernated.shape[0]
    _B = entities_state.hibernated.shape[1]
    # Reset counts once per batch (not per entity!)
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_b in range(_B):
        rigid_global_info.n_awake_entities[i_b] = 0
        rigid_global_info.n_awake_links[i_b] = 0
        rigid_global_info.n_awake_dofs[i_b] = 0
    # Count awake entities
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_e, i_b in qd.ndrange(n_entities, _B):
        # Skip hibernated entities and fixed entities that have no DOFs at all.
        if entities_state.hibernated[i_e, i_b] or entities_info.n_dofs[i_e] == 0:
            continue
        next_awake_entity_idx = qd.atomic_add(rigid_global_info.n_awake_entities[i_b], 1)
        rigid_global_info.awake_entities[next_awake_entity_idx, i_b] = i_e
        # Reserve a contiguous block for this entity's DOF indices.
        n_dofs = entities_info.n_dofs[i_e]
        entity_dofs_base_idx: qd.int32 = entities_info.dof_start[i_e]
        awake_dofs_base_idx = qd.atomic_add(rigid_global_info.n_awake_dofs[i_b], n_dofs)
        for i_d_ in range(n_dofs):
            rigid_global_info.awake_dofs[awake_dofs_base_idx + i_d_, i_b] = entity_dofs_base_idx + i_d_
        # Same for this entity's link indices.
        n_links = entities_info.n_links[i_e]
        entity_links_base_idx: qd.int32 = entities_info.link_start[i_e]
        awake_links_base_idx = qd.atomic_add(rigid_global_info.n_awake_links[i_b], n_links)
        for i_l_ in range(n_links):
            rigid_global_info.awake_links[awake_links_base_idx + i_l_, i_b] = entity_links_base_idx + i_l_
@qd.func
def func_hibernate_entity_and_zero_dof_velocities(
    i_e: int,
    i_b: int,
    entities_state: array_class.EntitiesState,
    entities_info: array_class.EntitiesInfo,
    dofs_state: array_class.DofsState,
    links_state: array_class.LinksState,
    geoms_state: array_class.GeomsState,
):
    """
    Mark a RigidEntity, its individual DOFs in DofsState, its RigidLinks, and its
    RigidGeoms as hibernated. Also, zero out DOF velocities and accelerations.

    Parameters
    ----------
    i_e : int
        Entity index.
    i_b : int
        Batch (environment) index.
    """
    entities_state.hibernated[i_e, i_b] = True
    for i_d in range(entities_info.dof_start[i_e], entities_info.dof_end[i_e]):
        dofs_state.hibernated[i_d, i_b] = True
        # Zeroing vel/acc lets later hibernation checks skip these DOFs safely.
        dofs_state.vel[i_d, i_b] = 0.0
        dofs_state.acc[i_d, i_b] = 0.0
    for i_l in range(entities_info.link_start[i_e], entities_info.link_end[i_e]):
        links_state.hibernated[i_l, i_b] = True
    for i_g in range(entities_info.geom_start[i_e], entities_info.geom_end[i_e]):
        geoms_state.hibernated[i_g, i_b] = True
@qd.func
def func_update_cartesian_space_entity(
    i_e,
    i_b,
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    geoms_info: array_class.GeomsInfo,
    geoms_state: array_class.GeomsState,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    force_update_fixed_geoms: qd.template(),
    is_backward: qd.template(),
):
    """
    Refresh all Cartesian-space quantities of entity ``i_e`` in environment ``i_b``.

    The three steps run in a fixed order because each consumes the previous
    one's output: forward kinematics (link poses from generalized coordinates),
    then COM-related link quantities, then geom world poses.
    """
    # 1. Link/joint poses from the current generalized coordinates.
    func_forward_kinematics_entity(
        i_e,
        i_b,
        links_state=links_state,
        links_info=links_info,
        joints_state=joints_state,
        joints_info=joints_info,
        dofs_state=dofs_state,
        dofs_info=dofs_info,
        entities_info=entities_info,
        rigid_global_info=rigid_global_info,
        static_rigid_sim_config=static_rigid_sim_config,
        is_backward=is_backward,
    )
    # 2. COM-related link quantities (depends on the link poses just computed).
    func_COM_links_entity(
        i_e,
        i_b,
        links_state=links_state,
        links_info=links_info,
        joints_state=joints_state,
        joints_info=joints_info,
        dofs_state=dofs_state,
        dofs_info=dofs_info,
        entities_info=entities_info,
        rigid_global_info=rigid_global_info,
        static_rigid_sim_config=static_rigid_sim_config,
        is_backward=is_backward,
    )
    # 3. Geom world poses from the updated link states.
    func_update_geoms_entity(
        i_e,
        i_b,
        entities_info=entities_info,
        geoms_info=geoms_info,
        geoms_state=geoms_state,
        links_state=links_state,
        rigid_global_info=rigid_global_info,
        static_rigid_sim_config=static_rigid_sim_config,
        force_update_fixed_geoms=force_update_fixed_geoms,
        is_backward=is_backward,
    )
@qd.func
def func_update_cartesian_space_batch(
    i_b,
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    geoms_info: array_class.GeomsInfo,
    geoms_state: array_class.GeomsState,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    force_update_fixed_geoms: qd.template(),
    is_backward: qd.template(),
):
    """
    Update Cartesian-space quantities for all entities of one environment.

    The forward pass iterates with dynamic loop bounds; the backward pass must
    use statically-bounded loops (required for differentiability), with
    func_check_index_range masking out-of-range iterations.
    """
    BW = qd.static(is_backward)
    # This loop is considered an inner loop
    qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL))
    for i_0 in (
        (
            # Dynamic inner loop for forward pass
            range(rigid_global_info.n_awake_entities[i_b])
            if qd.static(static_rigid_sim_config.use_hibernation)
            else range(entities_info.n_links.shape[0])
        )
        if qd.static(not BW)
        else (
            qd.static(range(static_rigid_sim_config.max_n_awake_entities))  # Static inner loop for backward pass
            if qd.static(static_rigid_sim_config.use_hibernation)
            # NOTE(review): this entity-loop bound is max_n_links_per_entity, not an
            # entity count — confirm this is the intended static bound.
            else qd.static(range(static_rigid_sim_config.max_n_links_per_entity))
        )
    ):
        # NOTE(review): the guard reads n_awake_entities even when hibernation is
        # disabled — presumably it is kept valid in that mode too; verify.
        if func_check_index_range(i_0, 0, rigid_global_info.n_awake_entities[i_b], BW):
            # With hibernation, i_0 indexes the compacted awake list; otherwise it
            # is the entity index directly.
            i_e = (
                rigid_global_info.awake_entities[i_0, i_b]
                if qd.static(static_rigid_sim_config.use_hibernation)
                else i_0
            )
            func_update_cartesian_space_entity(
                i_e,
                i_b,
                links_state,
                links_info,
                joints_state,
                joints_info,
                dofs_state,
                dofs_info,
                geoms_info,
                geoms_state,
                entities_info,
                rigid_global_info,
                static_rigid_sim_config,
                force_update_fixed_geoms,
                is_backward,
            )
@qd.func
def func_update_cartesian_space(
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    geoms_info: array_class.GeomsInfo,
    geoms_state: array_class.GeomsState,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    force_update_fixed_geoms: qd.template(),
    is_backward: qd.template(),
):
    """
    Update Cartesian-space quantities (link poses, COM quantities, geom poses)
    for all entities across all environments.

    With hibernation enabled the work is delegated per batch to
    func_update_cartesian_space_batch; otherwise entities are grouped by the
    root link of their kinematic tree and each tree is processed sequentially.
    """
    BW = qd.static(is_backward)
    # This loop must be the outermost loop to be differentiable
    if qd.static(static_rigid_sim_config.use_hibernation):
        # NOTE(review): unlike the other loop_config call sites in this module,
        # serialize= is not wrapped in qd.static() here — confirm intentional.
        qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
        for i_b in range(links_state.pos.shape[1]):
            func_update_cartesian_space_batch(
                i_b,
                links_state,
                links_info,
                joints_state,
                joints_info,
                dofs_state,
                dofs_info,
                geoms_info,
                geoms_state,
                entities_info,
                rigid_global_info,
                static_rigid_sim_config,
                force_update_fixed_geoms,
                is_backward,
            )
    else:
        # FIXME: Implement parallelization at tree-level (based on root_idx) instead of entity-level
        qd.loop_config(serialize=qd.static(static_rigid_sim_config.para_level < gs.PARA_LEVEL.PARTIAL))
        for i_e, i_b in qd.ndrange(entities_info.n_links.shape[0], links_state.pos.shape[1]):
            # Only the entity whose first link is the root of its kinematic tree
            # triggers the update; it then processes every subsequent entity
            # sharing that root, i.e. the whole tree, sequentially.
            i_l_start = entities_info.link_start[i_e]
            I_l_start = [i_l_start, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l_start
            if links_info.root_idx[I_l_start] == i_l_start:
                # Backward pass needs a static bound; forward pass can stop at n_entities.
                for j_e in (
                    range(i_e, entities_info.n_links.shape[0])
                    if qd.static(not BW)
                    else qd.static(range(static_rigid_sim_config.n_entities))
                ):
                    if func_check_index_range(j_e, i_e, static_rigid_sim_config.n_entities, BW):
                        j_l_start = entities_info.link_start[j_e]
                        J_l_start = (
                            [j_l_start, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else j_l_start
                        )
                        if links_info.root_idx[J_l_start] == i_l_start:
                            func_update_cartesian_space_entity(
                                j_e,
                                i_b,
                                links_state,
                                links_info,
                                joints_state,
                                joints_info,
                                dofs_state,
                                dofs_info,
                                geoms_info,
                                geoms_state,
                                entities_info,
                                rigid_global_info,
                                static_rigid_sim_config,
                                force_update_fixed_geoms,
                                is_backward,
                            )
@qd.kernel(fastcache=gs.use_fastcache)
def kernel_update_cartesian_space(
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    geoms_info: array_class.GeomsInfo,
    geoms_state: array_class.GeomsState,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    force_update_fixed_geoms: qd.template(),
    is_backward: qd.template(),
):
    """Kernel entry point: forward all arguments to func_update_cartesian_space."""
    func_update_cartesian_space(
        links_state,
        links_info,
        joints_state,
        joints_info,
        dofs_state,
        dofs_info,
        geoms_info,
        geoms_state,
        entities_info,
        rigid_global_info,
        static_rigid_sim_config,
        force_update_fixed_geoms,
        is_backward,
    )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/abd/forward_kinematics.py",
"license": "Apache License 2.0",
"lines": 1541,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/abd/inverse_kinematics.py | """
Inverse kinematics for rigid body entities.
This module contains the inverse kinematics kernel for computing joint configurations
that achieve desired end-effector poses.
"""
import quadrants as qd
import genesis as gs
import genesis.utils.geom as gu
import genesis.utils.linalg as lu
import genesis.utils.array_class as array_class
# FIXME: RigidEntity is not compatible with fast cache
@qd.kernel(fastcache=False)
def kernel_rigid_entity_inverse_kinematics(
    rigid_entity: qd.template(),
    links_idx: qd.types.ndarray(),
    poss: qd.types.ndarray(),
    quats: qd.types.ndarray(),
    local_points: qd.types.ndarray(),
    n_links: qd.i32,
    dofs_idx: qd.types.ndarray(),
    n_dofs: qd.i32,
    links_idx_by_dofs: qd.types.ndarray(),
    n_links_by_dofs: qd.i32,
    custom_init_qpos: qd.i32,
    init_qpos: qd.types.ndarray(),
    max_samples: qd.i32,
    max_solver_iters: qd.i32,
    damping: qd.f32,
    pos_tol: qd.f32,
    rot_tol: qd.f32,
    pos_mask_: qd.types.ndarray(),
    rot_mask_: qd.types.ndarray(),
    link_pos_mask: qd.types.ndarray(),
    link_rot_mask: qd.types.ndarray(),
    max_step_size: qd.f32,
    respect_joint_limit: qd.i32,
    envs_idx: qd.types.ndarray(),
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    joints_state: array_class.JointsState,
    joints_info: array_class.JointsInfo,
    dofs_state: array_class.DofsState,
    dofs_info: array_class.DofsInfo,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
):
    """
    Damped-least-squares inverse kinematics with random restarts.

    For each selected environment, iterates dq = J^T (J J^T + damping^2 I)^-1 err
    for up to ``max_solver_iters`` iterations per attempt and up to
    ``max_samples`` attempts. When ``respect_joint_limit`` is set, each failed
    attempt restarts from a configuration randomly sampled within the joint
    limits. The best qpos found is written to ``rigid_entity._IK_qpos_best``
    (its error to ``_IK_err_pose_best``); the entity's original qpos and link
    states are restored before the kernel returns.
    """
    EPS = rigid_global_info.EPS[None]
    # convert to qd Vector
    pos_mask = qd.Vector([pos_mask_[0], pos_mask_[1], pos_mask_[2]], dt=gs.qd_float)
    rot_mask = qd.Vector([rot_mask_[0], rot_mask_[1], rot_mask_[2]], dt=gs.qd_float)
    # 6 error dimensions (3 position + 3 rotation) per target link.
    n_error_dims = 6 * n_links
    for i_b_ in range(envs_idx.shape[0]):
        i_b = envs_idx[i_b_]
        # save original qpos
        for i_q in range(rigid_entity.n_qs):
            rigid_entity._IK_qpos_orig[i_q, i_b] = rigid_global_info.qpos[i_q + rigid_entity._q_start, i_b]
        if custom_init_qpos:
            for i_q in range(rigid_entity.n_qs):
                rigid_global_info.qpos[i_q + rigid_entity._q_start, i_b] = init_qpos[i_b_, i_q]
        # Initialize the best-error record to a large sentinel value.
        for i_error in range(n_error_dims):
            rigid_entity._IK_err_pose_best[i_error, i_b] = 1e4
        solved = False
        for i_sample in range(max_samples):
            for _ in range(max_solver_iters):
                # run FK to update link states using current q
                gs.engine.solvers.rigid.rigid_solver.func_forward_kinematics_entity(
                    rigid_entity._idx_in_solver,
                    i_b,
                    links_state,
                    links_info,
                    joints_state,
                    joints_info,
                    dofs_state,
                    dofs_info,
                    entities_info,
                    rigid_global_info,
                    static_rigid_sim_config,
                    is_backward=False,
                )
                # compute error
                solved = True
                for i_ee in range(n_links):
                    i_l_ee = links_idx[i_ee]
                    tgt_pos_i = qd.Vector([poss[i_ee, i_b_, 0], poss[i_ee, i_b_, 1], poss[i_ee, i_b_, 2]])
                    local_point_i = qd.Vector([local_points[i_ee, 0], local_points[i_ee, 1], local_points[i_ee, 2]])
                    # Current world position of the attachment point on the link.
                    pos_curr_i = links_state.pos[i_l_ee, i_b] + gu.qd_transform_by_quat(
                        local_point_i, links_state.quat[i_l_ee, i_b]
                    )
                    err_pos_i = tgt_pos_i - pos_curr_i
                    for k in range(3):
                        err_pos_i[k] *= pos_mask[k] * link_pos_mask[i_ee]
                    if err_pos_i.norm() > pos_tol:
                        solved = False
                    tgt_quat_i = qd.Vector(
                        [quats[i_ee, i_b_, 0], quats[i_ee, i_b_, 1], quats[i_ee, i_b_, 2], quats[i_ee, i_b_, 3]]
                    )
                    # Rotation error as the rotation vector of the relative quaternion.
                    err_rot_i = gu.qd_quat_to_rotvec(
                        gu.qd_transform_quat_by_quat(gu.qd_inv_quat(links_state.quat[i_l_ee, i_b]), tgt_quat_i), EPS
                    )
                    for k in range(3):
                        err_rot_i[k] *= rot_mask[k] * link_rot_mask[i_ee]
                    if err_rot_i.norm() > rot_tol:
                        solved = False
                    # put into multi-link error array
                    for k in range(3):
                        rigid_entity._IK_err_pose[i_ee * 6 + k, i_b] = err_pos_i[k]
                        rigid_entity._IK_err_pose[i_ee * 6 + k + 3, i_b] = err_rot_i[k]
                if solved:
                    break
                # compute multi-link jacobian
                for i_ee in range(n_links):
                    # update jacobian for ee link
                    i_l_ee = links_idx[i_ee]
                    local_point_i = qd.Vector([local_points[i_ee, 0], local_points[i_ee, 1], local_points[i_ee, 2]])
                    rigid_entity._func_get_jacobian(
                        tgt_link_idx=i_l_ee,
                        i_b=i_b,
                        p_local=local_point_i,
                        pos_mask=pos_mask,
                        rot_mask=rot_mask,
                        dofs_info=dofs_info,
                        joints_info=joints_info,
                        links_info=links_info,
                        links_state=links_state,
                    )  # NOTE: we still compute jacobian for all dofs as we haven't found a clean way to implement this
                    # copy to multi-link jacobian (only for the effective n_dofs instead of self.n_dofs)
                    for i_dof in range(n_dofs):
                        for i_error in qd.static(range(6)):
                            i_row = i_ee * 6 + i_error
                            i_dof_ = dofs_idx[i_dof]
                            rigid_entity._IK_jacobian[i_row, i_dof, i_b] = rigid_entity._jacobian[i_error, i_dof_, i_b]
                # compute dq = jac.T @ inverse(jac @ jac.T + diag) @ error (only for the effective n_dofs instead of self.n_dofs)
                lu.mat_transpose(rigid_entity._IK_jacobian, rigid_entity._IK_jacobian_T, n_error_dims, n_dofs, i_b)
                lu.mat_mul(
                    rigid_entity._IK_jacobian,
                    rigid_entity._IK_jacobian_T,
                    rigid_entity._IK_mat,
                    n_error_dims,
                    n_dofs,
                    n_error_dims,
                    i_b,
                )
                # Damping term on the diagonal regularizes near singularities.
                lu.mat_add_eye(rigid_entity._IK_mat, damping**2, n_error_dims, i_b)
                lu.mat_inverse(
                    rigid_entity._IK_mat,
                    rigid_entity._IK_L,
                    rigid_entity._IK_U,
                    rigid_entity._IK_y,
                    rigid_entity._IK_inv,
                    n_error_dims,
                    i_b,
                )
                lu.mat_mul_vec(
                    rigid_entity._IK_inv,
                    rigid_entity._IK_err_pose,
                    rigid_entity._IK_vec,
                    n_error_dims,
                    n_error_dims,
                    i_b,
                )
                for i_d in range(rigid_entity.n_dofs):  # IK_delta_qpos = IK_jacobian_T @ IK_vec
                    rigid_entity._IK_delta_qpos[i_d, i_b] = 0
                for i_d in range(n_dofs):
                    for j in range(n_error_dims):
                        # NOTE: IK_delta_qpos uses the original indexing instead of the effective n_dofs
                        i_d_ = dofs_idx[i_d]
                        rigid_entity._IK_delta_qpos[i_d_, i_b] += (
                            rigid_entity._IK_jacobian_T[i_d, j, i_b] * rigid_entity._IK_vec[j, i_b]
                        )
                for i_d in range(rigid_entity.n_dofs):
                    # Clamp the step so the local linearization stays valid.
                    rigid_entity._IK_delta_qpos[i_d, i_b] = qd.math.clamp(
                        rigid_entity._IK_delta_qpos[i_d, i_b], -max_step_size, max_step_size
                    )
                # update q
                gs.engine.solvers.rigid.rigid_solver.func_integrate_dq_entity(
                    rigid_entity._IK_delta_qpos,
                    rigid_entity._idx_in_solver,
                    i_b,
                    respect_joint_limit,
                    links_info,
                    joints_info,
                    dofs_info,
                    entities_info,
                    rigid_global_info,
                    static_rigid_sim_config,
                )
            if not solved:
                # re-compute final error if exited not due to solved
                gs.engine.solvers.rigid.rigid_solver.func_forward_kinematics_entity(
                    rigid_entity._idx_in_solver,
                    i_b,
                    links_state,
                    links_info,
                    joints_state,
                    joints_info,
                    dofs_state,
                    dofs_info,
                    entities_info,
                    rigid_global_info,
                    static_rigid_sim_config,
                    is_backward=False,
                )
                solved = True
                for i_ee in range(n_links):
                    i_l_ee = links_idx[i_ee]
                    tgt_pos_i = qd.Vector([poss[i_ee, i_b_, 0], poss[i_ee, i_b_, 1], poss[i_ee, i_b_, 2]])
                    local_point_i = qd.Vector([local_points[i_ee, 0], local_points[i_ee, 1], local_points[i_ee, 2]])
                    pos_curr_i = links_state.pos[i_l_ee, i_b] + gu.qd_transform_by_quat(
                        local_point_i, links_state.quat[i_l_ee, i_b]
                    )
                    err_pos_i = tgt_pos_i - pos_curr_i
                    for k in range(3):
                        err_pos_i[k] *= pos_mask[k] * link_pos_mask[i_ee]
                    if err_pos_i.norm() > pos_tol:
                        solved = False
                    tgt_quat_i = qd.Vector(
                        [quats[i_ee, i_b_, 0], quats[i_ee, i_b_, 1], quats[i_ee, i_b_, 2], quats[i_ee, i_b_, 3]]
                    )
                    err_rot_i = gu.qd_quat_to_rotvec(
                        gu.qd_transform_quat_by_quat(gu.qd_inv_quat(links_state.quat[i_l_ee, i_b]), tgt_quat_i), EPS
                    )
                    for k in range(3):
                        err_rot_i[k] *= rot_mask[k] * link_rot_mask[i_ee]
                    if err_rot_i.norm() > rot_tol:
                        solved = False
                    # put into multi-link error array
                    for k in range(3):
                        rigid_entity._IK_err_pose[i_ee * 6 + k, i_b] = err_pos_i[k]
                        rigid_entity._IK_err_pose[i_ee * 6 + k + 3, i_b] = err_rot_i[k]
            if solved:
                # Record the solution and stop sampling.
                for i_q in range(rigid_entity.n_qs):
                    rigid_entity._IK_qpos_best[i_q, i_b] = rigid_global_info.qpos[i_q + rigid_entity._q_start, i_b]
                for i_error in range(n_error_dims):
                    rigid_entity._IK_err_pose_best[i_error, i_b] = rigid_entity._IK_err_pose[i_error, i_b]
                break
            else:
                # copy to _IK_qpos if this sample is better
                improved = True
                for i_ee in range(n_links):
                    error_pos_i = qd.Vector(
                        [rigid_entity._IK_err_pose[i_ee * 6 + i_error, i_b] for i_error in range(3)]
                    )
                    error_rot_i = qd.Vector(
                        [rigid_entity._IK_err_pose[i_ee * 6 + i_error, i_b] for i_error in range(3, 6)]
                    )
                    error_pos_best = qd.Vector(
                        [rigid_entity._IK_err_pose_best[i_ee * 6 + i_error, i_b] for i_error in range(3)]
                    )
                    error_rot_best = qd.Vector(
                        [rigid_entity._IK_err_pose_best[i_ee * 6 + i_error, i_b] for i_error in range(3, 6)]
                    )
                    # "Better" requires no link to regress in either position or rotation error.
                    if error_pos_i.norm() > error_pos_best.norm() or error_rot_i.norm() > error_rot_best.norm():
                        improved = False
                        break
                if improved:
                    for i_q in range(rigid_entity.n_qs):
                        rigid_entity._IK_qpos_best[i_q, i_b] = rigid_global_info.qpos[i_q + rigid_entity._q_start, i_b]
                    for i_error in range(n_error_dims):
                        rigid_entity._IK_err_pose_best[i_error, i_b] = rigid_entity._IK_err_pose[i_error, i_b]
                # Resample init q
                if respect_joint_limit and i_sample < max_samples - 1:
                    for i_l_ in range(n_links_by_dofs):
                        i_l = links_idx_by_dofs[i_l_]
                        I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
                        for i_j in range(links_info.joint_start[I_l], links_info.joint_end[I_l]):
                            I_j = [i_j, i_b] if qd.static(static_rigid_sim_config.batch_joints_info) else i_j
                            i_d = joints_info.dof_start[I_j]
                            I_d = [i_d, i_b] if qd.static(static_rigid_sim_config.batch_dofs_info) else i_d
                            dof_limit = dofs_info.limit[I_d]
                            # Only bounded 1-DOF joints (revolute/prismatic with finite limits) are resampled.
                            if (
                                joints_info.type[I_j] == gs.JOINT_TYPE.REVOLUTE
                                or joints_info.type[I_j] == gs.JOINT_TYPE.PRISMATIC
                            ) and not (qd.math.isinf(dof_limit[0]) or qd.math.isinf(dof_limit[1])):
                                q_start = joints_info.q_start[I_j]
                                rigid_global_info.qpos[q_start, i_b] = dof_limit[0] + qd.random() * (
                                    dof_limit[1] - dof_limit[0]
                                )
                else:
                    pass  # When respect_joint_limit=False, we can simply continue from the last solution
        # restore original qpos and link state
        for i_q in range(rigid_entity.n_qs):
            rigid_global_info.qpos[i_q + rigid_entity._q_start, i_b] = rigid_entity._IK_qpos_orig[i_q, i_b]
        gs.engine.solvers.rigid.rigid_solver.func_forward_kinematics_entity(
            rigid_entity._idx_in_solver,
            i_b,
            links_state,
            links_info,
            joints_state,
            joints_info,
            dofs_state,
            dofs_info,
            entities_info,
            rigid_global_info,
            static_rigid_sim_config,
            is_backward=False,
        )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/abd/inverse_kinematics.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/collider/broadphase.py | """
Broad-phase collision detection functions.
This module contains AABB operations, sweep-and-prune algorithms,
and collision pair validation for the rigid body collider.
"""
import quadrants as qd
import genesis as gs
import genesis.utils.array_class as array_class
from .utils import (
func_is_geom_aabbs_overlap,
)
@qd.func
def func_find_intersect_midpoint(
    i_ga,
    i_gb,
    i_b,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
):
    """Return the center of the AABB obtained by intersecting the AABBs of the two geoms."""
    lo = qd.max(geoms_state.aabb_min[i_ga, i_b], geoms_state.aabb_min[i_gb, i_b])
    hi = qd.min(geoms_state.aabb_max[i_ga, i_b], geoms_state.aabb_max[i_gb, i_b])
    return (lo + hi) * 0.5
@qd.func
def func_check_collision_valid(
    i_ga,
    i_gb,
    i_b,
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    geoms_info: array_class.GeomsInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    constraint_state: array_class.ConstraintState,
    equalities_info: array_class.EqualitiesInfo,
    collider_info: array_class.ColliderInfo,
):
    """
    Return whether geoms (i_ga, i_gb) form a valid collision pair in env i_b.

    A pair is rejected when:
    - it is not registered in collider_info.collision_pair_idx (value -1);
    - its links are welded by a dynamically registered WELD equality constraint
      (only equalities beyond the static count n_equalities are checked);
    - hibernation is enabled and one link is hibernated while the other is fixed.
    """
    is_valid = collider_info.collision_pair_idx[i_ga, i_gb] != -1
    if is_valid:
        i_la = geoms_info.link_idx[i_ga]
        i_lb = geoms_info.link_idx[i_gb]
        # Filter out collision pairs that are involved in dynamically registered weld equality constraints
        for i_eq in range(rigid_global_info.n_equalities[None], constraint_state.qd_n_equalities[i_b]):
            if equalities_info.eq_type[i_eq, i_b] == gs.EQUALITY_TYPE.WELD:
                i_leqa = equalities_info.eq_obj1id[i_eq, i_b]
                i_leqb = equalities_info.eq_obj2id[i_eq, i_b]
                # The weld may list the links in either order.
                if (i_leqa == i_la and i_leqb == i_lb) or (i_leqa == i_lb and i_leqb == i_la):
                    is_valid = False
        # hibernated <-> fixed links
        if qd.static(static_rigid_sim_config.use_hibernation):
            I_la = [i_la, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_la
            I_lb = [i_lb, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_lb
            if (links_state.hibernated[i_la, i_b] and links_info.is_fixed[I_lb]) or (
                links_state.hibernated[i_lb, i_b] and links_info.is_fixed[I_la]
            ):
                is_valid = False
    return is_valid
@qd.func
def func_collision_clear(
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    collider_state: array_class.ColliderState,
    static_rigid_sim_config: qd.template(),
):
    """
    Reset per-environment contact buffers before a new collision pass.

    Without hibernation all contacts are cleared. With hibernation, contacts
    between a hibernated link and a fixed link are first compacted ("advected")
    to the front of the contact buffer and preserved; only the remaining slots
    are cleared, and n_contacts is reset to the number of preserved contacts.
    """
    _B = collider_state.n_contacts.shape[0]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_b in range(_B):
        if qd.static(static_rigid_sim_config.use_hibernation):
            collider_state.n_contacts_hibernated[i_b] = 0
            # Advect hibernated contacts
            for i_c in range(collider_state.n_contacts[i_b]):
                i_la = collider_state.contact_data.link_a[i_c, i_b]
                i_lb = collider_state.contact_data.link_b[i_c, i_b]
                I_la = [i_la, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_la
                I_lb = [i_lb, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_lb
                # Pair of hibernated-fixed links -> hibernated contact
                # TODO: we should also include hibernated-hibernated links and wake up the whole contact island
                # once a new collision is detected
                if (links_state.hibernated[i_la, i_b] and links_info.is_fixed[I_lb]) or (
                    links_state.hibernated[i_lb, i_b] and links_info.is_fixed[I_la]
                ):
                    i_c_hibernated = collider_state.n_contacts_hibernated[i_b]
                    # Compact the surviving contact into the next front slot (no-op if already there).
                    if i_c != i_c_hibernated:
                        # Copying all fields of class StructContactData individually
                        # (fields mode doesn't support struct-level copy operations):
                        # fmt: off
                        collider_state.contact_data.geom_a[i_c_hibernated, i_b] = collider_state.contact_data.geom_a[i_c, i_b]
                        collider_state.contact_data.geom_b[i_c_hibernated, i_b] = collider_state.contact_data.geom_b[i_c, i_b]
                        collider_state.contact_data.penetration[i_c_hibernated, i_b] = collider_state.contact_data.penetration[i_c, i_b]
                        collider_state.contact_data.normal[i_c_hibernated, i_b] = collider_state.contact_data.normal[i_c, i_b]
                        collider_state.contact_data.pos[i_c_hibernated, i_b] = collider_state.contact_data.pos[i_c, i_b]
                        collider_state.contact_data.friction[i_c_hibernated, i_b] = collider_state.contact_data.friction[i_c, i_b]
                        collider_state.contact_data.sol_params[i_c_hibernated, i_b] = collider_state.contact_data.sol_params[i_c, i_b]
                        collider_state.contact_data.force[i_c_hibernated, i_b] = collider_state.contact_data.force[i_c, i_b]
                        collider_state.contact_data.link_a[i_c_hibernated, i_b] = collider_state.contact_data.link_a[i_c, i_b]
                        collider_state.contact_data.link_b[i_c_hibernated, i_b] = collider_state.contact_data.link_b[i_c, i_b]
                        # fmt: on
                    collider_state.n_contacts_hibernated[i_b] = i_c_hibernated + 1
        # Clear contacts: when hibernation is enabled, only clear non-hibernated contacts.
        # The hibernated contacts (positions 0 to n_contacts_hibernated-1) were just advected and should be preserved.
        for i_c in range(collider_state.n_contacts[i_b]):
            should_clear = True
            if qd.static(static_rigid_sim_config.use_hibernation):
                # Only clear if this is not a hibernated contact
                should_clear = i_c >= collider_state.n_contacts_hibernated[i_b]
            if should_clear:
                collider_state.contact_data.link_a[i_c, i_b] = -1
                collider_state.contact_data.link_b[i_c, i_b] = -1
                collider_state.contact_data.geom_a[i_c, i_b] = -1
                collider_state.contact_data.geom_b[i_c, i_b] = -1
                collider_state.contact_data.penetration[i_c, i_b] = 0.0
                collider_state.contact_data.pos[i_c, i_b] = qd.Vector.zero(gs.qd_float, 3)
                collider_state.contact_data.normal[i_c, i_b] = qd.Vector.zero(gs.qd_float, 3)
                collider_state.contact_data.force[i_c, i_b] = qd.Vector.zero(gs.qd_float, 3)
        if qd.static(static_rigid_sim_config.use_hibernation):
            collider_state.n_contacts[i_b] = collider_state.n_contacts_hibernated[i_b]
        else:
            collider_state.n_contacts[i_b] = 0
@qd.kernel(fastcache=gs.use_fastcache)
def func_broad_phase(
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    constraint_state: array_class.ConstraintState,
    collider_state: array_class.ColliderState,
    equalities_info: array_class.EqualitiesInfo,
    collider_info: array_class.ColliderInfo,
    errno: array_class.V_ANNOTATION,
):
    """
    Sweep and Prune (SAP) for broad-phase collision detection.

    This function sorts the geometry axis-aligned bounding boxes (AABBs) along a specified axis and checks for
    potential collision pairs based on the AABB overlap.

    Candidate pairs are appended to ``collider_state.broad_collision_pairs`` and their per-environment count is
    written to ``collider_state.n_broad_pairs``. When the candidate buffer capacity
    (``collider_info.max_collision_pairs_broad``) is exhausted, the OVERFLOW_CANDIDATE_CONTACTS flag is raised
    in ``errno`` and further candidates are dropped.
    """
    n_geoms, _B = collider_state.active_buffer.shape
    n_links = links_info.geom_start.shape[0]

    # Clear collider state
    func_collision_clear(links_state, links_info, collider_state, static_rigid_sim_config)

    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_b in range(_B):
        # Sweep axis (x). The sort buffer stores min/max AABB bounds along this axis.
        axis = 0

        # Calculate the number of active geoms for this environment
        # (for heterogeneous entities, different envs may have different geoms)
        env_n_geoms = 0
        for i_l in range(n_links):
            I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
            env_n_geoms = env_n_geoms + links_info.geom_end[I_l] - links_info.geom_start[I_l]

        # copy updated geom aabbs to buffer for sorting
        if collider_state.first_time[i_b]:
            i_buffer = 0
            for i_l in range(n_links):
                I_l = [i_l, i_b] if qd.static(static_rigid_sim_config.batch_links_info) else i_l
                for i_g in range(links_info.geom_start[I_l], links_info.geom_end[I_l]):
                    collider_state.sort_buffer.value[2 * i_buffer, i_b] = geoms_state.aabb_min[i_g, i_b][axis]
                    collider_state.sort_buffer.i_g[2 * i_buffer, i_b] = i_g
                    collider_state.sort_buffer.is_max[2 * i_buffer, i_b] = False
                    collider_state.sort_buffer.value[2 * i_buffer + 1, i_b] = geoms_state.aabb_max[i_g, i_b][axis]
                    collider_state.sort_buffer.i_g[2 * i_buffer + 1, i_b] = i_g
                    collider_state.sort_buffer.is_max[2 * i_buffer + 1, i_b] = True
                    # NOTE(review): the insertion-sort updates below write
                    # `min/max_buffer_idx[geom_index] = buffer_position`, whereas these two lines write
                    # `min/max_buffer_idx[buffer_position] = 2 * geom_index`. Both agree only when
                    # i_buffer == i_g; for heterogeneous scenes this looks transposed — confirm intended.
                    geoms_state.min_buffer_idx[i_buffer, i_b] = 2 * i_g
                    geoms_state.max_buffer_idx[i_buffer, i_b] = 2 * i_g + 1
                    i_buffer = i_buffer + 1
            collider_state.first_time[i_b] = False
        else:
            # warm start. If `use_hibernation=True`, it's already updated in rigid_solver.
            if qd.static(not static_rigid_sim_config.use_hibernation):
                for i in range(env_n_geoms * 2):
                    if collider_state.sort_buffer.is_max[i, i_b]:
                        collider_state.sort_buffer.value[i, i_b] = geoms_state.aabb_max[
                            collider_state.sort_buffer.i_g[i, i_b], i_b
                        ][axis]
                    else:
                        collider_state.sort_buffer.value[i, i_b] = geoms_state.aabb_min[
                            collider_state.sort_buffer.i_g[i, i_b], i_b
                        ][axis]

        # insertion sort, which has complexity near O(n) for nearly sorted array
        for i in range(1, 2 * env_n_geoms):
            key_value = collider_state.sort_buffer.value[i, i_b]
            key_is_max = collider_state.sort_buffer.is_max[i, i_b]
            key_i_g = collider_state.sort_buffer.i_g[i, i_b]

            j = i - 1
            while j >= 0 and key_value < collider_state.sort_buffer.value[j, i_b]:
                collider_state.sort_buffer.value[j + 1, i_b] = collider_state.sort_buffer.value[j, i_b]
                collider_state.sort_buffer.is_max[j + 1, i_b] = collider_state.sort_buffer.is_max[j, i_b]
                collider_state.sort_buffer.i_g[j + 1, i_b] = collider_state.sort_buffer.i_g[j, i_b]

                if qd.static(static_rigid_sim_config.use_hibernation):
                    # Keep the per-geom buffer-position lookups in sync while shifting entries.
                    if collider_state.sort_buffer.is_max[j, i_b]:
                        geoms_state.max_buffer_idx[collider_state.sort_buffer.i_g[j, i_b], i_b] = j + 1
                    else:
                        geoms_state.min_buffer_idx[collider_state.sort_buffer.i_g[j, i_b], i_b] = j + 1

                j -= 1
            collider_state.sort_buffer.value[j + 1, i_b] = key_value
            collider_state.sort_buffer.is_max[j + 1, i_b] = key_is_max
            collider_state.sort_buffer.i_g[j + 1, i_b] = key_i_g

            if qd.static(static_rigid_sim_config.use_hibernation):
                if key_is_max:
                    geoms_state.max_buffer_idx[key_i_g, i_b] = j + 1
                else:
                    geoms_state.min_buffer_idx[key_i_g, i_b] = j + 1

        # sweep over the sorted AABBs to find potential collision pairs
        n_broad = 0
        if qd.static(not static_rigid_sim_config.use_hibernation):
            n_active = 0
            for i in range(2 * env_n_geoms):
                if not collider_state.sort_buffer.is_max[i, i_b]:
                    # A min-bound opens an interval: pair it against every currently-open interval.
                    for j in range(n_active):
                        i_ga = collider_state.active_buffer[j, i_b]
                        i_gb = collider_state.sort_buffer.i_g[i, i_b]
                        if i_ga > i_gb:
                            i_ga, i_gb = i_gb, i_ga
                        if not func_check_collision_valid(
                            i_ga,
                            i_gb,
                            i_b,
                            links_state,
                            links_info,
                            geoms_info,
                            rigid_global_info,
                            static_rigid_sim_config,
                            constraint_state,
                            equalities_info,
                            collider_info,
                        ):
                            continue
                        if not func_is_geom_aabbs_overlap(geoms_state, i_ga, i_gb, i_b):
                            # Clear collision normal cache if not in contact
                            if qd.static(not static_rigid_sim_config.enable_mujoco_compatibility):
                                i_pair = collider_info.collision_pair_idx[i_ga, i_gb]
                                collider_state.contact_cache.normal[i_pair, i_b] = qd.Vector.zero(gs.qd_float, 3)
                            continue
                        if n_broad == collider_info.max_collision_pairs_broad[None]:
                            errno[i_b] = errno[i_b] | array_class.ErrorCode.OVERFLOW_CANDIDATE_CONTACTS
                            break
                        collider_state.broad_collision_pairs[n_broad, i_b][0] = i_ga
                        collider_state.broad_collision_pairs[n_broad, i_b][1] = i_gb
                        n_broad = n_broad + 1
                    collider_state.active_buffer[n_active, i_b] = collider_state.sort_buffer.i_g[i, i_b]
                    n_active = n_active + 1
                else:
                    # A max-bound closes the geom's interval: remove it from the active set.
                    i_g_to_remove = collider_state.sort_buffer.i_g[i, i_b]
                    for j in range(n_active):
                        if collider_state.active_buffer[j, i_b] == i_g_to_remove:
                            if j < n_active - 1:
                                for k in range(j, n_active - 1):
                                    collider_state.active_buffer[k, i_b] = collider_state.active_buffer[k + 1, i_b]
                            n_active = n_active - 1
                            break
        else:
            if rigid_global_info.n_awake_dofs[i_b] > 0:
                # Two active sets: awake geoms check against everything, hibernated geoms only
                # against awake incoming geoms (hibernated-vs-hibernated pairs cannot wake up).
                n_active_awake = 0
                n_active_hib = 0
                for i in range(2 * env_n_geoms):
                    is_incoming_geom_hibernated = geoms_state.hibernated[collider_state.sort_buffer.i_g[i, i_b], i_b]

                    if not collider_state.sort_buffer.is_max[i, i_b]:
                        # both awake and hibernated geom check with active awake geoms
                        for j in range(n_active_awake):
                            i_ga = collider_state.active_buffer_awake[j, i_b]
                            i_gb = collider_state.sort_buffer.i_g[i, i_b]
                            if i_ga > i_gb:
                                i_ga, i_gb = i_gb, i_ga
                            if not func_check_collision_valid(
                                i_ga,
                                i_gb,
                                i_b,
                                links_state,
                                links_info,
                                geoms_info,
                                rigid_global_info,
                                static_rigid_sim_config,
                                constraint_state,
                                equalities_info,
                                collider_info,
                            ):
                                continue
                            if not func_is_geom_aabbs_overlap(geoms_state, i_ga, i_gb, i_b):
                                # Clear collision normal cache if not in contact
                                if qd.static(not static_rigid_sim_config.enable_mujoco_compatibility):
                                    i_pair = collider_info.collision_pair_idx[i_ga, i_gb]
                                    collider_state.contact_cache.normal[i_pair, i_b] = qd.Vector.zero(gs.qd_float, 3)
                                continue
                            # FIX: guard against overflowing the candidate-pair buffer, mirroring the
                            # non-hibernation path. Previously this wrote past the buffer capacity.
                            if n_broad == collider_info.max_collision_pairs_broad[None]:
                                errno[i_b] = errno[i_b] | array_class.ErrorCode.OVERFLOW_CANDIDATE_CONTACTS
                                break
                            collider_state.broad_collision_pairs[n_broad, i_b][0] = i_ga
                            collider_state.broad_collision_pairs[n_broad, i_b][1] = i_gb
                            n_broad = n_broad + 1

                        # if incoming geom is awake, also need to check with hibernated geoms
                        if not is_incoming_geom_hibernated:
                            for j in range(n_active_hib):
                                i_ga = collider_state.active_buffer_hib[j, i_b]
                                i_gb = collider_state.sort_buffer.i_g[i, i_b]
                                if i_ga > i_gb:
                                    i_ga, i_gb = i_gb, i_ga
                                if not func_check_collision_valid(
                                    i_ga,
                                    i_gb,
                                    i_b,
                                    links_state,
                                    links_info,
                                    geoms_info,
                                    rigid_global_info,
                                    static_rigid_sim_config,
                                    constraint_state,
                                    equalities_info,
                                    collider_info,
                                ):
                                    continue
                                if not func_is_geom_aabbs_overlap(geoms_state, i_ga, i_gb, i_b):
                                    # Clear collision normal cache if not in contact.
                                    # FIX: apply the same mujoco-compatibility guard as the two sites above,
                                    # which was missing here (inconsistency).
                                    if qd.static(not static_rigid_sim_config.enable_mujoco_compatibility):
                                        i_pair = collider_info.collision_pair_idx[i_ga, i_gb]
                                        collider_state.contact_cache.normal[i_pair, i_b] = qd.Vector.zero(
                                            gs.qd_float, 3
                                        )
                                    continue
                                # FIX: same overflow guard as above.
                                if n_broad == collider_info.max_collision_pairs_broad[None]:
                                    errno[i_b] = errno[i_b] | array_class.ErrorCode.OVERFLOW_CANDIDATE_CONTACTS
                                    break
                                collider_state.broad_collision_pairs[n_broad, i_b][0] = i_ga
                                collider_state.broad_collision_pairs[n_broad, i_b][1] = i_gb
                                n_broad = n_broad + 1

                        if is_incoming_geom_hibernated:
                            collider_state.active_buffer_hib[n_active_hib, i_b] = collider_state.sort_buffer.i_g[i, i_b]
                            n_active_hib = n_active_hib + 1
                        else:
                            collider_state.active_buffer_awake[n_active_awake, i_b] = collider_state.sort_buffer.i_g[
                                i, i_b
                            ]
                            n_active_awake = n_active_awake + 1
                    else:
                        # Closing bound: remove the geom from whichever active set it belongs to.
                        i_g_to_remove = collider_state.sort_buffer.i_g[i, i_b]
                        if is_incoming_geom_hibernated:
                            for j in range(n_active_hib):
                                if collider_state.active_buffer_hib[j, i_b] == i_g_to_remove:
                                    if j < n_active_hib - 1:
                                        for k in range(j, n_active_hib - 1):
                                            collider_state.active_buffer_hib[k, i_b] = collider_state.active_buffer_hib[
                                                k + 1, i_b
                                            ]
                                    n_active_hib = n_active_hib - 1
                                    break
                        else:
                            for j in range(n_active_awake):
                                if collider_state.active_buffer_awake[j, i_b] == i_g_to_remove:
                                    if j < n_active_awake - 1:
                                        for k in range(j, n_active_awake - 1):
                                            collider_state.active_buffer_awake[k, i_b] = (
                                                collider_state.active_buffer_awake[k + 1, i_b]
                                            )
                                    n_active_awake = n_active_awake - 1
                                    break

        collider_state.n_broad_pairs[i_b] = n_broad
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/collider/broadphase.py",
"license": "Apache License 2.0",
"lines": 346,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/collider/collider.py | """
Collider module for rigid body collision detection.
This module provides collision detection functionality for the rigid body solver,
including broad-phase (sweep-and-prune), narrow-phase (convex-convex, SDF-based,
terrain), and contact management.
"""
from typing import TYPE_CHECKING
import numpy as np
import torch
import trimesh
import genesis as gs
import genesis.utils.array_class as array_class
import genesis.engine.solvers.rigid.rigid_solver as rigid_solver
from genesis.engine.materials.rigid import Rigid
from genesis.utils.misc import tensor_to_array, qd_to_torch, qd_to_numpy
from genesis.utils.sdf import SDF
from . import mpr
from . import gjk
from . import support_field
# Import and re-export from submodules for backward compatibility
from .broadphase import (
func_find_intersect_midpoint,
func_check_collision_valid,
func_collision_clear,
func_broad_phase,
)
from .contact import (
collider_kernel_reset,
kernel_collider_clear,
collider_kernel_get_contacts,
func_add_contact,
func_set_contact,
func_add_diff_contact_input,
func_compute_tolerance,
func_contact_orthogonals,
func_rotate_frame,
func_set_upstream_grad,
)
from . import narrowphase
from .narrowphase import (
CCD_ALGORITHM_CODE,
func_contact_sphere_sdf,
func_contact_vertex_sdf,
func_contact_edge_sdf,
func_contact_convex_convex_sdf,
func_contact_mpr_terrain,
func_add_prism_vert,
func_plane_box_contact,
func_convex_convex_contact,
func_box_box_contact,
func_narrow_phase_diff_convex_vs_convex,
func_narrow_phase_convex_specializations,
func_narrow_phase_any_vs_terrain,
func_narrow_phase_nonconvex_vs_nonterrain,
)
if TYPE_CHECKING:
from genesis.engine.solvers.rigid.rigid_solver import RigidSolver
# Whether the installed PyTorch is older than 2.8; used in `Collider.reset` to select a
# compatible tensor-indexing path when clearing the contact-normal cache.
IS_OLD_TORCH = tuple(map(int, torch.__version__.split(".")[:2])) < (2, 8)

# Voxelization pitch bounds used to detect geometry pairs already colliding in the neutral
# configuration (qpos0): absolute cap (meters) and fraction of the mesh's largest extent.
NEUTRAL_COLLISION_RES_ABS = 0.01
NEUTRAL_COLLISION_RES_REL = 0.05
class Collider:
def __init__(self, rigid_solver: "RigidSolver"):
self._solver = rigid_solver
self._mc_perturbation = 1e-3 if self._solver._enable_mujoco_compatibility else 1e-2
self._mc_tolerance = 1e-3 if self._solver._enable_mujoco_compatibility else 1e-2
self._mpr_to_gjk_overlap_ratio = 0.25
self._box_MAXCONPAIR = 16
self._diff_pos_tolerance = 1e-2
self._diff_normal_tolerance = 1e-2
self._init_static_config()
self._init_collision_fields()
self._sdf = SDF(rigid_solver)
self._mpr = mpr.MPR(rigid_solver)
self._gjk = gjk.GJK(rigid_solver)
self._support_field = support_field.SupportField(rigid_solver)
if self._collider_static_config.has_nonconvex_nonterrain:
self._sdf.activate()
if self._collider_static_config.has_convex_convex:
self._gjk.activate()
if self._collider_static_config.has_terrain or self._collider_static_config.has_convex_convex:
self._support_field.activate()
if gs.use_zerocopy:
self._contact_data: dict[str, torch.Tensor] = {}
for key, name in (
("link_a", "link_a"),
("link_b", "link_b"),
("geom_a", "geom_a"),
("geom_b", "geom_b"),
("penetration", "penetration"),
("position", "pos"),
("normal", "normal"),
("force", "force"),
):
self._contact_data[key] = qd_to_torch(
getattr(self._collider_state.contact_data, name), transpose=True, copy=False
)
# Make sure that the initial state is clean
self.clear()
def _init_static_config(self) -> None:
# Identify the convex collision detection (ccd) algorithm
if self._solver._options.use_gjk_collision:
if self._solver._enable_mujoco_compatibility:
ccd_algorithm = CCD_ALGORITHM_CODE.MJ_GJK
else:
ccd_algorithm = CCD_ALGORITHM_CODE.GJK
else:
if self._solver._enable_mujoco_compatibility:
ccd_algorithm = CCD_ALGORITHM_CODE.MJ_MPR
else:
ccd_algorithm = CCD_ALGORITHM_CODE.MPR
n_contacts_per_pair = 20 if self._solver._static_rigid_sim_config.requires_grad else 5
if (
self._solver._options.box_box_detection
and sum(geom.type == gs.GEOM_TYPE.BOX for geom in self._solver.geoms) > 1
):
n_contacts_per_pair = max(n_contacts_per_pair, self._box_MAXCONPAIR)
# Determine which combination of collision detection algorithms must be enabled
self._n_possible_pairs, self._collision_pair_idx = self._compute_collision_pair_idx()
has_any_vs_terrain = False
has_convex_vs_convex = False
has_convex_specialization = False
has_nonconvex_vs_nonterrain = False
for i_ga in range(self._solver.n_geoms):
for i_gb in range(i_ga + 1, self._solver.n_geoms):
if self._collision_pair_idx[i_ga, i_gb] == -1:
continue
geom_a, geom_b = self._solver.geoms[i_ga], self._solver.geoms[i_gb]
if geom_a.type == gs.GEOM_TYPE.TERRAIN or geom_b.type == gs.GEOM_TYPE.TERRAIN:
has_any_vs_terrain = True
if geom_a.is_convex and geom_b.is_convex:
has_convex_vs_convex = True
if self._solver._options.box_box_detection:
if geom_a.type in (gs.GEOM_TYPE.TERRAIN, gs.GEOM_TYPE.BOX) or geom_b.type in (
gs.GEOM_TYPE.TERRAIN,
gs.GEOM_TYPE.BOX,
):
has_convex_specialization = True
elif (geom_a.type == gs.GEOM_TYPE.BOX and geom_b.type == gs.GEOM_TYPE.PLANE) or (
geom_a.type == gs.GEOM_TYPE.PLANE and geom_b.type == gs.GEOM_TYPE.BOX
):
has_convex_specialization = True
if (
not (geom_a.is_convex and geom_b.is_convex)
and geom_a.type != gs.GEOM_TYPE.TERRAIN
and geom_b.type != gs.GEOM_TYPE.TERRAIN
):
has_nonconvex_vs_nonterrain = True
# Initialize the static config, which stores every data that are compile-time constants.
# Note that updating any of them will trigger recompilation.
self._collider_static_config = array_class.StructColliderStaticConfig(
has_terrain=has_any_vs_terrain,
has_convex_convex=has_convex_vs_convex,
has_convex_specialization=has_convex_specialization,
has_nonconvex_nonterrain=has_nonconvex_vs_nonterrain,
n_contacts_per_pair=n_contacts_per_pair,
ccd_algorithm=ccd_algorithm,
)
def _init_collision_fields(self) -> None:
# Pre-compute fields, as they are needed to initialize the collider state and info.
vert_neighbors, vert_neighbor_start, vert_n_neighbors = self._compute_verts_connectivity()
n_vert_neighbors = len(vert_neighbors)
# Initialize [info], which stores every data that must be considered mutable from Quadrants's perspective,
# i.e. unknown at compile time, but IMMUTABLE from Genesis scene's perspective after build.
self._collider_info = array_class.get_collider_info(
self._solver,
n_vert_neighbors,
self._collider_static_config,
mc_perturbation=self._mc_perturbation,
mc_tolerance=self._mc_tolerance,
mpr_to_gjk_overlap_ratio=self._mpr_to_gjk_overlap_ratio,
diff_pos_tolerance=self._diff_pos_tolerance,
diff_normal_tolerance=self._diff_normal_tolerance,
)
self._init_collision_pair_idx(self._collision_pair_idx)
self._init_verts_connectivity(vert_neighbors, vert_neighbor_start, vert_n_neighbors)
self._init_max_contact_pairs(self._n_possible_pairs)
self._init_terrain_state()
# Initialize [state], which stores every data that are may be updated at every single simulation step
n_possible_pairs_ = max(self._n_possible_pairs, 1)
self._collider_state = array_class.get_collider_state(
self._solver,
self._solver._static_rigid_sim_config,
n_possible_pairs_,
self._solver._options.multiplier_collision_broad_phase,
self._collider_info,
self._collider_static_config,
)
# 'contact_data_cache' is not used in Quadrants kernels, so keep it outside of the collider state / info
self._contact_data_cache: dict[tuple[bool, bool], dict[str, torch.Tensor | tuple[torch.Tensor]]] = {}
self.reset()
def _compute_collision_pair_idx(self):
"""
Compute flat indices of all valid collision pairs.
For each pair of geoms, determine if they can collide based on their properties and the solver configuration.
Pairs that are already colliding at the initial configuration (qpos0) are filtered out with a warning.
"""
# Links whose contact is handled by an external solver (e.g. IPC) — exclude from GJK collision.
# Only applies when the IPC coupler is active. Mirrors the link filtering logic in
# IPCCoupler._add_rigid_geoms_to_ipc: for two_way_soft_constraint with a link filter,
# only the filtered links are in IPC; for all other coupling modes, all links are in IPC.
from genesis.engine.couplers import IPCCoupler
# Links delegated to IPC coupler (skip pair only when BOTH are IPC-handled)
ipc_delegated_links = set()
ipc_only_links = set()
if isinstance(self._solver.sim.coupler, IPCCoupler):
for entity in self._solver._entities:
if not entity.material.needs_coup:
continue
mode = entity.material.coup_type
if mode is None:
continue
if mode == "ipc_only":
ipc_only_links.update(entity.links)
link_filter_names = entity.material.coup_links
if mode == "two_way_soft_constraint" and link_filter_names is not None:
for name in link_filter_names:
ipc_delegated_links.add(entity.get_link(name=name))
else:
ipc_delegated_links.update(entity.links)
# Compute vertices all geometries, shrunk by 0.1% to avoid false positive when detecting self-collision
geoms_verts: list[np.ndarray] = []
for geom in self._solver.geoms:
verts = tensor_to_array(geom.get_verts())
verts = verts.reshape((-1, *verts.shape[-2:]))
centroid = verts.mean(axis=1, keepdims=True)
verts = centroid + (1.0 - 1e-3) * (verts - centroid)
geoms_verts.append(verts)
# Track pairs that are colliding in neutral configuration for warning
self_colliding_pairs: list[tuple[int, int]] = []
n_possible_pairs = 0
collision_pair_idx = np.full((self._solver.n_geoms, self._solver.n_geoms), fill_value=-1, dtype=gs.np_int)
for i_ga in range(self._solver.n_geoms):
geom_a = self._solver.geoms[i_ga]
link_a = geom_a.link
e_a = geom_a.entity
for i_gb in range(i_ga + 1, self._solver.n_geoms):
geom_b = self._solver.geoms[i_gb]
link_b = geom_b.link
e_b = geom_b.entity
# geoms in the same link
if link_a is link_b:
continue
# Skip all pairs involving ipc_only links (IPC fully controls their pose)
if link_a in ipc_only_links or link_b in ipc_only_links:
continue
# Skip pairs where both links are delegated to IPC
if link_a in ipc_delegated_links and link_b in ipc_delegated_links:
continue
# Filter out right away weld constraint that have been declared statically and cannot be removed
is_valid = True
for eq in self._solver.equalities:
if eq.type == gs.EQUALITY_TYPE.WELD and {eq.eq_obj1id, eq.eq_obj2id} == {link_a.idx, link_b.idx}:
is_valid = False
break
if not is_valid:
continue
# contype and conaffinity
if ((e_a is e_b) or not (e_a.is_local_collision_mask or e_b.is_local_collision_mask)) and not (
(geom_a.contype & geom_b.conaffinity) or (geom_b.contype & geom_a.conaffinity)
):
continue
# pair of fixed links wrt the world
if link_a.is_fixed and link_b.is_fixed:
continue
# self collision
if link_a.root_idx == link_b.root_idx:
if not self._solver._enable_self_collision:
continue
# adjacent links
# FIXME: Links should be considered adjacent if connected by only fixed joints.
if not self._solver._enable_adjacent_collision:
is_adjacent = False
link_a_, link_b_ = (link_a, link_b) if link_a.idx < link_b.idx else (link_b, link_a)
while link_b_.parent_idx != -1:
if link_b_.parent_idx == link_a_.idx:
is_adjacent = True
break
if not all(joint.type is gs.JOINT_TYPE.FIXED for joint in link_b_.joints):
break
link_b_ = self._solver.links[link_b_.parent_idx]
if is_adjacent:
continue
# active in neutral configuration (qpos0)
is_self_colliding = False
for i_b in range(1 if not self._solver._enable_neutral_collision else 0):
verts_a = geoms_verts[i_ga][i_b]
mesh_a = trimesh.Trimesh(vertices=verts_a, faces=geom_a.init_faces, process=False)
verts_b = geoms_verts[i_gb][i_b]
mesh_b = trimesh.Trimesh(vertices=verts_b, faces=geom_b.init_faces, process=False)
bounds_a, bounds_b = mesh_a.bounds, mesh_b.bounds
if not ((bounds_a[1] < bounds_b[0]).any() or (bounds_b[1] < bounds_a[0]).any()):
voxels_a = mesh_a.voxelized(
pitch=min(NEUTRAL_COLLISION_RES_ABS, NEUTRAL_COLLISION_RES_REL * max(mesh_a.extents))
)
voxels_b = mesh_b.voxelized(
pitch=min(NEUTRAL_COLLISION_RES_ABS, NEUTRAL_COLLISION_RES_REL * max(mesh_b.extents))
)
coords_a = voxels_a.indices_to_points(np.argwhere(voxels_a.matrix))
coords_b = voxels_b.indices_to_points(np.argwhere(voxels_b.matrix))
if voxels_a.is_filled(coords_b).any() or voxels_b.is_filled(coords_a).any():
is_self_colliding = True
self_colliding_pairs.append((i_ga, i_gb))
break
if is_self_colliding:
continue
collision_pair_idx[i_ga, i_gb] = n_possible_pairs
n_possible_pairs = n_possible_pairs + 1
# Emit warning for self-collision pairs
if self_colliding_pairs:
pairs = ", ".join((f"({i_ga}, {i_gb})") for i_ga, i_gb in self_colliding_pairs)
gs.logger.warning(
f"Filtered out geometry pairs causing self-collision for the neutral configuration (qpos0): {pairs}. "
"Consider tuning Morph option 'decompose_robot_error_threshold' or specify dedicated collision meshes. "
"This behavior can be disabled by setting Morph option 'enable_neutral_collision=True'."
)
return n_possible_pairs, collision_pair_idx
def _compute_verts_connectivity(self):
"""
Compute the vertex connectivity.
"""
vert_neighbors = []
vert_neighbor_start = []
vert_n_neighbors = []
offset = 0
for geom in self._solver.geoms:
vert_neighbors.append(geom.vert_neighbors + geom.vert_start)
vert_neighbor_start.append(geom.vert_neighbor_start + offset)
vert_n_neighbors.append(geom.vert_n_neighbors)
offset = offset + len(geom.vert_neighbors)
if self._solver.n_verts > 0:
vert_neighbors = np.concatenate(vert_neighbors, dtype=gs.np_int)
vert_neighbor_start = np.concatenate(vert_neighbor_start, dtype=gs.np_int)
vert_n_neighbors = np.concatenate(vert_n_neighbors, dtype=gs.np_int)
return vert_neighbors, vert_neighbor_start, vert_n_neighbors
def _init_collision_pair_idx(self, collision_pair_idx):
if self._n_possible_pairs == 0:
self._collider_info.collision_pair_idx.fill(-1)
return
self._collider_info.collision_pair_idx.from_numpy(collision_pair_idx)
def _init_verts_connectivity(self, vert_neighbors, vert_neighbor_start, vert_n_neighbors):
if self._solver.n_verts > 0:
self._collider_info.vert_neighbors.from_numpy(vert_neighbors)
self._collider_info.vert_neighbor_start.from_numpy(vert_neighbor_start)
self._collider_info.vert_n_neighbors.from_numpy(vert_n_neighbors)
def _init_max_contact_pairs(self, n_possible_pairs):
max_collision_pairs = min(self._solver.max_collision_pairs, n_possible_pairs)
max_contact_pairs = max_collision_pairs * self._collider_static_config.n_contacts_per_pair
max_contact_pairs_broad = max_collision_pairs * self._solver._options.multiplier_collision_broad_phase
self._collider_info.max_possible_pairs[None] = n_possible_pairs
self._collider_info.max_collision_pairs[None] = max_collision_pairs
self._collider_info.max_collision_pairs_broad[None] = max_contact_pairs_broad
self._collider_info.max_contact_pairs[None] = max_contact_pairs
def _init_terrain_state(self):
if self._collider_static_config.has_terrain:
solver = self._solver
links_idx = solver.geoms_info.link_idx.to_numpy()[solver.geoms_info.type.to_numpy() == gs.GEOM_TYPE.TERRAIN]
entity_idx = solver.links_info.entity_idx.to_numpy()[links_idx[0]]
if isinstance(entity_idx, np.ndarray):
entity_idx = entity_idx[0]
entity = solver._entities[entity_idx]
scale = entity.terrain_scale.astype(gs.np_float)
rc = np.array(entity.terrain_hf.shape, dtype=gs.np_int)
hf = entity.terrain_hf.astype(gs.np_float) * scale[1]
xyz_maxmin = np.array(
[rc[0] * scale[0], rc[1] * scale[0], hf.max(), 0, 0, hf.min() - 1.0],
dtype=gs.np_float,
)
self._collider_info.terrain_hf.from_numpy(hf)
self._collider_info.terrain_rc.from_numpy(rc)
self._collider_info.terrain_scale.from_numpy(scale)
self._collider_info.terrain_xyz_maxmin.from_numpy(xyz_maxmin)
def reset(self, envs_idx=None, *, cache_only: bool = True) -> None:
self._contact_data_cache.clear()
if gs.use_zerocopy:
envs_idx = slice(None) if envs_idx is None else envs_idx
if not cache_only:
first_time = qd_to_torch(self._collider_state.first_time, copy=False)
if isinstance(envs_idx, torch.Tensor) and envs_idx.dtype == torch.bool:
first_time.masked_fill_(envs_idx, True)
else:
first_time[envs_idx] = True
normal = qd_to_torch(self._collider_state.contact_cache.normal, copy=False)
if isinstance(envs_idx, torch.Tensor) and (not IS_OLD_TORCH or envs_idx.dtype == torch.bool):
if envs_idx.dtype == torch.bool:
normal.masked_fill_(envs_idx[None, :, None], 0.0)
else:
normal.scatter_(1, envs_idx[None, :, None].expand((normal.shape[0], -1, 3)), 0.0)
elif envs_idx is None:
normal.zero_()
else:
normal[:, envs_idx] = 0.0
return
if envs_idx is None:
envs_idx = self._solver._scene._envs_idx
collider_kernel_reset(envs_idx, self._solver._static_rigid_sim_config, self._collider_state, cache_only)
def clear(self, envs_idx=None):
self.reset(envs_idx, cache_only=False)
if envs_idx is None:
envs_idx = self._solver._scene._envs_idx
kernel_collider_clear(
envs_idx,
self._solver.links_state,
self._solver.links_info,
self._solver._static_rigid_sim_config,
self._collider_state,
)
def detection(self) -> None:
rigid_solver.kernel_update_geom_aabbs(
self._solver.geoms_state,
self._solver.geoms_init_AABB,
self._solver._static_rigid_sim_config,
)
if self._n_possible_pairs == 0:
return
self._contact_data_cache.clear()
func_broad_phase(
self._solver.links_state,
self._solver.links_info,
self._solver.geoms_state,
self._solver.geoms_info,
self._solver._rigid_global_info,
self._solver._static_rigid_sim_config,
self._solver.constraint_solver.constraint_state,
self._collider_state,
self._solver.equalities_info,
self._collider_info,
self._solver._errno,
)
if self._collider_static_config.has_convex_convex:
narrowphase.func_narrow_phase_convex_vs_convex(
self._solver.links_state,
self._solver.links_info,
self._solver.geoms_state,
self._solver.geoms_info,
self._solver.geoms_init_AABB,
self._solver.verts_info,
self._solver.faces_info,
self._solver.edges_info,
self._solver._rigid_global_info,
self._solver._static_rigid_sim_config,
self._collider_state,
self._collider_info,
self._collider_static_config,
self._mpr._mpr_state,
self._mpr._mpr_info,
self._gjk._gjk_state,
self._gjk._gjk_info,
self._gjk._gjk_static_config,
self._sdf._sdf_info,
self._support_field._support_field_info,
self._gjk._gjk_state.diff_contact_input,
self._solver._errno,
)
if self._collider_static_config.has_convex_specialization:
func_narrow_phase_convex_specializations(
self._solver.geoms_state,
self._solver.geoms_info,
self._solver.geoms_init_AABB,
self._solver.verts_info,
self._solver._rigid_global_info,
self._solver._static_rigid_sim_config,
self._collider_state,
self._collider_info,
self._collider_static_config,
self._solver._errno,
)
if self._collider_static_config.has_terrain:
func_narrow_phase_any_vs_terrain(
self._solver.geoms_state,
self._solver.geoms_info,
self._solver.geoms_init_AABB,
self._solver._static_rigid_sim_config,
self._collider_state,
self._collider_info,
self._collider_static_config,
self._mpr._mpr_state,
self._mpr._mpr_info,
self._support_field._support_field_info,
self._solver._errno,
)
if self._collider_static_config.has_nonconvex_nonterrain:
func_narrow_phase_nonconvex_vs_nonterrain(
self._solver.links_state,
self._solver.links_info,
self._solver.geoms_state,
self._solver.geoms_info,
self._solver.geoms_init_AABB,
self._solver.verts_info,
self._solver.edges_info,
self._solver._rigid_global_info,
self._solver._static_rigid_sim_config,
self._collider_state,
self._collider_info,
self._collider_static_config,
self._sdf._sdf_info,
self._solver._errno,
)
def get_contacts(self, as_tensor: bool = True, to_torch: bool = True, keep_batch_dim: bool = False):
# Early return if already pre-computed
contact_data = self._contact_data_cache.setdefault((as_tensor, to_torch), {})
if contact_data:
return contact_data.copy()
n_envs = self._solver.n_envs
if gs.use_zerocopy:
n_contacts = qd_to_torch(self._collider_state.n_contacts, copy=False)
if as_tensor or n_envs == 0:
n_contacts_max = (n_contacts if n_envs == 0 else n_contacts.max()).item()
for key, data in self._contact_data.items():
if n_envs == 0:
data = data[0, :n_contacts_max] if not keep_batch_dim else data[:, :n_contacts_max]
elif as_tensor:
data = data[:, :n_contacts_max]
if to_torch:
if gs.backend == gs.cpu:
data = data.clone()
else:
data = tensor_to_array(data)
if n_envs > 0 and not as_tensor:
if keep_batch_dim:
data = tuple([data[i : i + 1, :j] for i, j in enumerate(n_contacts.tolist())])
else:
data = tuple([data[i, :j] for i, j in enumerate(n_contacts.tolist())])
contact_data[key] = data
return contact_data.copy()
# Find out how much dynamic memory must be allocated
n_contacts = qd_to_numpy(self._collider_state.n_contacts)
n_contacts_max = n_contacts.max().item()
if as_tensor:
out_size = n_contacts_max * max(n_envs, 1)
else:
*n_contacts_starts, out_size = np.cumsum(n_contacts)
n_contacts = n_contacts.tolist()
# Allocate output buffer
if to_torch:
iout = torch.full((out_size, 4), -1, dtype=gs.tc_int, device=gs.device)
fout = torch.zeros((out_size, 10), dtype=gs.tc_float, device=gs.device)
else:
iout = np.full((out_size, 4), -1, dtype=gs.np_int)
fout = np.zeros((out_size, 10), dtype=gs.np_float)
# Copy contact data
if n_contacts_max > 0:
collider_kernel_get_contacts(
as_tensor, iout, fout, self._solver._static_rigid_sim_config, self._collider_state
)
# Build structured view (no copy)
if as_tensor:
if n_envs > 0:
iout = iout.reshape((n_envs, n_contacts_max, 4))
fout = fout.reshape((n_envs, n_contacts_max, 10))
if keep_batch_dim and n_envs == 0:
iout = iout.reshape((1, n_contacts_max, 4))
fout = fout.reshape((1, n_contacts_max, 10))
iout_chunks = (iout[..., 0], iout[..., 1], iout[..., 2], iout[..., 3])
fout_chunks = (fout[..., 0], fout[..., 1:4], fout[..., 4:7], fout[..., 7:])
values = (*iout_chunks, *fout_chunks)
else:
# Split smallest dimension first, then largest dimension
if n_envs == 0:
iout_chunks = (iout[..., 0], iout[..., 1], iout[..., 2], iout[..., 3])
fout_chunks = (fout[..., 0], fout[..., 1:4], fout[..., 4:7], fout[..., 7:])
values = (*iout_chunks, *fout_chunks)
elif n_contacts_max >= n_envs:
if to_torch:
iout_chunks = torch.split(iout, n_contacts)
fout_chunks = torch.split(fout, n_contacts)
else:
iout_chunks = np.split(iout, n_contacts_starts)
fout_chunks = np.split(fout, n_contacts_starts)
iout_chunks = ((out[..., 0], out[..., 1], out[..., 2], out[..., 3]) for out in iout_chunks)
fout_chunks = ((out[..., 0], out[..., 1:4], out[..., 4:7], out[..., 7:]) for out in fout_chunks)
values = (*zip(*iout_chunks), *zip(*fout_chunks))
else:
iout_chunks = (iout[..., 0], iout[..., 1], iout[..., 2], iout[..., 3])
fout_chunks = (fout[..., 0], fout[..., 1:4], fout[..., 4:7], fout[..., 7:])
if n_envs == 1:
values = [(value,) for value in (*iout_chunks, *fout_chunks)]
else:
if to_torch:
iout_chunks = (torch.split(out, n_contacts) for out in iout_chunks)
fout_chunks = (torch.split(out, n_contacts) for out in fout_chunks)
else:
iout_chunks = (np.split(out, n_contacts_starts) for out in iout_chunks)
fout_chunks = (np.split(out, n_contacts_starts) for out in fout_chunks)
values = (*iout_chunks, *fout_chunks)
# Store contact information in cache
contact_data.update(
zip(("link_a", "link_b", "geom_a", "geom_b", "penetration", "position", "normal", "force"), values)
)
return contact_data.copy()
    def backward(self, dL_dposition, dL_dnormal, dL_dpenetration):
        """
        Backpropagate contact gradients through the differentiable narrow phase.

        Parameters
        ----------
        dL_dposition, dL_dnormal, dL_dpenetration
            Upstream gradients of the loss w.r.t. the contact positions, normals,
            and penetration depths, respectively.
        """
        # Stage the upstream gradients into the collider state first; the autodiff
        # kernel below reads them from there.
        func_set_upstream_grad(dL_dposition, dL_dnormal, dL_dpenetration, self._collider_state)
        # Compute gradient
        func_narrow_phase_diff_convex_vs_convex.grad(
            self._solver.geoms_state,
            self._solver.geoms_info,
            self._solver._static_rigid_sim_config,
            self._collider_state,
            self._collider_info,
            self._gjk._gjk_info,
            self._collider_state.diff_contact_input,
        )
# Keep the legacy import path "genesis.engine.solvers.rigid.collider_decomp" working
# by registering a virtual module alias that forwards to this module.
from genesis.utils.deprecated_module_wrapper import create_virtual_deprecated_module

create_virtual_deprecated_module(__name__, "genesis.engine.solvers.rigid.collider_decomp")
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/collider/collider.py",
"license": "Apache License 2.0",
"lines": 603,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/collider/epa.py | """
Expanding Polytope Algorithm (EPA) for penetration depth computation.
This module contains the EPA algorithm implementation for computing exact
penetration depth and contact normals for intersecting convex objects.
Includes both standard and numerically robust ("safe") variants.
"""
import quadrants as qd
import genesis as gs
import genesis.utils.geom as gu
import genesis.utils.array_class as array_class
from .constants import RETURN_CODE, EPA_POLY_INIT_RETURN_CODE, GJK_RETURN_CODE
from .gjk_utils import (
func_triangle_affine_coords,
func_ray_triangle_intersection,
func_point_triangle_intersection,
func_origin_tetra_intersection,
func_project_origin_to_plane,
)
from .utils import (
func_is_discrete_geoms,
)
# Import func_support from gjk_support to avoid circular dependency
from .gjk_support import func_support
@qd.func
def func_epa(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
):
    """
    EPA algorithm to find the exact penetration depth and contact normal using the simplex constructed by GJK.

    On success, writes the witness points (via ``func_epa_witness``), sets
    ``gjk_state.n_witness`` to 1 and ``gjk_state.distance`` to the negated
    penetration depth for batch index [i_b]; otherwise sets them to 0.

    Returns
    -------
    int
        Index of the nearest polytope face, or -1 if no valid face was found.

    .. seealso::
        MuJoCo's original implementation:
        https://github.com/google-deepmind/mujoco/blob/7dc7a349c5ba2db2d3f8ab50a367d08e2f1afbbc/src/engine/engine_collision_gjk.c#L1331
    """
    # [lower, upper] bracket the penetration depth; iteration stops when they converge.
    upper = gjk_info.FLOAT_MAX[None]
    upper2 = gjk_info.FLOAT_MAX_SQ[None]
    lower = 0.0
    tolerance = gjk_info.tolerance[None]
    # Index of the nearest face
    nearest_i_f = -1
    prev_nearest_i_f = -1
    discrete = func_is_discrete_geoms(geoms_info, i_ga, i_gb)
    if discrete:
        # If the objects are discrete, we do not use tolerance.
        tolerance = gjk_info.FLOAT_MIN[None]
    k_max = gjk_info.epa_max_iterations[None]
    for k in range(k_max):
        prev_nearest_i_f = nearest_i_f
        # Find the polytope face with the smallest distance to the origin
        lower2 = gjk_info.FLOAT_MAX_SQ[None]
        for i in range(gjk_state.polytope.nfaces_map[i_b]):
            i_f = gjk_state.polytope_faces_map[i_b, i]
            face_dist2 = gjk_state.polytope_faces.dist2[i_b, i_f]
            if face_dist2 < lower2:
                lower2 = face_dist2
                nearest_i_f = i_f
        if lower2 > upper2 or nearest_i_f < 0:
            # Invalid face found, stop the algorithm (lower bound of depth is larger than upper bound)
            nearest_i_f = prev_nearest_i_f
            break
        if lower2 <= gjk_info.FLOAT_MIN_SQ[None]:
            # Invalid lower bound (0), stop the algorithm (origin is on the affine hull of face)
            break
        # Find a new support point w from the nearest face's normal
        lower = qd.sqrt(lower2)
        dir = gjk_state.polytope_faces.normal[i_b, nearest_i_f]
        wi = func_epa_support(
            geoms_info,
            verts_info,
            static_rigid_sim_config,
            collider_state,
            collider_static_config,
            gjk_state,
            gjk_info,
            support_field_info,
            i_ga,
            i_gb,
            pos_a,
            quat_a,
            pos_b,
            quat_b,
            i_b,
            dir,
            lower,
        )
        w = gjk_state.polytope_verts.mink[i_b, wi]
        # The upper bound of depth at k-th iteration
        upper_k = w.dot(dir) / lower
        if upper_k < upper:
            upper = upper_k
            upper2 = upper**2
        # If the upper bound and lower bound are close enough, we can stop the algorithm
        if (upper - lower) < tolerance:
            break
        if discrete:
            repeated = False
            for i in range(gjk_state.polytope.nverts[i_b] - 1):
                if (
                    gjk_state.polytope_verts.id1[i_b, i] == gjk_state.polytope_verts.id1[i_b, wi]
                    and gjk_state.polytope_verts.id2[i_b, i] == gjk_state.polytope_verts.id2[i_b, wi]
                ):
                    # The vertex w is already in the polytope,
                    # so we do not need to add it again.
                    repeated = True
                    break
            if repeated:
                break
        gjk_state.polytope.horizon_w[i_b] = w
        # Compute horizon
        horizon_flag = func_epa_horizon(gjk_state, gjk_info, i_b, nearest_i_f)
        if horizon_flag:
            # There was an error in the horizon construction, so the horizon edge is not a closed loop.
            nearest_i_f = -1
            break
        if gjk_state.polytope.horizon_nedges[i_b] < 3:
            # Should not happen, because at least three edges should be in the horizon from one deleted face.
            nearest_i_f = -1
            break
        # Check if the memory space is enough for attaching new faces
        nfaces = gjk_state.polytope.nfaces[i_b]
        nedges = gjk_state.polytope.horizon_nedges[i_b]
        if nfaces + nedges >= gjk_info.polytope_max_faces[None]:
            # If the polytope is full, we cannot insert new faces
            break
        # Attach the new faces
        for i in range(nedges):
            # Face id of the current face to attach
            i_f0 = nfaces + i
            # Face id of the next face to attach
            i_f1 = nfaces + (i + 1) % nedges
            horizon_i_f = gjk_state.polytope_horizon_data.face_idx[i_b, i]
            horizon_i_e = gjk_state.polytope_horizon_data.edge_idx[i_b, i]
            horizon_v1 = gjk_state.polytope_faces.verts_idx[i_b, horizon_i_f][horizon_i_e]
            horizon_v2 = gjk_state.polytope_faces.verts_idx[i_b, horizon_i_f][(horizon_i_e + 1) % 3]
            # Change the adjacent face index of the existing face
            gjk_state.polytope_faces.adj_idx[i_b, horizon_i_f][horizon_i_e] = i_f0
            # Attach the new face.
            # If this is the first face, it will be adjacent to the face that will be attached last.
            adj_i_f_0 = i_f0 - 1 if (i > 0) else nfaces + nedges - 1
            adj_i_f_1 = horizon_i_f
            adj_i_f_2 = i_f1
            dist2 = func_attach_face_to_polytope(
                gjk_state,
                gjk_info,
                i_b,
                wi,
                horizon_v2,
                horizon_v1,
                adj_i_f_2,  # Previous face id
                adj_i_f_1,
                adj_i_f_0,  # Next face id
            )
            if dist2 <= 0:
                # Unrecoverable numerical issue
                nearest_i_f = -1
                break
            if (dist2 >= lower2) and (dist2 <= upper2):
                # Store face in the map
                nfaces_map = gjk_state.polytope.nfaces_map[i_b]
                gjk_state.polytope_faces_map[i_b, nfaces_map] = i_f0
                gjk_state.polytope_faces.map_idx[i_b, i_f0] = nfaces_map
                gjk_state.polytope.nfaces_map[i_b] += 1
        # Clear the horizon data for the next iteration
        gjk_state.polytope.horizon_nedges[i_b] = 0
        if (gjk_state.polytope.nfaces_map[i_b] == 0) or (nearest_i_f == -1):
            # No face candidate left
            break
    if nearest_i_f != -1:
        # Nearest face found
        dist2 = gjk_state.polytope_faces.dist2[i_b, nearest_i_f]
        func_epa_witness(gjk_state, i_ga, i_gb, i_b, nearest_i_f)
        gjk_state.n_witness[i_b] = 1
        gjk_state.distance[i_b] = -qd.sqrt(dist2)
    else:
        # No face found, so the objects are not colliding
        gjk_state.n_witness[i_b] = 0
        gjk_state.distance[i_b] = 0
    return nearest_i_f
@qd.func
def func_epa_witness(
    gjk_state: array_class.GJKState,
    i_ga,
    i_gb,
    i_b,
    i_f,
):
    """
    Recover the witness points on both geometries for polytope face [i_f] and
    store them in ``gjk_state.witness``.
    """
    # Vertex indices of the face on the polytope
    iv_a = gjk_state.polytope_faces.verts_idx[i_b, i_f][0]
    iv_b = gjk_state.polytope_faces.verts_idx[i_b, i_f][1]
    iv_c = gjk_state.polytope_faces.verts_idx[i_b, i_f][2]
    # Affine (barycentric) coordinates of the origin's projection on the face,
    # computed from the face normal and its Minkowski-space vertices.
    bary = func_triangle_affine_coords(
        gjk_state.polytope_faces.normal[i_b, i_f],
        gjk_state.polytope_verts.mink[i_b, iv_a],
        gjk_state.polytope_verts.mink[i_b, iv_b],
        gjk_state.polytope_verts.mink[i_b, iv_c],
    )
    # Witness point on geom 1: barycentric combination of the obj1 vertices
    point_a = (
        gjk_state.polytope_verts.obj1[i_b, iv_a] * bary[0]
        + gjk_state.polytope_verts.obj1[i_b, iv_b] * bary[1]
        + gjk_state.polytope_verts.obj1[i_b, iv_c] * bary[2]
    )
    # Witness point on geom 2: same combination of the obj2 vertices
    point_b = (
        gjk_state.polytope_verts.obj2[i_b, iv_a] * bary[0]
        + gjk_state.polytope_verts.obj2[i_b, iv_b] * bary[1]
        + gjk_state.polytope_verts.obj2[i_b, iv_c] * bary[2]
    )
    gjk_state.witness.point_obj1[i_b, 0] = point_a
    gjk_state.witness.point_obj2[i_b, 0] = point_b
@qd.func
def func_epa_horizon(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_b,
    nearest_i_f,
):
    """
    Compute the horizon, which represents the area of the polytope that is visible from the vertex w, and thus
    should be deleted for the expansion of the polytope.

    Starting from [nearest_i_f], performs a stack-based traversal of the faces,
    deleting every face visible from w (read from ``gjk_state.polytope.horizon_w``)
    and appending each boundary edge between a visible and a hidden face to the
    horizon edge list.

    Returns
    -------
    int
        RETURN_CODE.SUCCESS, or a non-zero flag if the horizon edges failed to
        form a closed loop.
    """
    w = gjk_state.polytope.horizon_w[i_b]
    # Initialize the stack by inserting the nearest face
    gjk_state.polytope_horizon_stack.face_idx[i_b, 0] = nearest_i_f
    gjk_state.polytope_horizon_stack.edge_idx[i_b, 0] = 0
    top = 1
    is_first = True
    flag = RETURN_CODE.SUCCESS
    while top > 0:
        # Pop the top face from the stack
        i_f = gjk_state.polytope_horizon_stack.face_idx[i_b, top - 1]
        i_e = gjk_state.polytope_horizon_stack.edge_idx[i_b, top - 1]
        i_v = gjk_state.polytope_faces.verts_idx[i_b, i_f][0]
        v = gjk_state.polytope_verts.mink[i_b, i_v]
        top -= 1
        # If the face is already deleted, skip it (map_idx == -2 marks deletion)
        is_deleted = gjk_state.polytope_faces.map_idx[i_b, i_f] == -2
        if (not is_first) and (is_deleted):
            continue
        # Check visibility of the face. Two requirements for the face to be visible:
        # 1. The face normal should point towards the vertex w
        # 2. The vertex w should be on the other side of the face to the origin
        is_visible = gjk_state.polytope_faces.normal[i_b, i_f].dot(w - v) > gjk_info.FLOAT_MIN[None]
        # The first face is always considered visible.
        if is_visible or is_first:
            # If visible, delete the face from the polytope
            func_delete_face_from_polytope(gjk_state, i_b, i_f)
            # Add the other two or three edges of the face to the stack.
            # The order is important to form a closed loop.
            for k in range(0 if is_first else 1, 3):
                i_e2 = (i_e + k) % 3
                adj_face_idx = gjk_state.polytope_faces.adj_idx[i_b, i_f][i_e2]
                adj_face_is_deleted = gjk_state.polytope_faces.map_idx[i_b, adj_face_idx] == -2
                if not adj_face_is_deleted:
                    # Get the related edge id from the adjacent face. Since adjacent faces have different
                    # orientations, we need to use the ending vertex of the edge.
                    start_vert_idx = gjk_state.polytope_faces.verts_idx[i_b, i_f][(i_e2 + 1) % 3]
                    adj_edge_idx = func_get_edge_idx(gjk_state, i_b, adj_face_idx, start_vert_idx)
                    gjk_state.polytope_horizon_stack.face_idx[i_b, top] = adj_face_idx
                    gjk_state.polytope_horizon_stack.edge_idx[i_b, top] = adj_edge_idx
                    top += 1
        else:
            # If not visible, add the edge to the horizon.
            flag = func_add_edge_to_horizon(gjk_state, i_b, i_f, i_e)
            if flag:
                # If the edges do not form a closed loop, there is an error in the algorithm.
                break
        is_first = False
    return flag
@qd.func
def func_add_edge_to_horizon(
    gjk_state: array_class.GJKState,
    i_b,
    i_f,
    i_e,
):
    """
    Append edge [i_e] of face [i_f] to the horizon edge list.
    """
    n = gjk_state.polytope.horizon_nedges[i_b]
    gjk_state.polytope_horizon_data.face_idx[i_b, n] = i_f
    gjk_state.polytope_horizon_data.edge_idx[i_b, n] = i_e
    gjk_state.polytope.horizon_nedges[i_b] = n + 1
    return RETURN_CODE.SUCCESS
@qd.func
def func_get_edge_idx(
    gjk_state: array_class.GJKState,
    i_b,
    i_f,
    i_v,
):
    """
    Return the index of the edge of face [i_f] that starts at vertex [i_v].

    A face [v1, v2, v3] owns the edges [v1, v2], [v2, v3], [v3, v1], so the
    result is 0 when i_v matches v1, 1 when it matches v2, and 2 otherwise.
    """
    face_verts = gjk_state.polytope_faces.verts_idx[i_b, i_f]
    edge_idx = gs.qd_int(2)
    if face_verts[0] == i_v:
        edge_idx = 0
    elif face_verts[1] == i_v:
        edge_idx = 1
    return edge_idx
@qd.func
def func_delete_face_from_polytope(
    gjk_state: array_class.GJKState,
    i_b,
    i_f,
):
    """
    Remove face [i_f] from the polytope face map and mark it deleted.

    The map is kept dense by swap-removal: its last entry is moved into the
    slot vacated by [i_f]. The face itself is flagged with map index -2.
    """
    slot = gjk_state.polytope_faces.map_idx[i_b, i_f]
    if slot >= 0:
        # Move the last mapped face into this slot and shrink the map.
        n_map = gjk_state.polytope.nfaces_map[i_b]
        moved_face = gjk_state.polytope_faces_map[i_b, n_map - 1]
        gjk_state.polytope_faces_map[i_b, slot] = moved_face
        gjk_state.polytope_faces.map_idx[i_b, moved_face] = slot
        gjk_state.polytope.nfaces_map[i_b] = n_map - 1
    # -2 marks the face as deleted.
    gjk_state.polytope_faces.map_idx[i_b, i_f] = -2
@qd.func
def func_epa_insert_vertex_to_polytope(
    gjk_state: array_class.GJKState,
    i_b: int,
    obj1_point,
    obj2_point,
    obj1_localpos,
    obj2_localpos,
    obj1_id: int,
    obj2_id: int,
    minkowski_point,
):
    """
    Append one vertex record to the polytope and return its index.
    """
    idx = gjk_state.polytope.nverts[i_b]
    # Fill every field of the new vertex slot.
    gjk_state.polytope_verts.mink[i_b, idx] = minkowski_point
    gjk_state.polytope_verts.obj1[i_b, idx] = obj1_point
    gjk_state.polytope_verts.obj2[i_b, idx] = obj2_point
    gjk_state.polytope_verts.local_obj1[i_b, idx] = obj1_localpos
    gjk_state.polytope_verts.local_obj2[i_b, idx] = obj2_localpos
    gjk_state.polytope_verts.id1[i_b, idx] = obj1_id
    gjk_state.polytope_verts.id2[i_b, idx] = obj2_id
    gjk_state.polytope.nverts[i_b] = idx + 1
    return idx
@qd.func
def func_epa_init_polytope_2d(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
):
    """
    Create the polytope for EPA from a 1-simplex (line segment).

    Three extra support points are sampled 120 degrees apart around the segment
    axis, yielding a 5-vertex hexahedron (6 faces) enclosing the origin.

    Returns
    -------
    int
        0 when successful, or a flag indicating an error.
    """
    flag = EPA_POLY_INIT_RETURN_CODE.SUCCESS
    # Get the simplex vertices
    v1 = gjk_state.simplex_vertex.mink[i_b, 0]
    v2 = gjk_state.simplex_vertex.mink[i_b, 1]
    diff = v2 - v1
    # Find the element in [diff] with the smallest magnitude, because it will give us the largest cross product
    min_val = qd.abs(diff[0])
    min_i = 0
    for i in qd.static(range(1, 3)):
        abs_diff_i = qd.abs(diff[i])
        if abs_diff_i < min_val:
            min_val = abs_diff_i
            min_i = i
    # Cross product with the found axis, then rotate it by 120 degrees around the axis [diff] to get three more
    # points spaced 120 degrees apart
    rotmat = gu.qd_rotvec_to_R(diff * qd.math.radians(120.0), rigid_global_info.EPS[None])
    e = gs.qd_vec3(0.0, 0.0, 0.0)
    e[min_i] = 1.0
    d1 = e.cross(diff)
    d2 = rotmat @ d1
    d3 = rotmat @ d2
    # Insert the first two vertices into the polytope
    vi = qd.Vector([0, 0, 0, 0, 0], dt=qd.i32)
    for i in range(2):
        vi[i] = func_epa_insert_vertex_to_polytope(
            gjk_state,
            i_b,
            gjk_state.simplex_vertex.obj1[i_b, i],
            gjk_state.simplex_vertex.obj2[i_b, i],
            gjk_state.simplex_vertex.local_obj1[i_b, i],
            gjk_state.simplex_vertex.local_obj2[i_b, i],
            gjk_state.simplex_vertex.id1[i_b, i],
            gjk_state.simplex_vertex.id2[i_b, i],
            gjk_state.simplex_vertex.mink[i_b, i],
        )
    # Find three more vertices using [d1, d2, d3] as support vectors, and insert them into the polytope
    for i in range(3):
        di = d1
        if i == 1:
            di = d2
        elif i == 2:
            di = d3
        di_norm = di.norm()
        vi[i + 2] = func_epa_support(
            geoms_info,
            verts_info,
            static_rigid_sim_config,
            collider_state,
            collider_static_config,
            gjk_state,
            gjk_info,
            support_field_info,
            i_ga,
            i_gb,
            pos_a,
            quat_a,
            pos_b,
            quat_b,
            i_b,
            di,
            di_norm,
        )
    v3 = gjk_state.polytope_verts.mink[i_b, vi[2]]
    v4 = gjk_state.polytope_verts.mink[i_b, vi[3]]
    v5 = gjk_state.polytope_verts.mink[i_b, vi[4]]
    # Build hexahedron (6 faces) from the five vertices.
    # * This hexahedron would have line [v1, v2] as the central axis, and the other three vertices would be on the
    # sides of the hexahedron, as they are spaced 120 degrees apart.
    # * We already know the face and adjacent face indices in building this.
    # * While building the hexahedron by attaching faces, if the face is very close to the origin, we replace the
    # 1-simplex with the 2-simplex, and restart from it.
    for i in range(6):
        # Vertex indices for the faces in the hexahedron
        i_v1, i_v2, i_v3 = vi[0], vi[2], vi[3]
        # Adjacent face indices for the faces in the hexahedron
        i_a1, i_a2, i_a3 = 1, 3, 2
        if i == 1:
            i_v1, i_v2, i_v3 = vi[0], vi[4], vi[2]
            i_a1, i_a2, i_a3 = 2, 4, 0
        elif i == 2:
            i_v1, i_v2, i_v3 = vi[0], vi[3], vi[4]
            i_a1, i_a2, i_a3 = 0, 5, 1
        elif i == 3:
            i_v1, i_v2, i_v3 = vi[1], vi[3], vi[2]
            i_a1, i_a2, i_a3 = 5, 0, 4
        elif i == 4:
            i_v1, i_v2, i_v3 = vi[1], vi[2], vi[4]
            i_a1, i_a2, i_a3 = 3, 1, 5
        elif i == 5:
            i_v1, i_v2, i_v3 = vi[1], vi[4], vi[3]
            i_a1, i_a2, i_a3 = 4, 2, 3
        if (
            func_attach_face_to_polytope(gjk_state, gjk_info, i_b, i_v1, i_v2, i_v3, i_a1, i_a2, i_a3)
            < gjk_info.FLOAT_MIN_SQ[None]
        ):
            func_replace_simplex_3(gjk_state, i_b, i_v1, i_v2, i_v3)
            flag = EPA_POLY_INIT_RETURN_CODE.P2_FALLBACK3
            break
    # NOTE(review): [flag] holds EPA_POLY_INIT_RETURN_CODE values but is compared against
    # RETURN_CODE.SUCCESS below (the sibling 3d/4d initializers compare against
    # EPA_POLY_INIT_RETURN_CODE.SUCCESS) — equivalent only if both SUCCESS codes share the
    # same value; confirm.
    if flag == RETURN_CODE.SUCCESS:
        if not func_ray_triangle_intersection(v1, v2, v3, v4, v5):
            # The hexahedron should be convex by definition, but somehow if it is not, we return non-convex flag
            flag = EPA_POLY_INIT_RETURN_CODE.P2_NONCONVEX
    if flag == RETURN_CODE.SUCCESS:
        # Initialize face map
        for i in qd.static(range(6)):
            gjk_state.polytope_faces_map[i_b, i] = i
            gjk_state.polytope_faces.map_idx[i_b, i] = i
        gjk_state.polytope.nfaces_map[i_b] = 6
    return flag
@qd.func
def func_epa_init_polytope_3d(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
):
    """
    Create the polytope for EPA from a 2-simplex (triangle).

    Two extra support points are sampled along the triangle normal (one on each
    side), yielding a 5-vertex hexahedron (6 faces) enclosing the origin.

    Returns
    -------
    int
        0 when successful, or a flag indicating an error.
    """
    flag = EPA_POLY_INIT_RETURN_CODE.SUCCESS
    # Get the simplex vertices
    v1 = gjk_state.simplex_vertex.mink[i_b, 0]
    v2 = gjk_state.simplex_vertex.mink[i_b, 1]
    v3 = gjk_state.simplex_vertex.mink[i_b, 2]
    # Get normal; if it is zero, we cannot proceed
    n = (v2 - v1).cross(v3 - v1)
    n_norm = n.norm()
    if n_norm < gjk_info.FLOAT_MIN[None]:
        flag = EPA_POLY_INIT_RETURN_CODE.P3_BAD_NORMAL
    n_neg = -n
    # Save vertices in the polytope
    vi = qd.Vector([0, 0, 0, 0, 0], dt=qd.i32)
    for i in range(3):
        vi[i] = func_epa_insert_vertex_to_polytope(
            gjk_state,
            i_b,
            gjk_state.simplex_vertex.obj1[i_b, i],
            gjk_state.simplex_vertex.obj2[i_b, i],
            gjk_state.simplex_vertex.local_obj1[i_b, i],
            gjk_state.simplex_vertex.local_obj2[i_b, i],
            gjk_state.simplex_vertex.id1[i_b, i],
            gjk_state.simplex_vertex.id2[i_b, i],
            gjk_state.simplex_vertex.mink[i_b, i],
        )
    # Find the fourth and fifth vertices using the normal
    # as the support vector. We form a hexahedron (6 faces)
    # with these five vertices.
    for i in range(2):
        dir = n if i == 0 else n_neg
        vi[i + 3] = func_epa_support(
            geoms_info,
            verts_info,
            static_rigid_sim_config,
            collider_state,
            collider_static_config,
            gjk_state,
            gjk_info,
            support_field_info,
            i_ga,
            i_gb,
            pos_a,
            quat_a,
            pos_b,
            quat_b,
            i_b,
            dir,
            n_norm,
        )
    v4 = gjk_state.polytope_verts.mink[i_b, vi[3]]
    v5 = gjk_state.polytope_verts.mink[i_b, vi[4]]
    # Check if v4 or v5 is located inside the triangle.
    # If so, we do not proceed anymore.
    for i in range(2):
        v = v4 if i == 0 else v5
        if func_point_triangle_intersection(gjk_info, v, v1, v2, v3):
            flag = EPA_POLY_INIT_RETURN_CODE.P3_INVALID_V4 if i == 0 else EPA_POLY_INIT_RETURN_CODE.P3_INVALID_V5
            break
    if flag == EPA_POLY_INIT_RETURN_CODE.SUCCESS:
        # If origin does not lie inside the triangle, we need to
        # check if the hexahedron contains the origin.
        tets_has_origin = gs.qd_ivec2(0, 0)
        for i in range(2):
            v = v4 if i == 0 else v5
            tets_has_origin[i] = 1 if func_origin_tetra_intersection(v1, v2, v3, v) == RETURN_CODE.SUCCESS else 0
        # @TODO: It's possible for GJK to return a triangle with origin not contained in it but within tolerance
        # from it. In that case, the hexahedron could possibly be constructed that does not contain the origin, but
        # there is penetration depth.
        if (
            gjk_state.simplex.dist[i_b] > 10 * gjk_info.FLOAT_MIN[None]
            and (not tets_has_origin[0])
            and (not tets_has_origin[1])
        ):
            flag = EPA_POLY_INIT_RETURN_CODE.P3_MISSING_ORIGIN
        else:
            # Build hexahedron (6 faces) from the five vertices.
            for i in range(6):
                # Vertex indices for the faces in the hexahedron
                i_v1, i_v2, i_v3 = vi[3], vi[0], vi[1]
                # Adjacent face indices for the faces in the hexahedron
                i_a1, i_a2, i_a3 = 1, 3, 2
                if i == 1:
                    i_v1, i_v2, i_v3 = vi[3], vi[2], vi[0]
                    i_a1, i_a2, i_a3 = 2, 4, 0
                elif i == 2:
                    i_v1, i_v2, i_v3 = vi[3], vi[1], vi[2]
                    i_a1, i_a2, i_a3 = 0, 5, 1
                elif i == 3:
                    i_v1, i_v2, i_v3 = vi[4], vi[1], vi[0]
                    i_a1, i_a2, i_a3 = 5, 0, 4
                elif i == 4:
                    i_v1, i_v2, i_v3 = vi[4], vi[0], vi[2]
                    i_a1, i_a2, i_a3 = 3, 1, 5
                elif i == 5:
                    i_v1, i_v2, i_v3 = vi[4], vi[2], vi[1]
                    i_a1, i_a2, i_a3 = 4, 2, 3
                dist2 = func_attach_face_to_polytope(gjk_state, gjk_info, i_b, i_v1, i_v2, i_v3, i_a1, i_a2, i_a3)
                if dist2 < gjk_info.FLOAT_MIN_SQ[None]:
                    flag = EPA_POLY_INIT_RETURN_CODE.P3_ORIGIN_ON_FACE
                    break
    if flag == EPA_POLY_INIT_RETURN_CODE.SUCCESS:
        # Initialize face map
        for i in qd.static(range(6)):
            gjk_state.polytope_faces_map[i_b, i] = i
            gjk_state.polytope_faces.map_idx[i_b, i] = i
        gjk_state.polytope.nfaces_map[i_b] = 6
    return flag
@qd.func
def func_epa_init_polytope_4d(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_ga,
    i_gb,
    i_b,
):
    """
    Create the polytope for EPA from a 3-simplex (tetrahedron).

    Returns
    -------
    int
        0 when successful, or a flag indicating an error.
    """
    flag = EPA_POLY_INIT_RETURN_CODE.SUCCESS
    # Insert simplex vertices into the polytope
    vi = qd.Vector([0, 0, 0, 0], dt=qd.i32)
    for i in range(4):
        vi[i] = func_epa_insert_vertex_to_polytope(
            gjk_state,
            i_b,
            gjk_state.simplex_vertex.obj1[i_b, i],
            gjk_state.simplex_vertex.obj2[i_b, i],
            gjk_state.simplex_vertex.local_obj1[i_b, i],
            gjk_state.simplex_vertex.local_obj2[i_b, i],
            gjk_state.simplex_vertex.id1[i_b, i],
            gjk_state.simplex_vertex.id2[i_b, i],
            gjk_state.simplex_vertex.mink[i_b, i],
        )
    # If origin is on any face of the tetrahedron, replace the simplex with a 2-simplex (triangle)
    for i in range(4):
        # Vertex indices for the faces in the tetrahedron
        v1, v2, v3 = vi[0], vi[1], vi[2]
        # Adjacent face indices for the faces in the tetrahedron
        a1, a2, a3 = 1, 3, 2
        if i == 1:
            v1, v2, v3 = vi[0], vi[3], vi[1]
            a1, a2, a3 = 2, 3, 0
        elif i == 2:
            v1, v2, v3 = vi[0], vi[2], vi[3]
            a1, a2, a3 = 0, 3, 1
        elif i == 3:
            v1, v2, v3 = vi[3], vi[2], vi[1]
            a1, a2, a3 = 2, 0, 1
        dist2 = func_attach_face_to_polytope(gjk_state, gjk_info, i_b, v1, v2, v3, a1, a2, a3)
        if dist2 < gjk_info.FLOAT_MIN_SQ[None]:
            # Origin is (numerically) on this face: fall back to a triangle simplex.
            func_replace_simplex_3(gjk_state, i_b, v1, v2, v3)
            flag = EPA_POLY_INIT_RETURN_CODE.P4_FALLBACK3
            break
    if flag == EPA_POLY_INIT_RETURN_CODE.SUCCESS:
        # If the tetrahedron does not contain the origin, we do not proceed anymore.
        if (
            func_origin_tetra_intersection(
                gjk_state.polytope_verts.mink[i_b, vi[0]],
                gjk_state.polytope_verts.mink[i_b, vi[1]],
                gjk_state.polytope_verts.mink[i_b, vi[2]],
                gjk_state.polytope_verts.mink[i_b, vi[3]],
            )
            == RETURN_CODE.FAIL
        ):
            flag = EPA_POLY_INIT_RETURN_CODE.P4_MISSING_ORIGIN
    if flag == EPA_POLY_INIT_RETURN_CODE.SUCCESS:
        # Initialize face map
        for i in qd.static(range(4)):
            gjk_state.polytope_faces_map[i_b, i] = i
            gjk_state.polytope_faces.map_idx[i_b, i] = i
        gjk_state.polytope.nfaces_map[i_b] = 4
    return flag
@qd.func
def func_epa_support(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    dir,
    dir_norm,
):
    """
    Compute support points of the two geometries along [dir] and record them as
    a new polytope vertex, returning its index.

    Parameters
    ----------
    dir: gs.qd_vec3
        Vector from [ga] (obj1) to [gb] (obj2).
    dir_norm: float
        Norm of [dir], used for normalization.
    """
    # Normalize the search direction, falling back to +x for degenerate input.
    search_dir = gs.qd_vec3(1, 0, 0)
    if dir_norm > gjk_info.FLOAT_MIN[None]:
        search_dir = dir / dir_norm
    (
        w_obj1,
        w_obj2,
        w_local1,
        w_local2,
        w_id1,
        w_id2,
        w_mink,
    ) = func_support(
        geoms_info,
        verts_info,
        static_rigid_sim_config,
        collider_state,
        collider_static_config,
        gjk_state,
        gjk_info,
        support_field_info,
        i_ga,
        i_gb,
        i_b,
        search_dir,
        pos_a,
        quat_a,
        pos_b,
        quat_b,
        False,
    )
    # Append the support point to the polytope and hand back its vertex index.
    return func_epa_insert_vertex_to_polytope(
        gjk_state,
        i_b,
        w_obj1,
        w_obj2,
        w_local1,
        w_local2,
        w_id1,
        w_id2,
        w_mink,
    )
@qd.func
def func_attach_face_to_polytope(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_b,
    i_v1,
    i_v2,
    i_v3,
    i_a1,
    i_a2,
    i_a3,
):
    """
    Append a face with vertices [i_v1, i_v2, i_v3] and adjacent faces
    [i_a1, i_a2, i_a3] to the polytope.

    Returns
    -------
    float
        Squared distance from the face to the origin, or 0.0 if the origin
        could not be projected onto the face plane.
    """
    face_dist2 = 0.0
    i_f_new = gjk_state.polytope.nfaces[i_b]
    # Record vertex and adjacency indices for the new face.
    gjk_state.polytope_faces.verts_idx[i_b, i_f_new][0] = i_v1
    gjk_state.polytope_faces.verts_idx[i_b, i_f_new][1] = i_v2
    gjk_state.polytope_faces.verts_idx[i_b, i_f_new][2] = i_v3
    gjk_state.polytope_faces.adj_idx[i_b, i_f_new][0] = i_a1
    gjk_state.polytope_faces.adj_idx[i_b, i_f_new][1] = i_a2
    gjk_state.polytope_faces.adj_idx[i_b, i_f_new][2] = i_a3
    gjk_state.polytope.nfaces[i_b] = i_f_new + 1
    # The projection of the origin onto the face plane doubles as the face normal.
    proj, ret = func_project_origin_to_plane(
        gjk_info,
        gjk_state.polytope_verts.mink[i_b, i_v3],
        gjk_state.polytope_verts.mink[i_b, i_v2],
        gjk_state.polytope_verts.mink[i_b, i_v1],
    )
    gjk_state.polytope_faces.normal[i_b, i_f_new] = proj
    if ret == RETURN_CODE.SUCCESS:
        face_dist2 = proj.dot(proj)
        gjk_state.polytope_faces.dist2[i_b, i_f_new] = face_dist2
        gjk_state.polytope_faces.map_idx[i_b, i_f_new] = -1  # Not registered in the face map yet
    return face_dist2
@qd.func
def func_replace_simplex_3(
    gjk_state: array_class.GJKState,
    i_b,
    i_v1,
    i_v2,
    i_v3,
):
    """
    Rebuild the GJK simplex as the triangle formed by three polytope vertices,
    then clear the polytope so EPA can restart from the new simplex.

    Parameters
    ----------
    i_v1, i_v2, i_v3: int
        Indices of the polytope vertices used as the new simplex corners.
    """
    gjk_state.simplex.nverts[i_b] = 3
    for k in qd.static(range(3)):
        src = i_v1
        if k == 1:
            src = i_v2
        elif k == 2:
            src = i_v3
        # Copy the polytope vertex record into simplex slot k.
        gjk_state.simplex_vertex.mink[i_b, k] = gjk_state.polytope_verts.mink[i_b, src]
        gjk_state.simplex_vertex.obj1[i_b, k] = gjk_state.polytope_verts.obj1[i_b, src]
        gjk_state.simplex_vertex.obj2[i_b, k] = gjk_state.polytope_verts.obj2[i_b, src]
        gjk_state.simplex_vertex.id1[i_b, k] = gjk_state.polytope_verts.id1[i_b, src]
        gjk_state.simplex_vertex.id2[i_b, k] = gjk_state.polytope_verts.id2[i_b, src]
    # Drop all polytope contents.
    gjk_state.polytope.nverts[i_b] = 0
    gjk_state.polytope.nfaces[i_b] = 0
    gjk_state.polytope.nfaces_map[i_b] = 0
@qd.func
def func_safe_epa(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
):
    """
    Safe EPA algorithm to find the exact penetration depth and contact normal using the simplex constructed by GJK.

    This implementation is more robust than the one based on MuJoCo's implementation for the following reasons:

    1) It guarantees that the lower bound of the depth is always smaller than the upper bound, within the tolerance.
    2) This is because we acknowledge that polytope face normal could be unstable when the face is degenerate. Even
    in that case, we can robustly estimate the lower bound of the depth, which gives us more robust results.
    3) In determining the normal direction of a polytope face, we use origin and the polytope vertices altogether
    to get a more stable normal direction, rather than just the origin.

    Returns
    -------
    int
        Index of the nearest polytope face, or -1 if no valid face or witness
        points were found (in which case no collision is reported).
    """
    # [lower, upper] bracket the penetration depth; iteration stops when they converge.
    upper = gjk_info.FLOAT_MAX[None]
    upper2 = gjk_info.FLOAT_MAX_SQ[None]
    lower = gs.qd_float(0.0)
    tolerance = gjk_info.tolerance[None]
    EPS = rigid_global_info.EPS[None]
    # Index of the nearest face
    nearest_i_f = gs.qd_int(-1)
    prev_nearest_i_f = gs.qd_int(-1)
    discrete = func_is_discrete_geoms(geoms_info, i_ga, i_gb)
    if discrete:
        # If the objects are discrete, we do not use tolerance.
        tolerance = rigid_global_info.EPS[None]
    k_max = gjk_info.epa_max_iterations[None]
    for k in range(k_max):
        prev_nearest_i_f = nearest_i_f
        # Find the polytope face with the smallest distance to the origin
        lower2 = gjk_info.FLOAT_MAX_SQ[None]
        for i in range(gjk_state.polytope.nfaces_map[i_b]):
            i_f = gjk_state.polytope_faces_map[i_b, i]
            face_dist2 = gjk_state.polytope_faces.dist2[i_b, i_f]
            if face_dist2 < lower2:
                lower2 = face_dist2
                nearest_i_f = i_f
        if lower2 > upper2 or nearest_i_f == -1:
            # Invalid face found, stop the algorithm (lower bound of depth is larger than upper bound)
            nearest_i_f = prev_nearest_i_f
            break
        # Find a new support point w from the nearest face's normal
        lower = qd.sqrt(lower2)
        dir = gjk_state.polytope_faces.normal[i_b, nearest_i_f]
        wi = func_epa_support(
            geoms_info,
            verts_info,
            static_rigid_sim_config,
            collider_state,
            collider_static_config,
            gjk_state,
            gjk_info,
            support_field_info,
            i_ga,
            i_gb,
            pos_a,
            quat_a,
            pos_b,
            quat_b,
            i_b,
            dir,
            1.0,
        )
        w = gjk_state.polytope_verts.mink[i_b, wi]
        # The upper bound of depth at k-th iteration
        upper_k = w.dot(dir)
        if upper_k < upper:
            upper = upper_k
            upper2 = upper**2
        # If the upper bound and lower bound are close enough, we can stop the algorithm
        if (upper - lower) < tolerance:
            break
        if discrete:
            repeated = False
            for i in range(gjk_state.polytope.nverts[i_b]):
                if i == wi:
                    continue
                elif (
                    gjk_state.polytope_verts.id1[i_b, i] == gjk_state.polytope_verts.id1[i_b, wi]
                    and gjk_state.polytope_verts.id2[i_b, i] == gjk_state.polytope_verts.id2[i_b, wi]
                ):
                    # The vertex w is already in the polytope, so we do not need to add it again.
                    repeated = True
                    break
            if repeated:
                break
        gjk_state.polytope.horizon_w[i_b] = w
        # Compute horizon
        horizon_flag = func_epa_horizon(gjk_state, gjk_info, i_b, nearest_i_f)
        if horizon_flag:
            # There was an error in the horizon construction, so the horizon edge is not a closed loop.
            nearest_i_f = -1
            break
        if gjk_state.polytope.horizon_nedges[i_b] < 3:
            # Should not happen, because at least three edges should be in the horizon from one deleted face.
            nearest_i_f = -1
            break
        # Check if the memory space is enough for attaching new faces
        nfaces = gjk_state.polytope.nfaces[i_b]
        nedges = gjk_state.polytope.horizon_nedges[i_b]
        if nfaces + nedges >= gjk_info.polytope_max_faces[None]:
            # If the polytope is full, we cannot insert new faces
            break
        # Attach the new faces
        attach_flag = RETURN_CODE.SUCCESS
        for i in range(nedges):
            # Face id of the current face to attach
            i_f0 = nfaces + i
            # Face id of the next face to attach
            i_f1 = nfaces + (i + 1) % nedges
            horizon_i_f = gjk_state.polytope_horizon_data.face_idx[i_b, i]
            horizon_i_e = gjk_state.polytope_horizon_data.edge_idx[i_b, i]
            horizon_v1 = gjk_state.polytope_faces.verts_idx[i_b, horizon_i_f][horizon_i_e]
            horizon_v2 = gjk_state.polytope_faces.verts_idx[i_b, horizon_i_f][(horizon_i_e + 1) % 3]
            # Change the adjacent face index of the existing face
            gjk_state.polytope_faces.adj_idx[i_b, horizon_i_f][horizon_i_e] = i_f0
            # Attach the new face.
            # If this is the first face, it will be adjacent to the face that will be attached last.
            adj_i_f_0 = i_f0 - 1 if (i > 0) else nfaces + nedges - 1
            adj_i_f_1 = horizon_i_f
            adj_i_f_2 = i_f1
            attach_flag = func_safe_attach_face_to_polytope(
                gjk_state,
                gjk_info,
                i_b,
                wi,
                horizon_v2,
                horizon_v1,
                adj_i_f_2,  # Previous face id
                adj_i_f_1,
                adj_i_f_0,  # Next face id
            )
            if attach_flag != RETURN_CODE.SUCCESS:
                # Unrecoverable numerical issue
                break
            dist2 = gjk_state.polytope_faces.dist2[i_b, gjk_state.polytope.nfaces[i_b] - 1]
            if (dist2 >= lower2 - EPS) and (dist2 <= upper2 + EPS):
                # Store face in the map
                nfaces_map = gjk_state.polytope.nfaces_map[i_b]
                gjk_state.polytope_faces_map[i_b, nfaces_map] = i_f0
                gjk_state.polytope_faces.map_idx[i_b, i_f0] = nfaces_map
                gjk_state.polytope.nfaces_map[i_b] += 1
        if attach_flag != RETURN_CODE.SUCCESS:
            nearest_i_f = -1
            break
        # Clear the horizon data for the next iteration
        gjk_state.polytope.horizon_nedges[i_b] = 0
        if (gjk_state.polytope.nfaces_map[i_b] == 0) or (nearest_i_f == -1):
            # No face candidate left
            nearest_i_f = -1
            break
    if nearest_i_f != -1:
        # Nearest face found
        dist2 = gjk_state.polytope_faces.dist2[i_b, nearest_i_f]
        flag = func_safe_epa_witness(gjk_state, gjk_info, i_ga, i_gb, i_b, nearest_i_f)
        if flag == RETURN_CODE.SUCCESS:
            gjk_state.n_witness[i_b] = 1
            gjk_state.distance[i_b] = -qd.sqrt(dist2)
        else:
            # Failed to compute witness points, so the objects are not colliding
            gjk_state.n_witness[i_b] = 0
            gjk_state.distance[i_b] = 0.0
            nearest_i_f = -1
    else:
        # No face found, so the objects are not colliding
        gjk_state.n_witness[i_b] = 0
        gjk_state.distance[i_b] = 0.0
    return nearest_i_f
@qd.func
def func_safe_epa_witness(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_ga,
    i_gb,
    i_b,
    i_f,
):
    """
    Compute the witness points from the geometries for the face i_f of the polytope.

    The origin is projected onto the plane of face [i_f] and expressed in the face's
    affine (barycentric) coordinates. If the reprojection error of those coordinates
    is small enough relative to the face size, the same coordinates interpolate the
    per-geometry support points stored on the polytope vertices, yielding one witness
    point on each geometry (written to gjk_state.witness slot 0 of batch i_b).

    Parameters
    ----------
    i_ga, i_gb : int
        Geometry indices; unused in this function, kept for signature consistency.
    i_b : int
        Batch index.
    i_f : int
        Polytope face index to compute the witness points for.

    Returns
    -------
    flag : RETURN_CODE
        SUCCESS if the witness points were computed reliably, FAIL if the
        barycentric reprojection error exceeded the configured threshold.
    """
    flag = RETURN_CODE.SUCCESS
    # Find the affine coordinates of the origin's projection on the face i_f
    face_iv1 = gjk_state.polytope_faces.verts_idx[i_b, i_f][0]
    face_iv2 = gjk_state.polytope_faces.verts_idx[i_b, i_f][1]
    face_iv3 = gjk_state.polytope_faces.verts_idx[i_b, i_f][2]
    face_v1 = gjk_state.polytope_verts.mink[i_b, face_iv1]
    face_v2 = gjk_state.polytope_verts.mink[i_b, face_iv2]
    face_v3 = gjk_state.polytope_verts.mink[i_b, face_iv3]
    # Project origin onto the face plane to get the barycentric coordinates
    proj_o, _ = func_project_origin_to_plane(gjk_info, face_v1, face_v2, face_v3)
    _lambda = func_triangle_affine_coords(proj_o, face_v1, face_v2, face_v3)
    # Check validity of affine coordinates through reprojection
    # NOTE(review): v1..v3 re-fetch the same Minkowski vertices as face_v1..face_v3 above.
    v1 = gjk_state.polytope_verts.mink[i_b, face_iv1]
    v2 = gjk_state.polytope_verts.mink[i_b, face_iv2]
    v3 = gjk_state.polytope_verts.mink[i_b, face_iv3]
    proj_o_lambda = v1 * _lambda[0] + v2 * _lambda[1] + v3 * _lambda[2]
    reprojection_error = (proj_o - proj_o_lambda).norm()
    # Take into account the face magnitude, as the error is relative to the face size.
    max_edge_len_inv = qd.rsqrt(
        max((v1 - v2).norm_sqr(), (v2 - v3).norm_sqr(), (v3 - v1).norm_sqr(), gjk_info.FLOAT_MIN_SQ[None])
    )
    rel_reprojection_error = reprojection_error * max_edge_len_inv
    if rel_reprojection_error > gjk_info.polytope_max_reprojection_error[None]:
        flag = RETURN_CODE.FAIL
    if flag == RETURN_CODE.SUCCESS:
        # Interpolate the stored support points with the same barycentric weights.
        # Point on geom 1
        v1 = gjk_state.polytope_verts.obj1[i_b, face_iv1]
        v2 = gjk_state.polytope_verts.obj1[i_b, face_iv2]
        v3 = gjk_state.polytope_verts.obj1[i_b, face_iv3]
        witness1 = v1 * _lambda[0] + v2 * _lambda[1] + v3 * _lambda[2]
        # Point on geom 2
        v1 = gjk_state.polytope_verts.obj2[i_b, face_iv1]
        v2 = gjk_state.polytope_verts.obj2[i_b, face_iv2]
        v3 = gjk_state.polytope_verts.obj2[i_b, face_iv3]
        witness2 = v1 * _lambda[0] + v2 * _lambda[1] + v3 * _lambda[2]
        gjk_state.witness.point_obj1[i_b, 0] = witness1
        gjk_state.witness.point_obj2[i_b, 0] = witness2
    return flag
@qd.func
def func_safe_epa_init(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_ga,
    i_gb,
    i_b,
):
    """
    Create the polytope for safe EPA from a 3-simplex (tetrahedron).
    Assume the tetrahedron is a non-degenerate simplex.

    The 4 simplex vertices of batch [i_b] are copied into the polytope vertex
    buffer, the 4 triangular faces of the tetrahedron are attached with their
    adjacency indices, and the face map is initialized to contain all 4 faces.
    """
    # Insert simplex vertices into the polytope; vi stores their polytope indices.
    vi = qd.Vector([0, 0, 0, 0], dt=qd.i32)
    for i in range(4):
        vi[i] = func_epa_insert_vertex_to_polytope(
            gjk_state,
            i_b,
            gjk_state.simplex_vertex.obj1[i_b, i],
            gjk_state.simplex_vertex.obj2[i_b, i],
            gjk_state.simplex_vertex.local_obj1[i_b, i],
            gjk_state.simplex_vertex.local_obj2[i_b, i],
            gjk_state.simplex_vertex.id1[i_b, i],
            gjk_state.simplex_vertex.id2[i_b, i],
            gjk_state.simplex_vertex.mink[i_b, i],
        )
    for i in range(4):
        # Vertex indices for the faces in the tetrahedron (defaults are for face i == 0)
        v1, v2, v3 = vi[0], vi[1], vi[2]
        # Adjacent face indices for the faces in the tetrahedron
        a1, a2, a3 = 1, 3, 2
        if i == 1:
            v1, v2, v3 = vi[0], vi[3], vi[1]
            a1, a2, a3 = 2, 3, 0
        elif i == 2:
            v1, v2, v3 = vi[0], vi[2], vi[3]
            a1, a2, a3 = 0, 3, 1
        elif i == 3:
            v1, v2, v3 = vi[3], vi[2], vi[1]
            a1, a2, a3 = 2, 0, 1
        # Return flag is intentionally ignored: the tetrahedron is assumed non-degenerate.
        func_safe_attach_face_to_polytope(gjk_state, gjk_info, i_b, v1, v2, v3, a1, a2, a3)
    # Initialize face map: every one of the 4 faces is a candidate.
    for i in qd.static(range(4)):
        gjk_state.polytope_faces_map[i_b, i] = i
        gjk_state.polytope_faces.map_idx[i_b, i] = i
    gjk_state.polytope.nfaces_map[i_b] = 4
@qd.func
def func_safe_attach_face_to_polytope(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_b,
    i_v1,
    i_v2,
    i_v3,
    i_a1,
    i_a2,
    i_a3,
):
    """
    Attach a face to the polytope.
    While attaching the face, 1) determine its normal direction, and 2) estimate the lower bound of the penetration
    depth in robust manner.
    [i_v1, i_v2, i_v3] are the vertices of the face, [i_a1, i_a2, i_a3] are the adjacent faces.

    Returns
    -------
    flag : RETURN_CODE
        Result of the plane-normal computation (SUCCESS / FAIL). The face slot is
        consumed (nfaces incremented) regardless; normal and dist2 are only written
        on SUCCESS.
    """
    # Write the new face into the next free slot of the face buffer.
    n = gjk_state.polytope.nfaces[i_b]
    gjk_state.polytope_faces.verts_idx[i_b, n][0] = i_v1
    gjk_state.polytope_faces.verts_idx[i_b, n][1] = i_v2
    gjk_state.polytope_faces.verts_idx[i_b, n][2] = i_v3
    gjk_state.polytope_faces.adj_idx[i_b, n][0] = i_a1
    gjk_state.polytope_faces.adj_idx[i_b, n][1] = i_a2
    gjk_state.polytope_faces.adj_idx[i_b, n][2] = i_a3
    gjk_state.polytope_faces.visited[i_b, n] = 0
    gjk_state.polytope.nfaces[i_b] += 1
    # Compute the normal of the plane
    normal, flag = func_plane_normal(
        gjk_info,
        gjk_state.polytope_verts.mink[i_b, i_v3],
        gjk_state.polytope_verts.mink[i_b, i_v2],
        gjk_state.polytope_verts.mink[i_b, i_v1],
    )
    if flag == RETURN_CODE.SUCCESS:
        face_center = (
            gjk_state.polytope_verts.mink[i_b, i_v1]
            + gjk_state.polytope_verts.mink[i_b, i_v2]
            + gjk_state.polytope_verts.mink[i_b, i_v3]
        ) / 3.0
        # Use origin for initialization
        max_orient = -normal.dot(face_center)
        max_abs_orient = qd.abs(max_orient)
        # Consider other vertices in the polytope to reorient the normal: the vertex
        # (or origin) with the largest absolute orientation gives the most reliable sign.
        nverts = gjk_state.polytope.nverts[i_b]
        for i_v in range(nverts):
            if i_v != i_v1 and i_v != i_v2 and i_v != i_v3:
                diff = gjk_state.polytope_verts.mink[i_b, i_v] - face_center
                orient = normal.dot(diff)
                if qd.abs(orient) > max_abs_orient:
                    max_abs_orient = qd.abs(orient)
                    max_orient = orient
        # Flip so the normal points outwards (away from the interior reference point).
        if max_orient > 0.0:
            normal = -normal
        gjk_state.polytope_faces.normal[i_b, n] = normal
        # Compute the safe lower bound of the penetration depth. We can do this by taking the minimum dot product
        # between the face normal and the vertices of the polytope face. This is safer than selecting one of the
        # vertices, because the face normal could be unstable, which ends up in significantly different dot product
        # values for different vertices.
        min_dist2 = gjk_info.FLOAT_MAX[None]
        for i in qd.static(range(3)):
            i_v = i_v1
            if i == 1:
                i_v = i_v2
            elif i == 2:
                i_v = i_v3
            v = gjk_state.polytope_verts.mink[i_b, i_v]
            dist2 = normal.dot(v) ** 2
            if dist2 < min_dist2:
                min_dist2 = dist2
        dist2 = min_dist2
        gjk_state.polytope_faces.dist2[i_b, n] = dist2
        gjk_state.polytope_faces.map_idx[i_b, n] = -1  # No map index yet
    return flag
@qd.func
def func_plane_normal(
    gjk_info: array_class.GJKInfo,
    v1,
    v2,
    v3,
):
    """
    Compute the reliable normal of the plane defined by three points.

    Three algebraically-equivalent cross products (using different edge pairs) are
    tried in turn; the first one with a numerically usable magnitude is normalized
    and returned. This guards against cancellation when one edge pair is nearly
    collinear.

    Returns
    -------
    normal : vec3
        Unit plane normal (zero vector on failure).
    flag : RETURN_CODE
        SUCCESS if a usable normal was found, FAIL otherwise.
    """
    normal, flag = gs.qd_vec3(0.0, 0.0, 0.0), RETURN_CODE.FAIL
    finished = False
    d21 = v2 - v1
    d31 = v3 - v1
    d32 = v3 - v2
    for i in qd.static(range(3)):
        if not finished:
            n = gs.qd_vec3(0.0, 0.0, 0.0)
            if i == 0:
                # Normal = (v1 - v2) x (v3 - v2)
                n = d32.cross(d21)
            elif i == 1:
                # Normal = (v2 - v1) x (v3 - v1)
                n = d21.cross(d31)
            else:
                # Normal = (v1 - v3) x (v2 - v3)
                n = d31.cross(d32)
            nn = n.norm()
            if nn == 0:
                # Zero normal, cannot project.
                flag = RETURN_CODE.FAIL
                finished = True
            elif nn > gjk_info.FLOAT_MIN[None]:
                normal = n.normalized()
                flag = RETURN_CODE.SUCCESS
                finished = True
            # else: tiny but non-zero magnitude — fall through and retry with the next edge pair.
    return normal, flag
# Backward-compatibility shim: register this module under its legacy import path
# ("genesis.engine.solvers.rigid.gjk_decomp") as a virtual deprecated module.
from genesis.utils.deprecated_module_wrapper import create_virtual_deprecated_module
create_virtual_deprecated_module(__name__, "genesis.engine.solvers.rigid.gjk_decomp")
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/collider/epa.py",
"license": "Apache License 2.0",
"lines": 1237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/collider/gjk.py | import math
import quadrants as qd
import genesis as gs
import genesis.utils.geom as gu
import genesis.utils.array_class as array_class
from .constants import RETURN_CODE, GJK_RETURN_CODE, EPA_POLY_INIT_RETURN_CODE
from .gjk_utils import (
func_ray_triangle_intersection,
func_triangle_affine_coords,
func_point_triangle_intersection,
func_point_plane_same_side,
func_origin_tetra_intersection,
func_project_origin_to_plane,
)
from .utils import (
func_is_discrete_geoms,
func_is_equal_vec,
func_det3,
)
from . import support_field
# Import support functions that are shared with epa
from .gjk_support import func_support, support_driver, support_mesh
# Import EPA functions directly
from .epa import (
func_epa_init_polytope_2d,
func_epa_init_polytope_3d,
func_epa_init_polytope_4d,
func_epa,
func_safe_epa_init,
func_safe_epa,
)
# Import multi_contact functions directly
from .multi_contact import (
func_safe_normalize,
func_multi_contact,
)
class GJK:
    """Convex collision detection via GJK/EPA for a rigid-body solver.

    Holds the static configuration (`_gjk_static_config`, `_gjk_info`) and the
    per-batch working state (`_gjk_state`). The state is allocated in a reduced
    form at construction and in full once :meth:`activate` is called.
    """

    def __init__(self, rigid_solver):
        self._solver = rigid_solver

        # --- Static configuration -------------------------------------------------
        # MuJoCo's multi-contact detection is kept only for compatibility and is
        # disabled by default: it tends to be less stable than the alternative
        # multi-contact detection algorithm.
        use_mujoco_multi_contact = False
        max_gjk_iters = 50
        max_epa_iters = 50
        # Each EPA iteration can add at most 6 faces to the polytope.
        max_polytope_faces = 6 * max_epa_iters

        if rigid_solver._static_rigid_sim_config.requires_grad:
            # Differentiable contact detection gathers multiple contact points per pair.
            n_contacts, n_polygon_verts = 20, 1
        elif use_mujoco_multi_contact:
            # MuJoCo uses 50 contacts / 150 polygon vertices here, assuming faces may
            # have many vertices. We rarely see faces with more than 4 vertices, so we
            # use smaller limits to keep memory usage low.
            n_contacts, n_polygon_verts = 8, 30
        else:
            n_contacts, n_polygon_verts = 1, 1

        self._gjk_static_config = array_class.StructGJKStaticConfig(
            enable_mujoco_multi_contact=use_mujoco_multi_contact,
        )

        # --- GJK info (numerical tolerances and buffer sizes) ---------------------
        self._gjk_info = array_class.get_gjk_info(
            max_contacts_per_pair=n_contacts,
            max_contact_polygon_verts=n_polygon_verts,
            gjk_max_iterations=max_gjk_iters,
            epa_max_iterations=max_epa_iters,
            # Larger minimum values (e.g. gs.EPS) caused instability in some examples
            # (e.g. box pyramid). A very small value also avoids discrepancies between
            # backends with different precisions (e.g. when computing vector norms).
            FLOAT_MIN=1e-15,
            FLOAT_MAX=1e15,
            tolerance=1e-6,
            collision_eps=1e-6,
            # Experimentally determined from the current examples (pyramid, tower, ...);
            # may need further tuning for future examples.
            simplex_max_degeneracy_sq=1e-5**2,
            polytope_max_faces=max_polytope_faces,
            # Observed reprojection error usually reaches ~5e-4; 1e-4 is a safe
            # threshold, tunable for future examples.
            polytope_max_reprojection_error=1e-4,
            # Matches MuJoCo for compatibility. Increasing these could detect contact
            # manifolds with slightly misaligned normals, but also yields more false
            # positives, so it is not a perfect solution for multi-contact detection.
            contact_face_tol=math.cos(1.6e-3),
            contact_edge_tol=math.sin(1.6e-3),
            # FIXME: Adjust these values based on the case study.
            diff_contact_eps_boundary=1e-2,
            diff_contact_eps_distance=1e-2,
            diff_contact_eps_affine=1e-2,
            # sqrt of 10 * EPS, since the squared normal norm is often a denominator.
            diff_contact_min_normal_norm=math.sqrt(gs.EPS * 10.0),
            diff_contact_min_penetration=gs.EPS * 100.0,
        )

        # --- GJK state (reduced allocation until activated) ------------------------
        self._gjk_state = array_class.get_gjk_state(
            self._solver, self._solver._static_rigid_sim_config, self._gjk_info, False
        )
        self._is_active = False

    def activate(self):
        """Allocate the full GJK state buffers; subsequent calls are no-ops."""
        if not self._is_active:
            self._gjk_state = array_class.get_gjk_state(
                self._solver, self._solver._static_rigid_sim_config, self._gjk_info, True
            )
            self._is_active = True

    @property
    def is_active(self):
        """Whether the full GJK state has been allocated."""
        return self._is_active
@qd.func
def func_compare_sign(a, b):
    """
    Compare the sign of two values: 1 if both are strictly positive, -1 if both
    are strictly negative, and 0 otherwise (including when either value is zero).
    """
    result = 0
    if a < 0 and b < 0:
        result = -1
    elif a > 0 and b > 0:
        result = 1
    return result
@qd.func
def clear_cache(gjk_state: array_class.GJKState, i_b):
    """
    Reset the per-batch scratch data before the next GJK-EPA run.

    Clears the multi-contact flag, the last searched simplex vertex index, and
    the cached previous support-mesh vertex ids for batch `i_b`.
    """
    gjk_state.multi_contact_flag[i_b] = False
    gjk_state.last_searched_simplex_vertex_id[i_b] = 0
    gjk_state.support_mesh_prev_vertex_id[i_b, 0] = -1
    gjk_state.support_mesh_prev_vertex_id[i_b, 1] = -1
@qd.func
def func_gjk_contact(
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    faces_info: array_class.FacesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    gjk_static_config: qd.template(),
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    i_b,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
):
    """
    Detect (possibly multiple) contact between two geometries using GJK and EPA algorithms.
    We first run the GJK algorithm to find the minimum distance between the two geometries. If the distance is
    smaller than the collision epsilon, we consider the geometries colliding. If they are colliding, we run the EPA
    algorithm to find the exact contact points and normals.

    Two code paths exist: when MuJoCo compatibility mode is enabled (static branch),
    the MuJoCo-style [func_gjk] / [func_epa] pipeline is used; otherwise the "safe"
    variants [func_safe_gjk] / [func_safe_epa] are used. Results are written into
    gjk_state for batch [i_b]: is_col, penetration, n_contacts, contact_pos, normal.

    Parameters
    ----------
    i_ga, i_gb : int
        Indices of the two geometries to test.
    i_b : int
        Batch index.
    pos_a, quat_a, pos_b, quat_b
        World poses (position + quaternion) of the two geometries.

    .. seealso::
    MuJoCo's implementation:
    https://github.com/google-deepmind/mujoco/blob/7dc7a349c5ba2db2d3f8ab50a367d08e2f1afbbc/src/engine/engine_collision_gjk.c#L2259
    """
    # Clear the cache to prepare for this GJK-EPA run
    clear_cache(gjk_state, i_b)
    # We use MuJoCo's GJK implementation when the compatibility mode is enabled
    if qd.static(static_rigid_sim_config.enable_mujoco_compatibility):
        # If any one of the geometries is a sphere or capsule, which are sphere-swept primitives,
        # we can shrink them to a point or line to detect shallow penetration faster
        is_sphere_swept_geom_a, is_sphere_swept_geom_b = (
            func_is_sphere_swept_geom(geoms_info, i_ga),
            func_is_sphere_swept_geom(geoms_info, i_gb),
        )
        shrink_sphere = is_sphere_swept_geom_a or is_sphere_swept_geom_b
        # Run GJK (at most twice: once shrunk, once full if the shrunk pass found deep penetration)
        for _ in range(2 if shrink_sphere else 1):
            distance = func_gjk(
                geoms_info,
                verts_info,
                static_rigid_sim_config,
                collider_state,
                collider_static_config,
                gjk_state,
                gjk_info,
                support_field_info,
                i_ga,
                i_gb,
                i_b,
                pos_a,
                quat_a,
                pos_b,
                quat_b,
                shrink_sphere,
            )
            if shrink_sphere:
                # If we shrunk the sphere and capsule to point and line and the distance is larger than the collision
                # epsilon, it means a shallow penetration. Thus we subtract the radius of the sphere and the capsule to
                # get the actual distance. If the distance is smaller than the collision epsilon, it means a deep
                # penetration, which requires the default GJK handling.
                if distance > gjk_info.collision_eps[None]:
                    radius_a, radius_b = 0.0, 0.0
                    if is_sphere_swept_geom_a:
                        radius_a = geoms_info.data[i_ga][0]
                    if is_sphere_swept_geom_b:
                        radius_b = geoms_info.data[i_gb][0]
                    wa = gjk_state.witness.point_obj1[i_b, 0]
                    wb = gjk_state.witness.point_obj2[i_b, 0]
                    n = func_safe_normalize(gjk_info, wb - wa)
                    # Shift witness points back onto the original (un-shrunk) surfaces.
                    gjk_state.distance[i_b] = distance - (radius_a + radius_b)
                    gjk_state.witness.point_obj1[i_b, 0] = wa + (radius_a * n)
                    gjk_state.witness.point_obj2[i_b, 0] = wb - (radius_b * n)
                    break
            # Only try shrinking the sphere once
            shrink_sphere = False
        distance = gjk_state.distance[i_b]
        nsimplex = gjk_state.nsimplex[i_b]
        collided = distance < gjk_info.collision_eps[None]
        # To run EPA, we need following conditions:
        # 1. We did not find min. distance with shrink_sphere flag
        # 2. We have a valid GJK simplex (nsimplex > 0)
        # 3. We have a collision (distance < collision_epsilon)
        do_epa = (not shrink_sphere) and collided and (nsimplex > 0)
        if do_epa:
            # Assume touching
            gjk_state.distance[i_b] = 0
            # Initialize polytope
            gjk_state.polytope.nverts[i_b] = 0
            gjk_state.polytope.nfaces[i_b] = 0
            gjk_state.polytope.nfaces_map[i_b] = 0
            gjk_state.polytope.horizon_nedges[i_b] = 0
            # Construct the initial polytope from the GJK simplex
            polytope_flag = EPA_POLY_INIT_RETURN_CODE.SUCCESS
            if nsimplex == 2:
                polytope_flag = func_epa_init_polytope_2d(
                    geoms_info,
                    verts_info,
                    rigid_global_info,
                    static_rigid_sim_config,
                    collider_state,
                    collider_static_config,
                    gjk_state,
                    gjk_info,
                    support_field_info,
                    i_ga,
                    i_gb,
                    pos_a,
                    quat_a,
                    pos_b,
                    quat_b,
                    i_b,
                )
            elif nsimplex == 4:
                polytope_flag = func_epa_init_polytope_4d(gjk_state, gjk_info, i_ga, i_gb, i_b)
            # Polytope 3D could be used as a fallback for 2D and 4D cases
            if (
                nsimplex == 3
                or (polytope_flag == EPA_POLY_INIT_RETURN_CODE.P2_FALLBACK3)
                or (polytope_flag == EPA_POLY_INIT_RETURN_CODE.P4_FALLBACK3)
            ):
                polytope_flag = func_epa_init_polytope_3d(
                    geoms_info,
                    verts_info,
                    static_rigid_sim_config,
                    collider_state,
                    collider_static_config,
                    gjk_state,
                    gjk_info,
                    support_field_info,
                    i_ga,
                    i_gb,
                    pos_a,
                    quat_a,
                    pos_b,
                    quat_b,
                    i_b,
                )
            # Run EPA from the polytope
            if polytope_flag == EPA_POLY_INIT_RETURN_CODE.SUCCESS:
                i_f = func_epa(
                    geoms_info,
                    verts_info,
                    static_rigid_sim_config,
                    collider_state,
                    collider_static_config,
                    gjk_state,
                    gjk_info,
                    support_field_info,
                    i_ga,
                    i_gb,
                    pos_a,
                    quat_a,
                    pos_b,
                    quat_b,
                    i_b,
                )
                if qd.static(gjk_static_config.enable_mujoco_multi_contact):
                    # To use MuJoCo's multi-contact detection algorithm,
                    # (1) [i_f] should be a valid face index in the polytope (>= 0),
                    # (2) Both of the geometries should be discrete,
                    # (3) [enable_mujoco_multi_contact] should be True. Default to False.
                    # NOTE(review): this call passes (geoms_info, i_ga, i_gb, i_b) while the
                    # call in func_gjk passes only (geoms_info, i_ga, i_gb) — confirm signature.
                    if i_f >= 0 and func_is_discrete_geoms(geoms_info, i_ga, i_gb, i_b):
                        func_multi_contact(
                            geoms_info,
                            verts_info,
                            faces_info,
                            gjk_state,
                            gjk_info,
                            i_ga,
                            i_gb,
                            pos_a,
                            quat_a,
                            pos_b,
                            quat_b,
                            i_b,
                            i_f,
                        )
                        gjk_state.multi_contact_flag[i_b] = True
    else:
        gjk_flag = func_safe_gjk(
            geoms_info,
            verts_info,
            rigid_global_info,
            static_rigid_sim_config,
            collider_state,
            collider_static_config,
            gjk_state,
            gjk_info,
            support_field_info,
            i_ga,
            i_gb,
            pos_a,
            quat_a,
            pos_b,
            quat_b,
            i_b,
        )
        if gjk_flag == GJK_RETURN_CODE.INTERSECT:
            # Initialize polytope
            gjk_state.polytope.nverts[i_b] = 0
            gjk_state.polytope.nfaces[i_b] = 0
            gjk_state.polytope.nfaces_map[i_b] = 0
            gjk_state.polytope.horizon_nedges[i_b] = 0
            # Construct the initial polytope from the GJK simplex
            func_safe_epa_init(gjk_state, gjk_info, i_ga, i_gb, i_b)
            # Run EPA from the polytope
            func_safe_epa(
                geoms_info,
                verts_info,
                rigid_global_info,
                static_rigid_sim_config,
                collider_state,
                collider_static_config,
                gjk_state,
                gjk_info,
                support_field_info,
                i_ga,
                i_gb,
                pos_a,
                quat_a,
                pos_b,
                quat_b,
                i_b,
            )
    # Compute the final contact points and normals from the witness points.
    n_contacts = 0
    gjk_state.is_col[i_b] = gjk_state.distance[i_b] < 0.0
    gjk_state.penetration[i_b] = -gjk_state.distance[i_b] if gjk_state.is_col[i_b] else 0.0
    if gjk_state.is_col[i_b]:
        for i in range(gjk_state.n_witness[i_b]):
            w1 = gjk_state.witness.point_obj1[i_b, i]
            w2 = gjk_state.witness.point_obj2[i_b, i]
            # Contact position is the midpoint of the witness pair; normal points from obj1 to obj2.
            contact_pos = 0.5 * (w1 + w2)
            normal = w2 - w1
            normal_len = normal.norm()
            if normal_len < gjk_info.FLOAT_MIN[None]:
                continue
            normal = normal / normal_len
            gjk_state.contact_pos[i_b, n_contacts] = contact_pos
            gjk_state.normal[i_b, n_contacts] = normal
            n_contacts += 1
    gjk_state.n_contacts[i_b] = n_contacts
    # If there are no contacts, we set the penetration and is_col to 0
    # FIXME: When we use if statement here, it leads to a bug in some backends (e.g. x86 cpu). Need to investigate.
    gjk_state.is_col[i_b] = False if n_contacts == 0 else gjk_state.is_col[i_b]
    gjk_state.penetration[i_b] = 0.0 if n_contacts == 0 else gjk_state.penetration[i_b]
@qd.func
def func_gjk(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    i_b,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    shrink_sphere,
):
    """
    GJK algorithm to compute the minimum distance between two convex objects.
    This implementation is based on the MuJoCo implementation.
    TODO: This implementation could be further improved by referencing the follow-up work shown below.

    Parameters
    ----------
    shrink_sphere: bool
        If True, use point and line support functions for sphere and capsule geometries, respectively. It is more
        efficient and stable for shallow penetrations than the full GJK algorithm. However, if there is a deep
        penetration, we have to fallback to the full GJK algorithm by setting this parameter to False.

    Returns
    -------
    float
        The computed minimum distance (also stored in gjk_state.distance[i_b], along
        with gjk_state.n_witness and gjk_state.nsimplex as side effects).

    .. seealso::
    MuJoCo's original implementation:
    https://github.com/google-deepmind/mujoco/blob/7dc7a349c5ba2db2d3f8ab50a367d08e2f1afbbc/src/engine/engine_collision_gjk.c#L171
    Original paper:
    Gilbert, Elmer G., Daniel W. Johnson, and S. Sathiya Keerthi.
    "A fast procedure for computing the distance between complex objects in three-dimensional space."
    IEEE Journal on Robotics and Automation 4.2 (2002): 193-203.
    Further improvements:
    Cameron, Stephen. "Enhancing GJK: Computing minimum and penetration distances between convex polyhedra."
    Proceedings of international conference on robotics and automation. Vol. 4. IEEE, 1997.
    https://www.cs.ox.ac.uk/people/stephen.cameron/distances/gjk2.4/
    Montaut, Louis, et al. "Collision detection accelerated: An optimization perspective."
    https://arxiv.org/abs/2205.09663
    """
    # Simplex index
    n = gs.qd_int(0)
    # Final number of simplex vertices
    nsimplex = gs.qd_int(0)
    # Number of witness points and distance
    nx = gs.qd_int(0)
    dist = gs.qd_float(0.0)
    # Lambda for barycentric coordinates
    _lambda = gs.qd_vec4(1.0, 0.0, 0.0, 0.0)
    # Whether or not we need to compute the exact distance.
    get_dist = shrink_sphere
    # We can use GJK intersection algorithm only for collision detection if we do not have to compute the distance.
    backup_gjk = not get_dist
    # Support vector to compute the next support point.
    support_vector = gs.qd_vec3(0.0, 0.0, 0.0)
    support_vector_norm = gs.qd_float(0.0)
    # Whether or not the main loop finished early because intersection or seperation was detected.
    early_stop = False
    # Set initial guess of support vector using the thread-local positions, which should be a non-zero vector.
    approx_witness_point_obj1 = pos_a
    approx_witness_point_obj2 = pos_b
    support_vector = approx_witness_point_obj1 - approx_witness_point_obj2
    if support_vector.dot(support_vector) < gjk_info.FLOAT_MIN_SQ[None]:
        # Degenerate initial guess (coincident positions): pick an arbitrary direction.
        support_vector = gs.qd_vec3(1.0, 0.0, 0.0)
    # Epsilon for convergence check.
    epsilon = gs.qd_float(0.0)
    if not func_is_discrete_geoms(geoms_info, i_ga, i_gb):
        # If the objects are smooth, finite convergence is not guaranteed, so we need to set some epsilon
        # to determine convergence.
        epsilon = 0.5 * (gjk_info.tolerance[None] ** 2)
    for i in range(gjk_info.gjk_max_iterations[None]):
        # Compute the current support points
        support_vector_norm = support_vector.norm()
        if support_vector_norm < gjk_info.FLOAT_MIN[None]:
            # If the support vector is too small, it means that origin is located in the Minkowski difference
            # with high probability, so we can stop.
            break
        # Dir to compute the support point (pointing from obj1 to obj2)
        dir = -support_vector * (1.0 / support_vector_norm)
        (
            gjk_state.simplex_vertex.obj1[i_b, n],
            gjk_state.simplex_vertex.obj2[i_b, n],
            gjk_state.simplex_vertex.local_obj1[i_b, n],
            gjk_state.simplex_vertex.local_obj2[i_b, n],
            gjk_state.simplex_vertex.id1[i_b, n],
            gjk_state.simplex_vertex.id2[i_b, n],
            gjk_state.simplex_vertex.mink[i_b, n],
        ) = func_support(
            geoms_info,
            verts_info,
            static_rigid_sim_config,
            collider_state,
            collider_static_config,
            gjk_state,
            gjk_info,
            support_field_info,
            i_ga,
            i_gb,
            i_b,
            dir,
            pos_a,
            quat_a,
            pos_b,
            quat_b,
            shrink_sphere,
        )
        # Early stopping based on Frank-Wolfe duality gap. We need to find the minimum [support_vector_norm],
        # and if we denote it as [x], the problem formulation is: min_x |x|^2.
        # If we denote f(x) = |x|^2, then the Frank-Wolfe duality gap is:
        # |x - x_min|^2 <= < grad f(x), x - s> = < 2x, x - s >,
        # where s is the vertex of the Minkowski difference found by x. Here < 2x, x - s > is guaranteed to be
        # non-negative, and 2 is cancelled out in the definition of the epsilon.
        x_k = support_vector
        s_k = gjk_state.simplex_vertex.mink[i_b, n]
        diff = x_k - s_k
        if diff.dot(x_k) < epsilon:
            # Convergence condition is met, we can stop.
            if i == 0:
                # The very first support point alone forms the final (1-vertex) simplex.
                n = 1
            break
        # Check if the objects are separated using support vector
        if not get_dist:
            is_separated = x_k.dot(s_k) > 0.0
            if is_separated:
                nsimplex = 0
                nx = 0
                dist = gjk_info.FLOAT_MAX[None]
                early_stop = True
                break
        if n == 3 and backup_gjk:
            # Tetrahedron is generated, try to detect collision if possible.
            intersect_code = func_gjk_intersect(
                geoms_info=geoms_info,
                verts_info=verts_info,
                static_rigid_sim_config=static_rigid_sim_config,
                collider_state=collider_state,
                collider_static_config=collider_static_config,
                gjk_state=gjk_state,
                gjk_info=gjk_info,
                support_field_info=support_field_info,
                i_ga=i_ga,
                i_gb=i_gb,
                i_b=i_b,
                pos_a=pos_a,
                quat_a=quat_a,
                pos_b=pos_b,
                quat_b=quat_b,
            )
            if intersect_code == GJK_RETURN_CODE.SEPARATED:
                # No intersection, objects are separated
                nx = 0
                dist = gjk_info.FLOAT_MAX[None]
                nsimplex = 0
                early_stop = True
                break
            elif intersect_code == GJK_RETURN_CODE.INTERSECT:
                # Intersection found
                nx = 0
                dist = 0.0
                nsimplex = 4
                early_stop = True
                break
            else:
                # Since gjk_intersect failed (e.g. origin is on the simplex face), fallback to distance computation
                backup_gjk = False
        # Compute the barycentric coordinates of the closest point to the origin in the simplex
        _lambda = func_gjk_subdistance(gjk_state, gjk_info, i_b, n + 1)
        # Remove vertices from the simplex with zero barycentric coordinates
        n = 0
        for j in qd.static(range(4)):
            if _lambda[j] > 0:
                gjk_state.simplex_vertex.obj1[i_b, n] = gjk_state.simplex_vertex.obj1[i_b, j]
                gjk_state.simplex_vertex.obj2[i_b, n] = gjk_state.simplex_vertex.obj2[i_b, j]
                gjk_state.simplex_vertex.id1[i_b, n] = gjk_state.simplex_vertex.id1[i_b, j]
                gjk_state.simplex_vertex.id2[i_b, n] = gjk_state.simplex_vertex.id2[i_b, j]
                gjk_state.simplex_vertex.mink[i_b, n] = gjk_state.simplex_vertex.mink[i_b, j]
                _lambda[n] = _lambda[j]
                n += 1
        # Should not occur
        if n < 1:
            nsimplex = 0
            nx = 0
            dist = gjk_info.FLOAT_MAX[None]
            early_stop = True
            break
        # Get the next support vector
        next_support_vector = func_simplex_vertex_linear_comb(gjk_state, i_b, 2, 0, 1, 2, 3, _lambda, n)
        if func_is_equal_vec(next_support_vector, support_vector, gjk_info.FLOAT_MIN[None]):
            # If the next support vector is equal to the previous one, we converged to the minimum distance
            break
        support_vector = next_support_vector
        if n == 4:
            # We have a tetrahedron containing the origin, so we can return early. This is because only when
            # the origin is inside the tetrahedron, the barycentric coordinates are all positive. While MuJoCo
            # does not set the [support_vector_norm] to zero as we do, it is necessary, because otherwise the
            # [support_vector_norm] could be non-zero value even if there is contact.
            support_vector_norm = 0
            break
    if not early_stop:
        # If [get_dist] was True and there was no numerical error, [return_code] would be SUCCESS.
        nx = 1
        nsimplex = n
        dist = support_vector_norm
        # Compute witness points
        for i in range(2):
            witness_point = func_simplex_vertex_linear_comb(gjk_state, i_b, i, 0, 1, 2, 3, _lambda, nsimplex)
            if i == 0:
                gjk_state.witness.point_obj1[i_b, 0] = witness_point
            else:
                gjk_state.witness.point_obj2[i_b, 0] = witness_point
    gjk_state.n_witness[i_b] = nx
    gjk_state.distance[i_b] = dist
    gjk_state.nsimplex[i_b] = nsimplex
    return gjk_state.distance[i_b]
@qd.func
def func_gjk_intersect(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    i_b,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
):
    """
    Check if the two objects intersect using the GJK algorithm.
    This function refines the simplex until it contains the origin or it is determined that the objects are
    separated. It is used to check if the objects intersect, not to find the minimum distance between them.

    Works on a copy of the current 4-vertex simplex (simplex_vertex_intersect); on
    INTERSECT, the refined tetrahedron is copied back into gjk_state.simplex_vertex.

    Returns
    -------
    GJK_RETURN_CODE
        INTERSECT if the refined tetrahedron contains the origin, SEPARATED if a
        separating direction was found, NUM_ERROR if neither could be established
        within the iteration budget (e.g. degenerate simplex faces).
    """
    # Copy simplex to temporary storage
    for i in qd.static(range(4)):
        gjk_state.simplex_vertex_intersect.obj1[i_b, i] = gjk_state.simplex_vertex.obj1[i_b, i]
        gjk_state.simplex_vertex_intersect.obj2[i_b, i] = gjk_state.simplex_vertex.obj2[i_b, i]
        gjk_state.simplex_vertex_intersect.id1[i_b, i] = gjk_state.simplex_vertex.id1[i_b, i]
        gjk_state.simplex_vertex_intersect.id2[i_b, i] = gjk_state.simplex_vertex.id2[i_b, i]
        gjk_state.simplex_vertex_intersect.mink[i_b, i] = gjk_state.simplex_vertex.mink[i_b, i]
    # Simplex index (indirection layer so vertices can be swapped without copying data)
    si = qd.Vector([0, 1, 2, 3], dt=gs.qd_int)
    flag = GJK_RETURN_CODE.NUM_ERROR
    for i in range(gjk_info.gjk_max_iterations[None]):
        # Compute normal and signed distance of the triangle faces of the simplex with respect to the origin.
        # These normals are supposed to point outwards from the simplex.
        # If the origin is inside the plane, [sdist] will be positive.
        is_sdist_all_zero = True
        for j in range(4):
            # Face j is the triangle opposite vertex j (defaults are for j == 0).
            s0, s1, s2 = si[2], si[1], si[3]
            if j == 1:
                s0, s1, s2 = si[0], si[2], si[3]
            elif j == 2:
                s0, s1, s2 = si[1], si[0], si[3]
            elif j == 3:
                s0, s1, s2 = si[0], si[1], si[2]
            n, s = func_gjk_triangle_info(gjk_state, gjk_info, i_b, s0, s1, s2)
            gjk_state.simplex_buffer_intersect.normal[i_b, j] = n
            gjk_state.simplex_buffer_intersect.sdist[i_b, j] = s
            if qd.abs(s) > gjk_info.FLOAT_MIN[None]:
                is_sdist_all_zero = False
        # If the origin is strictly on any affine hull of the faces, convergence will fail, so ignore this case
        if is_sdist_all_zero:
            break
        # Find the face with the smallest signed distance. We need to find [min_i] for the next iteration.
        min_i = 0
        for j in qd.static(range(1, 4)):
            if gjk_state.simplex_buffer_intersect.sdist[i_b, j] < gjk_state.simplex_buffer_intersect.sdist[i_b, min_i]:
                min_i = j
        min_si = si[min_i]
        min_normal = gjk_state.simplex_buffer_intersect.normal[i_b, min_i]
        min_sdist = gjk_state.simplex_buffer_intersect.sdist[i_b, min_i]
        # If origin is inside the simplex, the signed distances will all be positive
        if min_sdist >= 0:
            # Origin is inside the simplex, so we can stop
            flag = GJK_RETURN_CODE.INTERSECT
            # Copy the temporary simplex to the main simplex
            for j in qd.static(range(4)):
                gjk_state.simplex_vertex.obj1[i_b, j] = gjk_state.simplex_vertex_intersect.obj1[i_b, si[j]]
                gjk_state.simplex_vertex.obj2[i_b, j] = gjk_state.simplex_vertex_intersect.obj2[i_b, si[j]]
                gjk_state.simplex_vertex.id1[i_b, j] = gjk_state.simplex_vertex_intersect.id1[i_b, si[j]]
                gjk_state.simplex_vertex.id2[i_b, j] = gjk_state.simplex_vertex_intersect.id2[i_b, si[j]]
                gjk_state.simplex_vertex.mink[i_b, j] = gjk_state.simplex_vertex_intersect.mink[i_b, si[j]]
            break
        # Replace the worst vertex (which has the smallest signed distance) with new candidate
        (
            gjk_state.simplex_vertex_intersect.obj1[i_b, min_si],
            gjk_state.simplex_vertex_intersect.obj2[i_b, min_si],
            gjk_state.simplex_vertex_intersect.local_obj1[i_b, min_si],
            gjk_state.simplex_vertex_intersect.local_obj2[i_b, min_si],
            gjk_state.simplex_vertex_intersect.id1[i_b, min_si],
            gjk_state.simplex_vertex_intersect.id2[i_b, min_si],
            gjk_state.simplex_vertex_intersect.mink[i_b, min_si],
        ) = func_support(
            geoms_info,
            verts_info,
            static_rigid_sim_config,
            collider_state,
            collider_static_config,
            gjk_state,
            gjk_info,
            support_field_info,
            i_ga,
            i_gb,
            i_b,
            min_normal,
            pos_a,
            quat_a,
            pos_b,
            quat_b,
            False,
        )
        # Check if the origin is strictly outside of the Minkowski difference (which means there is no collision)
        new_minkowski = gjk_state.simplex_vertex_intersect.mink[i_b, min_si]
        is_no_collision = new_minkowski.dot(min_normal) < 0
        if is_no_collision:
            flag = GJK_RETURN_CODE.SEPARATED
            break
        # Swap vertices in the simplex to retain orientation
        m = (min_i + 1) % 4
        n = (min_i + 2) % 4
        swap = si[m]
        si[m] = si[n]
        si[n] = swap
    return flag
@qd.func
def func_gjk_triangle_info(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_b,
    i_va,
    i_vb,
    i_vc,
):
    """
    Compute the unit normal of a triangle face of the intersection simplex, together
    with the signed distance of its supporting plane from the origin.
    """
    a = gjk_state.simplex_vertex_intersect.mink[i_b, i_va]
    b = gjk_state.simplex_vertex_intersect.mink[i_b, i_vb]
    c = gjk_state.simplex_vertex_intersect.mink[i_b, i_vc]
    n = (c - a).cross(b - a)
    n_len = n.norm()
    sdist = 0.0
    # Normalize only when the cross-product magnitude is numerically well-conditioned.
    if (n_len > gjk_info.FLOAT_MIN[None]) and (n_len < gjk_info.FLOAT_MAX[None]):
        n = n * (1.0 / n_len)
        sdist = n.dot(a)
    else:
        # Unstable normal: report the maximum distance so callers treat this face as unusable.
        sdist = gjk_info.FLOAT_MAX[None]
    return n, sdist
@qd.func
def func_gjk_subdistance(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_b,
    n,
):
    """
    Compute the barycentric coordinates of the closest point to the origin in the n-simplex.

    Falls back from the 3-simplex solver to the 2-simplex solver, then to the 1-simplex
    solver whenever the higher-dimensional solve fails (origin not strictly inside),
    scanning all faces/edges and keeping the candidate closest to the origin.

    .. seealso::
    Montanari, Mattia, Nik Petrinic, and Ettore Barbieri. "Improving the GJK algorithm for faster and more reliable
    distance queries between convex objects." ACM Transactions on Graphics (TOG) 36.3 (2017): 1-17.
    https://dl.acm.org/doi/10.1145/3072959.3083724
    """
    _lambda = qd.math.vec4(1.0, 0.0, 0.0, 0.0)
    # Whether or not the subdistance was computed successfully for the n-simplex.
    flag = RETURN_CODE.SUCCESS
    dmin = gjk_info.FLOAT_MAX[None]
    if n == 4:
        _lambda, flag3d = func_gjk_subdistance_3d(gjk_state, i_b, 0, 1, 2, 3)
        flag = flag3d
    if (flag == RETURN_CODE.FAIL) or n == 3:
        # Either the tetrahedron solve failed (fall back over its faces) or the input
        # simplex is itself a triangle (single solve).
        failed_3d = n == 4
        num_iter = 1
        if failed_3d:
            # Iterate through 4 faces of the tetrahedron
            num_iter = 4
        for i in range(num_iter):
            k_1, k_2, k_3 = i, (i + 1) % 4, (i + 2) % 4
            _lambda2d, flag2d = func_gjk_subdistance_2d(gjk_state, gjk_info, i_b, k_1, k_2, k_3)
            if failed_3d:
                if flag2d == RETURN_CODE.SUCCESS:
                    # Keep the face whose closest point is nearest to the origin.
                    closest_point = func_simplex_vertex_linear_comb(gjk_state, i_b, 2, k_1, k_2, k_3, 0, _lambda2d, 3)
                    d = closest_point.dot(closest_point)
                    if d < dmin:
                        dmin = d
                        _lambda.fill(0.0)
                        _lambda[k_1] = _lambda2d[0]
                        _lambda[k_2] = _lambda2d[1]
                        _lambda[k_3] = _lambda2d[2]
            else:
                if flag2d == RETURN_CODE.SUCCESS:
                    _lambda = _lambda2d
                flag = flag2d
    if (flag == RETURN_CODE.FAIL) or n == 2:
        failed_3d = n == 4
        failed_2d = n == 3
        num_iter = 1
        if failed_3d:
            # Iterate through 6 edges of the tetrahedron
            num_iter = 6
        elif failed_2d:
            # Iterate through 3 edges of the triangle
            num_iter = 3
        for i in range(num_iter):
            # i in [0, 2]: edges within vertices {0, 1, 2}; i in [3, 5]: edges to vertex 3.
            k_1, k_2 = i, (i + 1) % 3
            if i >= 3:
                k_1, k_2 = i - 3, 3
            _lambda1d = func_gjk_subdistance_1d(gjk_state, i_b, k_1, k_2)
            if failed_3d or failed_2d:
                # 1D solve always succeeds; keep the edge candidate nearest to the origin.
                closest_point = func_simplex_vertex_linear_comb(gjk_state, i_b, 2, k_1, k_2, 0, 0, _lambda1d, 2)
                d = closest_point.dot(closest_point)
                if d < dmin:
                    dmin = d
                    _lambda.fill(0.0)
                    _lambda[k_1] = _lambda1d[0]
                    _lambda[k_2] = _lambda1d[1]
            else:
                _lambda = _lambda1d
    return _lambda
@qd.func
def func_gjk_subdistance_3d(
    gjk_state: array_class.GJKState,
    i_b,
    i_s1,
    i_s2,
    i_s3,
    i_s4,
):
    """
    Compute the barycentric coordinates of the closest point to the origin in the 3-simplex (tetrahedron).

    Succeeds only when the origin lies strictly inside the tetrahedron (all barycentric
    coordinates share the sign of the determinant).
    """
    flag = RETURN_CODE.FAIL
    _lambda = gs.qd_vec4(0, 0, 0, 0)
    # Simplex vertices
    s1 = gjk_state.simplex_vertex.mink[i_b, i_s1]
    s2 = gjk_state.simplex_vertex.mink[i_b, i_s2]
    s3 = gjk_state.simplex_vertex.mink[i_b, i_s3]
    s4 = gjk_state.simplex_vertex.mink[i_b, i_s4]
    # Compute the cofactors to find det(M), which corresponds to the signed volume of the tetrahedron
    Cs = qd.math.vec4(0.0, 0.0, 0.0, 0.0)
    for i in range(4):
        # Cofactor i uses the three vertices other than s_{i+1}.
        v1, v2, v3 = s2, s3, s4
        if i == 1:
            v1, v2, v3 = s1, s3, s4
        elif i == 2:
            v1, v2, v3 = s1, s2, s4
        elif i == 3:
            v1, v2, v3 = s1, s2, s3
        Cs[i] = func_det3(v1, v2, v3)
    # Alternate cofactor signs for the determinant expansion.
    Cs[0], Cs[2] = -Cs[0], -Cs[2]
    m_det = Cs.sum()
    # Compare sign of the cofactors with the determinant
    scs = gs.qd_ivec4(0, 0, 0, 0)
    for i in range(4):
        scs[i] = func_compare_sign(Cs[i], m_det)
    if scs.all():
        # If all barycentric coordinates are positive, the origin is inside the tetrahedron
        _lambda = Cs / m_det
        flag = RETURN_CODE.SUCCESS
    return _lambda, flag
@qd.func
def func_gjk_subdistance_2d(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_b,
    i_s1,
    i_s2,
    i_s3,
):
    """
    Compute the barycentric coordinates of the closest point to the origin in the 2-simplex (triangle).

    Succeeds only when the projection of the origin onto the triangle's plane falls
    strictly inside the triangle.
    """
    _lambda = qd.math.vec4(0, 0, 0, 0)
    flag = RETURN_CODE.FAIL
    # Project origin onto affine hull of the simplex (triangle)
    proj_orig, proj_flag = func_project_origin_to_plane(
        gjk_info,
        gjk_state.simplex_vertex.mink[i_b, i_s1],
        gjk_state.simplex_vertex.mink[i_b, i_s2],
        gjk_state.simplex_vertex.mink[i_b, i_s3],
    )
    if proj_flag == RETURN_CODE.SUCCESS:
        # We should find the barycentric coordinates of the projected point, but the linear system is not square:
        # [ s1.x, s2.x, s3.x ] [ l1 ] = [ proj_o.x ]
        # [ s1.y, s2.y, s3.y ] [ l2 ] = [ proj_o.y ]
        # [ s1.z, s2.z, s3.z ] [ l3 ] = [ proj_o.z ]
        # [ 1,    1,    1,   ] [ ?  ] = [ 1.0      ]
        # So we remove one row before solving the system. We exclude the axis with the largest projection of the
        # simplex using the minors of the above linear system.
        s1 = gjk_state.simplex_vertex.mink[i_b, i_s1]
        s2 = gjk_state.simplex_vertex.mink[i_b, i_s2]
        s3 = gjk_state.simplex_vertex.mink[i_b, i_s3]
        # ms[i] is the minor obtained by deleting coordinate axis i (2x area of the
        # triangle projected onto the remaining two axes).
        ms = gs.qd_vec3(
            s2[1] * s3[2] - s2[2] * s3[1] - s1[1] * s3[2] + s1[2] * s3[1] + s1[1] * s2[2] - s1[2] * s2[1],
            s2[0] * s3[2] - s2[2] * s3[0] - s1[0] * s3[2] + s1[2] * s3[0] + s1[0] * s2[2] - s1[2] * s2[0],
            s2[0] * s3[1] - s2[1] * s3[0] - s1[0] * s3[1] + s1[1] * s3[0] + s1[0] * s2[1] - s1[1] * s2[0],
        )
        absms = qd.abs(ms)
        m_max = 0.0
        s1_2d, s2_2d, s3_2d = gs.qd_vec2(0, 0), gs.qd_vec2(0, 0), gs.qd_vec2(0, 0)
        proj_orig_2d = gs.qd_vec2(0, 0)
        for i in range(3):
            # Drop the axis with the largest |minor| for the best conditioning.
            if absms[i] >= absms[(i + 1) % 3] and absms[i] >= absms[(i + 2) % 3]:
                # Remove the i-th row from the linear system
                m_max = ms[i]
                i0, i1 = (i + 1) % 3, (i + 2) % 3
                if i == 1:
                    # Swap to preserve the orientation of the reduced 2D system.
                    i0, i1 = i1, i0
                s1_2d[0], s1_2d[1] = s1[i0], s1[i1]
                s2_2d[0], s2_2d[1] = s2[i0], s2[i1]
                s3_2d[0], s3_2d[1] = s3[i0], s3[i1]
                proj_orig_2d[0] = proj_orig[i0]
                proj_orig_2d[1] = proj_orig[i1]
                break
        # Now we find the barycentric coordinates of the projected point by solving the linear system:
        # [ s1_2d.x, s2_2d.x, s3_2d.x ] [ l1 ] = [ proj_orig_2d.x ]
        # [ s1_2d.y, s2_2d.y, s3_2d.y ] [ l2 ] = [ proj_orig_2d.y ]
        # [ 1,       1,       1,      ] [ l3 ] = [ 1.0             ]
        cs = gs.qd_vec3(0, 0, 0)
        for i in range(3):
            s2d0, s2d1 = s2_2d, s3_2d
            if i == 1:
                s2d0, s2d1 = s3_2d, s1_2d
            elif i == 2:
                s2d0, s2d1 = s1_2d, s2_2d
            # Corresponds to the signed area of 2-simplex (triangle): (proj_orig_2d, s2d0, s2d1)
            cs[i] = (
                proj_orig_2d[0] * s2d0[1]
                + proj_orig_2d[1] * s2d1[0]
                + s2d0[0] * s2d1[1]
                - proj_orig_2d[0] * s2d1[1]
                - proj_orig_2d[1] * s2d0[0]
                - s2d1[0] * s2d0[1]
            )
        # Compare sign of the cofactors with the determinant
        scs = gs.qd_ivec3(0, 0, 0)
        for i in range(3):
            scs[i] = func_compare_sign(cs[i], m_max)
        if scs.all():
            # If all barycentric coordinates are positive, the origin is inside the 2-simplex (triangle)
            for i in qd.static(range(3)):
                _lambda[i] = cs[i] / m_max
            flag = RETURN_CODE.SUCCESS
    return _lambda, flag
@qd.func
def func_gjk_subdistance_1d(
    gjk_state: array_class.GJKState,
    i_b,
    i_s1,
    i_s2,
):
    """
    Compute the barycentric coordinates of the closest point to the origin in the 1-simplex (line segment).

    Always returns a result: when the projection of the origin falls outside the
    segment, the result snaps to the endpoint s2 (lambda = (0, 1)).
    """
    _lambda = gs.qd_vec4(0, 0, 0, 0)
    s1 = gjk_state.simplex_vertex.mink[i_b, i_s1]
    s2 = gjk_state.simplex_vertex.mink[i_b, i_s2]
    p_o = func_project_origin_to_line(s1, s2)
    # Pick the coordinate axis along which the segment has the largest extent,
    # to make the division below well-conditioned.
    mu_max = 0.0
    index = -1
    for i in range(3):
        mu = s1[i] - s2[i]
        if qd.abs(mu) >= qd.abs(mu_max):
            mu_max = mu
            index = i
    C1 = p_o[index] - s2[index]
    C2 = s1[index] - p_o[index]
    # Determine if projection of origin lies inside 1-simplex
    if func_compare_sign(mu_max, C1) and func_compare_sign(mu_max, C2):
        _lambda[0] = C1 / mu_max
        _lambda[1] = C2 / mu_max
    else:
        _lambda[0] = 0.0
        _lambda[1] = 1.0
    return _lambda
@qd.func
def func_is_sphere_swept_geom(
    geoms_info: array_class.GeomsInfo,
    i_g,
):
    """
    Return whether geometry [i_g] is a sphere-swept primitive (sphere or capsule).
    """
    gt = geoms_info.type[i_g]
    is_sphere = gt == gs.GEOM_TYPE.SPHERE
    is_capsule = gt == gs.GEOM_TYPE.CAPSULE
    return is_sphere or is_capsule
@qd.func
def func_project_origin_to_line(
    v1,
    v2,
):
    """
    Project the origin onto the line passing through [v1] and [v2].

    With diff = v2 - v1, the projection is:
        P = v2 - ((v2 . diff) / (diff . diff)) * diff
    (The previous docstring used v1 in the numerator, which does not match the
    computation; using v2 is correct and algebraically equal to
    v1 - ((v1 . diff) / (diff . diff)) * diff.)
    """
    diff = v2 - v1
    # Parameter of v2's component along the line direction.
    k = v2.dot(diff) / diff.dot(diff)
    P = v2 - k * diff
    return P
@qd.func
def func_simplex_vertex_linear_comb(
    gjk_state: array_class.GJKState,
    i_b,
    i_v,
    i_s1,
    i_s2,
    i_s3,
    i_s4,
    _lambda,
    n,
):
    """
    Compute the linear combination of the simplex vertices.

    Parameters:
    ----------
    i_v: int
        Which vertex field to combine (0: obj1, 1: obj2, 2: minkowski)
    i_s1..i_s4: int
        Simplex slot indices; only the first n are used.
    _lambda: vec4
        Combination weights, applied positionally to the first n selected vertices.
    n: int
        Number of vertices to combine, combine the first n vertices
    """
    res = gs.qd_vec3(0, 0, 0)
    # Default to the obj1 field; overridden below for obj2 / minkowski.
    s1 = gjk_state.simplex_vertex.obj1[i_b, i_s1]
    s2 = gjk_state.simplex_vertex.obj1[i_b, i_s2]
    s3 = gjk_state.simplex_vertex.obj1[i_b, i_s3]
    s4 = gjk_state.simplex_vertex.obj1[i_b, i_s4]
    if i_v == 1:
        s1 = gjk_state.simplex_vertex.obj2[i_b, i_s1]
        s2 = gjk_state.simplex_vertex.obj2[i_b, i_s2]
        s3 = gjk_state.simplex_vertex.obj2[i_b, i_s3]
        s4 = gjk_state.simplex_vertex.obj2[i_b, i_s4]
    elif i_v == 2:
        s1 = gjk_state.simplex_vertex.mink[i_b, i_s1]
        s2 = gjk_state.simplex_vertex.mink[i_b, i_s2]
        s3 = gjk_state.simplex_vertex.mink[i_b, i_s3]
        s4 = gjk_state.simplex_vertex.mink[i_b, i_s4]
    c1 = _lambda[0]
    c2 = _lambda[1]
    c3 = _lambda[2]
    c4 = _lambda[3]
    if n == 1:
        res = s1 * c1
    elif n == 2:
        res = s1 * c1 + s2 * c2
    elif n == 3:
        res = s1 * c1 + s2 * c2 + s3 * c3
    else:
        res = s1 * c1 + s2 * c2 + s3 * c3 + s4 * c4
    return res
@qd.func
def func_safe_gjk(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
):
    """
    Safe GJK algorithm to compute the minimum distance between two convex objects,
    using thread-local pos/quat for both geometries.

    Thread-safety note: Geometry indices `i_ga` and `i_gb` are only used for read-only
    metadata access (checking geometry types via `func_is_discrete_geoms`) and passing to
    support functions. They do not access `geoms_state.pos` or `geoms_state.quat`.

    This implementation is safer than the one based on the MuJoCo implementation for the following reasons:
    1) It guarantees that the origin is strictly inside the tetrahedron when the intersection is detected.
    2) It guarantees to generate a non-degenerate tetrahedron if there is no numerical error, which is necessary
    for the following EPA algorithm to work correctly.
    3) When computing the face normals on the simplex, it uses a more robust method than using the origin.

    TODO: This implementation could be improved by using shrink_sphere option as the MuJoCo implementation does.
    TODO: This implementation could be further improved by referencing the follow-up work shown below.

    .. seealso::
    Original paper:
    Gilbert, Elmer G., Daniel W. Johnson, and S. Sathiya Keerthi.
    "A fast procedure for computing the distance between complex objects in three-dimensional space."
    IEEE Journal on Robotics and Automation 4.2 (2002): 193-203.

    Further improvements:
    Cameron, Stephen. "Enhancing GJK: Computing minimum and penetration distances between convex polyhedra."
    Proceedings of international conference on robotics and automation. Vol. 4. IEEE, 1997.
    https://www.cs.ox.ac.uk/people/stephen.cameron/distances/gjk2.4/
    Montaut, Louis, et al. "Collision detection accelerated: An optimization perspective."
    https://arxiv.org/abs/2205.09663
    """
    # Compute the initial tetrahedron using two random directions
    init_flag = RETURN_CODE.SUCCESS
    gjk_state.simplex.nverts[i_b] = 0
    for i in range(4):
        # Axis-aligned seed direction: +/-z for i=0,1 and +/-y for i=2,3.
        dir = qd.Vector.zero(gs.qd_float, 3)
        dir[2 - i // 2] = 1.0 - 2.0 * (i % 2)
        obj1, obj2, local_obj1, local_obj2, id1, id2, minkowski = func_safe_gjk_support(
            geoms_info,
            verts_info,
            rigid_global_info,
            static_rigid_sim_config,
            collider_state,
            collider_static_config,
            gjk_state,
            gjk_info,
            support_field_info,
            i_ga,
            i_gb,
            pos_a,
            quat_a,
            pos_b,
            quat_b,
            i_b,
            dir,
        )
        # Check if the new vertex would make a valid simplex.
        valid = func_is_new_simplex_vertex_valid(gjk_state, gjk_info, i_b, id1, id2, minkowski)
        # If this is not a valid vertex, fall back to a brute-force routine to find a valid vertex.
        if not valid:
            obj1, obj2, local_obj1, local_obj2, id1, id2, minkowski, init_flag = func_search_valid_simplex_vertex(
                geoms_info,
                verts_info,
                rigid_global_info,
                static_rigid_sim_config,
                collider_state,
                collider_static_config,
                gjk_state,
                gjk_info,
                support_field_info,
                i_ga,
                i_gb,
                pos_a,
                quat_a,
                pos_b,
                quat_b,
                i_b,
            )
            # If the brute-force search failed, we cannot proceed with GJK.
            if init_flag == RETURN_CODE.FAIL:
                break
        gjk_state.simplex_vertex.obj1[i_b, i] = obj1
        gjk_state.simplex_vertex.obj2[i_b, i] = obj2
        gjk_state.simplex_vertex.local_obj1[i_b, i] = local_obj1
        gjk_state.simplex_vertex.local_obj2[i_b, i] = local_obj2
        gjk_state.simplex_vertex.id1[i_b, i] = id1
        gjk_state.simplex_vertex.id2[i_b, i] = id2
        gjk_state.simplex_vertex.mink[i_b, i] = minkowski
        gjk_state.simplex.nverts[i_b] += 1
    gjk_flag = GJK_RETURN_CODE.SEPARATED
    if init_flag == RETURN_CODE.SUCCESS:
        # Simplex index
        si = qd.Vector([0, 1, 2, 3], dt=gs.qd_int)
        for i in range(gjk_info.gjk_max_iterations[None]):
            # Compute normal and signed distance of the triangle faces of the simplex with respect to the origin.
            # These normals are supposed to point outwards from the simplex. If the origin is inside the plane,
            # [sdist] will be positive.
            for j in range(4):
                # Face j is opposite the apex vertex si[j].
                s0, s1, s2, ap = si[2], si[1], si[3], si[0]
                if j == 1:
                    s0, s1, s2, ap = si[0], si[2], si[3], si[1]
                elif j == 2:
                    s0, s1, s2, ap = si[1], si[0], si[3], si[2]
                elif j == 3:
                    s0, s1, s2, ap = si[0], si[1], si[2], si[3]
                n, s = func_safe_gjk_triangle_info(gjk_state, i_b, s0, s1, s2, ap)
                gjk_state.simplex_buffer.normal[i_b, j] = n
                gjk_state.simplex_buffer.sdist[i_b, j] = s
            # Find the face with the smallest signed distance. We need to find [min_i] for the next iteration.
            min_i = 0
            for j in qd.static(range(1, 4)):
                if gjk_state.simplex_buffer.sdist[i_b, j] < gjk_state.simplex_buffer.sdist[i_b, min_i]:
                    min_i = j
            min_si = si[min_i]
            min_normal = gjk_state.simplex_buffer.normal[i_b, min_i]
            min_sdist = gjk_state.simplex_buffer.sdist[i_b, min_i]
            # If origin is inside the simplex, the signed distances will all be positive
            if min_sdist >= 0:
                # Origin is inside the simplex, so we can stop
                gjk_flag = GJK_RETURN_CODE.INTERSECT
                break
            # Drop the worst vertex: move slot 3 into its place so slot 3 becomes free
            # for the incoming candidate vertex.
            gjk_state.simplex.nverts[i_b] = 3
            if min_si != 3:
                gjk_state.simplex_vertex.obj1[i_b, min_si] = gjk_state.simplex_vertex.obj1[i_b, 3]
                gjk_state.simplex_vertex.obj2[i_b, min_si] = gjk_state.simplex_vertex.obj2[i_b, 3]
                gjk_state.simplex_vertex.local_obj1[i_b, min_si] = gjk_state.simplex_vertex.local_obj1[i_b, 3]
                gjk_state.simplex_vertex.local_obj2[i_b, min_si] = gjk_state.simplex_vertex.local_obj2[i_b, 3]
                gjk_state.simplex_vertex.id1[i_b, min_si] = gjk_state.simplex_vertex.id1[i_b, 3]
                gjk_state.simplex_vertex.id2[i_b, min_si] = gjk_state.simplex_vertex.id2[i_b, 3]
                gjk_state.simplex_vertex.mink[i_b, min_si] = gjk_state.simplex_vertex.mink[i_b, 3]
            # Find a new candidate vertex to replace the worst vertex (which has the smallest signed distance)
            obj1, obj2, local_obj1, local_obj2, id1, id2, minkowski = func_safe_gjk_support(
                geoms_info,
                verts_info,
                rigid_global_info,
                static_rigid_sim_config,
                collider_state,
                collider_static_config,
                gjk_state,
                gjk_info,
                support_field_info,
                i_ga,
                i_gb,
                pos_a,
                quat_a,
                pos_b,
                quat_b,
                i_b,
                min_normal,
            )
            duplicate = func_is_new_simplex_vertex_duplicate(gjk_state, i_b, id1, id2)
            if duplicate:
                # If the new vertex is a duplicate, it means separation.
                gjk_flag = GJK_RETURN_CODE.SEPARATED
                break
            degenerate = func_is_new_simplex_vertex_degenerate(gjk_state, gjk_info, i_b, minkowski)
            if degenerate:
                # If the new vertex is degenerate, we cannot proceed with GJK.
                gjk_flag = GJK_RETURN_CODE.NUM_ERROR
                break
            # Check if the origin is strictly outside of the Minkowski difference (which means there is no collision)
            is_no_collision = minkowski.dot(min_normal) < 0.0
            if is_no_collision:
                gjk_flag = GJK_RETURN_CODE.SEPARATED
                break
            gjk_state.simplex_vertex.obj1[i_b, 3] = obj1
            gjk_state.simplex_vertex.obj2[i_b, 3] = obj2
            gjk_state.simplex_vertex.local_obj1[i_b, 3] = local_obj1
            gjk_state.simplex_vertex.local_obj2[i_b, 3] = local_obj2
            gjk_state.simplex_vertex.id1[i_b, 3] = id1
            gjk_state.simplex_vertex.id2[i_b, 3] = id2
            gjk_state.simplex_vertex.mink[i_b, 3] = minkowski
            gjk_state.simplex.nverts[i_b] = 4
    if gjk_flag == GJK_RETURN_CODE.INTERSECT:
        gjk_state.distance[i_b] = 0.0
    else:
        # Any non-intersecting outcome (including iteration exhaustion) reports separation.
        gjk_flag = GJK_RETURN_CODE.SEPARATED
        gjk_state.distance[i_b] = gjk_info.FLOAT_MAX[None]
    return gjk_flag
@qd.func
def func_is_new_simplex_vertex_valid(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_b,
    id1,
    id2,
    mink,
):
    """
    Check validity of a candidate simplex vertex (defined by id1, id2 and mink).

    A candidate is valid only when it is not already present in the simplex and
    inserting it would not make the simplex degenerate.
    """
    already_present = func_is_new_simplex_vertex_duplicate(gjk_state, i_b, id1, id2)
    makes_degenerate = func_is_new_simplex_vertex_degenerate(gjk_state, gjk_info, i_b, mink)
    return not (already_present or makes_degenerate)
@qd.func
def func_is_new_simplex_vertex_duplicate(
    gjk_state: array_class.GJKState,
    i_b,
    id1,
    id2,
):
    """
    Check if the incoming simplex vertex is already in the simplex.

    A vertex with id -1 on either side never matches (e.g. smooth geometries without
    discrete vertex ids).
    """
    num_verts = gjk_state.simplex.nverts[i_b]
    found = False
    for k in range(num_verts):
        same_1 = (id1 != -1) and (gjk_state.simplex_vertex.id1[i_b, k] == id1)
        same_2 = (id2 != -1) and (gjk_state.simplex_vertex.id2[i_b, k] == id2)
        if same_1 and same_2:
            found = True
            break
    return found
@qd.func
def func_is_new_simplex_vertex_degenerate(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_b,
    mink,
):
    """
    Check if the simplex becomes degenerate after inserting a new vertex, assuming that the current simplex is okay.
    """
    is_degenerate = False
    # Check if the new vertex is not very close to the existing vertices
    nverts = gjk_state.simplex.nverts[i_b]
    for i in range(nverts):
        if (gjk_state.simplex_vertex.mink[i_b, i] - mink).norm_sqr() < (gjk_info.simplex_max_degeneracy_sq[None]):
            is_degenerate = True
            break
    if not is_degenerate:
        # Check the validity based on the simplex dimension
        if nverts == 2:
            # Becomes a triangle if valid, check if the three vertices are not collinear
            is_degenerate = func_is_colinear(
                gjk_info,
                gjk_state.simplex_vertex.mink[i_b, 0],
                gjk_state.simplex_vertex.mink[i_b, 1],
                mink,
            )
        elif nverts == 3:
            # Becomes a tetrahedron if valid, check if the four vertices are not coplanar
            is_degenerate = func_is_coplanar(
                gjk_info,
                gjk_state.simplex_vertex.mink[i_b, 0],
                gjk_state.simplex_vertex.mink[i_b, 1],
                gjk_state.simplex_vertex.mink[i_b, 2],
                mink,
            )
    # Note: nverts of 0 or 1 needs no dimensional check — any non-coincident point is fine.
    return is_degenerate
@qd.func
def func_is_colinear(
    gjk_info: array_class.GJKInfo,
    v1,
    v2,
    v3,
):
    """
    Return whether the three points are (nearly) collinear.

    Assumes no pair of the points coincides, so both edge vectors are non-zero.
    """
    edge_a = v2 - v1
    edge_b = v3 - v1
    area_vec = edge_a.cross(edge_b)
    # Scale the tolerance by the edge lengths so the test is scale-invariant.
    tol = (gjk_info.simplex_max_degeneracy_sq[None]) * edge_a.norm_sqr() * edge_b.norm_sqr()
    return area_vec.norm_sqr() < tol
@qd.func
def func_is_coplanar(
    gjk_info: array_class.GJKInfo,
    v1,
    v2,
    v3,
    v4,
):
    """
    Return whether the four points are (nearly) coplanar.

    Assumes no triplet of the points is collinear, so the plane normal is non-zero.
    """
    unit_a = (v2 - v1).normalized()
    unit_b = (v3 - v1).normalized()
    plane_normal = unit_a.cross(unit_b)
    offset = v4 - v1
    # Scale-invariant tolerance on the out-of-plane component of v4.
    tol = (gjk_info.simplex_max_degeneracy_sq[None]) * plane_normal.norm_sqr() * offset.norm_sqr()
    return (plane_normal.dot(offset) ** 2) < tol
@qd.func
def func_search_valid_simplex_vertex(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
):
    """
    Search for a valid simplex vertex (non-duplicate, non-degenerate) in the Minkowski difference,
    using thread-local pos/quat for both geometries.

    Returns the vertex data plus a RETURN_CODE flag; FAIL means no valid vertex was found.
    """
    obj1 = gs.qd_vec3(0.0, 0.0, 0.0)
    obj2 = gs.qd_vec3(0.0, 0.0, 0.0)
    local_obj1 = gs.qd_vec3(0.0, 0.0, 0.0)
    local_obj2 = gs.qd_vec3(0.0, 0.0, 0.0)
    id1 = -1
    id2 = -1
    minkowski = gs.qd_vec3(0.0, 0.0, 0.0)
    flag = RETURN_CODE.FAIL
    # If both geometries are discrete, we can use a brute-force search to find a valid simplex vertex.
    if func_is_discrete_geoms(geoms_info, i_ga, i_gb):
        geom_nverts = gs.qd_ivec2(0, 0)
        for i in range(2):
            geom_nverts[i] = func_num_discrete_geom_vertices(geoms_info, i_ga if i == 0 else i_gb)
        num_cases = geom_nverts[0] * geom_nverts[1]
        for k in range(num_cases):
            # Round-robin scan starting from where the previous search left off,
            # so repeated calls do not retry the same pairs first.
            m = (k + gjk_state.last_searched_simplex_vertex_id[i_b]) % num_cases
            i = m // geom_nverts[1]
            j = m % geom_nverts[1]
            id1 = geoms_info.vert_start[i_ga] + i
            id2 = geoms_info.vert_start[i_gb] + j
            for p in range(2):
                obj, local_obj = func_get_discrete_geom_vertex(
                    geoms_info,
                    verts_info,
                    i_ga if p == 0 else i_gb,
                    pos_a if p == 0 else pos_b,
                    quat_a if p == 0 else quat_b,
                    i if p == 0 else j,
                )
                if p == 0:
                    obj1 = obj
                    local_obj1 = local_obj
                else:
                    obj2 = obj
                    local_obj2 = local_obj
            minkowski = obj1 - obj2
            # Check if the new vertex is valid
            if func_is_new_simplex_vertex_valid(gjk_state, gjk_info, i_b, id1, id2, minkowski):
                flag = RETURN_CODE.SUCCESS
                # Update buffer
                gjk_state.last_searched_simplex_vertex_id[i_b] = (m + 1) % num_cases
                break
    else:
        # Try search direction based on the current simplex.
        nverts = gjk_state.simplex.nverts[i_b]
        if nverts == 3:
            # If we have a triangle, use its normal as the search direction.
            v1 = gjk_state.simplex_vertex.mink[i_b, 0]
            v2 = gjk_state.simplex_vertex.mink[i_b, 1]
            v3 = gjk_state.simplex_vertex.mink[i_b, 2]
            dir = (v3 - v1).cross(v2 - v1).normalized()
            # Try both sides of the triangle.
            for i in range(2):
                d = dir if i == 0 else -dir
                obj1, obj2, local_obj1, local_obj2, id1, id2, minkowski = func_safe_gjk_support(
                    geoms_info,
                    verts_info,
                    rigid_global_info,
                    static_rigid_sim_config,
                    collider_state,
                    collider_static_config,
                    gjk_state,
                    gjk_info,
                    support_field_info,
                    i_ga,
                    i_gb,
                    pos_a,
                    quat_a,
                    pos_b,
                    quat_b,
                    i_b,
                    d,
                )
                # Check if the new vertex is valid
                if func_is_new_simplex_vertex_valid(gjk_state, gjk_info, i_b, id1, id2, minkowski):
                    flag = RETURN_CODE.SUCCESS
                    break
    return obj1, obj2, local_obj1, local_obj2, id1, id2, minkowski, flag
@qd.func
def func_num_discrete_geom_vertices(
    geoms_info: array_class.GeomsInfo,
    i_g,
):
    """
    Number of discrete vertices owned by geometry [i_g], i.e. the size of its
    [vert_start, vert_end) vertex range.
    """
    return geoms_info.vert_end[i_g] - geoms_info.vert_start[i_g]
@qd.func
def func_get_discrete_geom_vertex(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    i_g,
    pos: qd.types.vector(3, dtype=gs.qd_float),
    quat: qd.types.vector(4, dtype=gs.qd_float),
    i_v,
):
    """
    Get the discrete vertex of the geometry for the given index [i_v].

    Returns both the world-frame position and the geometry-local position of the vertex.
    """
    geom_type = geoms_info.type[i_g]
    # Get the vertex position in the local frame of the geometry.
    v_ = qd.Vector([0.0, 0.0, 0.0], dt=gs.qd_float)
    if geom_type == gs.GEOM_TYPE.BOX:
        # For the consistency with the [func_support_box] function of [SupportField] class, we handle the box
        # vertex positions in a different way than the general mesh.
        # Bits 0/1/2 of [i_v] select the corner sign along x/y/z; data[i_g][0..2] are halved
        # here, so presumably they hold the full box side lengths — TODO confirm.
        v_ = qd.Vector(
            [
                (1.0 if (i_v & 1 == 1) else -1.0) * geoms_info.data[i_g][0] * 0.5,
                (1.0 if (i_v & 2 == 2) else -1.0) * geoms_info.data[i_g][1] * 0.5,
                (1.0 if (i_v & 4 == 4) else -1.0) * geoms_info.data[i_g][2] * 0.5,
            ],
            dt=gs.qd_float,
        )
    elif geom_type == gs.GEOM_TYPE.MESH:
        vert_start = geoms_info.vert_start[i_g]
        v_ = verts_info.init_pos[vert_start + i_v]
    # Transform the vertex position to the world frame using thread-local pos/quat
    v = gu.qd_transform_by_trans_quat(v_, pos, quat)
    return v, v_
@qd.func
def func_safe_gjk_triangle_info(
    gjk_state: array_class.GJKState,
    i_b,
    i_ta,
    i_tb,
    i_tc,
    i_apex,
):
    """
    Compute the outward unit normal and signed origin distance of a simplex triangle face.

    The face is the triangle (i_ta, i_tb, i_tc); the apex vertex fixes the orientation
    so the normal points away from the simplex interior. Consequently, the signed
    distance is positive whenever the origin lies on the inner side of this face.
    """
    a = gjk_state.simplex_vertex.mink[i_b, i_ta]
    b = gjk_state.simplex_vertex.mink[i_b, i_tb]
    c = gjk_state.simplex_vertex.mink[i_b, i_tc]
    apex = gjk_state.simplex_vertex.mink[i_b, i_apex]
    # Non-zero by construction: the simplex is built avoiding degenerate vertices.
    n = (c - a).cross(b - a).normalized()
    # Flip so the normal points away from the apex, i.e. outward from the simplex.
    if n.dot(apex - a) > 0.0:
        n = -n
    # Signed distance of the origin from the face's supporting plane.
    sdist = n.dot(a)
    return n, sdist
@qd.func
def func_safe_gjk_support(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    dir,
):
    """
    Find support points on the two objects using [dir] to use in the [safe_gjk] algorithm.
    Uses thread-local pos/quat for both geometries.

    This is a more robust version of the support function that finds only one pair of support points, because this
    function perturbs the support direction to find the best support points that guarantee non-degenerate simplex
    in the GJK algorithm.

    Parameters:
    ----------
    dir: gs.qd_vec3
        The unit direction in which to find the support points, from [ga] (obj 1) to [gb] (obj 2).
    """
    EPS = rigid_global_info.EPS[None]
    obj1 = gs.qd_vec3(0.0, 0.0, 0.0)
    obj2 = gs.qd_vec3(0.0, 0.0, 0.0)
    local_obj1 = gs.qd_vec3(0.0, 0.0, 0.0)
    local_obj2 = gs.qd_vec3(0.0, 0.0, 0.0)
    id1 = gs.qd_int(-1)
    id2 = gs.qd_int(-1)
    mink = obj1 - obj2
    # Iteration 0 uses [dir] as-is; iterations 1..8 try the 8 sign-perturbed variants.
    for i in range(9):
        n_dir = dir
        if i > 0:
            j = i - 1
            # NOTE(review): (j & 2) in {0, 2} and (j & 4) in {0, 4}, so the y/z perturbation
            # magnitudes are EPS vs 3*EPS rather than symmetric +/-EPS; possibly
            # (j >> 1) & 1 / (j >> 2) & 1 was intended — confirm against upstream.
            n_dir[0] += -(1.0 - 2.0 * (j & 1)) * EPS
            n_dir[1] += -(1.0 - 2.0 * (j & 2)) * EPS
            n_dir[2] += -(1.0 - 2.0 * (j & 4)) * EPS
            # First order normalization based on Taylor series is accurate enough
            n_dir *= 2.0 - n_dir.dot(dir)
        num_supports = func_count_support(geoms_info, support_field_info, i_ga, i_gb, quat_a, quat_b, n_dir)
        if i > 0 and num_supports > 1:
            # If this is a perturbed direction and we have more than one support point, we skip this iteration. If
            # it was the original direction, we continue to find the support points to keep it as the baseline.
            continue
        # Use the current direction to find the support points.
        for j in range(2):
            # j == 0: geometry a along +n_dir; j == 1: geometry b along -n_dir.
            d = n_dir if j == 0 else -n_dir
            i_g = i_ga if j == 0 else i_gb
            pos = pos_a if j == 0 else pos_b
            quat = quat_a if j == 0 else quat_b
            sp, local_sp, si = support_driver(
                geoms_info,
                verts_info,
                static_rigid_sim_config,
                collider_state,
                collider_static_config,
                gjk_state,
                gjk_info,
                support_field_info,
                d,
                i_g,
                pos,
                quat,
                i_b,
                j,
                False,
            )
            if j == 0:
                obj1 = sp
                local_obj1 = local_sp
                id1 = si
            else:
                obj2 = sp
                local_obj2 = local_sp
                id2 = si
        mink = obj1 - obj2
        if i == 0:
            if num_supports > 1:
                # If there were multiple valid support points, we move on to the next iteration to perturb the
                # direction and find better support points.
                continue
            else:
                break
        # If it was a perturbed direction, check if the support points have been found before.
        if i == 8:
            # If this was the last iteration, we don't check if it has been found before.
            break
        # Check if the updated simplex would be a degenerate simplex.
        if func_is_new_simplex_vertex_valid(gjk_state, gjk_info, i_b, id1, id2, mink):
            break
    return obj1, obj2, local_obj1, local_obj2, id1, id2, mink
@qd.func
def count_support_driver(
    geoms_info: array_class.GeomsInfo,
    support_field_info: array_class.SupportFieldInfo,
    d,
    i_g,
    quat: qd.types.vector(4, dtype=gs.qd_float),
):
    """
    Count how many candidate support points geometry [i_g] has along direction [d],
    using the thread-local orientation [quat] instead of reading from geoms_state.

    Geometries other than boxes and meshes always report a single support point.
    """
    gtype = geoms_info.type[i_g]
    num_candidates = 1
    if gtype == gs.GEOM_TYPE.BOX:
        num_candidates = support_field._func_count_supports_box(d, quat)
    elif gtype == gs.GEOM_TYPE.MESH:
        num_candidates = support_field._func_count_supports_world(
            support_field_info,
            d,
            i_g,
            quat,
        )
    return num_candidates
@qd.func
def func_count_support(
    geoms_info: array_class.GeomsInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    dir,
):
    """
    Count the number of possible pairs of support points on the two objects along [dir],
    using thread-local orientations for both geometries.

    Geometry a is queried along +dir and geometry b along -dir; the pair count is the
    product of the two per-geometry counts.
    """
    count_a = count_support_driver(geoms_info, support_field_info, dir, i_ga, quat_a)
    count_b = count_support_driver(geoms_info, support_field_info, -dir, i_gb, quat_b)
    return count_a * count_b
# Keep the legacy import path working: expose this module under its old name
# so downstream code importing "genesis.engine.solvers.rigid.gjk_decomp" still resolves.
from genesis.utils.deprecated_module_wrapper import create_virtual_deprecated_module

create_virtual_deprecated_module(__name__, "genesis.engine.solvers.rigid.gjk_decomp")
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/collider/gjk.py",
"license": "Apache License 2.0",
"lines": 1665,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/collider/multi_contact.py | """
Multi-contact detection for collision handling.
This module contains the multi-contact detection algorithm based on
Sutherland-Hodgman polygon clipping for finding multiple contact points
between colliding geometric entities (face-face, edge-face pairs).
"""
import quadrants as qd
import genesis as gs
import genesis.utils.geom as gu
import genesis.utils.array_class as array_class
from .constants import RETURN_CODE
from .utils import (
func_is_equal_vec,
)
@qd.func
def func_multi_contact(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    faces_info: array_class.FacesInfo,
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_ga,
    i_gb,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    i_f,
):
    """
    Multi-contact detection algorithm based on Sutherland-Hodgman polygon clipping algorithm. For the two geometric
    entities that form the minimum distance (e.g. face-face, edge-face), this function tests if the pair is
    parallel, and if so, it clips one of the pair against the other to find the contact points.
    Parameters
    ----------
    i_f: int
        Index of the face in the EPA polytope where the minimum distance is found.
    .. seealso::
    MuJoCo's original implementation:
    https://github.com/google-deepmind/mujoco/blob/7dc7a349c5ba2db2d3f8ab50a367d08e2f1afbbc/src/engine/engine_collision_gjk.c#L2112
    """
    # Get vertices of the nearest face from EPA
    # idN: per-geometry vertex indices of the face corners; objN: the
    # corresponding support points on each geometry.
    v11i = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[0]].id1
    v12i = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[1]].id1
    v13i = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[2]].id1
    v21i = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[0]].id2
    v22i = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[1]].id2
    v23i = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[2]].id2
    v11 = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[0]].obj1
    v12 = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[1]].obj1
    v13 = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[2]].obj1
    v21 = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[0]].obj2
    v22 = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[1]].obj2
    v23 = gjk_state.polytope_verts[i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[2]].obj2
    # Get the simplex dimension of geom 1 and 2
    # (1 = point, 2 = edge, 3 = triangle; duplicates are removed/reordered)
    nface1, nface2 = 0, 0
    for i in range(2):
        v1i, v2i, v3i, v1, v2, v3 = v11i, v12i, v13i, v11, v12, v13
        if i == 1:
            v1i, v2i, v3i, v1, v2, v3 = v21i, v22i, v23i, v21, v22, v23
        nface, v1i, v2i, v3i, v1, v2, v3 = func_simplex_dim(v1i, v2i, v3i, v1, v2, v3)
        if i == 0:
            nface1, v11i, v12i, v13i, v11, v12, v13 = nface, v1i, v2i, v3i, v1, v2, v3
        else:
            nface2, v21i, v22i, v23i, v21, v22, v23 = nface, v1i, v2i, v3i, v1, v2, v3
    # Separation directions between the first witness pair: geom1 -> geom2 and its reverse.
    dir = gjk_state.witness[i_b, 0].point_obj2 - gjk_state.witness[i_b, 0].point_obj1
    dir_neg = gjk_state.witness[i_b, 0].point_obj1 - gjk_state.witness[i_b, 0].point_obj2
    # Get all possible face normals for each geom
    nnorms1, nnorms2 = 0, 0
    geom_type_a = geoms_info.type[i_ga]
    geom_type_b = geoms_info.type[i_gb]
    for i_g0 in range(2):
        geom_type = geom_type_a if i_g0 == 0 else geom_type_b
        i_g = i_ga if i_g0 == 0 else i_gb
        nface = nface1 if i_g0 == 0 else nface2
        v1i = v11i if i_g0 == 0 else v21i
        v2i = v12i if i_g0 == 0 else v22i
        v3i = v13i if i_g0 == 0 else v23i
        t_dir = dir_neg if i_g0 == 0 else dir
        nnorms = 0
        if geom_type == gs.GEOM_TYPE.BOX:
            quat = quat_a if i_g0 == 0 else quat_b
            nnorms = func_potential_box_normals(
                geoms_info, gjk_state, gjk_info, i_g, quat, i_b, nface, v1i, v2i, v3i, t_dir
            )
        elif geom_type == gs.GEOM_TYPE.MESH:
            quat = quat_a if i_g0 == 0 else quat_b
            nnorms = func_potential_mesh_normals(
                geoms_info, verts_info, faces_info, gjk_state, gjk_info, i_g, quat, i_b, nface, v1i, v2i, v3i
            )
        # Stash this geometry's candidate normals/ids from the scratch buffer
        # [contact_normals] into the per-pair [contact_faces] slots.
        for i_n in range(nnorms):
            if i_g0 == 0:
                gjk_state.contact_faces[i_b, i_n].normal1 = gjk_state.contact_normals[i_b, i_n].normal
                gjk_state.contact_faces[i_b, i_n].id1 = gjk_state.contact_normals[i_b, i_n].id
                nnorms1 = nnorms
            else:
                gjk_state.contact_faces[i_b, i_n].normal2 = gjk_state.contact_normals[i_b, i_n].normal
                gjk_state.contact_faces[i_b, i_n].id2 = gjk_state.contact_normals[i_b, i_n].id
                nnorms2 = nnorms
    # Determine if any two face normals match
    aligned_faces_idx, aligned_faces_flag = func_find_aligned_faces(gjk_state, gjk_info, i_b, nnorms1, nnorms2)
    no_multiple_contacts = False
    edgecon1, edgecon2 = False, False
    if aligned_faces_flag == RETURN_CODE.FAIL:
        # No aligned faces found; check if there was edge-face collision
        # [is_edge_face]: geom1 is edge, geom2 is face
        # [is_face_edge]: geom1 is face, geom2 is edge
        is_edge_face = (nface1 < 3) and (nface1 <= nface2)
        is_face_edge = (not is_edge_face) and nface2 < 3
        if is_edge_face or is_face_edge:
            # Work on the geometry whose nearest feature is the edge (or point).
            i_g = i_ga if is_edge_face else i_gb
            geom_type = geom_type_a if is_edge_face else geom_type_b
            nface = nface1 if is_edge_face else nface2
            v1 = v11 if is_edge_face else v21
            v2 = v12 if is_edge_face else v22
            v1i = v11i if is_edge_face else v21i
            v2i = v12i if is_edge_face else v22i
            nnorms = 0
            if geom_type == gs.GEOM_TYPE.BOX:
                pos = pos_a if is_edge_face else pos_b
                quat = quat_a if is_edge_face else quat_b
                nnorms = func_potential_box_edge_normals(
                    geoms_info, gjk_state, gjk_info, i_g, pos, quat, i_b, nface, v1, v2, v1i, v2i
                )
            elif geom_type == gs.GEOM_TYPE.MESH:
                pos = pos_a if is_edge_face else pos_b
                quat = quat_a if is_edge_face else quat_b
                nnorms = func_potential_mesh_edge_normals(
                    geoms_info,
                    verts_info,
                    faces_info,
                    gjk_state,
                    gjk_info,
                    i_g,
                    pos,
                    quat,
                    i_b,
                    nface,
                    v1,
                    v2,
                    v1i,
                    v2i,
                )
            if is_edge_face:
                nnorms1 = nnorms
            else:
                nnorms2 = nnorms
            if nnorms > 0:
                # Copy the candidate edge directions (plus edge endpoints) into
                # the slot that belongs to the edge-side geometry.
                for i_n in range(nnorms):
                    if is_edge_face:
                        gjk_state.contact_faces[i_b, i_n].normal1 = gjk_state.contact_normals[i_b, i_n].normal
                    else:
                        gjk_state.contact_faces[i_b, i_n].normal2 = gjk_state.contact_normals[i_b, i_n].normal
                    gjk_state.contact_faces[i_b, i_n].endverts = gjk_state.contact_normals[i_b, i_n].endverts
                # Check if any of the edge normals match
                nedges, nfaces = nnorms1, nnorms2
                if not is_edge_face:
                    nedges, nfaces = nfaces, nedges
                aligned_faces_idx, aligned_edge_face_flag = func_find_aligned_edge_face(
                    gjk_state, gjk_info, i_b, nedges, nfaces, is_edge_face
                )
                if aligned_edge_face_flag == RETURN_CODE.FAIL:
                    no_multiple_contacts = True
                else:
                    if is_edge_face:
                        edgecon1 = True
                    else:
                        edgecon2 = True
        else:
            # No multiple contacts found
            no_multiple_contacts = True
    if not no_multiple_contacts:
        # [i]: index of the matched candidate on geom 1's side; [j]: on geom 2's side.
        i, j = aligned_faces_idx[0], aligned_faces_idx[1]
        # Recover matching edge or face from geoms
        for k in range(2):
            edgecon = edgecon1 if k == 0 else edgecon2
            geom_type = geom_type_a if k == 0 else geom_type_b
            i_g = i_ga if k == 0 else i_gb
            nface = 0
            if edgecon:
                # The matched feature is an edge: its two endpoints are the EPA
                # face's first vertex and the stored edge end vertex.
                if k == 0:
                    gjk_state.contact_faces[i_b, 0].vert1 = gjk_state.polytope_verts[
                        i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[0]
                    ].obj1
                    gjk_state.contact_faces[i_b, 1].vert1 = gjk_state.contact_faces[i_b, i].endverts
                else:
                    gjk_state.contact_faces[i_b, 0].vert2 = gjk_state.polytope_verts[
                        i_b, gjk_state.polytope_faces[i_b, i_f].verts_idx[0]
                    ].obj2
                    gjk_state.contact_faces[i_b, 1].vert2 = gjk_state.contact_faces[i_b, j].endverts
                nface = 2
            else:
                normal_face_idx = gjk_state.contact_faces[i_b, i].id1
                if k == 0 and edgecon2:
                    # Since [i] is the edge idx, use [j]
                    normal_face_idx = gjk_state.contact_faces[i_b, j].id1
                elif k == 1:
                    normal_face_idx = gjk_state.contact_faces[i_b, j].id2
                if geom_type == gs.GEOM_TYPE.BOX:
                    pos = pos_a if k == 0 else pos_b
                    quat = quat_a if k == 0 else quat_b
                    nface = func_box_face(geoms_info, gjk_state, i_g, pos, quat, i_b, k, normal_face_idx)
                elif geom_type == gs.GEOM_TYPE.MESH:
                    pos = pos_a if k == 0 else pos_b
                    quat = quat_a if k == 0 else quat_b
                    nface = func_mesh_face(verts_info, faces_info, gjk_state, i_g, pos, quat, i_b, k, normal_face_idx)
            if k == 0:
                nface1 = nface
            else:
                nface2 = nface
        # Pick the reference normal for clipping and the direction used to offset
        # witness points from one surface to the other.
        approx_dir = gs.qd_vec3(0.0, 0.0, 0.0)
        normal = gs.qd_vec3(0.0, 0.0, 0.0)
        if edgecon1:
            # Face 1 is an edge, so clip face 1 against face 2
            approx_dir = gjk_state.contact_faces[i_b, j].normal2 * dir.norm()
            normal = gjk_state.contact_faces[i_b, j].normal2
        elif edgecon2:
            # Face 2 is an edge, so clip face 2 against face 1
            approx_dir = gjk_state.contact_faces[i_b, j].normal1 * dir.norm()
            normal = gjk_state.contact_faces[i_b, j].normal1
        else:
            # Face-face contact
            approx_dir = gjk_state.contact_faces[i_b, j].normal2 * dir.norm()
            normal = gjk_state.contact_faces[i_b, i].normal1
        # Clip polygon
        func_clip_polygon(gjk_state, gjk_info, i_b, nface1, nface2, edgecon1, edgecon2, normal, approx_dir)
@qd.func
def func_simplex_dim(
    v1i,
    v2i,
    v3i,
    v1,
    v2,
    v3,
):
    """
    Classify the dimension (1-3) of the simplex given by three vertex ids and points.

    1 when all three ids coincide (a point), 2 when exactly two coincide (an edge),
    3 when all differ (a triangle). The vertices are returned possibly reordered so
    that the first [dim] entries are distinct: when v1 == v2 != v3, v2 and v3 are
    swapped so that the two distinct vertices come first.
    """
    out_i1, out_i2, out_i3 = v1i, v2i, v3i
    out_v1, out_v2, out_v3 = v1, v2, v3
    dim = 0
    if v1i == v2i:
        if v1i == v3i:
            # Every id is identical: the simplex degenerates to a single point.
            dim = 1
        else:
            # v1 == v2 while v3 differs: bring v3 forward next to v1.
            dim = 2
            out_i2, out_i3 = out_i3, out_i2
            out_v2, out_v3 = out_v3, out_v2
    else:
        if (v1i == v3i) or (v2i == v3i):
            # Exactly one duplicated id: the simplex is an edge.
            dim = 2
        else:
            # All ids distinct: a full triangle.
            dim = 3
    return dim, out_i1, out_i2, out_i3, out_v1, out_v2, out_v3
@qd.func
def func_potential_box_normals(
    geoms_info: array_class.GeomsInfo,
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_g,
    quat: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    dim,
    v1,
    v2,
    v3,
    dir,
):
    """
    For a simplex defined on a box with three vertices [v1, v2, v3], we find which face normals are potentially
    related to the simplex.
    If the simplex is a triangle, at most one face normal is related.
    If the simplex is a line, at most two face normals are related.
    If the simplex is a point, at most three face normals are related.
    We identify related face normals to the simplex by checking the vertex indices of the simplex.
    Thread-safety note: Geometry index `i_g` is only used for read-only metadata access
    (vertex start index). It does not access `geoms_state.pos` or `geoms_state.quat`.
    Note that this function only uses quat (not pos) since face normals are orientation-dependent
    but not position-dependent.
    """
    # Change to local vertex indices
    # (box corner indices encode the +/- side per axis in bits 0/1/2)
    v1 -= geoms_info.vert_start[i_g]
    v2 -= geoms_info.vert_start[i_g]
    v3 -= geoms_info.vert_start[i_g]
    # Number of potential face normals
    n_normals = 0
    # Fallback if the simplex is degenerate
    is_degenerate_simplex = False
    # [c] counts how many normals were actually written; compared against the
    # expected count afterwards to detect degenerate simplices.
    c = 0
    xyz = gs.qd_ivec3(0, 0, 0)
    for i in range(3):
        # 1 when every vertex has positive xyz coordinate,
        # -1 when every vertex has negative xyz coordinate,
        # 0 when vertices are mixed
        xyz[i] = func_cmp_bit(v1, v2, v3, dim, i)
    # A triangle pins a single face, so only one pass is needed; otherwise each
    # of the three axes is examined in turn.
    for i in range(1 if dim == 3 else 3):
        # Determine the normal vector in the local space
        local_n = gs.qd_vec3(xyz[0], xyz[1], xyz[2])
        w = 1
        if dim == 2:
            w = xyz[i]
        if dim == 2 or dim == 1:
            local_n = gs.qd_vec3(0, 0, 0)
            local_n[i] = xyz[i]
        global_n = gu.qd_transform_by_quat(local_n, quat)
        if dim == 3:
            gjk_state.contact_normals[i_b, 0].normal = global_n
            # Note that only one of [x, y, z] could be non-zero, because the triangle is on the box face.
            sgn = xyz.sum()
            for j in range(3):
                if xyz[j]:
                    # Face id convention: axis * 2, +1 for the negative side.
                    gjk_state.contact_normals[i_b, c].id = j * 2
                    c += 1
            if sgn == -1:
                # Flip if needed
                gjk_state.contact_normals[i_b, 0].id = gjk_state.contact_normals[i_b, 0].id + 1
        elif dim == 2:
            if w:
                # NOTE(review): for i == 2 the normal is written to slot 1 regardless
                # of [c] — appears to cap the edge case at two slots; confirm against
                # MuJoCo's reference implementation.
                if (i == 0) or (i == 1):
                    gjk_state.contact_normals[i_b, c].normal = global_n
                else:
                    gjk_state.contact_normals[i_b, 1].normal = global_n
                for j in range(3):
                    if i == j:
                        gjk_state.contact_normals[i_b, c].id = j * 2 if xyz[j] > 0 else j * 2 + 1
                        break
                c += 1
        elif dim == 1:
            gjk_state.contact_normals[i_b, c].normal = global_n
            for j in range(3):
                if i == j:
                    gjk_state.contact_normals[i_b, c].id = j * 2 if xyz[j] > 0 else j * 2 + 1
                    break
            c += 1
    # Check [c] for detecting degenerate cases
    if dim == 3:
        # [c] should be 1 in normal case, but if triangle does not lie on the box face, it could be other values.
        n_normals = 1
        is_degenerate_simplex = c != 1
    elif dim == 2:
        # [c] should be 2 in normal case, but if edge does not lie on the box edge, it could be other values.
        n_normals = 2
        is_degenerate_simplex = c != 2
    elif dim == 1:
        n_normals = 3
        is_degenerate_simplex = False
    # If the simplex was degenerate, find the face normal using collision normal
    if is_degenerate_simplex:
        n_normals = (
            1
            if func_box_normal_from_collision_normal(gjk_state, gjk_info, i_g, quat, i_b, dir) == RETURN_CODE.SUCCESS
            else 0
        )
    return n_normals
@qd.func
def func_cmp_bit(
    v1,
    v2,
    v3,
    n,
    shift,
):
    """
    Compare the bit at position [shift] (0 = LSB) across the first [n] of the
    ids [v1, v2, v3].
    Returns:
    -------
    int
        1 if the inspected bits are all 1
        -1 if the inspected bits are all 0
        0 otherwise (mixed)
    """
    b1 = (v1 >> shift) & 1
    b2 = (v2 >> shift) & 1
    b3 = (v3 >> shift) & 1
    res = 0
    # (all-set) - (all-clear) yields +1 / -1 / 0 without branching on the bits.
    if n == 3:
        res = (b1 & b2 & b3) - ((b1 ^ 1) & (b2 ^ 1) & (b3 ^ 1))
    elif n == 2:
        res = (b1 & b2) - ((b1 ^ 1) & (b2 ^ 1))
    elif n == 1:
        res = b1 - (b1 ^ 1)
    return res
@qd.func
def func_box_normal_from_collision_normal(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_g,
    quat: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    dir,
):
    """
    Among the 6 faces of the box, find the one of which normal is closest to the [dir].

    Face ids follow the order (+x, -x, +y, -y, +z, -z): id // 2 is the axis and
    id & 1 marks the negative side.

    Thread-safety note: Geometry index `i_g` is not used in this function at all
    (retained for API consistency with original). It does not access `geoms_state.pos`
    or `geoms_state.quat`.
    """
    # Express the collision direction in the box's local frame.
    local_dir = gu.qd_transform_by_quat(dir, gu.qd_inv_quat(quat))
    local_dir = local_dir.normalized()
    flag = RETURN_CODE.FAIL
    for face_id in range(6):
        # Axis-aligned unit normal of face [face_id]: +/-1 along axis face_id // 2.
        face_normal = gs.qd_vec3(0.0, 0.0, 0.0)
        face_normal[face_id // 2] = 1 - 2 * (face_id & 1)
        if local_dir.dot(face_normal) > gjk_info.contact_face_tol[None]:
            flag = RETURN_CODE.SUCCESS
            gjk_state.contact_normals[i_b, 0].normal = face_normal
            gjk_state.contact_normals[i_b, 0].id = face_id
            break
    return flag
@qd.func
def func_potential_mesh_normals(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    faces_info: array_class.FacesInfo,
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_g,
    quat: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    dim,
    v1,
    v2,
    v3,
):
    """
    For a simplex defined on a mesh with three vertices [v1, v2, v3], find which
    face normals are potentially related to the simplex, by matching the simplex
    vertex indices against each face's vertex indices.

    A triangle simplex relates to at most one face, an edge to at most two, and a
    point to every adjacent face (bounded by the polygon-vertex budget).

    Thread-safety note: Geometry index `i_g` is only used for read-only metadata access
    (face start/end indices). It does not access `geoms_state.pos` or `geoms_state.quat`.
    Note that this function only uses quat (not pos) since face normals are orientation-dependent
    but not position-dependent.
    """
    # How many normals we may collect: triangle -> 1, edge -> 2, point -> budget.
    max_normals = 1
    if dim == 2:
        max_normals = 2
    elif dim == 1:
        max_normals = gjk_info.max_contact_polygon_verts[None]
    n_normals = 0
    # Exhaustive scan over this geometry's faces.
    # @TODO: This would require a lot of cost if the mesh is large. It would be better to precompute adjacency
    # information in the solver and use it here.
    for i_f in range(geoms_info.face_start[i_g], geoms_info.face_end[i_g]):
        tri = faces_info[i_f].verts_idx
        contains = gs.qd_ivec3(0, 0, 0)
        if v1 == tri[0] or v1 == tri[1] or v1 == tri[2]:
            contains[0] = 1
        if v2 == tri[0] or v2 == tri[1] or v2 == tri[2]:
            contains[1] = 1
        if v3 == tri[0] or v3 == tri[1] or v3 == tri[2]:
            contains[2] = 1
        # The face is relevant only if it touches all first [dim] simplex vertices.
        relevant = True
        for k in range(dim):
            relevant = relevant and (contains[k] == 1)
        if relevant:
            p0 = verts_info.init_pos[tri[0]]
            p1 = verts_info.init_pos[tri[1]]
            p2 = verts_info.init_pos[tri[2]]
            # Face normal in the local frame, rotated into the world frame.
            local_n = (p1 - p0).cross(p2 - p0)
            local_n = local_n.normalized()
            gjk_state.contact_normals[i_b, n_normals].normal = gu.qd_transform_by_quat(local_n, quat)
            gjk_state.contact_normals[i_b, n_normals].id = i_f
            n_normals += 1
            if n_normals == max_normals:
                break
    return n_normals
@qd.func
def func_find_aligned_faces(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_b,
    nv,
    nw,
):
    """
    Search [contact_faces] for a pair of candidate faces (one per geometry) whose
    normals are anti-parallel within tolerance, i.e. the faces are aligned.

    Returns the (i, j) index pair and SUCCESS, or (0, 0) and FAIL when none match.
    """
    pair = gs.qd_ivec2(0, 0)
    status = RETURN_CODE.FAIL
    for a, b in qd.ndrange(nv, nw):
        na = gjk_state.contact_faces[i_b, a].normal1
        nb = gjk_state.contact_faces[i_b, b].normal2
        # Facing surfaces have opposing normals, hence the negative threshold.
        if na.dot(nb) < -gjk_info.contact_face_tol[None]:
            pair[0] = a
            pair[1] = b
            status = RETURN_CODE.SUCCESS
            break
    return pair, status
@qd.func
def func_potential_box_edge_normals(
    geoms_info: array_class.GeomsInfo,
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_g,
    pos: qd.types.vector(3, dtype=gs.qd_float),
    quat: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    dim,
    v1,
    v2,
    v1i,
    v2i,
):
    """
    For a simplex defined on a box with two vertices [v1, v2], find which edge
    directions are potentially related to the simplex.

    An edge simplex yields a single direction (the edge itself); a point simplex
    yields the three box edges incident to that corner. Directions and endpoints
    are written to [contact_normals].

    Thread-safety note: Geometry index `i_g` is only used for read-only metadata access
    (geometry size data, vertex start index). It does not access `geoms_state.pos` or
    `geoms_state.quat`.
    """
    # Half-extents of the box.
    half_x = geoms_info.data[i_g][0] * 0.5
    half_y = geoms_info.data[i_g][1] * 0.5
    half_z = geoms_info.data[i_g][2] * 0.5
    # Switch to local (per-geometry) vertex indices.
    v1i -= geoms_info.vert_start[i_g]
    v2i -= geoms_info.vert_start[i_g]
    n_normals = 0
    if dim == 2:
        # The nearest feature is already an edge; its direction is the candidate.
        gjk_state.contact_normals[i_b, 0].endverts = v2
        gjk_state.contact_normals[i_b, 0].normal = func_safe_normalize(gjk_info, v2 - v1)
        n_normals = 1
    elif dim == 1:
        # The nearest feature is a corner; bits 0/1/2 of the local vertex index
        # select the +/- side along x/y/z.
        cx = half_x if (v1i & 1) else -half_x
        cy = half_y if (v1i & 2) else -half_y
        cz = half_z if (v1i & 4) else -half_z
        for axis in range(3):
            # The adjacent corner differs from [v1]'s corner only along [axis].
            neighbor = gs.qd_vec3(cx, cy, cz)
            neighbor[axis] = -neighbor[axis]
            endvert = gu.qd_transform_by_trans_quat(neighbor, pos, quat)
            gjk_state.contact_normals[i_b, axis].endverts = endvert
            gjk_state.contact_normals[i_b, axis].normal = func_safe_normalize(gjk_info, endvert - v1)
        n_normals = 3
    return n_normals
@qd.func
def func_potential_mesh_edge_normals(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    faces_info: array_class.FacesInfo,
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_g,
    pos: qd.types.vector(3, dtype=gs.qd_float),
    quat: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    dim,
    v1,
    v2,
    v1i,
    v2i,
):
    """
    For a simplex defined on a mesh with two vertices [v1, v2], find which edge
    directions are potentially related to the simplex, by matching vertex indices.

    An edge simplex yields a single direction; a point simplex yields every mesh
    edge leaving that vertex (bounded by the polygon-vertex budget).

    Thread-safety note: Geometry index `i_g` is only used for read-only metadata access
    (face start/end indices). It does not access `geoms_state.pos` or `geoms_state.quat`.
    """
    n_normals = 0
    if dim == 2:
        # The nearest feature is already an edge; use it directly.
        gjk_state.contact_normals[i_b, 0].endverts = v2
        gjk_state.contact_normals[i_b, 0].normal = func_safe_normalize(gjk_info, v2 - v1)
        n_normals = 1
    elif dim == 1:
        # The nearest feature is a point: exhaustively gather the outgoing edges.
        budget = gjk_info.max_contact_polygon_verts[None]
        for face_i in range(geoms_info.face_start[i_g], geoms_info.face_end[i_g]):
            tri = faces_info[face_i].verts_idx
            # Locate [v1i] inside this face (first match wins), if present.
            slot = -1
            for k in range(3):
                if slot == -1 and v1i == tri[k]:
                    slot = k
            if slot != -1:
                # The edge runs to the vertex following [v1] in the face winding.
                neighbor_idx = tri[(slot + 1) % 3]
                end_pos = gu.qd_transform_by_trans_quat(verts_info.init_pos[neighbor_idx], pos, quat)
                gjk_state.contact_normals[i_b, n_normals].normal = func_safe_normalize(gjk_info, end_pos - v1)
                gjk_state.contact_normals[i_b, n_normals].endverts = end_pos
                n_normals += 1
                if n_normals == budget:
                    break
    return n_normals
@qd.func
def func_safe_normalize(
    gjk_info: array_class.GJKInfo,
    v,
):
    """
    Return [v] scaled to unit length, falling back to the x-axis when [v] is
    numerically zero (norm below FLOAT_MIN) to avoid dividing by ~0.
    """
    length = v.norm()
    if length >= gjk_info.FLOAT_MIN[None]:
        v *= 1.0 / length
    else:
        # Degenerate input: use an arbitrary but deterministic unit vector.
        v[0] = 1.0
        v[1] = 0.0
        v[2] = 0.0
    return v
@qd.func
def func_find_aligned_edge_face(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_b,
    nedge,
    nface,
    is_edge_face,
):
    """
    Search [contact_faces] for an edge/face pair that are aligned, i.e. the edge
    direction is (near-)perpendicular to the face normal.

    When [is_edge_face] is True the edge candidates live in the normal1 slot and
    the face candidates in normal2; otherwise the slots are swapped.

    Returns the (edge, face) index pair and SUCCESS, or (0, 0) and FAIL.
    """
    pair = gs.qd_ivec2(0, 0)
    status = RETURN_CODE.FAIL
    for e, f in qd.ndrange(nedge, nface):
        edge_dir = gs.qd_vec3(0.0, 0.0, 0.0)
        face_n = gs.qd_vec3(0.0, 0.0, 0.0)
        if is_edge_face:
            edge_dir = gjk_state.contact_faces[i_b, e].normal1
            face_n = gjk_state.contact_faces[i_b, f].normal2
        else:
            edge_dir = gjk_state.contact_faces[i_b, e].normal2
            face_n = gjk_state.contact_faces[i_b, f].normal1
        # An edge lying in the face plane is orthogonal to the face normal.
        if qd.abs(edge_dir.dot(face_n)) < gjk_info.contact_edge_tol[None]:
            pair[0] = e
            pair[1] = f
            status = RETURN_CODE.SUCCESS
            break
    return pair, status
@qd.func
def func_box_face(
    geoms_info: array_class.GeomsInfo,
    gjk_state: array_class.GJKState,
    i_g,
    pos: qd.types.vector(3, dtype=gs.qd_float),
    quat: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    i_o,
    face_idx,
):
    """
    Write the four world-space corner vertices of box face [face_idx] into
    [contact_faces] (slot vert1 when i_o == 0, else vert2) and return the vertex
    count: 4 for a valid face index in [0, 6), 0 otherwise.

    Face ids follow the order (+x, -x, +y, -y, +z, -z).

    Thread-safety note: Geometry index `i_g` is only used for read-only metadata access
    (geometry size data). It does not access `geoms_state.pos` or `geoms_state.quat`.
    """
    size_x = geoms_info.data[i_g][0]
    size_y = geoms_info.data[i_g][1]
    size_z = geoms_info.data[i_g][2]
    # Fixed axis of the face (0: x, 1: y, 2: z) and its side (+1 / -1).
    axis = face_idx // 2
    side = 1 - 2 * (face_idx & 1)
    nface = 4 if face_idx >= 0 and face_idx < 6 else 0
    if nface:
        for i in qd.static(range(4)):
            b0 = i & 1
            b1 = i >> 1
            # Walk the perimeter: su = +1, +1, -1, -1 and sv = +1, -1, -1, +1,
            # with sv flipped on the negative side to keep a consistent winding.
            su = 1 - 2 * b1
            sv = (1 - 2 * (b0 ^ b1)) * side
            s = gs.qd_vec3(0, 0, 0)
            s[axis] = side
            s[(axis + 1) % 3] = su
            s[(axis + 2) % 3] = sv
            # Corner in local coordinates (sizes are full extents, hence * 0.5),
            # then transformed to the world frame.
            corner = gs.qd_vec3(s[0] * size_x, s[1] * size_y, s[2] * size_z) * 0.5
            corner = gu.qd_transform_by_trans_quat(corner, pos, quat)
            if i_o == 0:
                gjk_state.contact_faces[i_b, i].vert1 = corner
            else:
                gjk_state.contact_faces[i_b, i].vert2 = corner
    return nface
@qd.func
def func_mesh_face(
    verts_info: array_class.VertsInfo,
    faces_info: array_class.FacesInfo,
    gjk_state: array_class.GJKState,
    i_g,
    pos: qd.types.vector(3, dtype=gs.qd_float),
    quat: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    i_o,
    face_idx,
):
    """
    Write the three world-space vertices of mesh face [face_idx] into
    [contact_faces] (slot vert1 when i_o == 0, else vert2) and return the vertex
    count (always 3).

    Thread-safety note: Geometry index `i_g` is only used to pass through to `faces_info`
    and `verts_info` for read-only metadata access (face vertex indices, initial positions).
    It does not access `geoms_state.pos` or `geoms_state.quat`.
    """
    for k in range(3):
        vert_idx = faces_info[face_idx].verts_idx[k]
        world_v = gu.qd_transform_by_trans_quat(verts_info.init_pos[vert_idx], pos, quat)
        if i_o == 0:
            gjk_state.contact_faces[i_b, k].vert1 = world_v
        else:
            gjk_state.contact_faces[i_b, k].vert2 = world_v
    return 3
@qd.func
def func_clip_polygon(
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    i_b,
    nface1,
    nface2,
    edgecon1,
    edgecon2,
    normal,
    approx_dir,
):
    """
    Clip a polygon against the another polygon using Sutherland-Hodgman algorithm.
    Parameters:
    ----------
    normal: gs.qd_vec3
        The normal of the clipping polygon.
    approx_dir: gs.qd_vec3
        Preferred separation direction for the clipping.
    """
    # Geometry 1 clips unless its feature is an edge (edgecon1), in which case
    # geometry 2's face is used as the clipping polygon instead.
    clipping_polygon = 1 if not edgecon1 else 2
    clipping_polygon_nface = nface1 if clipping_polygon == 1 else nface2
    # The clipping polygon should be at least a triangle
    if clipping_polygon_nface >= 3:
        # For each edge of the clipping polygon, find the half-plane that is defined by the edge and the normal.
        # The normal of half-plane is perpendicular to the edge and face normal.
        for i in range(clipping_polygon_nface):
            v1 = gjk_state.contact_faces[i_b, i].vert1
            v2 = gjk_state.contact_faces[i_b, (i + 1) % clipping_polygon_nface].vert1
            v3 = gjk_state.contact_faces[i_b, (i + 2) % clipping_polygon_nface].vert1
            if clipping_polygon == 2:
                v1 = gjk_state.contact_faces[i_b, i].vert2
                v2 = gjk_state.contact_faces[i_b, (i + 1) % clipping_polygon_nface].vert2
                v3 = gjk_state.contact_faces[i_b, (i + 2) % clipping_polygon_nface].vert2
            # Plane normal
            res = (v2 - v1).cross(normal)
            # Reorient normal if needed
            # (v3, the next polygon vertex, must lie inside the half-plane)
            inside_v3 = func_halfspace(gjk_info, v1, res, v3)
            if not inside_v3:
                res = -res
            gjk_state.contact_halfspaces[i_b, i].normal = res
            # Plane distance
            gjk_state.contact_halfspaces[i_b, i].dist = v1.dot(res)
        # Initialize buffers to store the clipped polygons
        # (double buffering: [pi] is read this pass, [ci] is written)
        nclipped = gs.qd_ivec2(0, 0)
        nclipped[0] = nface2 if clipping_polygon == 1 else nface1
        # These values are swapped during the clipping process.
        pi, ci = 0, 1
        # Seed buffer [pi] with the subject polygon (the one NOT doing the clipping).
        for i in range(nclipped[pi]):
            if clipping_polygon == 1:
                gjk_state.contact_clipped_polygons[i_b, pi, i] = gjk_state.contact_faces[i_b, i].vert2
            else:
                gjk_state.contact_clipped_polygons[i_b, pi, i] = gjk_state.contact_faces[i_b, i].vert1
        # For each edge of the clipping polygon, clip the subject polygon against it.
        # Here we use the Sutherland-Hodgman algorithm.
        for e in range(clipping_polygon_nface):
            # Get the point [a] on the clipping polygon edge,
            # and the normal [n] of the half-plane defined by the edge.
            a = gjk_state.contact_faces[i_b, e].vert1
            if clipping_polygon == 2:
                a = gjk_state.contact_faces[i_b, e].vert2
            n = gjk_state.contact_halfspaces[i_b, e].normal
            d = gjk_state.contact_halfspaces[i_b, e].dist
            for i in range(nclipped[pi]):
                # Get edge PQ of the subject polygon
                P = gjk_state.contact_clipped_polygons[i_b, pi, i]
                Q = gjk_state.contact_clipped_polygons[i_b, pi, (i + 1) % nclipped[pi]]
                # Determine if P and Q are inside or outside the half-plane
                inside_P = func_halfspace(gjk_info, a, n, P)
                inside_Q = func_halfspace(gjk_info, a, n, Q)
                # PQ entirely outside the clipping edge, skip
                if not inside_P and not inside_Q:
                    continue
                # PQ entirely inside the clipping edge, add Q to the clipped polygon
                if inside_P and inside_Q:
                    gjk_state.contact_clipped_polygons[i_b, ci, nclipped[ci]] = Q
                    nclipped[ci] += 1
                    continue
                # PQ intersects the half-plane, add the intersection point
                t, ip = func_plane_intersect(gjk_info, n, d, P, Q)
                if t >= 0 and t <= 1:
                    gjk_state.contact_clipped_polygons[i_b, ci, nclipped[ci]] = ip
                    nclipped[ci] += 1
                # If Q is inside the half-plane, add it to the clipped polygon
                if inside_Q:
                    gjk_state.contact_clipped_polygons[i_b, ci, nclipped[ci]] = Q
                    nclipped[ci] += 1
            # Swap the buffers for the next edge clipping
            pi, ci = ci, pi
            # Reset the next clipped polygon count
            nclipped[ci] = 0
        nclipped_polygon = nclipped[pi]
        if nclipped_polygon >= 1:
            # Witness points: point_obj2 lies on the clipped polygon; point_obj1
            # is obtained by shifting it back along [approx_dir].
            if gjk_info.max_contacts_per_pair[None] < 5 and nclipped_polygon > 4:
                # Approximate the clipped polygon with a convex quadrilateral
                gjk_state.n_witness[i_b] = 4
                rect = func_approximate_polygon_with_quad(gjk_state, i_b, pi, nclipped_polygon)
                for i in range(4):
                    witness2 = gjk_state.contact_clipped_polygons[i_b, pi, rect[i]]
                    witness1 = witness2 - approx_dir
                    gjk_state.witness[i_b, i].point_obj1 = witness1
                    gjk_state.witness[i_b, i].point_obj2 = witness2
            elif nclipped_polygon > gjk_info.max_contacts_per_pair[None]:
                # If the number of contacts exceeds the limit,
                # only use the first [max_contacts_per_pair] contacts.
                gjk_state.n_witness[i_b] = gjk_info.max_contacts_per_pair[None]
                for i in range(gjk_info.max_contacts_per_pair[None]):
                    witness2 = gjk_state.contact_clipped_polygons[i_b, pi, i]
                    witness1 = witness2 - approx_dir
                    gjk_state.witness[i_b, i].point_obj1 = witness1
                    gjk_state.witness[i_b, i].point_obj2 = witness2
            else:
                n_witness = 0
                # Just use every contact in the clipped polygon
                for i in range(nclipped_polygon):
                    skip = False
                    polygon_vert = gjk_state.contact_clipped_polygons[i_b, pi, i]
                    # Find if there were any duplicate contacts similar to [polygon_vert]
                    for j in range(n_witness):
                        prev_witness = gjk_state.witness[i_b, j].point_obj2
                        skip = func_is_equal_vec(polygon_vert, prev_witness, gjk_info.FLOAT_MIN[None])
                        if skip:
                            break
                    if not skip:
                        gjk_state.witness[i_b, n_witness].point_obj2 = polygon_vert
                        gjk_state.witness[i_b, n_witness].point_obj1 = polygon_vert - approx_dir
                        n_witness += 1
                gjk_state.n_witness[i_b] = n_witness
@qd.func
def func_halfspace(
    gjk_info: array_class.GJKInfo,
    a,
    n,
    p,
):
    """
    Test whether point [p] lies inside the half-space bounded by the plane
    through [a] with normal [n]. Points on the plane count as inside, up to
    the FLOAT_MIN slack.
    """
    offset = p - a
    return offset.dot(n) > -gjk_info.FLOAT_MIN[None]
@qd.func
def func_plane_intersect(
    gjk_info: array_class.GJKInfo,
    pn,
    pd,
    v1,
    v2,
):
    """
    Intersect the line segment [v1, v2] with the plane { x : pn . x = pd }.
    The intersection point is v1 + t * (v2 - v1).
    Return:
    -------
    t: float
        Segment parameter of the intersection; FLOAT_MAX when the segment is
        numerically parallel to the plane. The point is only filled in when
        0 <= t <= 1, otherwise it stays the zero vector.
    """
    t = gjk_info.FLOAT_MAX[None]
    point = gs.qd_vec3(0, 0, 0)
    seg = v2 - v1
    denom = pn.dot(seg)
    # A near-zero denominator means the segment runs parallel to the plane.
    if qd.abs(denom) > gjk_info.FLOAT_MIN[None]:
        t = (pd - pn.dot(v1)) / denom
        if t >= 0 and t <= 1:
            point = v1 + t * seg
    return t, point
@qd.func
def func_approximate_polygon_with_quad(
    gjk_state: array_class.GJKState,
    i_b,
    polygon_start,
    nverts,
):
    """
    Find a convex quadrilateral that approximates the given N-gon [polygon]. We find it by selecting the four
    vertices in the polygon that form the maximum area quadrilateral.
    """
    # [i_v] holds the current best quadrilateral's vertex indices; [i_v0] is the
    # candidate being evaluated each iteration.
    i_v = gs.qd_ivec4(0, 1, 2, 3)
    i_v0 = gs.qd_ivec4(0, 1, 2, 3)
    m = func_quadrilateral_area(gjk_state, i_b, polygon_start, i_v[0], i_v[1], i_v[2], i_v[3])
    # 1: change b, 2: change c, 3: change d
    change_flag = 3
    # NOTE(review): only flags 3 and 2 advance a vertex in [i_v0]; with flag 1
    # the candidate equals the incumbent, so m_next == m and the loop falls
    # straight through to flag 3. Looks like a missing "advance b" step —
    # confirm against MuJoCo's reference implementation.
    while True:
        i_v0[0], i_v0[1], i_v0[2], i_v0[3] = i_v[0], i_v[1], i_v[2], i_v[3]
        if change_flag == 3:
            i_v0[3] = (i_v[3] + 1) % nverts
        elif change_flag == 2:
            i_v0[2] = (i_v[2] + 1) % nverts
        # Compute the area of the quadrilateral formed by the vertices
        m_next = func_quadrilateral_area(gjk_state, i_b, polygon_start, i_v0[0], i_v0[1], i_v0[2], i_v0[3])
        if m_next <= m:
            # If the area did not increase
            if change_flag == 3:
                # Keep the four indices distinct before advancing the anchor.
                if i_v[1] == i_v[0]:
                    i_v[1] = (i_v[1] + 1) % nverts
                if i_v[2] == i_v[1]:
                    i_v[2] = (i_v[2] + 1) % nverts
                if i_v[3] == i_v[2]:
                    i_v[3] = (i_v[3] + 1) % nverts
                # Change a if possible
                if i_v[0] == nverts - 1:
                    break
                i_v[0] = (i_v[0] + 1) % nverts
            elif change_flag == 2:
                # Now change b
                change_flag = 1
            elif change_flag == 1:
                # Now change d
                change_flag = 3
        else:
            # If the area increased
            m = m_next
            i_v[0], i_v[1], i_v[2], i_v[3] = i_v0[0], i_v0[1], i_v0[2], i_v0[3]
            if change_flag == 3:
                # Now change c
                change_flag = 2
            elif change_flag == 2:
                # Keep changing c
                pass
            elif change_flag == 1:
                # Keep changing b
                pass
    return i_v
@qd.func
def func_quadrilateral_area(
    gjk_state: array_class.GJKState,
    i_b,
    i_0,
    i_v0,
    i_v1,
    i_v2,
    i_v3,
):
    """
    Area of the quadrilateral whose corners are the clipped-polygon vertices at
    indices [i_v0, i_v1, i_v2, i_v3] in buffer [i_0] of batch [i_b].
    """
    pa = gjk_state.contact_clipped_polygons[i_b, i_0, i_v0]
    pb = gjk_state.contact_clipped_polygons[i_b, i_0, i_v1]
    pc = gjk_state.contact_clipped_polygons[i_b, i_0, i_v2]
    pd = gjk_state.contact_clipped_polygons[i_b, i_0, i_v3]
    # Sum of the two edge cross products gives twice the (vector) area.
    twice_area = (pd - pa).cross(pb - pd) + (pc - pb).cross(pa - pc)
    return 0.5 * twice_area.norm()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/collider/multi_contact.py",
"license": "Apache License 2.0",
"lines": 1000,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/collider/narrowphase.py | """
Narrow-phase collision detection functions.
This module contains SDF-based contact detection, convex-convex contact,
terrain detection, box-box contact, and multi-contact search algorithms.
"""
import sys
from enum import IntEnum
import quadrants as qd
import genesis as gs
import genesis.utils.array_class as array_class
import genesis.utils.geom as gu
import genesis.utils.sdf as sdf
from . import capsule_contact, diff_gjk, gjk, mpr
from .box_contact import (
func_box_box_contact,
func_plane_box_contact,
)
from .contact import (
func_add_contact,
func_add_diff_contact_input,
func_compute_tolerance,
func_contact_orthogonals,
func_rotate_frame,
func_set_contact,
)
from .utils import func_point_in_geom_aabb
class CCD_ALGORITHM_CODE(IntEnum):
    """Convex collision detection (CCD) algorithm selector.

    Members map to the narrow-phase backend used for convex-convex pairs.
    """

    MPR = 0  # Genesis MPR (augmented with SDF)
    MJ_MPR = 1  # MuJoCo-compatible MPR
    GJK = 2  # Genesis GJK
    MJ_GJK = 3  # MuJoCo-compatible GJK
@qd.func
def func_contact_sphere_sdf(
    i_ga,
    i_gb,
    i_b,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    collider_static_config: qd.template(),
    sdf_info: array_class.SDFInfo,
):
    """
    Sphere (geom `i_ga`) vs SDF (geom `i_gb`) contact for batch `i_b`.

    Samples geom b's signed distance field at the sphere center; a contact is reported when that
    distance is smaller than the sphere radius. Returns (is_col, normal, penetration, contact_pos).
    """
    is_col = False
    penetration = gs.qd_float(0.0)
    normal = qd.Vector.zero(gs.qd_float, 3)
    contact_pos = qd.Vector.zero(gs.qd_float, 3)
    sphere_center = geoms_state.pos[i_ga, i_b]
    # geoms_info.data[i_ga][0] holds the sphere radius for sphere geoms.
    sphere_radius = geoms_info.data[i_ga][0]
    center_to_b_dist = sdf.sdf_func_world(geoms_state, geoms_info, sdf_info, sphere_center, i_gb, i_b)
    if center_to_b_dist < sphere_radius:
        is_col = True
        normal = sdf.sdf_func_normal_world(
            geoms_state, geoms_info, rigid_global_info, collider_static_config, sdf_info, sphere_center, i_gb, i_b
        )
        penetration = sphere_radius - center_to_b_dist
        # Place the contact point midway through the overlap along the normal.
        contact_pos = sphere_center - (sphere_radius - 0.5 * penetration) * normal
    return is_col, normal, penetration, contact_pos
@qd.func
def func_contact_vertex_sdf(
    i_ga,
    i_gb,
    i_b,
    ga_pos: qd.types.vector(3, dtype=gs.qd_float),
    ga_quat: qd.types.vector(4, dtype=gs.qd_float),
    gb_pos: qd.types.vector(3, dtype=gs.qd_float),
    gb_quat: qd.types.vector(4, dtype=gs.qd_float),
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    collider_static_config: qd.template(),
    sdf_info: array_class.SDFInfo,
):
    """
    Vertex-vs-SDF contact: find the vertex of geom `i_ga` that penetrates deepest into geom
    `i_gb`'s signed distance field.

    Every vertex of geom a is transformed to world frame by (ga_pos, ga_quat); vertices outside
    geom b's AABB are skipped early. Returns (is_col, normal, penetration, contact_pos).
    """
    is_col = False
    penetration = gs.qd_float(0.0)
    normal = qd.Vector.zero(gs.qd_float, 3)
    contact_pos = qd.Vector.zero(gs.qd_float, 3)
    for i_v in range(geoms_info.vert_start[i_ga], geoms_info.vert_end[i_ga]):
        vertex_pos = gu.qd_transform_by_trans_quat(verts_info.init_pos[i_v], ga_pos, ga_quat)
        # Cheap AABB rejection before the (more expensive) SDF query.
        if func_point_in_geom_aabb(geoms_state, i_gb, i_b, vertex_pos):
            # Negative SDF value means the vertex is inside geom b.
            new_penetration = -sdf.sdf_func_world_local(geoms_info, sdf_info, vertex_pos, i_gb, gb_pos, gb_quat)
            if new_penetration > penetration:
                is_col = True
                contact_pos = vertex_pos
                penetration = new_penetration
    if is_col:
        # Compute contact normal only once, and only in case of contact
        normal = sdf.sdf_func_normal_world_local(
            geoms_info, rigid_global_info, collider_static_config, sdf_info, contact_pos, i_gb, gb_pos, gb_quat
        )
        # The contact point must be offsetted by half the penetration depth
        contact_pos = contact_pos + 0.5 * penetration * normal
    return is_col, normal, penetration, contact_pos
@qd.func
def func_contact_edge_sdf(
    i_ga,
    i_gb,
    i_b,
    ga_pos: qd.types.vector(3, dtype=gs.qd_float),
    ga_quat: qd.types.vector(4, dtype=gs.qd_float),
    gb_pos: qd.types.vector(3, dtype=gs.qd_float),
    gb_quat: qd.types.vector(4, dtype=gs.qd_float),
    geoms_state: array_class.GeomsState,  # For AABB only
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    edges_info: array_class.EdgesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    collider_static_config: qd.template(),
    sdf_info: array_class.SDFInfo,
):
    """
    Edge-vs-SDF contact: scan the edges of geom `i_ga` for the point of deepest penetration into
    geom `i_gb`'s signed distance field.

    Candidate edges are filtered by orientation tests on the SDF gradients of both geoms, then the
    closest point along the edge is located by bisection down to geom a's SDF cell size. Returns
    (is_col, normal, penetration, contact_pos).
    """
    EPS = rigid_global_info.EPS[None]
    is_col = False
    penetration = gs.qd_float(0.0)
    normal = qd.Vector.zero(gs.qd_float, 3)
    contact_pos = qd.Vector.zero(gs.qd_float, 3)
    ga_sdf_cell_size = sdf_info.geoms_info.sdf_cell_size[i_ga]
    for i_e in range(geoms_info.edge_start[i_ga], geoms_info.edge_end[i_ga]):
        cur_length = edges_info.length[i_e]
        # Edges shorter than the SDF resolution cannot be refined meaningfully; skip them.
        if cur_length > ga_sdf_cell_size:
            i_v0 = edges_info.v0[i_e]
            i_v1 = edges_info.v1[i_e]
            p_0 = gu.qd_transform_by_trans_quat(verts_info.init_pos[i_v0], ga_pos, ga_quat)
            p_1 = gu.qd_transform_by_trans_quat(verts_info.init_pos[i_v1], ga_pos, ga_quat)
            vec_01 = gu.qd_normalize(p_1 - p_0, EPS)
            sdf_grad_0_b = sdf.sdf_func_grad_world_local(
                geoms_info, rigid_global_info, collider_static_config, sdf_info, p_0, i_gb, gb_pos, gb_quat
            )
            sdf_grad_1_b = sdf.sdf_func_grad_world_local(
                geoms_info, rigid_global_info, collider_static_config, sdf_info, p_1, i_gb, gb_pos, gb_quat
            )
            # check if the edge on a is facing towards mesh b
            sdf_grad_0_a = sdf.sdf_func_grad_world_local(
                geoms_info, rigid_global_info, collider_static_config, sdf_info, p_0, i_ga, ga_pos, ga_quat
            )
            sdf_grad_1_a = sdf.sdf_func_grad_world_local(
                geoms_info, rigid_global_info, collider_static_config, sdf_info, p_1, i_ga, ga_pos, ga_quat
            )
            # Project geom a's outward gradient onto the plane orthogonal to the edge direction.
            normal_edge_0 = sdf_grad_0_a - sdf_grad_0_a.dot(vec_01) * vec_01
            normal_edge_1 = sdf_grad_1_a - sdf_grad_1_a.dot(vec_01) * vec_01
            if normal_edge_0.dot(sdf_grad_0_b) < 0 or normal_edge_1.dot(sdf_grad_1_b) < 0:
                # check if closest point is between the two points
                if sdf_grad_0_b.dot(vec_01) < 0 and sdf_grad_1_b.dot(vec_01) > 0:
                    # Bisect along the edge until the bracket shrinks below the SDF cell size.
                    while cur_length > ga_sdf_cell_size:
                        p_mid = 0.5 * (p_0 + p_1)
                        if (
                            sdf.sdf_func_grad_world_local(
                                geoms_info,
                                rigid_global_info,
                                collider_static_config,
                                sdf_info,
                                p_mid,
                                i_gb,
                                gb_pos,
                                gb_quat,
                            ).dot(vec_01)
                            < 0
                        ):
                            p_0 = p_mid
                        else:
                            p_1 = p_mid
                        cur_length = 0.5 * cur_length
                    p = 0.5 * (p_0 + p_1)
                    new_penetration = -sdf.sdf_func_world_local(geoms_info, sdf_info, p, i_gb, gb_pos, gb_quat)
                    if new_penetration > penetration:
                        is_col = True
                        normal = sdf.sdf_func_normal_world_local(
                            geoms_info, rigid_global_info, collider_static_config, sdf_info, p, i_gb, gb_pos, gb_quat
                        )
                        contact_pos = p
                        penetration = new_penetration
    # The contact point must be offsetted by half the penetration depth, for consistency with MPR
    contact_pos = contact_pos + 0.5 * penetration * normal
    return is_col, normal, penetration, contact_pos
@qd.func
def func_contact_convex_convex_sdf(
    i_ga,
    i_gb,
    i_b,
    i_va_ws,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    collider_info: array_class.ColliderInfo,
    collider_static_config: qd.template(),
    sdf_info: array_class.SDFInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    enable_edge_detection_fallback: qd.template(),
):
    """
    Convex-vs-convex contact via SDF hill-climbing on geom a's vertex graph.

    Starting from the warm-start vertex `i_va_ws` (or a smart initial guess when it is -1), walks
    geom a's vertex adjacency graph towards the vertex with the smallest signed distance to geom b.
    If that vertex is not penetrating and `enable_edge_detection_fallback` is set, the edges around
    it are additionally probed by bisection. Returns (is_col, normal, penetration, contact_pos, i_va)
    where `i_va` is the deepest vertex found, usable as next frame's warm start.
    """
    EPS = rigid_global_info.EPS[None]
    gb_vert_start = geoms_info.vert_start[i_gb]
    ga_pos = geoms_state.pos[i_ga, i_b]
    ga_quat = geoms_state.quat[i_ga, i_b]
    gb_pos = geoms_state.pos[i_gb, i_b]
    gb_quat = geoms_state.quat[i_gb, i_b]
    is_col = False
    penetration = gs.qd_float(0.0)
    normal = qd.Vector.zero(gs.qd_float, 3)
    contact_pos = qd.Vector.zero(gs.qd_float, 3)
    i_va = i_va_ws
    if i_va == -1:
        # start traversing on the vertex graph with a smart initial vertex
        pos_vb = gu.qd_transform_by_trans_quat(verts_info.init_pos[gb_vert_start], gb_pos, gb_quat)
        i_va = sdf.sdf_func_find_closest_vert(geoms_state, geoms_info, sdf_info, pos_vb, i_ga, i_b)
    i_v_closest = i_va
    pos_v_closest = gu.qd_transform_by_trans_quat(verts_info.init_pos[i_v_closest], ga_pos, ga_quat)
    sd_v_closest = sdf.sdf_func_world(geoms_state, geoms_info, sdf_info, pos_v_closest, i_gb, i_b)
    # Greedy descent on the vertex graph: move to the neighbor with smaller signed distance.
    while True:
        for i_neighbor_ in range(
            collider_info.vert_neighbor_start[i_va],
            collider_info.vert_neighbor_start[i_va] + collider_info.vert_n_neighbors[i_va],
        ):
            i_neighbor = collider_info.vert_neighbors[i_neighbor_]
            pos_neighbor = gu.qd_transform_by_trans_quat(verts_info.init_pos[i_neighbor], ga_pos, ga_quat)
            sd_neighbor = sdf.sdf_func_world(geoms_state, geoms_info, sdf_info, pos_neighbor, i_gb, i_b)
            if sd_neighbor < sd_v_closest - 1e-5:  # 1e-5 (0.01mm) to avoid endless loop due to numerical instability
                i_v_closest = i_neighbor
                sd_v_closest = sd_neighbor
                pos_v_closest = pos_neighbor
        if i_v_closest == i_va:  # no better neighbor
            break
        else:
            i_va = i_v_closest
    # i_va is the deepest vertex
    pos_a = pos_v_closest
    if sd_v_closest < 0.0:
        is_col = True
        normal = sdf.sdf_func_normal_world(
            geoms_state, geoms_info, rigid_global_info, collider_static_config, sdf_info, pos_a, i_gb, i_b
        )
        penetration = -sd_v_closest
        contact_pos = pos_a + 0.5 * penetration * normal
    elif enable_edge_detection_fallback:  # check edge surrounding it
        for i_neighbor_ in range(
            collider_info.vert_neighbor_start[i_va],
            collider_info.vert_neighbor_start[i_va] + collider_info.vert_n_neighbors[i_va],
        ):
            i_neighbor = collider_info.vert_neighbors[i_neighbor_]
            p_0 = pos_v_closest
            p_1 = gu.qd_transform_by_trans_quat(verts_info.init_pos[i_neighbor], ga_pos, ga_quat)
            vec_01 = gu.qd_normalize(p_1 - p_0, EPS)
            sdf_grad_0_b = sdf.sdf_func_grad_world(
                geoms_state, geoms_info, rigid_global_info, collider_static_config, sdf_info, p_0, i_gb, i_b
            )
            sdf_grad_1_b = sdf.sdf_func_grad_world(
                geoms_state, geoms_info, rigid_global_info, collider_static_config, sdf_info, p_1, i_gb, i_b
            )
            # check if the edge on a is facing towards mesh b (I am not 100% sure about this, subject to removal)
            sdf_grad_0_a = sdf.sdf_func_grad_world(
                geoms_state, geoms_info, rigid_global_info, collider_static_config, sdf_info, p_0, i_ga, i_b
            )
            sdf_grad_1_a = sdf.sdf_func_grad_world(
                geoms_state, geoms_info, rigid_global_info, collider_static_config, sdf_info, p_1, i_ga, i_b
            )
            normal_edge_0 = sdf_grad_0_a - sdf_grad_0_a.dot(vec_01) * vec_01
            normal_edge_1 = sdf_grad_1_a - sdf_grad_1_a.dot(vec_01) * vec_01
            if normal_edge_0.dot(sdf_grad_0_b) < 0 or normal_edge_1.dot(sdf_grad_1_b) < 0:
                # check if closest point is between the two points
                if sdf_grad_0_b.dot(vec_01) < 0 and sdf_grad_1_b.dot(vec_01) > 0:
                    cur_length = (p_1 - p_0).norm()
                    ga_sdf_cell_size = sdf_info.geoms_info.sdf_cell_size[i_ga]
                    # Bisection identical to the one in func_contact_edge_sdf.
                    while cur_length > ga_sdf_cell_size:
                        p_mid = 0.5 * (p_0 + p_1)
                        side = sdf.sdf_func_grad_world(
                            geoms_state,
                            geoms_info,
                            rigid_global_info,
                            collider_static_config,
                            sdf_info,
                            p_mid,
                            i_gb,
                            i_b,
                        ).dot(vec_01)
                        if side < 0:
                            p_0 = p_mid
                        else:
                            p_1 = p_mid
                        cur_length = 0.5 * cur_length
                    p = 0.5 * (p_0 + p_1)
                    new_penetration = -sdf.sdf_func_world(geoms_state, geoms_info, sdf_info, p, i_gb, i_b)
                    if new_penetration > 0.0:
                        is_col = True
                        normal = sdf.sdf_func_normal_world(
                            geoms_state, geoms_info, rigid_global_info, collider_static_config, sdf_info, p, i_gb, i_b
                        )
                        contact_pos = p
                        penetration = new_penetration
                        # Stop at the first penetrating edge.
                        break
    return is_col, normal, penetration, contact_pos, i_va
@qd.func
def func_contact_mpr_terrain(
    i_ga,
    i_gb,
    i_b,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    geoms_init_AABB: array_class.GeomsInitAABB,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_info: array_class.ColliderInfo,
    collider_static_config: qd.template(),
    mpr_state: array_class.MPRState,
    mpr_info: array_class.MPRInfo,
    support_field_info: array_class.SupportFieldInfo,
    errno: array_class.V_ANNOTATION,
):
    """
    Geom `i_ga` vs heightfield terrain `i_gb` contact for batch `i_b`.

    Computes geom a's AABB in the terrain frame via support queries, clips it against the terrain's
    row/column grid, then for every overlapped cell builds a triangular prism (two triangles from
    the heightfield, extruded down to the terrain's min z) and runs MPR between geom a and the
    prism. Valid contacts are appended via func_add_contact; duplicates within `tolerance` of an
    already-added contact are discarded.
    """
    ga_pos, ga_quat = geoms_state.pos[i_ga, i_b], geoms_state.quat[i_ga, i_b]
    gb_pos, gb_quat = geoms_state.pos[i_gb, i_b], geoms_state.quat[i_gb, i_b]
    margin = gs.qd_float(0.0)
    is_return = False
    tolerance = func_compute_tolerance(i_ga, i_gb, i_b, collider_info.mc_tolerance[None], geoms_info, geoms_init_AABB)
    if not is_return:
        # Transform to terrain's frame (using local variables, not modifying global state)
        ga_pos_terrain_frame, ga_quat_terrain_frame = gu.qd_transform_pos_quat_by_trans_quat(
            ga_pos - gb_pos,
            ga_quat,
            qd.Vector.zero(gs.qd_float, 3),
            gu.qd_inv_quat(gb_quat),
        )
        gb_pos_terrain_frame = qd.Vector.zero(gs.qd_float, 3)
        gb_quat_terrain_frame = gu.qd_identity_quat()
        center_a = gu.qd_transform_by_trans_quat(geoms_info.center[i_ga], ga_pos_terrain_frame, ga_quat_terrain_frame)
        # Support queries along +/- each axis give geom a's AABB in the terrain frame:
        # xyz_max_min[0:3] hold the per-axis maxima, xyz_max_min[3:6] the minima.
        for i_axis, i_m in qd.ndrange(3, 2):
            direction = qd.Vector.zero(gs.qd_float, 3)
            if i_m == 0:
                direction[i_axis] = 1.0
            else:
                direction[i_axis] = -1.0
            v1 = mpr.support_driver(
                geoms_info,
                collider_state,
                collider_static_config,
                support_field_info,
                direction,
                i_ga,
                i_b,
                ga_pos_terrain_frame,
                ga_quat_terrain_frame,
            )
            collider_state.xyz_max_min[3 * i_m + i_axis, i_b] = v1[i_axis]
        for i in qd.static(range(3)):
            # Bottom prism vertices sit at the terrain's minimum z.
            collider_state.prism[i, i_b][2] = collider_info.terrain_xyz_maxmin[5]
            # Early out if geom a's AABB does not overlap the terrain AABB on this axis.
            if (
                collider_info.terrain_xyz_maxmin[i] < collider_state.xyz_max_min[i + 3, i_b] - margin
                or collider_info.terrain_xyz_maxmin[i + 3] > collider_state.xyz_max_min[i, i_b] + margin
            ):
                is_return = True
        if not is_return:
            # Convert the overlap region to heightfield row/column index ranges (sh = cell size).
            sh = collider_info.terrain_scale[0]
            r_min = gs.qd_int(qd.floor((collider_state.xyz_max_min[3, i_b] - collider_info.terrain_xyz_maxmin[3]) / sh))
            r_max = gs.qd_int(qd.ceil((collider_state.xyz_max_min[0, i_b] - collider_info.terrain_xyz_maxmin[3]) / sh))
            c_min = gs.qd_int(qd.floor((collider_state.xyz_max_min[4, i_b] - collider_info.terrain_xyz_maxmin[4]) / sh))
            c_max = gs.qd_int(qd.ceil((collider_state.xyz_max_min[1, i_b] - collider_info.terrain_xyz_maxmin[4]) / sh))
            r_min = qd.max(0, r_min)
            c_min = qd.max(0, c_min)
            r_max = qd.min(collider_info.terrain_rc[0] - 1, r_max)
            c_max = qd.min(collider_info.terrain_rc[1] - 1, c_max)
            n_con = 0
            for r in range(r_min, r_max):
                nvert = 0
                for c in range(c_min, c_max + 1):
                    for i in range(2):
                        if n_con < qd.static(collider_static_config.n_contacts_per_pair):
                            # Slide the next heightfield sample into the 6-vertex prism buffer.
                            nvert = nvert + 1
                            func_add_prism_vert(
                                sh * (r + i) + collider_info.terrain_xyz_maxmin[3],
                                sh * c + collider_info.terrain_xyz_maxmin[4],
                                collider_info.terrain_hf[r + i, c] + margin,
                                i_b,
                                collider_state,
                            )
                            # Need at least 3 samples for a full prism; only test prisms whose top
                            # reaches up to geom a's minimum z.
                            if nvert > 2 and (
                                collider_state.prism[3, i_b][2] >= collider_state.xyz_max_min[5, i_b]
                                or collider_state.prism[4, i_b][2] >= collider_state.xyz_max_min[5, i_b]
                                or collider_state.prism[5, i_b][2] >= collider_state.xyz_max_min[5, i_b]
                            ):
                                center_b = qd.Vector.zero(gs.qd_float, 3)
                                for i_p in qd.static(range(6)):
                                    center_b = center_b + collider_state.prism[i_p, i_b]
                                center_b = center_b / 6.0
                                is_col, normal, penetration, contact_pos = mpr.func_mpr_contact_from_centers(
                                    geoms_info,
                                    static_rigid_sim_config,
                                    collider_state,
                                    collider_static_config,
                                    mpr_state,
                                    mpr_info,
                                    support_field_info,
                                    i_ga,
                                    i_gb,
                                    i_b,
                                    center_a,
                                    center_b,
                                    ga_pos_terrain_frame,
                                    ga_quat_terrain_frame,
                                    gb_pos_terrain_frame,
                                    gb_quat_terrain_frame,
                                )
                                if is_col:
                                    # Map the contact back from the terrain frame to world frame.
                                    normal = gu.qd_transform_by_quat(normal, gb_quat)
                                    contact_pos = gu.qd_transform_by_quat(contact_pos, gb_quat)
                                    contact_pos = contact_pos + gb_pos
                                    valid = True
                                    i_c = collider_state.n_contacts[i_b]
                                    # Reject contacts too close to one already found for this pair.
                                    for j in range(n_con):
                                        if (
                                            contact_pos - collider_state.contact_data.pos[i_c - j - 1, i_b]
                                        ).norm() < tolerance:
                                            valid = False
                                            break
                                    if valid:
                                        func_add_contact(
                                            i_ga,
                                            i_gb,
                                            normal,
                                            contact_pos,
                                            penetration,
                                            i_b,
                                            geoms_state,
                                            geoms_info,
                                            collider_state,
                                            collider_info,
                                            errno,
                                        )
                                        n_con = n_con + 1
@qd.func
def func_add_prism_vert(
    x,
    y,
    z,
    i_b,
    collider_state: array_class.ColliderState,
):
    """
    Push a new heightfield sample (x, y, z) into the 6-vertex sliding prism buffer of batch `i_b`.

    Slots 0-2 are the bottom triangle and 3-5 the top triangle; both triangles are shifted left by
    one so the new sample occupies slots 2 (bottom, z set by the caller) and 5 (top).
    """
    collider_state.prism[0, i_b] = collider_state.prism[1, i_b]
    collider_state.prism[1, i_b] = collider_state.prism[2, i_b]
    collider_state.prism[3, i_b] = collider_state.prism[4, i_b]
    collider_state.prism[4, i_b] = collider_state.prism[5, i_b]
    collider_state.prism[2, i_b][0] = x
    collider_state.prism[5, i_b][0] = x
    collider_state.prism[2, i_b][1] = y
    collider_state.prism[5, i_b][1] = y
    # Only the top vertex takes the sampled height; prism[2][2] is preset by the caller.
    collider_state.prism[5, i_b][2] = z
@qd.func
def func_convex_convex_contact(
    i_ga,
    i_gb,
    i_b,
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    geoms_init_AABB: array_class.GeomsInitAABB,
    verts_info: array_class.VertsInfo,
    faces_info: array_class.FacesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_info: array_class.ColliderInfo,
    collider_static_config: qd.template(),
    mpr_state: array_class.MPRState,
    mpr_info: array_class.MPRInfo,
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    gjk_static_config: qd.template(),
    support_field_info: array_class.SupportFieldInfo,
    # FIXME: Passing nested data structure as input argument is not supported for now.
    diff_contact_input: array_class.DiffContactInput,
    errno: array_class.V_ANNOTATION,
):
    """
    Narrow-phase contact between two convex geoms (i_ga, i_gb) for batch `i_b`.

    Dispatches on geom types: analytic capsule-capsule / sphere-capsule, plane support query, or
    the configured CCD backend (MPR / MuJoCo MPR / GJK / MuJoCo GJK, see CCD_ALGORITHM_CODE).
    When multi-contact is enabled, up to 5 detection passes are run: pass 0 is the unperturbed
    detection, passes 1-4 re-detect after small rotations of both geoms around two orthogonal
    axes of the first contact, then un-rotate the results to recover extra contact points.
    Contacts are appended to collider_state via func_add_contact. Plane-vs-box pairs are excluded
    here (handled by the dedicated specialization kernel).
    """
    if not (geoms_info.type[i_ga] == gs.GEOM_TYPE.PLANE and geoms_info.type[i_gb] == gs.GEOM_TYPE.BOX):
        EPS = rigid_global_info.EPS[None]
        # Disabling multi-contact for pairs of decomposed geoms would speed up simulation but may cause physical
        # instabilities in the few cases where multiple contact points are actually need. Increasing the tolerance
        # criteria to get rid of redundant contact points seems to be a better option.
        multi_contact = (
            static_rigid_sim_config.enable_multi_contact
            # and not (self._solver.geoms_info[i_ga].is_decomposed and self._solver.geoms_info[i_gb].is_decomposed)
            and geoms_info.type[i_ga] != gs.GEOM_TYPE.SPHERE
            and geoms_info.type[i_ga] != gs.GEOM_TYPE.ELLIPSOID
            and geoms_info.type[i_gb] != gs.GEOM_TYPE.SPHERE
            and geoms_info.type[i_gb] != gs.GEOM_TYPE.ELLIPSOID
        )
        tolerance = func_compute_tolerance(
            i_ga, i_gb, i_b, collider_info.mc_tolerance[None], geoms_info, geoms_init_AABB
        )
        diff_pos_tolerance = func_compute_tolerance(
            i_ga, i_gb, i_b, collider_info.diff_pos_tolerance[None], geoms_info, geoms_init_AABB
        )
        diff_normal_tolerance = collider_info.diff_normal_tolerance[None]
        # Load original geometry state into thread-local variables
        # These are the UNPERTURBED states used as reference point for each independent perturbation
        ga_pos_original = geoms_state.pos[i_ga, i_b]
        ga_quat_original = geoms_state.quat[i_ga, i_b]
        gb_pos_original = geoms_state.pos[i_gb, i_b]
        gb_quat_original = geoms_state.quat[i_gb, i_b]
        # Current (possibly perturbed) state - initialized to original, updated during perturbations
        ga_pos_current = ga_pos_original
        ga_quat_current = ga_quat_original
        gb_pos_current = gb_pos_original
        gb_quat_current = gb_quat_original
        # Pre-allocate some buffers
        # Note that the variables post-fixed with _0 are the values of these
        # variables for contact 0 (used for multi-contact).
        is_col_0 = False
        penetration_0 = gs.qd_float(0.0)
        normal_0 = qd.Vector.zero(gs.qd_float, 3)
        contact_pos_0 = qd.Vector.zero(gs.qd_float, 3)
        # Whether narrowphase detected a contact.
        is_col = False
        penetration = gs.qd_float(0.0)
        normal = qd.Vector.zero(gs.qd_float, 3)
        contact_pos = qd.Vector.zero(gs.qd_float, 3)
        n_con = gs.qd_int(0)
        axis_0 = qd.Vector.zero(gs.qd_float, 3)
        axis_1 = qd.Vector.zero(gs.qd_float, 3)
        qrot = qd.Vector.zero(gs.qd_float, 4)
        # Canonical pair index (smaller geom index first) into the contact normal cache.
        i_pair = collider_info.collision_pair_idx[(i_gb, i_ga) if i_ga > i_gb else (i_ga, i_gb)]
        for i_detection in range(5):
            prefer_gjk = (
                collider_static_config.ccd_algorithm == CCD_ALGORITHM_CODE.GJK
                or collider_static_config.ccd_algorithm == CCD_ALGORITHM_CODE.MJ_GJK
            )
            # Apply perturbations to thread-local state
            if multi_contact and is_col_0:
                # Perturbation axis must not be aligned with the principal axes of inertia the geometry,
                # otherwise it would be more sensitive to ill-conditioning.
                # The four (i_detection % 2, (i_detection // 2) % 2) sign combinations cover +/- axis_0 +/- axis_1.
                axis = (2 * (i_detection % 2) - 1) * axis_0 + (1 - 2 * ((i_detection // 2) % 2)) * axis_1
                qrot = gu.qd_rotvec_to_quat(collider_info.mc_perturbation[None] * axis, EPS)
                # Apply perturbation starting from original state
                ga_pos_current, ga_quat_current = func_rotate_frame(
                    pos=ga_pos_original, quat=ga_quat_original, contact_pos=contact_pos_0, qrot=qrot
                )
                gb_pos_current, gb_quat_current = func_rotate_frame(
                    pos=gb_pos_original, quat=gb_quat_original, contact_pos=contact_pos_0, qrot=gu.qd_inv_quat(qrot)
                )
            if (multi_contact and is_col_0) or (i_detection == 0):
                if geoms_info.type[i_ga] == gs.GEOM_TYPE.CAPSULE and geoms_info.type[i_gb] == gs.GEOM_TYPE.CAPSULE:
                    # Analytic capsule-capsule contact.
                    is_col, normal, contact_pos, penetration = capsule_contact.func_capsule_capsule_contact(
                        i_ga=i_ga,
                        i_gb=i_gb,
                        ga_pos=ga_pos_current,
                        ga_quat=ga_quat_current,
                        gb_pos=gb_pos_current,
                        gb_quat=gb_quat_current,
                        geoms_info=geoms_info,
                        rigid_global_info=rigid_global_info,
                    )
                elif (
                    geoms_info.type[i_ga] == gs.GEOM_TYPE.SPHERE and geoms_info.type[i_gb] == gs.GEOM_TYPE.CAPSULE
                ) or (geoms_info.type[i_ga] == gs.GEOM_TYPE.CAPSULE and geoms_info.type[i_gb] == gs.GEOM_TYPE.SPHERE):
                    # Analytic sphere-capsule contact.
                    is_col, normal, contact_pos, penetration = capsule_contact.func_sphere_capsule_contact(
                        i_ga=i_ga,
                        i_gb=i_gb,
                        ga_pos=ga_pos_current,
                        ga_quat=ga_quat_current,
                        gb_pos=gb_pos_current,
                        gb_quat=gb_quat_current,
                        geoms_info=geoms_info,
                        rigid_global_info=rigid_global_info,
                    )
                elif geoms_info.type[i_ga] == gs.GEOM_TYPE.PLANE:
                    # Plane vs convex: support query along the plane normal gives the deepest point.
                    plane_dir = qd.Vector(
                        [geoms_info.data[i_ga][0], geoms_info.data[i_ga][1], geoms_info.data[i_ga][2]], dt=gs.qd_float
                    )
                    plane_dir = gu.qd_transform_by_quat(plane_dir, ga_quat_current)
                    normal = -plane_dir.normalized()
                    v1 = mpr.support_driver(
                        geoms_info,
                        collider_state,
                        collider_static_config,
                        support_field_info,
                        normal,
                        i_gb,
                        i_b,
                        gb_pos_current,
                        gb_quat_current,
                    )
                    penetration = normal.dot(v1 - ga_pos_current)
                    contact_pos = v1 - 0.5 * penetration * normal
                    is_col = penetration > 0.0
                else:
                    ### MPR, MJ_MPR
                    if qd.static(
                        collider_static_config.ccd_algorithm in (CCD_ALGORITHM_CODE.MPR, CCD_ALGORITHM_CODE.MJ_MPR)
                    ):
                        # Try using MPR before anything else
                        is_mpr_updated = False
                        # Warm-start MPR with the contact normal cached from the previous step, if any.
                        normal_ws = collider_state.contact_cache.normal[i_pair, i_b]
                        is_mpr_guess_direction_available = (qd.abs(normal_ws) > EPS).any()
                        for i_mpr in range(2):
                            if i_mpr == 1:
                                # Try without warm-start if no contact was detected using it.
                                # When penetration depth is very shallow, MPR may wrongly classify two geometries as not
                                # in contact while they actually are. This helps to improve contact persistence without
                                # increasing much the overall computational cost since the fallback should not be
                                # triggered very often.
                                if qd.static(not static_rigid_sim_config.enable_mujoco_compatibility):
                                    if (i_detection == 0) and not is_col and is_mpr_guess_direction_available:
                                        normal_ws = qd.Vector.zero(gs.qd_float, 3)
                                        is_mpr_guess_direction_available = False
                                        is_mpr_updated = False
                            if not is_mpr_updated:
                                is_col, normal, penetration, contact_pos = mpr.func_mpr_contact(
                                    geoms_info,
                                    geoms_init_AABB,
                                    rigid_global_info,
                                    static_rigid_sim_config,
                                    collider_state,
                                    collider_static_config,
                                    mpr_state,
                                    mpr_info,
                                    support_field_info,
                                    i_ga,
                                    i_gb,
                                    i_b,
                                    normal_ws,
                                    ga_pos_current,
                                    ga_quat_current,
                                    gb_pos_current,
                                    gb_quat_current,
                                )
                                is_mpr_updated = True
                        # Fallback on GJK if collision is detected by MPR if the initial penetration is already quite
                        # large, and either no collision direction was cached or the geometries have large overlap. This
                        # contact information provided by MPR may be unreliable in these cases.
                        if qd.static(collider_static_config.ccd_algorithm == CCD_ALGORITHM_CODE.MPR):
                            if penetration > tolerance:
                                prefer_gjk = not is_mpr_guess_direction_available or (
                                    collider_info.mc_tolerance[None] * penetration
                                    >= collider_info.mpr_to_gjk_overlap_ratio[None] * tolerance
                                )
                    ### GJK, MJ_GJK
                    if qd.static(collider_static_config.ccd_algorithm != CCD_ALGORITHM_CODE.MJ_MPR):
                        if prefer_gjk:
                            if qd.static(static_rigid_sim_config.requires_grad):
                                # Differentiable GJK variant, also records per-contact inputs for gradients.
                                diff_gjk.func_gjk_contact(
                                    links_state,
                                    links_info,
                                    geoms_state,
                                    geoms_info,
                                    geoms_init_AABB,
                                    verts_info,
                                    faces_info,
                                    rigid_global_info,
                                    static_rigid_sim_config,
                                    collider_state,
                                    collider_static_config,
                                    gjk_state,
                                    gjk_info,
                                    support_field_info,
                                    diff_contact_input,
                                    i_ga,
                                    i_gb,
                                    i_b,
                                    ga_pos_current,
                                    ga_quat_current,
                                    gb_pos_current,
                                    gb_quat_current,
                                    diff_pos_tolerance,
                                    diff_normal_tolerance,
                                )
                            else:
                                gjk.func_gjk_contact(
                                    geoms_state,
                                    geoms_info,
                                    verts_info,
                                    faces_info,
                                    rigid_global_info,
                                    static_rigid_sim_config,
                                    collider_state,
                                    collider_static_config,
                                    gjk_state,
                                    gjk_info,
                                    gjk_static_config,
                                    support_field_info,
                                    i_ga,
                                    i_gb,
                                    i_b,
                                    ga_pos_current,
                                    ga_quat_current,
                                    gb_pos_current,
                                    gb_quat_current,
                                )
                            is_col = gjk_state.is_col[i_b] == 1
                            penetration = gjk_state.penetration[i_b]
                            n_contacts = gjk_state.n_contacts[i_b]
                            if is_col:
                                if qd.static(static_rigid_sim_config.requires_grad):
                                    # Differentiable path: add every GJK contact and stop detection passes.
                                    for i_c in range(n_contacts):
                                        func_add_diff_contact_input(
                                            i_ga,
                                            i_gb,
                                            i_b,
                                            i_c,
                                            gjk_state,
                                            collider_state,
                                            collider_info,
                                        )
                                        func_add_contact(
                                            i_ga,
                                            i_gb,
                                            gjk_state.normal[i_b, i_c],
                                            gjk_state.contact_pos[i_b, i_c],
                                            gjk_state.diff_penetration[i_b, i_c],
                                            i_b,
                                            geoms_state,
                                            geoms_info,
                                            collider_state,
                                            collider_info,
                                            errno,
                                        )
                                    break
                                else:
                                    if gjk_state.multi_contact_flag[i_b]:
                                        # Since we already found multiple contact points, add the discovered contact
                                        # points and stop multi-contact search.
                                        for i_c in range(n_contacts):
                                            # Ignore contact points if the number of contacts exceeds the limit.
                                            if i_c < qd.static(collider_static_config.n_contacts_per_pair):
                                                contact_pos = gjk_state.contact_pos[i_b, i_c]
                                                normal = gjk_state.normal[i_b, i_c]
                                                if qd.static(static_rigid_sim_config.requires_grad):
                                                    penetration = gjk_state.diff_penetration[i_b, i_c]
                                                func_add_contact(
                                                    i_ga,
                                                    i_gb,
                                                    normal,
                                                    contact_pos,
                                                    penetration,
                                                    i_b,
                                                    geoms_state,
                                                    geoms_info,
                                                    collider_state,
                                                    collider_info,
                                                    errno,
                                                )
                                        break
                                    else:
                                        contact_pos = gjk_state.contact_pos[i_b, 0]
                                        normal = gjk_state.normal[i_b, 0]
            if i_detection == 0:
                # Remember the reference contact; it anchors all subsequent perturbation passes.
                is_col_0, normal_0, penetration_0, contact_pos_0 = is_col, normal, penetration, contact_pos
                if is_col_0:
                    func_add_contact(
                        i_ga,
                        i_gb,
                        normal_0,
                        contact_pos_0,
                        penetration_0,
                        i_b,
                        geoms_state,
                        geoms_info,
                        collider_state,
                        collider_info,
                        errno,
                    )
                    if multi_contact:
                        # Perturb geom_a around two orthogonal axes to find multiple contacts
                        axis_0, axis_1 = func_contact_orthogonals(
                            i_ga,
                            i_gb,
                            normal,
                            i_b,
                            links_state,
                            links_info,
                            geoms_state,
                            geoms_info,
                            geoms_init_AABB,
                            rigid_global_info,
                            static_rigid_sim_config,
                        )
                        n_con = 1
                    if qd.static(
                        collider_static_config.ccd_algorithm in (CCD_ALGORITHM_CODE.MPR, CCD_ALGORITHM_CODE.GJK)
                    ):
                        collider_state.contact_cache.normal[i_pair, i_b] = normal
                else:
                    # Clear collision normal cache if not in contact
                    collider_state.contact_cache.normal[i_pair, i_b] = qd.Vector.zero(gs.qd_float, 3)
            elif multi_contact and is_col:
                # For perturbed iterations (i_detection > 0), correct contact position and normal. This applies to all
                # collision methods when multi-contact is enabled, except mujoco compatible.
                #
                # 1. Project the contact point on both geometries
                # 2. Revert the effect of small rotation
                # 3. Update contact point
                if qd.static(
                    collider_static_config.ccd_algorithm not in (CCD_ALGORITHM_CODE.MJ_MPR, CCD_ALGORITHM_CODE.MJ_GJK)
                ):
                    contact_point_a = (
                        gu.qd_transform_by_quat(
                            (contact_pos - 0.5 * penetration * normal) - contact_pos_0,
                            gu.qd_inv_quat(qrot),
                        )
                        + contact_pos_0
                    )
                    contact_point_b = (
                        gu.qd_transform_by_quat(
                            (contact_pos + 0.5 * penetration * normal) - contact_pos_0,
                            qrot,
                        )
                        + contact_pos_0
                    )
                    contact_pos = 0.5 * (contact_point_a + contact_point_b)
                    # First-order correction of the normal direction.
                    # The way the contact normal gets twisted by applying perturbation of geometry poses is
                    # unpredictable as it depends on the final portal discovered by MPR. Alternatively, let compute
                    # the minimal rotation that makes the corrected twisted normal as closed as possible to the
                    # original one, up to the scale of the perturbation, then apply first-order Taylor expansion of
                    # Rodrigues' rotation formula.
                    twist_rotvec = qd.math.clamp(
                        normal.cross(normal_0),
                        -collider_info.mc_perturbation[None],
                        collider_info.mc_perturbation[None],
                    )
                    normal = normal + twist_rotvec.cross(normal)
                    # Make sure that the penetration is still positive before adding contact point.
                    # Note that adding some negative tolerance improves physical stability by encouraging persistent
                    # contact points and thefore more continuous contact forces, without changing the mean-field
                    # dynamics since zero-penetration contact points should not induce any force.
                    penetration = normal.dot(contact_point_b - contact_point_a)
                if qd.static(collider_static_config.ccd_algorithm == CCD_ALGORITHM_CODE.MJ_GJK):
                    # Only change penetration to the initial one, because the normal vector could change abruptly
                    # under MuJoCo's GJK-EPA.
                    penetration = penetration_0
                # Discard contact point is repeated
                repeated = False
                for i_c in range(n_con):
                    if not repeated:
                        idx_prev = collider_state.n_contacts[i_b] - 1 - i_c
                        prev_contact = collider_state.contact_data.pos[idx_prev, i_b]
                        if (contact_pos - prev_contact).norm() < tolerance:
                            repeated = True
                if not repeated:
                    if penetration > -tolerance:
                        # Clamp slightly-negative penetrations to zero before recording the contact.
                        penetration = qd.max(penetration, 0.0)
                        func_add_contact(
                            i_ga,
                            i_gb,
                            normal,
                            contact_pos,
                            penetration,
                            i_b,
                            geoms_state,
                            geoms_info,
                            collider_state,
                            collider_info,
                            errno,
                        )
                        n_con = n_con + 1
@qd.kernel(fastcache=gs.use_fastcache)
def func_narrow_phase_convex_vs_convex(
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    geoms_init_AABB: array_class.GeomsInitAABB,
    verts_info: array_class.VertsInfo,
    faces_info: array_class.FacesInfo,
    edges_info: array_class.EdgesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_info: array_class.ColliderInfo,
    collider_static_config: qd.template(),
    mpr_state: array_class.MPRState,
    mpr_info: array_class.MPRInfo,
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    gjk_static_config: qd.template(),
    sdf_info: array_class.SDFInfo,
    support_field_info: array_class.SupportFieldInfo,
    diff_contact_input: array_class.DiffContactInput,
    errno: array_class.V_ANNOTATION,
):
    """
    Narrow-phase kernel for convex-convex pairs: iterate the broad-phase pair list of every batch
    and run func_convex_convex_contact on each eligible pair.

    Pairs are skipped when either geom is non-convex, when geom b is terrain, when box-box
    detection is enabled and both geoms are boxes, or when the pair is plane-vs-box — all of
    those are handled by dedicated kernels.
    """
    _B = collider_state.active_buffer.shape[1]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_b in range(_B):
        for i_pair in range(collider_state.n_broad_pairs[i_b]):
            i_ga = collider_state.broad_collision_pairs[i_pair, i_b][0]
            i_gb = collider_state.broad_collision_pairs[i_pair, i_b][1]
            # Canonical ordering: geom with the smaller type code first.
            if geoms_info.type[i_ga] > geoms_info.type[i_gb]:
                i_ga, i_gb = i_gb, i_ga
            if (
                geoms_info.is_convex[i_ga]
                and geoms_info.is_convex[i_gb]
                and not geoms_info.type[i_gb] == gs.GEOM_TYPE.TERRAIN
                and not (
                    static_rigid_sim_config.box_box_detection
                    and geoms_info.type[i_ga] == gs.GEOM_TYPE.BOX
                    and geoms_info.type[i_gb] == gs.GEOM_TYPE.BOX
                )
            ):
                if not (geoms_info.type[i_ga] == gs.GEOM_TYPE.PLANE and geoms_info.type[i_gb] == gs.GEOM_TYPE.BOX):
                    func_convex_convex_contact(
                        i_ga=i_ga,
                        i_gb=i_gb,
                        i_b=i_b,
                        links_state=links_state,
                        links_info=links_info,
                        geoms_state=geoms_state,
                        geoms_info=geoms_info,
                        geoms_init_AABB=geoms_init_AABB,
                        verts_info=verts_info,
                        faces_info=faces_info,
                        rigid_global_info=rigid_global_info,
                        static_rigid_sim_config=static_rigid_sim_config,
                        collider_state=collider_state,
                        collider_info=collider_info,
                        collider_static_config=collider_static_config,
                        mpr_state=mpr_state,
                        mpr_info=mpr_info,
                        gjk_state=gjk_state,
                        gjk_info=gjk_info,
                        gjk_static_config=gjk_static_config,
                        support_field_info=support_field_info,
                        # FIXME: Passing nested data structure as input argument is not supported for now.
                        diff_contact_input=diff_contact_input,
                        errno=errno,
                    )
@qd.kernel(fastcache=gs.use_fastcache)
def func_narrow_phase_diff_convex_vs_convex(
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_info: array_class.ColliderInfo,
    gjk_info: array_class.GJKInfo,
    # FIXME: Passing nested data structure as input argument is not supported for now.
    diff_contact_input: array_class.DiffContactInput,
):
    """
    Recompute all recorded contacts in a differentiable way (two-pass scheme).

    Pass 1 processes each pair's reference contact (i_c == ref_id) with ref_penetration = -1.0 and
    stores the resulting penetration. Pass 2 processes the remaining contacts using the reference
    penetration stored in pass 1, so the passes must run in this order. Each result overwrites the
    contact slot in place via func_set_contact, with the penetration weighted by the contact weight.
    """
    # Compute reference contacts
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.PARTIAL)
    for i_c, i_b in qd.ndrange(collider_state.contact_data.pos.shape[0], collider_state.active_buffer.shape[1]):
        if i_c < collider_state.n_contacts[i_b]:
            ref_id = collider_state.diff_contact_input.ref_id[i_b, i_c]
            is_ref = i_c == ref_id
            i_ga = collider_state.diff_contact_input.geom_a[i_b, i_c]
            i_gb = collider_state.diff_contact_input.geom_b[i_b, i_c]
            if is_ref:
                # -1.0 marks "no reference available yet" for the reference contact itself.
                ref_penetration = -1.0
                contact_pos, contact_normal, penetration, weight = diff_gjk.func_differentiable_contact(
                    geoms_state, diff_contact_input, gjk_info, i_ga, i_gb, i_b, i_c, ref_penetration
                )
                collider_state.diff_contact_input.ref_penetration[i_b, i_c] = penetration
                func_set_contact(
                    i_ga,
                    i_gb,
                    contact_normal,
                    contact_pos,
                    penetration * weight,
                    i_b,
                    i_c,
                    geoms_state,
                    geoms_info,
                    collider_state,
                    collider_info,
                )
    # Compute other contacts
    for i_c, i_b in qd.ndrange(collider_state.contact_data.pos.shape[0], collider_state.active_buffer.shape[1]):
        if i_c < collider_state.n_contacts[i_b]:
            ref_id = collider_state.diff_contact_input.ref_id[i_b, i_c]
            is_ref = i_c == ref_id
            i_ga = collider_state.diff_contact_input.geom_a[i_b, i_c]
            i_gb = collider_state.diff_contact_input.geom_b[i_b, i_c]
            if not is_ref:
                # Use the penetration computed for this pair's reference contact in the first pass.
                ref_penetration = collider_state.diff_contact_input.ref_penetration[i_b, ref_id]
                contact_pos, contact_normal, penetration, weight = diff_gjk.func_differentiable_contact(
                    geoms_state, diff_contact_input, gjk_info, i_ga, i_gb, i_b, i_c, ref_penetration
                )
                func_set_contact(
                    i_ga,
                    i_gb,
                    contact_normal,
                    contact_pos,
                    penetration * weight,
                    i_b,
                    i_c,
                    geoms_state,
                    geoms_info,
                    collider_state,
                    collider_info,
                )
@qd.kernel(fastcache=gs.use_fastcache)
def func_narrow_phase_convex_specializations(
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    geoms_init_AABB: array_class.GeomsInitAABB,
    verts_info: array_class.VertsInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_info: array_class.ColliderInfo,
    collider_static_config: qd.template(),
    errno: array_class.V_ANNOTATION,
):
    """
    Narrow phase for geometry pairs that have a specialized analytical routine.

    For every broad-phase candidate pair in every environment, dispatches:
      * plane-vs-box pairs to `func_plane_box_contact`;
      * box-vs-box pairs to `func_box_box_contact`, but only when box-box detection is
        compiled in (`static_rigid_sim_config.box_box_detection`).
    Detected contacts are written into `collider_state`; failures are reported via `errno`.
    """
    # Number of batched environments (second dim of the active buffer).
    _B = collider_state.active_buffer.shape[1]
    # Serialize the outer loop unless the configured parallelism level is high enough.
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_b in range(_B):
        for i_pair in range(collider_state.n_broad_pairs[i_b]):
            i_ga = collider_state.broad_collision_pairs[i_pair, i_b][0]
            i_gb = collider_state.broad_collision_pairs[i_pair, i_b][1]

            # Canonical ordering: put the geom with the smaller type enum first so the
            # specialized checks below only need to test one ordering of each pair.
            if geoms_info.type[i_ga] > geoms_info.type[i_gb]:
                i_ga, i_gb = i_gb, i_ga

            if geoms_info.type[i_ga] == gs.GEOM_TYPE.PLANE and geoms_info.type[i_gb] == gs.GEOM_TYPE.BOX:
                func_plane_box_contact(
                    i_ga,
                    i_gb,
                    i_b,
                    geoms_state,
                    geoms_info,
                    geoms_init_AABB,
                    verts_info,
                    static_rigid_sim_config,
                    collider_state,
                    collider_info,
                    collider_static_config,
                    errno,
                )

            # Box-box branch is compiled out entirely when the feature is disabled.
            if qd.static(static_rigid_sim_config.box_box_detection):
                if geoms_info.type[i_ga] == gs.GEOM_TYPE.BOX and geoms_info.type[i_gb] == gs.GEOM_TYPE.BOX:
                    func_box_box_contact(
                        i_ga,
                        i_gb,
                        i_b,
                        geoms_state,
                        geoms_info,
                        collider_state,
                        collider_info,
                        rigid_global_info,
                        collider_static_config,
                        errno,
                    )
@qd.kernel(fastcache=gs.use_fastcache)
def func_narrow_phase_any_vs_terrain(
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    geoms_init_AABB: array_class.GeomsInitAABB,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_info: array_class.ColliderInfo,
    collider_static_config: qd.template(),
    mpr_state: array_class.MPRState,
    mpr_info: array_class.MPRInfo,
    support_field_info: array_class.SupportFieldInfo,
    errno: array_class.V_ANNOTATION,
):
    """
    Narrow phase (MPR-based) for pairs where one of the two geometries is a terrain.

    NOTE: for a single non-batched scene with a lot of collision pairs, it would be faster to also
    parallelize over the collision pairs. However, parallelizing over both the batch and the pairs
    (instead of only over the batch) leads to significantly slower performance for batched scenes.
    We could treat B == 0 and B > 0 separately, but we would end up with messier code.
    Therefore, for a big non-batched scene, users are encouraged to simply use the `gs.cpu` backend.
    Updated NOTE & TODO: for a HUGE scene with numerous bodies, it is also reasonable to run on GPU.
    Let's save this for later.
    Update2: now we use n_broad_pairs instead of n_collision_pairs, so we probably need to think
    about how to handle a non-batched large scene better.
    """
    # Number of batched environments (second dim of the active buffer).
    _B = collider_state.active_buffer.shape[1]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_b in range(_B):
        for i_pair in range(collider_state.n_broad_pairs[i_b]):
            i_ga = collider_state.broad_collision_pairs[i_pair, i_b][0]
            i_gb = collider_state.broad_collision_pairs[i_pair, i_b][1]

            # This branch is compiled in only when the scene actually contains a terrain.
            if qd.static(collider_static_config.has_terrain):
                # Ensure the terrain geom (if present) ends up as the second element.
                if geoms_info.type[i_ga] == gs.GEOM_TYPE.TERRAIN:
                    i_ga, i_gb = i_gb, i_ga

                if geoms_info.type[i_gb] == gs.GEOM_TYPE.TERRAIN:
                    func_contact_mpr_terrain(
                        i_ga,
                        i_gb,
                        i_b,
                        geoms_state,
                        geoms_info,
                        geoms_init_AABB,
                        static_rigid_sim_config,
                        collider_state,
                        collider_info,
                        collider_static_config,
                        mpr_state,
                        mpr_info,
                        support_field_info,
                        errno,
                    )
@qd.kernel(fastcache=gs.use_fastcache)
def func_narrow_phase_nonconvex_vs_nonterrain(
    links_state: array_class.LinksState,
    links_info: array_class.LinksInfo,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    geoms_init_AABB: array_class.GeomsInitAABB,
    verts_info: array_class.VertsInfo,
    edges_info: array_class.EdgesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_info: array_class.ColliderInfo,
    collider_static_config: qd.template(),
    sdf_info: array_class.SDFInfo,
    errno: array_class.V_ANNOTATION,
):
    """
    SDF-based narrow phase for pairs where at least one geom is non-convex and the
    second geom is not a terrain.

    For each broad-phase candidate pair, a vertex-vs-SDF contact is searched in both
    orderings (a-vs-b, then b-vs-a). When multi-contact is enabled, extra contact points
    are generated by perturbing the poses around two axes orthogonal to the first contact
    normal. If no vertex-face contact was recorded, an edge-vs-SDF check is attempted.

    NOTE: for a single non-batched scene with a lot of collision pairs, it would be faster to also
    parallelize over the collision pairs. However, parallelizing over both the batch and the pairs
    (instead of only over the batch) leads to significantly slower performance for batched scenes.
    We could treat B == 0 and B > 0 separately, but we would end up with messier code.
    Therefore, for a big non-batched scene, users are encouraged to simply use the `gs.cpu` backend.
    Updated NOTE & TODO: for a HUGE scene with numerous bodies, it is also reasonable to run on GPU.
    Let's save this for later.
    Update2: now we use n_broad_pairs instead of n_collision_pairs, so we probably need to think
    about how to handle a non-batched large scene better.
    """
    EPS = rigid_global_info.EPS[None]
    # Number of batched environments (second dim of the active buffer).
    _B = collider_state.active_buffer.shape[1]
    qd.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL)
    for i_b in range(_B):
        for i_pair in range(collider_state.n_broad_pairs[i_b]):
            i_ga = collider_state.broad_collision_pairs[i_pair, i_b][0]
            i_gb = collider_state.broad_collision_pairs[i_pair, i_b][1]

            if qd.static(collider_static_config.has_nonconvex_nonterrain):
                if (
                    not (geoms_info.is_convex[i_ga] and geoms_info.is_convex[i_gb])
                    and geoms_info.type[i_gb] != gs.GEOM_TYPE.TERRAIN
                ):
                    is_col = False
                    # Tolerance used both to merge duplicate contact points and as the
                    # acceptance threshold on (possibly slightly negative) penetration.
                    tolerance = func_compute_tolerance(
                        i_ga, i_gb, i_b, collider_info.mc_tolerance[None], geoms_info, geoms_init_AABB
                    )
                    # Try both orderings: vertices of geom a against the SDF of geom b,
                    # then the reverse (i_ga/i_gb are swapped on the second pass).
                    for i in range(2):
                        if i == 1:
                            i_ga, i_gb = i_gb, i_ga

                        # initial point
                        is_col_i = False
                        normal_i = qd.Vector.zero(gs.qd_float, 3)
                        contact_pos_i = qd.Vector.zero(gs.qd_float, 3)
                        if not is_col:
                            ga_pos = geoms_state.pos[i_ga, i_b]
                            ga_quat = geoms_state.quat[i_ga, i_b]
                            gb_pos = geoms_state.pos[i_gb, i_b]
                            gb_quat = geoms_state.quat[i_gb, i_b]
                            is_col_i, normal_i, penetration_i, contact_pos_i = func_contact_vertex_sdf(
                                i_ga,
                                i_gb,
                                i_b,
                                ga_pos,
                                ga_quat,
                                gb_pos,
                                gb_quat,
                                geoms_state,
                                geoms_info,
                                verts_info,
                                rigid_global_info,
                                collider_static_config,
                                sdf_info,
                            )
                            if is_col_i:
                                func_add_contact(
                                    i_ga,
                                    i_gb,
                                    normal_i,
                                    contact_pos_i,
                                    penetration_i,
                                    i_b,
                                    geoms_state,
                                    geoms_info,
                                    collider_state,
                                    collider_info,
                                    errno,
                                )

                        if qd.static(static_rigid_sim_config.enable_multi_contact):
                            if not is_col and is_col_i:
                                ga_pos_original, ga_quat_original = (
                                    geoms_state.pos[i_ga, i_b],
                                    geoms_state.quat[i_ga, i_b],
                                )
                                gb_pos_original, gb_quat_original = (
                                    geoms_state.pos[i_gb, i_b],
                                    geoms_state.quat[i_gb, i_b],
                                )

                                # Perturb geom_a around two orthogonal axes to find multiple contacts
                                axis_0, axis_1 = func_contact_orthogonals(
                                    i_ga,
                                    i_gb,
                                    normal_i,
                                    i_b,
                                    links_state,
                                    links_info,
                                    geoms_state,
                                    geoms_info,
                                    geoms_init_AABB,
                                    rigid_global_info,
                                    static_rigid_sim_config,
                                )

                                n_con = 1
                                # Four perturbations: (+/-axis_0) combined with (+/-axis_1).
                                for i_rot in range(1, 5):
                                    axis = (2 * (i_rot % 2) - 1) * axis_0 + (1 - 2 * ((i_rot // 2) % 2)) * axis_1
                                    qrot = gu.qd_rotvec_to_quat(collider_info.mc_perturbation[None] * axis, EPS)

                                    # Apply perturbations to local variables (no global state modification)
                                    ga_pos_perturbed, ga_quat_perturbed = func_rotate_frame(
                                        ga_pos_original, ga_quat_original, contact_pos_i, qrot
                                    )
                                    gb_pos_perturbed, gb_quat_perturbed = func_rotate_frame(
                                        gb_pos_original, gb_quat_original, contact_pos_i, gu.qd_inv_quat(qrot)
                                    )

                                    # NOTE(review): `is_col` is overwritten on every perturbation, so
                                    # after this loop it holds only the LAST perturbation's result —
                                    # confirm this is intended before relying on it downstream.
                                    is_col, normal, penetration, contact_pos = func_contact_vertex_sdf(
                                        i_ga,
                                        i_gb,
                                        i_b,
                                        ga_pos_perturbed,
                                        ga_quat_perturbed,
                                        gb_pos_perturbed,
                                        gb_quat_perturbed,
                                        geoms_state,
                                        geoms_info,
                                        verts_info,
                                        rigid_global_info,
                                        collider_static_config,
                                        sdf_info,
                                    )
                                    if is_col:
                                        if qd.static(not static_rigid_sim_config.enable_mujoco_compatibility):
                                            # 1. Project the contact point on both geometries
                                            # 2. Revert the effect of small rotation
                                            # 3. Update contact point
                                            contact_point_a = (
                                                gu.qd_transform_by_quat(
                                                    (contact_pos - 0.5 * penetration * normal) - contact_pos_i,
                                                    gu.qd_inv_quat(qrot),
                                                )
                                                + contact_pos_i
                                            )
                                            contact_point_b = (
                                                gu.qd_transform_by_quat(
                                                    (contact_pos + 0.5 * penetration * normal) - contact_pos_i,
                                                    qrot,
                                                )
                                                + contact_pos_i
                                            )
                                            contact_pos = 0.5 * (contact_point_a + contact_point_b)

                                            # First-order correction of the normal direction
                                            twist_rotvec = qd.math.clamp(
                                                normal.cross(normal_i),
                                                -collider_info.mc_perturbation[None],
                                                collider_info.mc_perturbation[None],
                                            )
                                            normal = normal + twist_rotvec.cross(normal)

                                            # Make sure that the penetration is still positive
                                            penetration = normal.dot(contact_point_b - contact_point_a)

                                        # Discard contact point if repeated (closer than `tolerance`
                                        # to any of the `n_con` most recently stored contacts).
                                        repeated = False
                                        for i_c in range(n_con):
                                            if not repeated:
                                                idx_prev = collider_state.n_contacts[i_b] - 1 - i_c
                                                prev_contact = collider_state.contact_data.pos[idx_prev, i_b]
                                                if (contact_pos - prev_contact).norm() < tolerance:
                                                    repeated = True

                                        if not repeated:
                                            if penetration > -tolerance:
                                                # Clamp slightly-negative penetrations to zero.
                                                penetration = qd.max(penetration, 0.0)
                                                func_add_contact(
                                                    i_ga,
                                                    i_gb,
                                                    normal,
                                                    contact_pos,
                                                    penetration,
                                                    i_b,
                                                    geoms_state,
                                                    geoms_info,
                                                    collider_state,
                                                    collider_info,
                                                    errno,
                                                )
                                                n_con = n_con + 1

                        if not is_col:  # check edge-edge if vertex-face is not detected
                            # Extract current poses for initial collision detection
                            ga_pos = geoms_state.pos[i_ga, i_b]
                            ga_quat = geoms_state.quat[i_ga, i_b]
                            gb_pos = geoms_state.pos[i_gb, i_b]
                            gb_quat = geoms_state.quat[i_gb, i_b]
                            is_col, normal, penetration, contact_pos = func_contact_edge_sdf(
                                i_ga,
                                i_gb,
                                i_b,
                                ga_pos,
                                ga_quat,
                                gb_pos,
                                gb_quat,
                                geoms_state,
                                geoms_info,
                                verts_info,
                                edges_info,
                                rigid_global_info,
                                collider_static_config,
                                sdf_info,
                            )
                            if is_col:
                                func_add_contact(
                                    i_ga,
                                    i_gb,
                                    normal,
                                    contact_pos,
                                    penetration,
                                    i_b,
                                    geoms_state,
                                    geoms_info,
                                    collider_state,
                                    collider_info,
                                    errno,
                                )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/collider/narrowphase.py",
"license": "Apache License 2.0",
"lines": 1303,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/utils/deprecated_module_wrapper.py | import sys
from types import ModuleType
import genesis as gs
class _DeprecatedModuleWrapper(ModuleType):
"""
A module wrapper that shows a deprecation warning when accessed.
This allows us to support the old module name while
warning users to update their imports.
"""
def __init__(self, actual_module, old_name, new_name):
super().__init__(old_name)
self._actual_module = actual_module
self._old_name = old_name
self._new_name = new_name
self._warned = False
self.__file__ = getattr(actual_module, "__file__", None)
self.__package__ = ".".join(old_name.split(".")[:-1])
def __getattr__(self, name):
if not self._warned:
gs.logger.warning(f"Deprecated import: {self._old_name} has been renamed to {self._new_name}.")
self._warned = True
return getattr(self._actual_module, name)
def __dir__(self):
return dir(self._actual_module)
def create_virtual_deprecated_module(module_name: str, deprecated_name: str) -> None:
    """
    Register ``deprecated_name`` in ``sys.modules`` as a deprecation wrapper around the
    already-imported module ``module_name``.

    Call from the new module with:
      - module_name=__name__
      - deprecated_name is the full dotted path, e.g.
        "genesis.engine.solvers.rigid.rigid_solver_decomp"
    """
    target = sys.modules[module_name]
    sys.modules[deprecated_name] = _DeprecatedModuleWrapper(target, deprecated_name, module_name)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/deprecated_module_wrapper.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/rigid/heterogeneous_simulation.py | """
Heterogeneous Simulation Example
================================
This example demonstrates heterogeneous simulation, where different parallel
environments can have different geometry variants for the same entity.
Variant Assignment Rules:
When passing a list of morphs to scene.add_entity(), variants are distributed
across environments using the following rules:
1. When n_envs >= n_variants:
Balanced block assignment. Environments are divided into blocks, with each
block assigned to one variant. For example, with 4 variants and 8 environments:
- Environments 0-1 -> Variant 0
- Environments 2-3 -> Variant 1
- Environments 4-5 -> Variant 2
- Environments 6-7 -> Variant 3
2. When n_envs < n_variants:
Each environment i gets variant i (0-indexed). Variants beyond n_envs are
unused. For example, with 4 variants and 2 environments:
- Environment 0 -> Variant 0 (first morph in list)
- Environment 1 -> Variant 1 (second morph in list)
- Variants 2 and 3 are unused
Usage:
python heterogeneous_simulation.py -v -n 4 # 4 environments (matches 4 variants)
python heterogeneous_simulation.py -v -n 8 # 8 environments (2 per variant)
python heterogeneous_simulation.py -v -n 2 # 2 environments (only first 2 variants used)
"""
import argparse
import numpy as np
import genesis as gs
def main():
    """Run the heterogeneous grasping demo: hold, close the gripper, then lift the per-environment variant object."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    parser.add_argument("-n", "--n_envs", type=int, default=4)
    args = parser.parse_args()
    n_envs = args.n_envs

    ########################## init ##########################
    gs.init(backend=gs.gpu, precision="32")

    ########################## create a scene ##########################
    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(3, -1, 1.5),
            camera_lookat=(0.0, 0.0, 0.5),
        ),
        show_viewer=args.vis,
    )

    ########################## entities ##########################
    scene.add_entity(gs.morphs.Plane())
    franka = scene.add_entity(gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"))

    # Define 4 geometry variants - see module docstring for variant assignment rules
    variant_morphs = [
        gs.morphs.Box(size=(0.04, 0.04, 0.04), pos=(0.65, 0.0, 0.02)),  # Variant 0
        gs.morphs.Box(size=(0.02, 0.02, 0.02), pos=(0.65, 0.0, 0.02)),  # Variant 1
        gs.morphs.Sphere(radius=0.015, pos=(0.65, 0.0, 0.02)),  # Variant 2
        gs.morphs.Sphere(radius=0.025, pos=(0.65, 0.0, 0.02)),  # Variant 3
    ]
    grasping_object = scene.add_entity(morph=variant_morphs)

    ########################## build ##########################
    scene.build(n_envs=n_envs, env_spacing=(1, 1))

    motors_dof = np.arange(7)
    fingers_dof = np.arange(7, 9)

    # Pre-grasp arm configuration (7 arm joints + 2 open fingers).
    l_qpos = [-1.0124, 1.5559, 1.3662, -1.6878, -1.5799, 1.7757, 1.4602, 0.04, 0.04]
    franka.set_qpos(np.array(l_qpos) if n_envs == 0 else np.array([l_qpos] * n_envs))
    scene.step()

    aabb = grasping_object.get_AABB()
    mass = grasping_object.get_mass()
    print("heterogeneous AABB", aabb)
    print("heterogeneous mass", mass)

    end_effector = franka.get_link("hand")

    # Move the hand just above the object.
    qpos = franka.inverse_kinematics(
        link=end_effector,
        pos=np.array([[0.65, 0.0, 0.135]] * n_envs),
        quat=np.array([[0, 1, 0, 0]] * n_envs),
    )
    franka.control_dofs_position(qpos[..., :-2], motors_dof)

    # hold
    for i in range(100):
        print("hold", i)
        scene.step()

    # grasp
    finger_pos = 0.0
    for i in range(100):
        print("grasp", i)
        franka.control_dofs_position(qpos[..., :-2], motors_dof)
        franka.control_dofs_position(np.array([[finger_pos, finger_pos]] * n_envs), fingers_dof)
        scene.step()

    # lift
    qpos = franka.inverse_kinematics(
        link=end_effector,
        pos=np.array([[0.65, 0.0, 0.3]] * n_envs),
        quat=np.array([[0, 1, 0, 0]] * n_envs),
    )
    for i in range(200):
        print("lift", i)
        franka.control_dofs_position(qpos[..., :-2], motors_dof)
        franka.control_dofs_position(np.array([[finger_pos, finger_pos]] * n_envs), fingers_dof)
        scene.step()


if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/rigid/heterogeneous_simulation.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/usd/import_stage.py | import argparse
import os
import numpy as np
from huggingface_hub import snapshot_download
import genesis as gs
from genesis.utils.misc import tensor_to_array
import genesis.utils.geom as gu
class JointAnimator:
    """
    Drive every actuated DOF of the scene along a sinusoid between its lower and upper
    limit. The initial phase is chosen so the sinusoid starts at the current DOF position.
    """

    def __init__(self, scene: gs.Scene):
        solver = scene.sim.rigid_solver
        self.rigid_solver = solver
        self.joint_lower, self.joint_upper = map(tensor_to_array, solver.get_dofs_limit())
        q0 = tensor_to_array(solver.get_dofs_position())
        span = self.joint_upper - self.joint_lower
        # Normalize the current position into [-1, 1]; DOFs with a degenerate range get 0.
        normalized_q0 = np.where(
            span > gs.EPS,
            2.0 * (q0 - self.joint_lower) / span - 1.0,
            0.0,
        )
        self.init_phase = np.arcsin(normalized_q0)
        solver.set_dofs_kp(gu.default_dofs_kp(solver.n_dofs))

    def animate(self, scene: gs.Scene):
        # Advance the phase with simulated time and command the resulting positions.
        phase = np.pi * (scene.t * scene.dt) + self.init_phase
        target = (self.joint_upper + self.joint_lower + (self.joint_upper - self.joint_lower) * np.sin(phase)) / 2
        self.rigid_solver.control_dofs_position(target)
def main():
    """Load a USD refrigerator stage and animate its joints for the requested number of steps."""
    parser = argparse.ArgumentParser()
    # Under pytest, default to a single step so the example doubles as a smoke test.
    default_steps = 1 if "PYTEST_VERSION" in os.environ else 5000
    parser.add_argument("-n", "--num_steps", type=int, default=default_steps)
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    args = parser.parse_args()

    gs.init(backend=gs.cpu)

    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(4.0, 2.0, 2.5),
            camera_lookat=(0.0, 0.0, 1.0),
            camera_fov=40,
        ),
        show_viewer=args.vis,
        show_FPS=False,
    )

    # Fetch the demo asset (pinned revision) from the Hugging Face dataset.
    asset_path = snapshot_download(
        repo_type="dataset",
        repo_id="Genesis-Intelligence/assets",
        revision="c50bfe3e354e105b221ef4eb9a79504650709dd2",
        allow_patterns="usd/Refrigerator055/*",
        max_workers=1,
    )

    scene.add_entity(gs.morphs.Plane())
    scene.add_stage(
        morph=gs.morphs.USD(
            file=f"{asset_path}/usd/Refrigerator055/Refrigerator055.usd",
            pos=(0, 0, 0.9),
            euler=(0, 0, 180),
        ),
        # vis_mode="collision",
        # visualize_contact=True,
    )
    scene.build()

    animator = JointAnimator(scene)
    for _ in range(args.num_steps):
        animator.animate(scene)
        scene.step()


if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/usd/import_stage.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:tests/test_usd.py | """
Test USD parsing and comparison with compared scenes.
This module tests that USD files can be parsed correctly and that scenes
loaded from USD files match equivalent scenes loaded from compared files.
"""
import os
import time
import xml.etree.ElementTree as ET
import numpy as np
import pytest
try:
from pxr import Usd
except ImportError as e:
pytest.skip("USD is not supported because 'pxr' module is not available.", allow_module_level=True)
from pxr import Gf, Sdf, UsdGeom, UsdPhysics
from genesis.utils.usd import UsdContext, HAS_OMNIVERSE_KIT_SUPPORT
import genesis as gs
import genesis.utils.geom as gu
from .utils import assert_allclose, get_hf_dataset
from .test_mesh import check_gs_meshes, check_gs_surfaces
# Conversion from .usd to .glb significantly affects precision
USD_COLOR_TOL = 1e-07
USD_NORMALS_TOL = 1e-02
def to_array(s: str) -> np.ndarray:
    """Parse a whitespace-separated string of numbers into a float numpy array."""
    return np.array(list(map(float, s.split())))
def compare_links(compared_links, usd_links, tol):
    """Compare links between two scenes, matching them by name.

    Checks pose, fixedness, geom/joint/vgeom counts, the parent relationship (resolved
    to names, since indices may differ between scenes), and inertial properties. Mass
    and inertia are skipped for fixed links because they are not used in simulation.
    """
    assert len(compared_links) == len(usd_links)

    # Name-keyed lookups, plus index->name maps for resolving parent indices.
    ref_by_name = {link.name: link for link in compared_links}
    usd_by_name = {link.name: link for link in usd_links}
    ref_idx_to_name = {idx: link.name for idx, link in enumerate(compared_links)}
    usd_idx_to_name = {idx: link.name for idx, link in enumerate(usd_links)}

    # Both scenes must contain exactly the same set of link names.
    assert set(ref_by_name.keys()) == set(usd_by_name.keys())

    def resolve_parent(parent_idx, idx_to_name):
        # -1 means "no parent"; unknown indices are surfaced in the mismatch message.
        if parent_idx == -1:
            return None
        return idx_to_name.get(parent_idx, f"<unknown idx {parent_idx}>")

    for link_name in sorted(ref_by_name):
        ref_link = ref_by_name[link_name]
        usd_link = usd_by_name[link_name]
        err_msg = f"Properties mismatched for link {link_name}"

        # Pose and topology.
        assert_allclose(ref_link.pos, usd_link.pos, tol=tol, err_msg=err_msg)
        assert_allclose(ref_link.quat, usd_link.quat, tol=tol, err_msg=err_msg)
        assert ref_link.is_fixed == usd_link.is_fixed, err_msg
        assert len(ref_link.geoms) == len(usd_link.geoms), err_msg
        assert ref_link.n_joints == usd_link.n_joints, err_msg
        assert len(ref_link.vgeoms) == len(usd_link.vgeoms), err_msg

        # Parent must match by name.
        assert resolve_parent(ref_link.parent_idx, ref_idx_to_name) == resolve_parent(
            usd_link.parent_idx, usd_idx_to_name
        ), err_msg

        # Inertial frame.
        assert_allclose(ref_link.inertial_pos, usd_link.inertial_pos, tol=tol, err_msg=err_msg)
        assert_allclose(ref_link.inertial_quat, usd_link.inertial_quat, tol=tol, err_msg=err_msg)

        # Skip mass and inertia checks for fixed links - they're not used in simulation.
        if not ref_link.is_fixed:
            assert_allclose(ref_link.inertial_mass, usd_link.inertial_mass, atol=tol, err_msg=err_msg)
            assert_allclose(ref_link.inertial_i, usd_link.inertial_i, atol=tol, err_msg=err_msg)
def compare_joints(compared_joints, usd_joints, tol):
    """Compare joints between two scenes, matching them by name.

    Checks type, pose, DOF counts, initial qpos, and (for non-fixed joints) DOF limits,
    motion, and control properties within `tol`. Mass/inertia-dependent DOF properties
    are skipped for fixed joints since they are not used in simulation.

    Note: duplicate assertions of `dofs_frictionloss` and `dofs_force_range` present in
    an earlier revision have been removed; each property is now checked exactly once.
    """
    # Check number of joints
    assert len(compared_joints) == len(usd_joints)

    # Create dictionaries keyed by joint name for comparison
    compared_joints_by_name = {joint.name: joint for joint in compared_joints}
    usd_joints_by_name = {joint.name: joint for joint in usd_joints}

    # Check that we have matching joint names
    compared_joint_names = set(compared_joints_by_name.keys())
    usd_joint_names = set(usd_joints_by_name.keys())
    assert compared_joint_names == usd_joint_names

    # Compare all joint properties by name
    for joint_name in sorted(compared_joint_names):
        compared_joint = compared_joints_by_name[joint_name]
        usd_joint = usd_joints_by_name[joint_name]

        # Compare joint properties
        assert compared_joint.type == usd_joint.type
        err_msg = f"Properties mismatched for joint type {compared_joint.type}"
        assert_allclose(compared_joint.pos, usd_joint.pos, tol=tol, err_msg=err_msg)
        assert_allclose(compared_joint.quat, usd_joint.quat, tol=tol, err_msg=err_msg)
        assert compared_joint.n_qs == usd_joint.n_qs, err_msg
        assert compared_joint.n_dofs == usd_joint.n_dofs, err_msg

        # Compare initial qpos
        assert_allclose(compared_joint.init_qpos, usd_joint.init_qpos, tol=tol, err_msg=err_msg)

        # Skip mass/inertia-dependent property checks for fixed joints - they're not used in simulation
        if compared_joint.type != gs.JOINT_TYPE.FIXED:
            # Compare dof limits
            assert_allclose(compared_joint.dofs_limit, usd_joint.dofs_limit, tol=tol, err_msg=err_msg)

            # Compare dof motion properties
            assert_allclose(compared_joint.dofs_motion_ang, usd_joint.dofs_motion_ang, tol=tol, err_msg=err_msg)
            assert_allclose(compared_joint.dofs_motion_vel, usd_joint.dofs_motion_vel, tol=tol, err_msg=err_msg)
            assert_allclose(compared_joint.dofs_frictionloss, usd_joint.dofs_frictionloss, tol=tol, err_msg=err_msg)
            assert_allclose(compared_joint.dofs_stiffness, usd_joint.dofs_stiffness, tol=tol, err_msg=err_msg)
            assert_allclose(compared_joint.dofs_force_range, usd_joint.dofs_force_range, tol=tol, err_msg=err_msg)
            assert_allclose(compared_joint.dofs_damping, usd_joint.dofs_damping, tol=tol, err_msg=err_msg)
            assert_allclose(compared_joint.dofs_armature, usd_joint.dofs_armature, tol=tol, err_msg=err_msg)

            # Compare dof control properties
            assert_allclose(compared_joint.dofs_kp, usd_joint.dofs_kp, tol=tol, err_msg=err_msg)
            assert_allclose(compared_joint.dofs_kv, usd_joint.dofs_kv, tol=tol, err_msg=err_msg)
def compare_geoms(compared_geoms, usd_geoms, tol):
    """Compare collision geoms between two scenes, paired after sorting by (link name, idx)."""
    assert len(compared_geoms) == len(usd_geoms)

    ref_sorted = sorted(compared_geoms, key=lambda g: (g.link.name, g.idx))
    usd_sorted = sorted(usd_geoms, key=lambda g: (g.link.name, g.idx))
    for ref_geom, usd_geom in zip(ref_sorted, usd_sorted):
        assert ref_geom.type == usd_geom.type
        err_msg = f"Properties mismatched for geom type {ref_geom.type}"
        assert_allclose(ref_geom.init_pos, usd_geom.init_pos, tol=tol, err_msg=err_msg)
        assert_allclose(ref_geom.init_quat, usd_geom.init_quat, tol=tol, err_msg=err_msg)
        assert_allclose(ref_geom.get_AABB(), usd_geom.get_AABB(), tol=tol, err_msg=err_msg)
def compare_vgeoms(compared_vgeoms, usd_vgeoms, tol):
    """Compare visual geoms between two scenes by transforming both meshes into the world frame."""
    assert len(compared_vgeoms) == len(usd_vgeoms)

    def world_mesh(vgeom):
        # Compose the vgeom's local pose with its link pose, then bake the resulting
        # transform into a copy of the visual mesh.
        pos, quat = gu.transform_pos_quat_by_trans_quat(
            vgeom.init_pos, vgeom.init_quat, vgeom.link.pos, vgeom.link.quat
        )
        mesh = vgeom.vmesh.copy()
        mesh.apply_transform(gu.trans_quat_to_T(pos, quat))
        return mesh

    # USD mesh names are path-like, so sort those by the last path component.
    ref_sorted = sorted(compared_vgeoms, key=lambda g: g.vmesh.metadata["name"])
    usd_sorted = sorted(usd_vgeoms, key=lambda g: g.vmesh.metadata["name"].split("/")[-1])
    for ref_vgeom, usd_vgeom in zip(ref_sorted, usd_sorted):
        ref_mesh = world_mesh(ref_vgeom)
        usd_mesh = world_mesh(usd_vgeom)
        mesh_name = usd_mesh.metadata["name"]
        check_gs_meshes(ref_mesh, usd_mesh, mesh_name, tol, USD_NORMALS_TOL)
        check_gs_surfaces(ref_mesh.surface, usd_mesh.surface, mesh_name)
def compare_scene(compared_scene: gs.Scene, usd_scene: gs.Scene, tol: float):
    """Compare structure and data (geoms, joints, links) between a reference scene and a USD scene."""

    def gather(scene, attr):
        # Flatten the requested per-entity collection across the whole scene.
        return [item for entity in scene.entities for item in getattr(entity, attr)]

    compare_geoms(gather(compared_scene, "geoms"), gather(usd_scene, "geoms"), tol=tol)
    compare_joints(gather(compared_scene, "joints"), gather(usd_scene, "joints"), tol=tol)
    compare_links(gather(compared_scene, "links"), gather(usd_scene, "links"), tol=tol)
def compare_mesh_scene(compared_scene: gs.Scene, usd_scene: gs.Scene, tol: float):
    """Compare visual mesh data between a mesh scene and a USD scene."""
    ref_vgeoms = [vg for entity in compared_scene.entities for vg in entity.vgeoms]
    usd_vgeoms = [vg for entity in usd_scene.entities for vg in entity.vgeoms]
    compare_vgeoms(ref_vgeoms, usd_vgeoms, tol=tol)
def build_mjcf_scene(xml_path: str, scale: float):
    """Create and build a Genesis scene containing a single MJCF entity."""
    scene = gs.Scene()
    morph = gs.morphs.MJCF(file=xml_path, scale=scale, convexify=False)
    scene.add_entity(morph, material=gs.materials.Rigid(rho=1000.0))
    scene.build()
    return scene
def build_usd_scene(
    usd_file: str,
    scale: float,
    vis_mode: str = "collision",
    is_stage: bool = True,
    fixed: bool = False,
):
    """Create and build a Genesis scene from a USD file, either as a stage or a single entity."""
    scene = gs.Scene()
    morph = gs.morphs.USD(
        usd_ctx=UsdContext(
            usd_file,
            use_bake_cache=False,
        ),
        scale=scale,
        fixed=fixed,
        convexify=False,
    )
    add = scene.add_stage if is_stage else scene.add_entity
    add(morph=morph, material=gs.materials.Rigid(rho=1000.0), vis_mode=vis_mode)
    # Note that it is necessary to build the scene because spatial inertia of some geometries may
    # not be specified. In such a case, it will be estimated from the geometry during build
    # (RigidLink._build to be specific).
    scene.build()
    return scene
def build_mesh_scene(mesh_file: str, scale: float):
    """Create and build a Genesis scene containing a single mesh entity."""
    scene = gs.Scene()
    scene.add_entity(
        gs.morphs.Mesh(
            file=mesh_file,
            scale=scale,
            euler=(-90, 0, 0),
            group_by_material=False,
            convexify=False,
        ),
        material=gs.materials.Rigid(rho=1000.0),
    )
    scene.build()
    return scene
@pytest.fixture
def xml_path(request, tmp_path, model_name):
    """Write the MJCF fixture named by `model_name` to a temporary XML file and return its path."""
    mjcf = request.getfixturevalue(model_name)
    file_path = tmp_path / f"{model_name}.xml"
    ET.ElementTree(mjcf).write(str(file_path), encoding="utf-8", xml_declaration=True)
    return str(file_path)
# ==================== Primitive Tests ====================
@pytest.fixture(scope="session")
def all_primitives_mjcf():
"""Generate an MJCF model with various geometric primitives on a plane."""
mjcf = ET.Element("mujoco", model="primitives")
default = ET.SubElement(mjcf, "default")
ET.SubElement(default, "joint", armature="0.0")
worldbody = ET.SubElement(mjcf, "worldbody")
floor = ET.SubElement(worldbody, "body", name="/worldbody/floor")
ET.SubElement(floor, "geom", type="plane", pos="0. 0. 0.", size="40. 40. 40.")
# Box
box = ET.SubElement(worldbody, "body", name="/worldbody/box", pos="-0.6 0. 0.3")
ET.SubElement(box, "geom", type="box", size="0.2 0.2 0.2", pos="0. 0. 0.")
ET.SubElement(box, "joint", name="/worldbody/box_joint", type="free")
# Cylinder
cylinder = ET.SubElement(worldbody, "body", name="/worldbody/cylinder", pos="-0.2 0. 0.3")
ET.SubElement(cylinder, "geom", type="cylinder", size="0.15 0.2", pos="0. 0. 0.")
ET.SubElement(cylinder, "joint", name="/worldbody/cylinder_joint", type="free")
# Capsule
capsule = ET.SubElement(worldbody, "body", name="/worldbody/capsule", pos="0.2 0. 0.3")
ET.SubElement(capsule, "geom", type="capsule", size="0.15 0.2", pos="0. 0. 0.")
ET.SubElement(capsule, "joint", name="/worldbody/capsule_joint", type="free")
# Sphere
sphere = ET.SubElement(worldbody, "body", name="/worldbody/sphere", pos="0.6 0. 0.3")
ET.SubElement(sphere, "geom", type="sphere", size="0.2", pos="0. 0. 0.")
ET.SubElement(sphere, "joint", name="/worldbody/sphere_joint", type="free")
return mjcf
@pytest.fixture(scope="session")
def all_primitives_usd(asset_tmp_path, all_primitives_mjcf: ET.ElementTree):
    """Generate a USD file equivalent to the MJCF all_primitives_mjcf fixture.

    All geometry parameters are read back out of the MJCF element tree rather
    than duplicated here, so the MJCF and USD fixtures cannot silently drift
    apart. Returns the path (str) of the saved .usda file.
    """
    # Extract data from MJCF XML structure
    worldbody = all_primitives_mjcf.find("worldbody")
    # Floor: body contains a geom with pos and size
    floor_body = worldbody.find("body[@name='/worldbody/floor']")
    floor_geom = floor_body.find("geom[@type='plane']")
    floor_pos_str = floor_geom.get("pos", "0. 0. 0.")
    floor_pos = to_array(floor_pos_str)
    floor_size = to_array(floor_geom.get("size", "40. 40. 40."))
    # Box: body has pos, geom inside has size
    box_body = worldbody.find("body[@name='/worldbody/box']")
    box_pos_str = box_body.get("pos", "0. 0. 0.")
    box_pos = to_array(box_pos_str)
    box_geom = box_body.find("geom[@type='box']")
    box_size_str = box_geom.get("size", "0.2 0.2 0.2")
    box_size = to_array(box_size_str)
    # Cylinder: body has pos, geom has size (radius, half-height)
    cylinder_body = worldbody.find("body[@name='/worldbody/cylinder']")
    cylinder_pos_str = cylinder_body.get("pos", "0. 0. 0.")
    cylinder_pos = to_array(cylinder_pos_str)
    cylinder_geom = cylinder_body.find("geom[@type='cylinder']")
    cylinder_size_str = cylinder_geom.get("size", "0.15 0.2")
    cylinder_size = to_array(cylinder_size_str)
    cylinder_radius = cylinder_size[0]
    cylinder_half_height = cylinder_size[1]
    # Capsule: body has pos, geom has size (radius, half-height)
    capsule_body = worldbody.find("body[@name='/worldbody/capsule']")
    capsule_pos_str = capsule_body.get("pos", "0. 0. 0.")
    capsule_pos = to_array(capsule_pos_str)
    capsule_geom = capsule_body.find("geom[@type='capsule']")
    capsule_size_str = capsule_geom.get("size", "0.15 0.2")
    capsule_size = to_array(capsule_size_str)
    capsule_radius = capsule_size[0]
    capsule_half_height = capsule_size[1]
    # Sphere: body has pos, geom has size (radius)
    sphere_body = worldbody.find("body[@name='/worldbody/sphere']")
    sphere_pos_str = sphere_body.get("pos", "0. 0. 0.")
    sphere_pos = to_array(sphere_pos_str)
    sphere_geom = sphere_body.find("geom[@type='sphere']")
    sphere_size_str = sphere_geom.get("size", "0.2")
    # MJCF sphere size is a single scalar when taken from the attribute default,
    # but may already be an array if preprocessed — handle both.
    sphere_radius = float(sphere_size_str) if isinstance(sphere_size_str, str) else sphere_size_str[0]
    # Create temporary USD file
    usd_file = str(asset_tmp_path / "all_primitives.usda")
    # Create USD stage
    stage = Usd.Stage.CreateNew(usd_file)
    UsdGeom.SetStageUpAxis(stage, "Z")
    UsdGeom.SetStageMetersPerUnit(stage, 1.0)
    # Create root prim
    root_prim = stage.DefinePrim("/worldbody", "Xform")
    stage.SetDefaultPrim(root_prim)
    # Create floor plane (fixed, collision-only)
    # In MJCF: plane at floor_pos with size floor_size
    # In USD: Create a plane geometry with CollisionAPI (fixed rigid body)
    floor = UsdGeom.Plane.Define(stage, "/worldbody/floor")
    floor.GetAxisAttr().Set("Z")
    floor.AddTranslateOp().Set(Gf.Vec3d(floor_pos[0], floor_pos[1], floor_pos[2]))
    # MJCF plane size - the third value is typically ignored for plane
    # For USD Plane, we use width and length
    floor.GetWidthAttr().Set(floor_size[0] * 2)  # size[0] * 2
    floor.GetLengthAttr().Set(floor_size[1] * 2)  # size[1] * 2
    # Make it a fixed collision-only rigid body
    UsdPhysics.CollisionAPI.Apply(floor.GetPrim())
    # No RigidBodyAPI means it's kinematic/fixed
    # Create box (free rigid body)
    # In MJCF: box at box_pos with size box_size (half-extent), free joint
    box = UsdGeom.Cube.Define(stage, "/worldbody/box")
    box.AddTranslateOp().Set(Gf.Vec3d(box_pos[0], box_pos[1], box_pos[2]))
    # MJCF size is half-extent, USD size is full edge length
    # So we need to multiply by 2
    box.GetSizeAttr().Set(box_size[0] * 2.0)
    box_rigid = UsdPhysics.RigidBodyAPI.Apply(box.GetPrim())
    box_rigid.GetKinematicEnabledAttr().Set(False)
    # Create free joint for box (plain UsdPhysics.Joint, i.e. unconstrained)
    free_joint_prim = UsdPhysics.Joint.Define(stage, "/worldbody/box_joint")
    free_joint_prim.CreateBody0Rel().SetTargets([root_prim.GetPath()])
    free_joint_prim.CreateBody1Rel().SetTargets([box.GetPrim().GetPath()])
    free_joint_prim.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    free_joint_prim.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    # Create cylinder (free rigid body)
    # In MJCF: cylinder size is (radius, half-height)
    # In USD: cylinder has radius and height (full height)
    cylinder = UsdGeom.Cylinder.Define(stage, "/worldbody/cylinder")
    cylinder.AddTranslateOp().Set(Gf.Vec3d(cylinder_pos[0], cylinder_pos[1], cylinder_pos[2]))
    cylinder.GetRadiusAttr().Set(cylinder_radius)
    cylinder.GetHeightAttr().Set(cylinder_half_height * 2.0)  # Convert half-height to full height
    cylinder.GetAxisAttr().Set("Z")
    cylinder_rigid = UsdPhysics.RigidBodyAPI.Apply(cylinder.GetPrim())
    cylinder_rigid.GetKinematicEnabledAttr().Set(False)
    # Create free joint for cylinder
    cylinder_joint_prim = UsdPhysics.Joint.Define(stage, "/worldbody/cylinder_joint")
    cylinder_joint_prim.CreateBody0Rel().SetTargets([root_prim.GetPath()])
    cylinder_joint_prim.CreateBody1Rel().SetTargets([cylinder.GetPrim().GetPath()])
    cylinder_joint_prim.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    cylinder_joint_prim.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    # Create capsule (free rigid body)
    # In MJCF: capsule size is (radius, half-height)
    # In USD: capsule has radius and height (full height)
    capsule = UsdGeom.Capsule.Define(stage, "/worldbody/capsule")
    capsule.AddTranslateOp().Set(Gf.Vec3d(capsule_pos[0], capsule_pos[1], capsule_pos[2]))
    capsule.GetRadiusAttr().Set(capsule_radius)
    capsule.GetHeightAttr().Set(capsule_half_height * 2.0)  # Convert half-height to full height
    capsule.GetAxisAttr().Set("Z")
    capsule_rigid = UsdPhysics.RigidBodyAPI.Apply(capsule.GetPrim())
    capsule_rigid.GetKinematicEnabledAttr().Set(False)
    # Create free joint for capsule
    capsule_joint_prim = UsdPhysics.Joint.Define(stage, "/worldbody/capsule_joint")
    capsule_joint_prim.CreateBody0Rel().SetTargets([root_prim.GetPath()])
    capsule_joint_prim.CreateBody1Rel().SetTargets([capsule.GetPrim().GetPath()])
    capsule_joint_prim.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    capsule_joint_prim.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    # Create sphere (free rigid body)
    # In MJCF: sphere size is radius
    # In USD: sphere has radius
    sphere = UsdGeom.Sphere.Define(stage, "/worldbody/sphere")
    sphere.AddTranslateOp().Set(Gf.Vec3d(sphere_pos[0], sphere_pos[1], sphere_pos[2]))
    sphere.GetRadiusAttr().Set(sphere_radius)
    sphere_rigid = UsdPhysics.RigidBodyAPI.Apply(sphere.GetPrim())
    sphere_rigid.GetKinematicEnabledAttr().Set(False)
    # Create free joint for sphere
    sphere_joint_prim = UsdPhysics.Joint.Define(stage, "/worldbody/sphere_joint")
    sphere_joint_prim.CreateBody0Rel().SetTargets([root_prim.GetPath()])
    sphere_joint_prim.CreateBody1Rel().SetTargets([sphere.GetPrim().GetPath()])
    sphere_joint_prim.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    sphere_joint_prim.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    stage.Save()
    return usd_file
@pytest.mark.required
@pytest.mark.parametrize("model_name", ["all_primitives_mjcf"])
@pytest.mark.parametrize("scale", [1.0, 2.0])
def test_primitives_mjcf_vs_usd(xml_path, all_primitives_usd, scale, tol):
    """Test that MJCF and USD scenes produce equivalent Genesis entities."""
    scene_from_mjcf = build_mjcf_scene(xml_path, scale=scale)
    scene_from_usd = build_usd_scene(all_primitives_usd, scale=scale)
    compare_scene(scene_from_mjcf, scene_from_usd, tol=tol)
# ==================== Joint Tests ====================
@pytest.fixture(scope="session")
def all_joints_mjcf():
    """Generate an MJCF model with all joint types: prismatic, revolute, spherical, fixed, and free.

    The prismatic and revolute joints additionally get `general` actuators so
    that the parser produces non-trivial PD gains (dofs_kp / dofs_kv).
    """
    mjcf = ET.Element("mujoco", model="all_joints")
    default = ET.SubElement(mjcf, "default")
    ET.SubElement(default, "joint", armature="0.0")
    worldbody = ET.SubElement(mjcf, "worldbody")
    floor = ET.SubElement(worldbody, "body", name="/worldbody/floor")
    ET.SubElement(floor, "geom", type="plane", pos="0. 0. 0.", size="40. 40. 40.")
    base = ET.SubElement(worldbody, "body", name="/worldbody/base", pos="0. 0. 0.1")
    ET.SubElement(base, "geom", type="box", size="0.1 0.1 0.1", pos="0. 0. 0.")
    # Prismatic joint branch
    prismatic_box = ET.SubElement(base, "body", name="/worldbody/base/prismatic_box", pos="-0.5 0. 0.2")
    ET.SubElement(prismatic_box, "geom", type="box", size="0.2 0.2 0.2", pos="0. 0. 0.")
    ET.SubElement(
        prismatic_box,
        "joint",
        name="/worldbody/base/prismatic_box_joint",
        type="slide",
        axis="0. 0. 1.",
        range="-0.1 0.4",
        stiffness="50.0",
        damping="5.0",
    )
    # Revolute joint branch
    # FIX: a stray `ET.SubElement(mjcf, "actuator")` used to be created here but
    # was never populated (the real actuator section is created at the bottom),
    # so it only emitted a spurious empty <actuator/> element in the XML.
    revolute_box = ET.SubElement(base, "body", name="/worldbody/base/revolute_box", pos="0. 0. 0.2")
    ET.SubElement(revolute_box, "geom", type="box", size="0.2 0.2 0.2", pos="0. 0. 0.")
    ET.SubElement(
        revolute_box,
        "joint",
        name="/worldbody/base/revolute_box_joint",
        type="hinge",
        axis="0. 0. 1.",
        range="-45 45",
        stiffness="50.0",
        damping="5.0",
    )
    # Spherical joint branch
    spherical_box = ET.SubElement(base, "body", name="/worldbody/base/spherical_box", pos="0.5 0. 0.2")
    ET.SubElement(spherical_box, "geom", type="box", size="0.2 0.2 0.2", pos="0. 0. 0.")
    ET.SubElement(spherical_box, "joint", name="/worldbody/base/spherical_box_joint", type="ball")
    # Fixed joint branch (no joint element means fixed in MJCF)
    fixed_box = ET.SubElement(base, "body", name="/worldbody/base/fixed_box", pos="-0.5 0.5 0.2")
    ET.SubElement(fixed_box, "geom", type="box", size="0.2 0.2 0.2", pos="0. 0. 0.")
    # No joint element = fixed joint
    # Free joint branch (must be at top level in MJCF - directly under worldbody)
    free_box = ET.SubElement(worldbody, "body", name="/worldbody/free_box", pos="0.5 0.5 0.3")
    ET.SubElement(free_box, "geom", type="box", size="0.2 0.2 0.2", pos="0. 0. 0.")
    ET.SubElement(free_box, "joint", name="/worldbody/free_box_joint", type="free")
    # Add actuators for PD controllers (prismatic and revolute only).
    # The parser uses: dofs_kp = -gear * biasprm[1] * scale^3, so to get
    # dofs_kp=120.0 we need biasprm[1] = -120.0 (with gear=1, scale=1).
    actuator = ET.SubElement(mjcf, "actuator")
    ET.SubElement(
        actuator,
        "general",
        name="/worldbody/base/prismatic_box_joint_actuator",
        joint="/worldbody/base/prismatic_box_joint",
        biastype="affine",
        gainprm="120.0 0 0",  # gainprm[0] must equal -biasprm[1] to avoid warning
        biasprm="0 -120.0 -12.0",  # biasprm format: [b0, b1, b2] where b1=kp, b2=kv (negated)
    )
    ET.SubElement(
        actuator,
        "general",
        name="/worldbody/base/revolute_box_joint_actuator",
        joint="/worldbody/base/revolute_box_joint",
        biastype="affine",
        gainprm="120.0 0 0",
        biasprm="0 -120.0 -12.0",
    )
    return mjcf
@pytest.fixture(scope="session")
def all_joints_usd(asset_tmp_path, all_joints_mjcf: ET.ElementTree, request):
    """Generate a USD file equivalent to the all joints MJCF fixture.

    Supports both with and without ArticulationRootAPI based on request.param.
    Geometry/joint parameters are read back out of the MJCF element tree so the
    two fixtures stay in sync. Returns the path (str) of the saved .usda file.
    """
    # Get the use_articulation_root parameter from request.param if available
    use_articulation_root = getattr(request, "param", True)
    worldbody = all_joints_mjcf.find("worldbody")
    # Floor
    floor_body = worldbody.find("body[@name='/worldbody/floor']")
    floor_geom = floor_body.find("geom[@type='plane']")
    floor_pos_str = floor_geom.get("pos")
    floor_pos = to_array(floor_pos_str)
    floor_size = to_array(floor_geom.get("size", "40. 40. 40."))
    # Base
    base_body = worldbody.find("body[@name='/worldbody/base']")
    base_pos_str = base_body.get("pos")
    base_pos = to_array(base_pos_str)
    base_geom = base_body.find("geom[@type='box']")
    base_size_str = base_geom.get("size")
    base_size = to_array(base_size_str)
    # Prismatic box
    prismatic_box_body = base_body.find("body[@name='/worldbody/base/prismatic_box']")
    prismatic_box_pos_str = prismatic_box_body.get("pos")
    prismatic_box_pos = to_array(prismatic_box_pos_str)
    prismatic_box_geom = prismatic_box_body.find("geom[@type='box']")
    prismatic_box_size = to_array(prismatic_box_geom.get("size"))
    prismatic_joint = prismatic_box_body.find("joint[@name='/worldbody/base/prismatic_box_joint']")
    prismatic_range = to_array(prismatic_joint.get("range"))
    # Revolute box
    revolute_box_body = base_body.find("body[@name='/worldbody/base/revolute_box']")
    revolute_box_pos_str = revolute_box_body.get("pos")
    revolute_box_pos = to_array(revolute_box_pos_str)
    revolute_box_geom = revolute_box_body.find("geom[@type='box']")
    revolute_box_size = to_array(revolute_box_geom.get("size"))
    revolute_joint = revolute_box_body.find("joint[@name='/worldbody/base/revolute_box_joint']")
    revolute_range = to_array(revolute_joint.get("range"))
    # Spherical box
    spherical_box_body = base_body.find("body[@name='/worldbody/base/spherical_box']")
    spherical_box_pos_str = spherical_box_body.get("pos")
    spherical_box_pos = to_array(spherical_box_pos_str)
    spherical_box_geom = spherical_box_body.find("geom[@type='box']")
    spherical_box_size = to_array(spherical_box_geom.get("size"))
    # Fixed box (no joint in MJCF means fixed)
    fixed_box_body = base_body.find("body[@name='/worldbody/base/fixed_box']")
    fixed_box_pos_str = fixed_box_body.get("pos")
    fixed_box_pos = to_array(fixed_box_pos_str)
    fixed_box_geom = fixed_box_body.find("geom[@type='box']")
    fixed_box_size = to_array(fixed_box_geom.get("size"))
    # Free box (at top level in MJCF)
    free_box_body = worldbody.find("body[@name='/worldbody/free_box']")
    free_box_pos_str = free_box_body.get("pos")
    free_box_pos = to_array(free_box_pos_str)
    free_box_geom = free_box_body.find("geom[@type='box']")
    free_box_size = to_array(free_box_geom.get("size"))
    # Create temporary USD file with suffix based on ArticulationRootAPI usage
    suffix = "with_articulation_root" if use_articulation_root else "without_articulation_root"
    usd_file = str(asset_tmp_path / f"all_joints_{suffix}.usda")
    # Create USD stage
    stage = Usd.Stage.CreateNew(usd_file)
    UsdGeom.SetStageUpAxis(stage, "Z")
    UsdGeom.SetStageMetersPerUnit(stage, 1.0)
    # Create root prim
    root_prim = stage.DefinePrim("/worldbody", "Xform")
    stage.SetDefaultPrim(root_prim)
    # Create floor plane (fixed, collision-only)
    floor = UsdGeom.Plane.Define(stage, "/worldbody/floor")
    floor.GetAxisAttr().Set("Z")
    floor.AddTranslateOp().Set(Gf.Vec3d(floor_pos[0], floor_pos[1], floor_pos[2]))
    floor.GetWidthAttr().Set(floor_size[0] * 2)
    floor.GetLengthAttr().Set(floor_size[1] * 2)
    UsdPhysics.CollisionAPI.Apply(floor.GetPrim())
    # Create base (fixed, collision-only)
    base = UsdGeom.Cube.Define(stage, "/worldbody/base")
    if use_articulation_root:
        UsdPhysics.ArticulationRootAPI.Apply(base.GetPrim())
    base.AddTranslateOp().Set(Gf.Vec3d(base_pos[0], base_pos[1], base_pos[2]))
    base.GetSizeAttr().Set(base_size[0] * 2.0)  # MJCF half-extent -> USD full edge length
    UsdPhysics.CollisionAPI.Apply(base.GetPrim())
    # Create prismatic box
    prismatic_box = UsdGeom.Cube.Define(stage, "/worldbody/base/prismatic_box")
    prismatic_box.AddTranslateOp().Set(Gf.Vec3d(prismatic_box_pos[0], prismatic_box_pos[1], prismatic_box_pos[2]))
    prismatic_box.GetSizeAttr().Set(prismatic_box_size[0] * 2.0)
    prismatic_box_rigid = UsdPhysics.RigidBodyAPI.Apply(prismatic_box.GetPrim())
    prismatic_box_rigid.GetKinematicEnabledAttr().Set(False)
    # Create prismatic joint
    prismatic_joint_prim = UsdPhysics.PrismaticJoint.Define(stage, "/worldbody/base/prismatic_box_joint")
    prismatic_joint_prim.CreateBody0Rel().SetTargets([base.GetPrim().GetPath()])
    prismatic_joint_prim.CreateBody1Rel().SetTargets([prismatic_box.GetPrim().GetPath()])
    prismatic_joint_prim.CreateAxisAttr().Set("Z")
    prismatic_joint_prim.CreateLowerLimitAttr().Set(prismatic_range[0])
    prismatic_joint_prim.CreateUpperLimitAttr().Set(prismatic_range[1])
    prismatic_joint_prim.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    prismatic_joint_prim.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    # Custom attributes mirroring the MJCF joint stiffness/damping.
    prismatic_joint_prim.GetPrim().CreateAttribute("linear:stiffness", Sdf.ValueTypeNames.Float).Set(50.0)
    prismatic_joint_prim.GetPrim().CreateAttribute("linear:damping", Sdf.ValueTypeNames.Float).Set(5.0)
    # DriveAPI gains mirror the MJCF actuator's kp/kv (120/12).
    prismatic_drive_api = UsdPhysics.DriveAPI.Apply(prismatic_joint_prim.GetPrim(), "linear")
    prismatic_drive_api.CreateStiffnessAttr().Set(120.0)
    prismatic_drive_api.CreateDampingAttr().Set(12.0)
    # Create revolute box
    revolute_box = UsdGeom.Cube.Define(stage, "/worldbody/base/revolute_box")
    revolute_box.AddTranslateOp().Set(Gf.Vec3d(revolute_box_pos[0], revolute_box_pos[1], revolute_box_pos[2]))
    revolute_box.GetSizeAttr().Set(revolute_box_size[0] * 2.0)
    revolute_box_rigid = UsdPhysics.RigidBodyAPI.Apply(revolute_box.GetPrim())
    revolute_box_rigid.GetKinematicEnabledAttr().Set(False)
    # Create revolute joint
    revolute_joint_prim = UsdPhysics.RevoluteJoint.Define(stage, "/worldbody/base/revolute_box_joint")
    revolute_joint_prim.CreateBody0Rel().SetTargets([base.GetPrim().GetPath()])
    revolute_joint_prim.CreateBody1Rel().SetTargets([revolute_box.GetPrim().GetPath()])
    revolute_joint_prim.CreateAxisAttr().Set("Z")
    revolute_joint_prim.CreateLowerLimitAttr().Set(revolute_range[0])
    revolute_joint_prim.CreateUpperLimitAttr().Set(revolute_range[1])
    revolute_joint_prim.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    revolute_joint_prim.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    # NOTE(review): the prismatic joint uses "linear:stiffness" while here the
    # attribute is plain "stiffness" (damping stays namespaced). This looks like
    # it may be deliberate to exercise both spellings in the parser — confirm.
    revolute_joint_prim.GetPrim().CreateAttribute("stiffness", Sdf.ValueTypeNames.Float).Set(50.0)
    revolute_joint_prim.GetPrim().CreateAttribute("angular:damping", Sdf.ValueTypeNames.Float).Set(5.0)
    revolute_drive_api = UsdPhysics.DriveAPI.Apply(revolute_joint_prim.GetPrim(), "angular")
    revolute_drive_api.CreateStiffnessAttr().Set(120.0)
    revolute_drive_api.CreateDampingAttr().Set(12.0)
    # Create spherical box
    spherical_box = UsdGeom.Cube.Define(stage, "/worldbody/base/spherical_box")
    spherical_box.AddTranslateOp().Set(Gf.Vec3d(spherical_box_pos[0], spherical_box_pos[1], spherical_box_pos[2]))
    spherical_box.GetSizeAttr().Set(spherical_box_size[0] * 2.0)
    spherical_box_rigid = UsdPhysics.RigidBodyAPI.Apply(spherical_box.GetPrim())
    spherical_box_rigid.GetKinematicEnabledAttr().Set(False)
    # Create spherical joint
    spherical_joint_prim = UsdPhysics.SphericalJoint.Define(stage, "/worldbody/base/spherical_box_joint")
    spherical_joint_prim.CreateBody0Rel().SetTargets([base.GetPrim().GetPath()])
    spherical_joint_prim.CreateBody1Rel().SetTargets([spherical_box.GetPrim().GetPath()])
    spherical_joint_prim.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    spherical_joint_prim.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    # Create fixed box
    fixed_box = UsdGeom.Cube.Define(stage, "/worldbody/base/fixed_box")
    fixed_box.AddTranslateOp().Set(Gf.Vec3d(fixed_box_pos[0], fixed_box_pos[1], fixed_box_pos[2]))
    fixed_box.GetSizeAttr().Set(fixed_box_size[0] * 2.0)
    fixed_box_rigid = UsdPhysics.RigidBodyAPI.Apply(fixed_box.GetPrim())
    fixed_box_rigid.GetKinematicEnabledAttr().Set(False)
    # Create fixed joint
    fixed_joint_prim = UsdPhysics.FixedJoint.Define(stage, "/worldbody/base/fixed_box_joint")
    fixed_joint_prim.CreateBody0Rel().SetTargets([base.GetPrim().GetPath()])
    fixed_joint_prim.CreateBody1Rel().SetTargets([fixed_box.GetPrim().GetPath()])
    fixed_joint_prim.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    fixed_joint_prim.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    # Create free box (at top level, not under base)
    free_box = UsdGeom.Cube.Define(stage, "/worldbody/free_box")
    free_box.AddTranslateOp().Set(Gf.Vec3d(free_box_pos[0], free_box_pos[1], free_box_pos[2]))
    free_box.GetSizeAttr().Set(free_box_size[0] * 2.0)
    free_box_rigid = UsdPhysics.RigidBodyAPI.Apply(free_box.GetPrim())
    free_box_rigid.GetKinematicEnabledAttr().Set(False)
    # A plain UsdPhysics.Joint (no constrained DOFs) models the MJCF free joint.
    free_joint_prim = UsdPhysics.Joint.Define(stage, "/worldbody/free_box_joint")
    free_joint_prim.CreateBody0Rel().SetTargets([root_prim.GetPath()])
    free_joint_prim.CreateBody1Rel().SetTargets([free_box.GetPrim().GetPath()])
    free_joint_prim.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    free_joint_prim.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    stage.Save()
    return usd_file
@pytest.mark.required
@pytest.mark.parametrize("model_name", ["all_joints_mjcf"])
@pytest.mark.parametrize("scale", [1.0, 2.0])
@pytest.mark.parametrize(
    "all_joints_usd", [True, False], indirect=True, ids=["with_articulation_root", "without_articulation_root"]
)
def test_joints_mjcf_vs_usd(xml_path, all_joints_usd, scale, tol):
    """
    Test that MJCF and USD scenes with all joint types (prismatic, revolute, spherical, fixed, free)
    produce equivalent Genesis entities.

    All five joint types must parse identically from both formats, with and
    without ArticulationRootAPI applied on the base prim.
    """
    scene_from_mjcf = build_mjcf_scene(xml_path, scale=scale)
    scene_from_usd = build_usd_scene(all_joints_usd, scale=scale)
    # compare_scene checks all joints internally via compare_joints.
    compare_scene(scene_from_mjcf, scene_from_usd, tol=tol)
@pytest.mark.required
@pytest.mark.parametrize("model_name", ["usd/sneaker_airforce", "usd/RoughnessTest"])
def test_usd_visual_parse(model_name, tol):
    """Parse the same asset from GLB and USDZ and compare the visual meshes."""
    glb_dir = get_hf_dataset(pattern=f"{model_name}.glb")
    glb_path = os.path.join(glb_dir, f"{model_name}.glb")
    usd_dir = get_hf_dataset(pattern=f"{model_name}.usdz")
    usd_path = os.path.join(usd_dir, f"{model_name}.usdz")
    scene_from_glb = build_mesh_scene(glb_path, scale=1.0)
    scene_from_usd = build_usd_scene(usd_path, scale=1.0, vis_mode="visual", is_stage=False)
    compare_mesh_scene(scene_from_glb, scene_from_usd, tol=tol)
@pytest.mark.required
@pytest.mark.parametrize("usd_file", ["usd/nodegraph.usda"])
def test_usd_parse_nodegraph(usd_file):
    """Check that NodeGraph-driven materials resolve to plain color textures."""
    dataset_dir = get_hf_dataset(pattern=usd_file)
    usd_path = os.path.join(dataset_dir, usd_file)
    scene = build_usd_scene(usd_path, scale=1.0, vis_mode="visual", is_stage=False)
    diffuse0 = scene.entities[0].vgeoms[0].vmesh.surface.diffuse_texture
    diffuse1 = scene.entities[0].vgeoms[1].vmesh.surface.diffuse_texture
    assert isinstance(diffuse0, gs.textures.ColorTexture)
    assert isinstance(diffuse1, gs.textures.ColorTexture)
    assert_allclose(diffuse0.color, (0.8, 0.2, 0.2), rtol=USD_COLOR_TOL)
    assert_allclose(diffuse1.color, (0.2, 0.6, 0.9), rtol=USD_COLOR_TOL)
@pytest.mark.required
@pytest.mark.parametrize(
    "usd_file", ["usd/WoodenCrate/WoodenCrate_D1_1002.usda", "usd/franka_mocap_teleop/table_scene.usd"]
)
@pytest.mark.parametrize("backend", [gs.cuda])
@pytest.mark.skipif(not HAS_OMNIVERSE_KIT_SUPPORT, reason="omniverse-kit support not available")
def test_usd_bake(usd_file, tmp_path):
    """Check that baking USD materials succeeds for every visual geometry.

    Under pytest-xdist, bootstrapping omniverse-kit from several workers at
    once is flaky, so the bake is retried up to RETRY_NUM times with a delay.
    """
    RETRY_NUM = 3 if "PYTEST_XDIST_WORKER" in os.environ else 0
    RETRY_DELAY = 30.0
    asset_path = get_hf_dataset(pattern=os.path.join(os.path.dirname(usd_file), "*"), local_dir=tmp_path)
    usd_fullpath = os.path.join(asset_path, usd_file)
    # Note that bootstrapping omni-kit by multiple workers concurrently is causing failure.
    # There is no easy way to get around this limitation except retrying after some delay...
    retry_idx = 0
    while True:
        is_stage = usd_file == "usd/franka_mocap_teleop/table_scene.usd"
        usd_scene = build_usd_scene(
            usd_fullpath,
            scale=1.0,
            vis_mode="visual",
            is_stage=is_stage,
            fixed=True,
        )
        is_any_baked = False
        for vgeom in usd_scene.entities[0].vgeoms:
            bake_success = vgeom.vmesh.metadata["bake_success"]
            try:
                assert bake_success
            except AssertionError:
                if retry_idx < RETRY_NUM:
                    # FIX: retry_idx was never incremented, so a persistent bake
                    # failure under xdist would retry forever instead of giving
                    # up (and raising) after RETRY_NUM attempts.
                    retry_idx += 1
                    usd_scene.destroy()
                    print(f"Failed to bake usd. Trying again in {RETRY_DELAY}s...")
                    time.sleep(RETRY_DELAY)
                    break  # breaks the for-loop; the while-loop then retries
                raise
            is_any_baked |= bake_success
        else:
            # for/else: only reached when no geometry failed to bake.
            assert is_any_baked
            break
@pytest.mark.required
@pytest.mark.parametrize("scale", [1.0, 2.0])
def test_massapi_invalid_defaults_mjcf_vs_usd(asset_tmp_path, scale, tol):
    """
    Test that USD MassAPI with invalid default values produces equivalent results to MJCF.

    USD Physics MassAPI defines some attributes with invalid default values:
    - centerOfMass: default (-inf, -inf, -inf) - invalid, should be recomputed
    - principalAxes: default (0, 0, 0, 0) - invalid quaternion, should be recomputed
    - diagonalInertia: default (0, 0, 0) - valid but means ignored, should be recomputed
    - mass: default 0 - valid but means ignored, should be recomputed

    Builds equivalent MJCF and USD scenes where mass properties must be computed
    from geometry (MJCF without an inertial element, USD with a bare MassAPI),
    and checks both parsers agree.
    """
    # --- MJCF side: a free box on a plane, no explicit inertial element ---
    model = ET.Element("mujoco", model="massapi_test")
    defaults = ET.SubElement(model, "default")
    ET.SubElement(defaults, "joint", armature="0.0")
    world = ET.SubElement(model, "worldbody")
    ground = ET.SubElement(world, "body", name="/worldbody/floor")
    ET.SubElement(ground, "geom", type="plane", pos="0. 0. 0.", size="40. 40. 40.")
    crate = ET.SubElement(world, "body", name="/worldbody/test_box", pos="0. 0. 0.3")
    ET.SubElement(crate, "geom", type="box", size="0.2 0.2 0.2", pos="0. 0. 0.")
    ET.SubElement(crate, "joint", name="/worldbody/test_box_joint", type="free")
    xml_file = str(asset_tmp_path / "massapi_test.xml")
    ET.ElementTree(model).write(xml_file, encoding="utf-8", xml_declaration=True)
    # --- USD side: same layout, with MassAPI applied but left at defaults ---
    usd_file = str(asset_tmp_path / "massapi_test.usda")
    stage = Usd.Stage.CreateNew(usd_file)
    UsdGeom.SetStageUpAxis(stage, "Z")
    UsdGeom.SetStageMetersPerUnit(stage, 1.0)
    root_prim = stage.DefinePrim("/worldbody", "Xform")
    stage.SetDefaultPrim(root_prim)
    ground_plane = UsdGeom.Plane.Define(stage, "/worldbody/floor")
    ground_plane.GetAxisAttr().Set("Z")
    ground_plane.AddTranslateOp().Set(Gf.Vec3d(0.0, 0.0, 0.0))
    ground_plane.GetWidthAttr().Set(80.0)
    ground_plane.GetLengthAttr().Set(80.0)
    UsdPhysics.CollisionAPI.Apply(ground_plane.GetPrim())
    cube = UsdGeom.Cube.Define(stage, "/worldbody/test_box")
    cube.AddTranslateOp().Set(Gf.Vec3d(0.0, 0.0, 0.3))
    cube.GetSizeAttr().Set(0.4)  # 0.2 half-extent * 2
    cube_joint = UsdPhysics.Joint.Define(stage, "/worldbody/test_box_joint")
    cube_joint.CreateBody0Rel().SetTargets([root_prim.GetPath()])
    cube_joint.CreateBody1Rel().SetTargets([cube.GetPrim().GetPath()])
    cube_joint.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    cube_joint.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
    cube_rigid = UsdPhysics.RigidBodyAPI.Apply(cube.GetPrim())
    cube_rigid.GetKinematicEnabledAttr().Set(False)
    # Apply MassAPI without authoring any values: the parser must treat the
    # schema's invalid defaults as "recompute from geometry".
    UsdPhysics.MassAPI.Apply(cube.GetPrim())
    stage.Save()
    mjcf_scene = build_mjcf_scene(xml_file, scale=scale)
    usd_scene = build_usd_scene(usd_file, scale=scale)
    compare_scene(mjcf_scene, usd_scene, tol=tol)
@pytest.mark.required
def test_uv_size_mismatch_no_crash(asset_tmp_path):
    """
    Test that a USD mesh with mismatched UV size does not crash the parser for consistency with Nvidia omniverse.
    """
    usd_file = str(asset_tmp_path / "uv_mismatch.usda")
    stage = Usd.Stage.CreateNew(usd_file)
    UsdGeom.SetStageUpAxis(stage, "Z")
    UsdGeom.SetStageMetersPerUnit(stage, 1.0)
    root_prim = stage.DefinePrim("/root", "Xform")
    stage.SetDefaultPrim(root_prim)
    # Two-triangle quad whose UV primvar is deliberately the wrong length.
    quad = UsdGeom.Mesh.Define(stage, "/root/mesh")
    quad.GetPointsAttr().Set([Gf.Vec3f(0, 0, 0), Gf.Vec3f(1, 0, 0), Gf.Vec3f(0, 1, 0), Gf.Vec3f(1, 1, 0)])
    quad.GetFaceVertexIndicesAttr().Set([0, 1, 2, 1, 3, 2])
    quad.GetFaceVertexCountsAttr().Set([3, 3])
    # 5 UVs for 4 vertices / 6 face-vertex indices — intentionally inconsistent.
    primvars = UsdGeom.PrimvarsAPI(quad.GetPrim())
    st = primvars.CreatePrimvar("st", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.vertex)
    st.Set([Gf.Vec2f(0, 0), Gf.Vec2f(1, 0), Gf.Vec2f(0, 1), Gf.Vec2f(1, 1), Gf.Vec2f(0.5, 0.5)])
    UsdPhysics.RigidBodyAPI.Apply(quad.GetPrim())
    UsdPhysics.CollisionAPI.Apply(quad.GetPrim())
    stage.Save()
    # Parsing must warn and discard the UVs rather than raising.
    scene = build_usd_scene(usd_file, scale=1.0, vis_mode="collision")
    assert len(scene.entities) > 0
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_usd.py",
"license": "Apache License 2.0",
"lines": 795,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:examples/coupling/rigid_mpm_attachment.py | """
MPM to Rigid Link Attachment
Demonstrates attaching MPM particles to rigid links using soft constraints.
"""
import argparse
import os
import torch
import genesis as gs
def main():
    """Pin the top layer of an MPM cube to a rigid box and bob the box up and down."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    parser.add_argument("-c", "--cpu", action="store_true", default=False)
    cli = parser.parse_args()

    gs.init(backend=gs.cpu if cli.cpu else gs.gpu)

    scene = gs.Scene(
        sim_options=gs.options.SimOptions(dt=2e-3, substeps=20),
        mpm_options=gs.options.MPMOptions(
            lower_bound=(-1.0, -1.0, 0.0),
            upper_bound=(1.0, 1.0, 1.5),
            grid_density=64,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(1.5, 0.0, 0.8),
            camera_lookat=(0.0, 0.0, 0.4),
        ),
        show_viewer=cli.vis,
    )

    scene.add_entity(gs.morphs.Plane())
    anchor = scene.add_entity(
        gs.morphs.Box(pos=(0.0, 0.0, 0.55), size=(0.12, 0.12, 0.05), fixed=False),
    )
    soft_cube = scene.add_entity(
        material=gs.materials.MPM.Elastic(E=5e4, nu=0.3, rho=1000),
        morph=gs.morphs.Box(pos=(0.0, 0.0, 0.35), size=(0.15, 0.15, 0.15)),
    )
    scene.build()

    # Soft-constrain the particles in the top slab of the MPM cube to the rigid box.
    top_mask = soft_cube.get_particles_in_bbox((-0.08, -0.08, 0.41), (0.08, 0.08, 0.44))
    soft_cube.set_particle_constraints(top_mask, anchor.links[0].idx, stiffness=1e5)

    # Run a single step under pytest, the full animation otherwise.
    total_steps = 500 if "PYTEST_VERSION" not in os.environ else 1
    rest_height = 0.55
    for step in range(total_steps):
        # Triangle wave in [0, 0.15] with a 200-step period.
        lift = 0.15 * (1 - abs((step % 200) - 100) / 100.0)
        pose = torch.tensor([0.0, 0.0, rest_height + lift, 1.0, 0.0, 0.0, 0.0], device=gs.device)
        anchor.set_qpos(pose)
        scene.step()
if __name__ == "__main__":
main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/coupling/rigid_mpm_attachment.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:tests/monitor_test_mem.py | from collections import defaultdict
import subprocess
import time
import os
import argparse
import psutil
import re
CHECK_INTERVAL = 2.0
def grep(contents: list[str], target):
    """Return the lines of *contents* that contain *target* as a substring."""
    matched = []
    for line in contents:
        if target in line:
            matched.append(line)
    return matched
def parse_test_name(test_name: str) -> dict[str, str]:
    """
    Parse the bracketed parameter list of a speed-test id.

    Expected format: test_speed[env-constraint_solver-gjk_collision-batch_size-backend]
    with an optional trailing dtype field:
    test_speed[env-constraint_solver-gjk_collision-batch_size-backend-dtype]
    Example: test_speed[franka-None-True-30000-cuda]

    Returns:
        dict: Parsed parameters, with "None"/missing values dropped.
              Empty dict if the name does not match the expected format.
    """
    match = re.search(r"\[(.*?)\]", test_name)
    if not match:
        return {}
    parts = match.group(1).split("-")
    if len(parts) < 5:
        return {}
    params = {
        "env": parts[0],
        "constraint_solver": parts[1],
        "gjk_collision": parts[2],
        "batch_size": parts[3],
        "backend": parts[4],
        # FIX: dtype is optional — the documented 5-field format used to raise
        # IndexError here because parts[5] was read unconditionally.
        "dtype": parts[5] if len(parts) > 5 else None,
    }
    # Remove "None" values (and the absent dtype) for consistency
    return {k: v for k, v in params.items() if v != "None" and v is not None}
def get_cuda_usage() -> dict[int, int]:
    """Parse `nvidia-smi` text output into a {pid: memory_MiB} mapping.

    The processes table is the second `|====` -delimited section of the
    output; rows after the first `+----` separator of that section are
    ignored, as is the "No running processes" placeholder line.
    """
    smi_text = subprocess.check_output(["nvidia-smi"]).decode("utf-8")
    usage: dict[int, int] = {}
    header_count = 0
    rule_count = 0
    for row in smi_text.split("\n"):
        if row.startswith("|============"):
            header_count += 1
            rule_count = 0
            continue
        if row.startswith("+-------"):
            rule_count += 1
            continue
        if header_count == 2 and rule_count == 0:
            if "No running processes" in row:
                continue
            fields = row.split()
            # Column 4 is the PID; second-to-last column is "<mem>MiB".
            usage[int(fields[4])] = int(fields[-2].split("MiB")[0])
    return usage
def get_test_name_by_pid() -> dict[int, str]:
test_by_psid = {}
for proc in psutil.process_iter(["pid", "cmdline"]):
try:
cmdline = proc.info["cmdline"]
if cmdline is None:
continue
# Join cmdline to get full command string
cmd_str = " ".join(cmdline)
if "pytest: tests" in cmd_str:
# Find the test name after "::"
if "::" in cmd_str:
test_name = cmd_str.partition("::")[2]
if test_name.strip() != "":
test_by_psid[proc.info["pid"]] = test_name
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
# Process may have terminated or we don't have permission
pass
return test_by_psid
def format_result_line(test_name: str, max_mem_mb: int) -> str:
    """Format a result line in pipe-delimited key=value format."""
    fields = parse_test_name(test_name)
    fields["max_mem_mb"] = str(max_mem_mb)
    return " \t| ".join(f"{key}={value}" for key, value in fields.items())
def main() -> None:
    """Poll per-test GPU memory usage and append peak results to --out-file.

    Each poll cross-references nvidia-smi GPU memory (per PID) with running
    pytest processes (per PID). When a test disappears between two polls it is
    considered finished and its peak memory is written out.

    Runs forever, or until the parent process dies when --die-with-parent is
    set (detected by re-parenting to PID 1).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--out-file", type=str, required=True)
    parser.add_argument("--die-with-parent", action="store_true")
    args = parser.parse_args()
    max_mem_by_test = defaultdict(int)
    old_mem_by_test = {}
    num_results_written = 0
    last_output_line = None
    # Use a context manager so the output file is flushed and closed even when
    # the monitor is interrupted (previously the handle was leaked).
    with open(args.out_file, "w") as f:
        while not args.die_with_parent or os.getppid() != 1:
            mem_by_pid = get_cuda_usage()
            test_by_psid = get_test_name_by_pid()
            num_tests = len(test_by_psid)
            # Current GPU memory per test, for tests that own a GPU process.
            _mem_by_test = {}
            for psid, test in test_by_psid.items():
                if psid not in mem_by_pid:
                    continue
                if test.strip() == "":
                    continue
                _mem_by_test[test] = mem_by_pid[psid]
            for test, _mem in _mem_by_test.items():
                max_mem_by_test[test] = max(_mem, max_mem_by_test[test])
            # A test seen last poll but absent now has finished: record its peak.
            for _test in old_mem_by_test:
                if _test not in _mem_by_test:
                    result_line = format_result_line(_test, max_mem_by_test[_test])
                    f.write(result_line + "\n")
                    f.flush()
                    num_results_written += 1
            potential_output_line = (
                f"{num_tests} tests running, of which {len(_mem_by_test)} on gpu. "
                f"Num results written: {num_results_written} [updating] "
            )
            # Only refresh the status line when it actually changed.
            if potential_output_line != last_output_line:
                print(potential_output_line, end="\r", flush=True)
                last_output_line = potential_output_line
            old_mem_by_test = _mem_by_test
            time.sleep(CHECK_INTERVAL)
    print("Test monitor exiting")
# Script entry point: start the monitoring loop.
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/monitor_test_mem.py",
"license": "Apache License 2.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:examples/speed_benchmark/timers.py | import argparse
import os
from contextlib import nullcontext

# NOTE: must be set before torch first initializes cuBLAS, otherwise the
# deterministic-algorithms setting below cannot be honored for GEMM kernels.
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
import plotext as plt
import torch

import genesis as gs
from genesis.utils.misc import qd_to_torch

# Make the benchmark reproducible run-to-run.
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

# MODE: 0: no noise, 1: uniform noise, 2: env-specific noise
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mode", type=int, default=2, choices=(0, 1, 2))
parser.add_argument("-p", "--profiling", action="store_true", default=False)
args = parser.parse_args()

gs.init(backend=gs.gpu, precision="32", performance_mode=True, seed=0, logging_level="warning")

scene = gs.Scene(
    sim_options=gs.options.SimOptions(
        dt=0.02,
        substeps=2,
    ),
    rigid_options=gs.options.RigidOptions(
        enable_self_collision=False,
        iterations=1,
        ls_iterations=1,
        max_collision_pairs=30,
    ),
    show_viewer=False,
    show_FPS=True,
)
scene.add_entity(
    gs.morphs.Plane(),
)
robot = scene.add_entity(
    gs.morphs.URDF(file="urdf/go2/urdf/go2.urdf"),
    vis_mode="collision",
)
scene.build(n_envs=128)

# Nominal joint-position targets for the 12 actuated leg DoFs.
ctrl_pos_0 = torch.tensor(
    [0.0, 0.0, 0.0, 0.0, 0.8, 0.8, 1.0, 1.0, -1.5, -1.5, -1.5, -1.5],
    dtype=gs.tc_float,
    device=gs.device,
)
# Full configuration: base position (3), base quaternion (4), then the 12 leg DoFs.
init_qpos = torch.tensor(
    [0.0, 0.0, 0.42, 1.0, 0.0, 0.0, 0.0, *ctrl_pos_0],
    dtype=gs.tc_float,
    device=gs.device,
)
robot.set_qpos(init_qpos)
robot.control_dofs_position(ctrl_pos_0, dofs_idx_local=slice(6, 18))

# Per-env solver timers; stats rows are 0: last sample, 1: running mean
# (env-ordered), 2: running mean of the per-step sorted samples.
timers = qd_to_torch(scene.rigid_solver.constraint_solver.constraint_state.timers)
stats = torch.zeros((3, *timers.shape), dtype=gs.tc_float, device=gs.device)
TIMER_LABELS = ("func_solve",)

with (
    torch.profiler.profile(
        on_trace_ready=torch.profiler.tensorboard_trace_handler("./benchmark"),
        schedule=torch.profiler.schedule(wait=0, warmup=0, active=1),
        record_shapes=False,
        profile_memory=False,
        with_stack=True,
        with_flops=False,
    )
    if args.profiling
    else nullcontext()
):
    for step in range(500):
        scene.step()
        # Perturb position targets according to --mode (see MODE comment above).
        noise = (args.mode > 0) * torch.rand(
            (*((scene.n_envs,) if (args.mode > 1) else ()), robot.n_dofs - 6),
            dtype=gs.tc_float,
            device=gs.device,
        )
        robot.control_dofs_position(ctrl_pos_0 + 0.3 * noise, slice(6, 18))
        if not args.profiling:
            if not gs.use_zerocopy:
                timers = qd_to_torch(scene.rigid_solver.constraint_solver.constraint_state.timers)
            stats[0] = timers
            # Incremental mean update: mean_{k+1} = mean_k * k/(k+1) + x/(k+1).
            stats[1] = stats[1] * (step / (step + 1)) + timers / (step + 1)
            stats[2] = stats[2] * (step / (step + 1)) + timers.sort(descending=False).values / (step + 1)
            if (step + 1) % 500 == 0:
                plt.clf()
                plt.plot_size(260, 25)
                plt.subplots(1, 3)
                for i, mode in enumerate(("Last", "Average ordered", "Average sorted")):
                    for data, label in zip(stats[i], TIMER_LABELS):
                        plt.subplot(1, i + 1).plot(data.cpu().numpy(), label=label)
                    plt.title(f"[mode {args.mode}] {mode} per-env timings")
                plt.show()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/speed_benchmark/timers.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/sensors/camera_as_sensor.py | """
Example demonstrating camera sensors with different rendering backends.
Creating cameras as sensors using add_sensor() with three backends
Rasterizer, Raytracer and BatchRenderer.
Test the attachment, add light, batch rendering functionalities.
"""
import os

import matplotlib.pyplot as plt
import numpy as np

import genesis as gs
from genesis.utils.misc import tensor_to_array
from genesis.options.sensors import RasterizerCameraOptions, RaytracerCameraOptions, BatchRendererCameraOptions

########################## init ##########################
gs.init(seed=0, precision="32", backend=gs.gpu, logging_level="info")

########################## check dependencies ##########################
# Try to import LuisaRenderPy to determine if raytracer is available
try:
    import LuisaRenderPy

    ENABLE_RAYTRACER = True
    print("✓ LuisaRenderPy available - Raytracer will be enabled")
except ImportError:
    ENABLE_RAYTRACER = False
    print("⊘ LuisaRenderPy not available - Raytracer will be disabled")
try:
    import gs_madrona

    ENABLE_MADRONA = True
    print("✓ gs_madrona available - BatchRenderer will be enabled")
except ImportError:
    ENABLE_MADRONA = False
    print("⊘ gs_madrona not available - BatchRenderer will be disabled")
# Madrona batch rendering additionally requires the CUDA backend.
ENABLE_MADRONA = ENABLE_MADRONA and (gs.backend == gs.cuda)

########################## create a scene ##########################
# Choose renderer based on raytracer availability
if ENABLE_RAYTRACER:
    renderer = gs.renderers.RayTracer(
        env_surface=gs.surfaces.Emission(
            emissive_texture=gs.textures.ColorTexture(
                color=(0.2, 0.3, 0.5),
            ),
        ),
        env_radius=20.0,
    )
else:
    # Use Rasterizer as fallback renderer
    renderer = gs.renderers.Rasterizer()

scene = gs.Scene(
    renderer=renderer,
    show_viewer=False,
)

########################## entities ##########################
plane = scene.add_entity(
    morph=gs.morphs.Plane(),
    surface=gs.surfaces.Rough(
        color=(0.4, 0.4, 0.4),
    ),
)
# The sphere is also the attachment target for the attached cameras below.
sphere = scene.add_entity(
    morph=gs.morphs.Sphere(
        radius=0.5,
        pos=(0.0, 0.0, 2.0),
    ),
    surface=gs.surfaces.Smooth(
        color=(1.0, 0.5, 0.5),
    ),
)
box = scene.add_entity(
    morph=gs.morphs.Box(
        size=(0.3, 0.3, 0.3),
        pos=(1.0, 1.0, 1.0),
    ),
    surface=gs.surfaces.Rough(
        color=(0.5, 1.0, 0.5),
    ),
)

########################## Camera Configurations ##########################
# Define common camera parameters
CAMERA_COMMON_KWARGS = dict(
    {
        "up": (0.0, 0.0, 1.0),
        "near": 0.1,
        "far": 100.0,
    }
)
# One entry per camera; "attachment" of None means a static world-frame camera.
CAMERA_SENSORS_KWARGS = [
    {
        "name": "cam0",
        "pos": (3.0, 0.0, 2.0),
        "lookat": (0.0, 0.0, 1.0),
        "fov": 60.0,
        "attachment": None,  # No attachment
        "lights": [{"pos": (2.0, 2.0, 5.0), "color": (1.0, 1.0, 1.0), "intensity": 1.0}],
    },
    {
        "name": "cam1",
        "pos": (0.0, 3.0, 2.0),
        "lookat": (0.0, 0.0, 1.0),
        "fov": 60.0,
        "attachment": None,
        "lights": [],
    },
    {
        "name": "cam_attached",
        "pos": (0.0, 0.0, 1.0),
        "lookat": (0.0, 0.0, 0.0),
        "fov": 70.0,
        "attachment": {
            "entity_idx": None,
            "link_idx_local": 0,
        },
        "lights": [],
    },
    {
        "name": "cam_offset_T",
        "pos": (0.0, 0.0, 1.0),
        "lookat": (0.0, 0.0, 0.0),
        "fov": 70.0,
        "attachment": {
            "entity_idx": None,
            "link_idx_local": 0,
            "offset_T": np.eye(4),  # Identity transform for testing
        },
        "lights": [],
    },
]
# Create camera configurations for all backends
backends = [
    ("rasterizer", RasterizerCameraOptions, True),  # Always enabled
    ("raytracer", RaytracerCameraOptions, ENABLE_RAYTRACER),
    ("batch_render", BatchRendererCameraOptions, ENABLE_MADRONA),
]
backend_configs = {}
for backend_name, options_class, enabled in backends:
    if not enabled:
        continue
    configs = []
    for camera_config in CAMERA_SENSORS_KWARGS:
        name = f"{backend_name}_{camera_config['name']}"
        res = (500, 600)
        # Create options with common and backend-specific parameters
        options_kwargs = {
            "res": res,
            "pos": camera_config["pos"],
            "lookat": camera_config["lookat"],
            "up": CAMERA_COMMON_KWARGS["up"],
            "fov": camera_config["fov"],
            "lights": camera_config["lights"],
        }
        # Handle attachment
        attachment = camera_config["attachment"]
        if attachment is not None:
            # For attached cameras, set the entity_idx to the sphere's index
            options_kwargs.update(
                {
                    "entity_idx": sphere.idx,
                    "link_idx_local": attachment["link_idx_local"],
                }
            )
            if "offset_T" in attachment:
                options_kwargs["offset_T"] = attachment["offset_T"]
        # Add backend-specific parameters
        if options_class is RasterizerCameraOptions:
            options_kwargs.update({"near": CAMERA_COMMON_KWARGS["near"], "far": CAMERA_COMMON_KWARGS["far"]})
        elif options_class is RaytracerCameraOptions:
            options_kwargs.update(
                {
                    "model": "pinhole",
                    "spp": 64,
                    "denoise": False,
                }
            )
            if attachment is None:  # Only add env surface for non-attached cameras
                options_kwargs.update(
                    {
                        "env_surface": gs.surfaces.Emission(
                            emissive_texture=gs.textures.ColorTexture(color=(0.2, 0.3, 0.5)),
                        ),
                        "env_radius": 20.0,
                    }
                )
        elif options_class is BatchRendererCameraOptions:
            options_kwargs.update({"use_rasterizer": True})
            if camera_config["lights"]:
                adjusted_lights = [{**light, "directional": False} for light in camera_config["lights"]]
                options_kwargs["lights"] = adjusted_lights
        # Adjust lights for raytracer (different intensity/color)
        if options_class is RaytracerCameraOptions and camera_config["lights"]:
            adjusted_lights = [
                {**light, "color": (10.0, 10.0, 10.0), "intensity": 1.0} for light in camera_config["lights"]
            ]
            options_kwargs["lights"] = adjusted_lights
        options = options_class(**options_kwargs)
        configs.append(
            {
                "name": name,
                "options": options,
                "attachment": camera_config["attachment"],
            }
        )
    backend_configs[backend_name] = configs

########################## Create Cameras ##########################
cameras = {}
for group_name, configs in backend_configs.items():
    print(f"\n=== {group_name} Cameras ===")
    for config in configs:
        camera = scene.add_sensor(config["options"])
        cameras[config["name"]] = camera
    print(f"✓ Created {len(configs)} {group_name.lower()} cameras")

########################## build ##########################
n_envs = 1
scene.build(n_envs=n_envs)

########################## identify attached cameras ##########################
print("\n=== Identifying Attached Cameras ===")
# Identify cameras that are configured to be attached
attached_cameras = []
for group_name, configs in backend_configs.items():
    for config in configs:
        if config["attachment"] is not None:
            camera = cameras[config["name"]]
            attached_cameras.append(camera)
            print(f"✓ {config['name']} is attached to sphere")
print(f"✓ Identified {len(attached_cameras)} attached cameras")

########################## simulate and render ##########################
os.makedirs("camera_sensor_output", exist_ok=True)
for i in range(100):
    scene.step()
    # Render every 10 steps
    if i % 10 == 0:
        print(f"\n--- Step {i} ---")
        camera_data = {}
        for cam_name, camera in cameras.items():
            data = camera.read()
            camera_data[cam_name] = data
            print(f" {cam_name.replace('_', ' ').title()} RGB shape: {data.rgb.shape}")
        # Save each camera's frame to disk for visual inspection.
        for cam_name, data in camera_data.items():
            rgb_data = data.rgb[0] if data.rgb.ndim > 3 else data.rgb
            suffix = "_env0" if n_envs > 1 else ""
            filename = f"camera_sensor_output/{cam_name}{suffix}_step{i:03d}.png"
            plt.imsave(filename, tensor_to_array(rgb_data))
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/sensors/camera_as_sensor.py",
"license": "Apache License 2.0",
"lines": 238,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/sensors/camera.py | """
Camera sensors for rendering: Rasterizer, Raytracer, and Batch Renderer.
"""
import sys
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Type
import numpy as np
import torch
import genesis as gs
from genesis.options.sensors import (
RasterizerCameraOptions,
RaytracerCameraOptions,
BatchRendererCameraOptions,
SensorOptions,
)
from genesis.utils.geom import (
pos_lookat_up_to_T,
T_to_trans,
T_to_quat,
trans_to_T,
trans_quat_to_T,
transform_by_quat,
transform_by_trans_quat,
)
from genesis.utils.misc import tensor_to_array
from genesis.vis.batch_renderer import BatchRenderer
from genesis.options.renderers import BatchRenderer as BatchRendererOptions
from genesis.options.vis import VisOptions
from genesis.vis.rasterizer import Rasterizer
from genesis.vis.rasterizer_context import RasterizerContext
from .base_sensor import (
Sensor,
SharedSensorMetadata,
RigidSensorMixin,
RigidSensorMetadataMixin,
)
from .sensor_manager import register_sensor
if TYPE_CHECKING:
from genesis.utils.ring_buffer import TensorRingBuffer
from genesis.vis.rasterizer import Rasterizer
from genesis.vis.rasterizer_context import RasterizerContext
from genesis.vis.batch_renderer import BatchRenderer
from genesis.vis.raytracer import Raytracer
# ========================== Data Class ==========================
class CameraData(NamedTuple):
    """Camera sensor return data."""

    # Rendered RGB image as uint8; (H, W, 3) for a non-batched scene,
    # (B, H, W, 3) when multiple environments are simulated.
    rgb: torch.Tensor
class MinimalVisualizerWrapper:
    """
    Stripped-down stand-in for the full scene visualizer, used by
    BatchRenderer camera sensors.

    BatchRenderer expects a visualizer-like object exposing the scene, the
    cameras, and a rasterizer context. Camera sensors need none of the heavy
    visualizer machinery (viewer, UI, ...), so this wrapper supplies only that
    minimal surface and avoids the cost of a full visualizer instance.
    """

    def __init__(self, scene, sensors, vis_options):
        self.scene = scene
        self._sensors = sensors  # sensors backing the wrapped cameras
        self._cameras = []  # camera wrappers, populated later
        # BatchRenderer requires a rasterizer context (e.g. for camera frustum
        # visualization) even though camera sensors never render frustums, so
        # build a minimal one up front.
        self._context = RasterizerContext(vis_options)
        self._context.build(scene)
        self._context.reset()
class BaseCameraWrapper:
    """Common state shared by all backend-specific camera wrapper objects."""

    def __init__(self, sensor):
        self.sensor = sensor
        # The sensor index doubles as a unique camera identifier.
        self.uid = sensor._idx
        # Mirror the rendering parameters configured on the sensor options.
        options = sensor._options
        self.res = options.res
        self.fov = options.fov
        self.near = options.near
        self.far = options.far
class RasterizerCameraWrapper(BaseCameraWrapper):
    """Lightweight camera handle consumed by the rasterizer backend."""

    def __init__(self, sensor: "RasterizerCameraSensor"):
        super().__init__(sensor)
        # Pre-compute width/height ratio for the projection matrix.
        width, height = self.res
        self.aspect_ratio = width / height
class BatchRendererCameraWrapper(BaseCameraWrapper):
    """Camera handle consumed by the batch renderer backend."""

    def __init__(self, sensor: "BatchRendererCameraSensor"):
        super().__init__(sensor)
        # Position of this camera within the batch (cameras registered so far).
        self.idx = len(sensor._shared_metadata.sensors)
        self.debug = False
        self.model = sensor._options.model
        # Cache the configured pose so later updates can re-derive the transform.
        self._pos = torch.tensor(sensor._options.pos, dtype=gs.tc_float, device=gs.device)
        self._lookat = torch.tensor(sensor._options.lookat, dtype=gs.tc_float, device=gs.device)
        self._up = torch.tensor(sensor._options.up, dtype=gs.tc_float, device=gs.device)
        self.transform = pos_lookat_up_to_T(self._pos, self._lookat, self._up)

    def _expand_to_envs(self, value):
        """Broadcast a per-camera vector over all environments when batched."""
        n_envs = self.sensor._manager._sim.n_envs
        if value.ndim > 1 or n_envs == 0:
            return value
        return value[None].expand((n_envs, -1))

    def get_pos(self):
        """Get camera position (for batch renderer)."""
        return self._expand_to_envs(self._pos)

    def get_quat(self):
        """Get camera quaternion (for batch renderer)."""
        return self._expand_to_envs(T_to_quat(self.transform))
# ========================== Shared Metadata ==========================
@dataclass
class RasterizerCameraSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata):
    """Shared metadata for all Rasterizer cameras."""

    # Rasterizer instance
    renderer: Optional["Rasterizer"] = None
    # RasterizerContext instance
    context: Optional["RasterizerContext"] = None
    # List of light dictionaries
    lights: Optional[List[Dict[str, Any]]] = None
    # List of RasterizerCameraSensor instances
    sensors: Optional[List["RasterizerCameraSensor"]] = None
    # {sensor_idx: np.ndarray with shape (B, H, W, 3)}
    image_cache: Optional[Dict[int, np.ndarray]] = None
    # Track when rasterizer cameras were last updated
    last_render_timestep: int = -1

    def destroy(self):
        """Tear down the owned renderer/context and drop cached references."""
        super().destroy()
        if self.renderer is not None:
            self.renderer.destroy()
            self.renderer = None
        if self.context is not None:
            self.context.destroy()
            self.context = None
        self.lights = None
        self.image_cache = None
        self.sensors = None
@dataclass
class RaytracerCameraSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata):
    """Shared metadata for all Raytracer cameras."""

    # Raytracer instance
    renderer: Optional["Raytracer"] = None
    # List of light objects
    lights: Optional[List[Any]] = None
    # List of RaytracerCameraSensor instances
    sensors: Optional[List["RaytracerCameraSensor"]] = None
    # {sensor_idx: np.ndarray with shape (B, H, W, 3)}
    image_cache: Optional[Dict[int, np.ndarray]] = None
    # Track when raytracer cameras were last updated
    last_render_timestep: int = -1

    def destroy(self):
        """Drop references to the shared renderer and caches.

        Unlike the rasterizer variant, the raytracer is owned by the scene
        visualizer, so it is only dereferenced here, not destroyed.
        """
        super().destroy()
        self.renderer = None
        self.sensors = None
        self.image_cache = None
@dataclass
class BatchRendererCameraSharedMetadata(RigidSensorMetadataMixin, SharedSensorMetadata):
    """Shared metadata for all Batch Renderer cameras."""

    # BatchRenderer instance
    renderer: Optional["BatchRenderer"] = None
    # gs.List of lights
    lights: Optional[Any] = None
    # List of BatchRendererCameraSensor instances
    sensors: Optional[List["BatchRendererCameraSensor"]] = None
    # {sensor_idx: np.ndarray with shape (B, H, W, 3)}
    image_cache: Optional[Dict[int, np.ndarray]] = None
    # Track when batch was last rendered
    last_render_timestep: int = -1
    # MinimalVisualizerWrapper instance
    visualizer_wrapper: Optional["MinimalVisualizerWrapper"] = None

    def destroy(self):
        """Drop references to the shared renderer, caches, and visualizer wrapper."""
        super().destroy()
        self.renderer = None
        self.sensors = None
        self.image_cache = None
        self.visualizer_wrapper = None
# ========================== Base Camera Sensor ==========================
class BaseCameraSensor(RigidSensorMixin, Sensor[SharedSensorMetadata]):
    """
    Base class for camera sensors that render RGB images into an internal image_cache.

    This class centralizes:
    - Attachment handling via RigidSensorMixin
    - The _stale flag used for auto-render-on-read
    - Common Sensor cache integration (shape/dtype)
    - Shared read() method returning torch tensors
    """

    def __init__(
        self,
        options: "SensorOptions",
        idx: int,
        data_cls: Type[CameraData],
        manager: "gs.SensorManager",
    ):
        super().__init__(options, idx, data_cls, manager)
        # True when the cached image is outdated; rendering happens lazily on read().
        self._stale: bool = True

    # ========================== Cache Integration (shared) ==========================
    def _get_return_format(self) -> tuple[tuple[int, ...], ...]:
        """Per-sensor output shape: one (H, W, 3) RGB image (res is stored as (W, H))."""
        w, h = self._options.res
        return ((h, w, 3),)

    @classmethod
    def _get_cache_dtype(cls) -> torch.dtype:
        """Images are cached as 8-bit RGB."""
        return torch.uint8

    @classmethod
    def _update_shared_ground_truth_cache(
        cls,
        shared_metadata: SharedSensorMetadata,
        shared_ground_truth_cache: torch.Tensor,
    ):
        """No per-step ground-truth cache update for cameras."""
        pass

    @classmethod
    def _update_shared_cache(
        cls,
        shared_metadata: SharedSensorMetadata,
        shared_ground_truth_cache: torch.Tensor,
        shared_cache: torch.Tensor,
        buffered_data: "TensorRingBuffer",
    ):
        # No per-step measured-cache update for cameras (handled lazily on read()).
        pass

    def _draw_debug(self, context: "RasterizerContext", buffer_updates: dict[str, np.ndarray]):
        """No debug drawing for cameras."""
        pass

    # ========================== Attachment handling ==========================
    @gs.assert_built
    def move_to_attach(self):
        """
        Move the camera to follow the currently attached rigid link.

        Uses a shared transform computation and delegates to _apply_camera_transform().
        Raises via gs.raise_exception if the camera is not attached to any link.
        """
        if self._link is None:
            gs.raise_exception("Camera not attached to any rigid link.")
        # Offset relative to the link frame: either a full 4x4 transform from the
        # options, or a pure translation built from the configured position.
        if self._options.offset_T is not None:
            offset_T = torch.tensor(self._options.offset_T, dtype=gs.tc_float, device=gs.device)
        else:
            pos = torch.tensor(self._options.pos, dtype=gs.tc_float, device=gs.device)
            offset_T = trans_to_T(pos)
        link_pos = self._link.get_pos()
        link_quat = self._link.get_quat()
        link_T = trans_quat_to_T(link_pos, link_quat)
        # World camera pose = link pose composed with the local offset.
        camera_T = torch.matmul(link_T, offset_T)
        self._apply_camera_transform(camera_T)

    # ========================== Hooks for subclasses ==========================
    def _apply_camera_transform(self, camera_T: torch.Tensor):
        """Apply the computed camera transform to the backend-specific camera representation."""
        raise NotImplementedError

    def _render_current_state(self):
        """Perform the actual render for the current state; subclasses must implement."""
        raise NotImplementedError

    # ========================== Shared read() ==========================
    def _get_image_cache_entry(self):
        """Return this sensor's entry in the shared image cache."""
        return self._shared_metadata.image_cache[self._idx]

    def _ensure_rendered_for_current_state(self):
        """Ensure this camera has an up-to-date render before reading.

        Base handles staleness and timestamps; subclasses implement _render_current_state().
        """
        scene = self._manager._sim.scene
        # If the scene time advanced, mark all cameras as stale
        if self._shared_metadata.last_render_timestep != scene.t:
            if self._shared_metadata.sensors is not None:
                for sensor in self._shared_metadata.sensors:
                    sensor._stale = True
            self._shared_metadata.last_render_timestep = scene.t
        # If this camera is not stale, cache is considered fresh
        if not self._stale:
            return
        # Update camera pose only when attached; detached cameras keep their last world pose
        if self._link is not None:
            self.move_to_attach()
        # Call subclass-specific render
        self._render_current_state()
        # Mark as fresh
        self._stale = False

    def _sanitize_envs_idx(self, envs_idx):
        """Sanitize envs_idx to valid indices (None and scalar ints pass through unchanged)."""
        if envs_idx is None:
            return None
        if isinstance(envs_idx, (int, np.integer)):
            return envs_idx
        return np.asarray(envs_idx)

    @gs.assert_built
    def read(self, envs_idx=None) -> CameraData:
        """Render if needed, then read the cached image from the backend-specific cache."""
        self._ensure_rendered_for_current_state()
        cached_image = self._get_image_cache_entry()
        return _camera_read_from_image_cache(self, cached_image, envs_idx, to_numpy=False)

    @classmethod
    def reset(cls, shared_metadata, envs_idx):
        """Reset camera sensor (no state to reset)."""
        pass
# ========================== Camera Sensor Helpers ==========================
def _camera_read_from_image_cache(sensor, cached_image, envs_idx, *, to_numpy: bool) -> CameraData:
"""
Shared helper to convert a cached RGB image array into CameraData with correct env handling.
Parameters
----------
sensor : any camera sensor with _manager and _return_data_class
cached_image : np.ndarray | torch.Tensor
Image cache for this camera, shaped (B, H, W, 3) or (H, W, 3) depending on n_envs.
envs_idx : None | int | sequence
Environment index/indices to select.
to_numpy : bool
If True and cached_image is a torch Tensor, convert to numpy first.
"""
if to_numpy and isinstance(cached_image, torch.Tensor):
cached_image = tensor_to_array(cached_image)
if envs_idx is None:
if sensor._manager._sim.n_envs == 0:
return sensor._return_data_class(rgb=cached_image[0])
return sensor._return_data_class(rgb=cached_image)
if isinstance(envs_idx, (int, np.integer)):
return sensor._return_data_class(rgb=cached_image[envs_idx])
return sensor._return_data_class(rgb=cached_image[envs_idx])
# ========================== Rasterizer Camera Sensor ==========================
@register_sensor(RasterizerCameraOptions, RasterizerCameraSharedMetadata, CameraData)
class RasterizerCameraSensor(BaseCameraSensor):
    """
    Rasterizer camera sensor using OpenGL-based rendering.

    This sensor renders RGB images using the existing Rasterizer backend,
    but operates independently from the scene visualizer.
    """

    def __init__(
        self,
        options: RasterizerCameraOptions,
        idx: int,
        data_cls: Type[CameraData],
        manager: "gs.SensorManager",
    ):
        super().__init__(options, idx, data_cls, manager)
        self._options: RasterizerCameraOptions
        # Backend handles; populated lazily when the camera is registered.
        self._camera_node = None
        self._camera_target = None
        self._camera_wrapper = None
        self._is_camera_registered = False

    # ========================== Sensor Lifecycle ==========================
    def build(self):
        """Initialize the rasterizer and register this camera."""
        super().build()
        scene = self._manager._sim.scene
        # The first rasterizer camera sensor initializes the shared state.
        if self._shared_metadata.sensors is None:
            self._shared_metadata.sensors = []
            self._shared_metadata.lights = gs.List()
            self._shared_metadata.image_cache = {}
            # If a viewer is active, reuse its windowed OpenGL context for both offscreen and onscreen
            # rendering, rather than creating a separate headless context which is fragile.
            if scene.viewer is not None:
                self._shared_metadata.context = scene.visualizer.context
                self._shared_metadata.renderer = scene.visualizer.rasterizer
            else:
                # No viewer - create standalone rasterizer with offscreen context
                self._shared_metadata.context = self._create_standalone_context(scene)
                self._shared_metadata.renderer = Rasterizer(viewer=None, context=self._shared_metadata.context)
                self._shared_metadata.renderer.build()
        self._shared_metadata.sensors.append(self)
        # Register camera now if standalone (offscreen), or defer to first render if using visualizer's rasterizer
        # (visualizer isn't built yet at sensor.build() time)
        if self._shared_metadata.renderer.offscreen:
            self._ensure_camera_registered()
        # Pre-allocate the per-env uint8 image cache for this sensor.
        _B = max(self._manager._sim.n_envs, 1)
        w, h = self._options.res
        self._shared_metadata.image_cache[self._idx] = torch.zeros((_B, h, w, 3), dtype=torch.uint8, device=gs.device)

    def _ensure_camera_registered(self):
        """Register this camera with the renderer (no-op if already registered)."""
        if self._is_camera_registered:
            return
        # Add lights from options to the context
        # NOTE(review): the converted light dict is added to the context but never
        # appended to shared_metadata.lights (only checked for None) - confirm intended.
        for light_config in self._options.lights:
            if self._shared_metadata.lights is not None:
                light_dict = self._convert_light_config_to_rasterizer(light_config)
                self._shared_metadata.context.add_light(light_dict)
        if self._camera_wrapper is None:
            self._camera_wrapper = RasterizerCameraWrapper(self)
        self._shared_metadata.renderer.add_camera(self._camera_wrapper)
        self._update_camera_pose()
        self._is_camera_registered = True

    def _create_standalone_context(self, scene):
        """Create a simplified RasterizerContext for camera sensors (no viewer)."""
        if not scene.sim._rigid_only and scene.n_envs > 1:
            gs.raise_exception("Rasterizer with n_envs > 1, does not work when using non rigid simulation")
        if sys.platform == "darwin":
            if scene.n_envs > 1:
                gs.raise_exception(
                    "Rasterizer with n_envs > 1, does not work on Metal because it doesn't support OpenGL 4.2"
                )
            env_separate_rigid = False
        else:
            if scene.n_envs > 1:
                gs.logger.warning(
                    "Rasterizer with n_envs > 1 is slow as it doesn't do batched rendering consider using BatchRenderer instead."
                )
            env_separate_rigid = True
        vis_options = VisOptions(
            show_world_frame=False,
            show_link_frame=False,
            show_cameras=False,
            rendered_envs_idx=range(max(self._manager._sim._B, 1)),
            env_separate_rigid=env_separate_rigid,
        )
        context = RasterizerContext(vis_options)
        context.build(scene)
        context.reset()
        return context

    def _convert_light_config_to_rasterizer(self, light_config):
        """Convert a light config dict to rasterizer format."""
        # Default values for rasterizer
        light_type = light_config.get("type", "directional")
        pos = light_config.get("pos", (0.0, 0.0, 5.0))
        dir = light_config.get("dir", (0.0, 0.0, -1.0))
        color = light_config.get("color", (1.0, 1.0, 1.0))
        intensity = light_config.get("intensity", 1.0)
        # Intensity is folded into the color channels for the rasterizer.
        return {
            "type": light_type,
            "pos": pos,
            "dir": dir,
            "color": tuple(np.array(color) * intensity),
            "intensity": intensity,
        }

    def _update_camera_pose(self):
        """Update camera pose based on options."""
        pos = torch.tensor(self._options.pos, dtype=gs.tc_float, device=gs.device)
        lookat = torch.tensor(self._options.lookat, dtype=gs.tc_float, device=gs.device)
        up = torch.tensor(self._options.up, dtype=gs.tc_float, device=gs.device)
        # If attached to a link and the link is built, pos is relative to link frame
        if self._link is not None and self._link.is_built:
            # Convert pos from link-relative to world coordinates
            link_pos = self._link.get_pos()
            link_quat = self._link.get_quat()
            # Apply pos directly as offset from link
            pos_world = transform_by_quat(pos, link_quat) + link_pos
            pos = pos_world
        elif self._link is not None:
            # Link exists but not built yet - use configured pose as-is (treat as world coordinates for now)
            # This will be corrected when move_to_attach is called
            pass
        transform = pos_lookat_up_to_T(pos, lookat, up)
        self._camera_wrapper.transform = tensor_to_array(transform)
        self._shared_metadata.renderer.update_camera(self._camera_wrapper)

    def _apply_camera_transform(self, camera_T: torch.Tensor):
        """Update rasterizer camera wrapper from a world transform."""
        self._ensure_camera_registered()
        self._camera_wrapper.transform = tensor_to_array(camera_T)
        self._shared_metadata.renderer.update_camera(self._camera_wrapper)

    def _render_current_state(self):
        """Perform the actual render for the current state."""
        self._ensure_camera_registered()
        self._shared_metadata.context.update(force_render=True)
        rgb_arr, _, _, _ = self._shared_metadata.renderer.render_camera(
            self._camera_wrapper,
            rgb=True,
            depth=False,
            segmentation=False,
            normal=False,
        )
        # Ensure contiguous layout because the rendered array may have negative strides.
        rgb_tensor = torch.from_numpy(np.ascontiguousarray(rgb_arr)).to(dtype=torch.uint8, device=gs.device)
        if len(rgb_tensor.shape) == 3:
            # Single environment rendered - add batch dimension.
            rgb_tensor = rgb_tensor.unsqueeze(0)
        self._shared_metadata.image_cache[self._idx][:] = rgb_tensor
# ========================== Raytracer Camera Sensor ==========================
@register_sensor(RaytracerCameraOptions, RaytracerCameraSharedMetadata, CameraData)
class RaytracerCameraSensor(BaseCameraSensor):
    """
    Raytracer camera sensor using LuisaRender path tracing.

    The sensor reuses the scene visualizer's raytracer: the scene must be created
    with ``renderer=gs.renderers.RayTracer(...)``. Only single-environment scenes
    are supported (n_envs <= 1).
    """

    def __init__(
        self,
        options: RaytracerCameraOptions,
        idx: int,
        data_cls: Type[CameraData],
        manager: "gs.SensorManager",
    ):
        super().__init__(options, idx, data_cls, manager)
        # Narrow the type of the options set by the base class for readability.
        self._options: RaytracerCameraOptions
        # Visualizer camera object; created in build().
        self._camera_obj = None

    def build(self):
        """Register a raytracer camera that reuses the visualizer pipeline."""
        super().build()
        scene = self._manager._sim.scene
        visualizer = scene.visualizer
        renderer = getattr(visualizer, "raytracer", None)
        if renderer is None:
            gs.raise_exception(
                "RaytracerCameraSensor requires the scene to be created with `renderer=gs.renderers.RayTracer(...)`."
            )
        # Multi-environment rendering is not yet supported for Raytracer cameras
        n_envs = self._manager._sim.n_envs
        if n_envs > 1:
            gs.raise_exception(
                f"Raytracer camera sensors do not support multi-environment rendering (n_envs={n_envs}). "
                "Use BatchRenderer camera sensors for batched rendering."
            )
        # First raytracer sensor initializes the metadata shared by all of them.
        if self._shared_metadata.sensors is None:
            self._shared_metadata.sensors = []
            self._shared_metadata.lights = []
            self._shared_metadata.image_cache = {}
            self._shared_metadata.renderer = renderer
        self._shared_metadata.sensors.append(self)
        # Add lights from options as mesh lights to the scene
        scene = self._manager._sim.scene
        for light_config in self._options.lights:
            # Mesh lights can only be added before the scene is built.
            if not scene.is_built:
                self._add_light_as_mesh_light(scene, light_config)
        # Compute world pose for the camera
        pos = torch.tensor(self._options.pos, dtype=gs.tc_float, device=gs.device)
        lookat = torch.tensor(self._options.lookat, dtype=gs.tc_float, device=gs.device)
        up = torch.tensor(self._options.up, dtype=gs.tc_float, device=gs.device)
        # If attached to a link and the link is built, transform pos to world coordinates
        if self._link is not None and self._link.is_built:
            link_pos = self._link.get_pos().squeeze(0)
            link_quat = self._link.get_quat().squeeze(0)
            # Apply pos directly as offset from link
            pos = transform_by_trans_quat(pos, link_pos, link_quat)
            # Transform lookat and up (no rotation offset since rotation is defined by lookat/up)
            lookat = transform_by_trans_quat(lookat, link_pos, link_quat)
            up = transform_by_quat(up, link_quat)
        elif self._link is not None:
            # Link exists but not built yet - use configured pose as-is (treat as world coordinates for now)
            # This will be corrected when move_to_attach is called
            pass
        self._camera_obj = visualizer.add_camera(
            res=self._options.res,
            pos=pos,
            lookat=lookat,
            up=up,
            model=self._options.model,
            fov=self._options.fov,
            aperture=self._options.aperture,
            focus_dist=self._options.focus_dist,
            GUI=False,
            spp=self._options.spp,
            denoise=self._options.denoise,
            # NOTE(review): near/far are hard-coded here instead of being exposed as
            # options - confirm this is intentional for the raytracer backend.
            near=0.05,
            far=100.0,
            env_idx=None if n_envs == 0 else 0,
            debug=False,
        )
        # Attach the visualizer camera to the link if this sensor is attached
        if self._link is not None:
            if self._options.offset_T is not None:
                offset_T = torch.tensor(self._options.offset_T, dtype=gs.tc_float, device=gs.device)
            else:
                # No explicit offset: treat the configured pos as a pure translation offset.
                pos = torch.tensor(self._options.pos, dtype=gs.tc_float, device=gs.device)
                offset_T = trans_to_T(pos)
            self._camera_obj.attach(self._link, offset_T)
        # Preallocate the (batch, height, width, 3) uint8 image cache for this sensor.
        _B = max(n_envs, 1)
        w, h = self._options.res
        self._shared_metadata.image_cache[self._idx] = torch.zeros((_B, h, w, 3), dtype=torch.uint8, device=gs.device)

    @gs.assert_built
    def move_to_attach(self):
        """No-op: pose tracking of the attached link is delegated to the visualizer camera."""
        # Bypass original implementation since it will be handled by visualizer
        pass

    def _add_light_as_mesh_light(self, scene, light_config):
        """Add a light described by ``light_config`` to the scene as an emissive sphere.

        Parameters
        ----------
        scene : gs.Scene
            Scene the mesh light is added to (only called before the scene is built).
        light_config : dict
            Light description; recognized keys are "color", "intensity", "radius",
            "pos", "revert_dir", "double_sided", and "cutoff".
        """
        # Default values for raytracer mesh lights
        color = light_config.get("color", (1.0, 1.0, 1.0))
        intensity = light_config.get("intensity", 1.0)
        radius = light_config.get("radius", 0.5)
        pos = light_config.get("pos", (0.0, 0.0, 5.0))
        revert_dir = light_config.get("revert_dir", False)
        double_sided = light_config.get("double_sided", False)
        cutoff = light_config.get("cutoff", 180.0)
        morph = gs.morphs.Sphere(pos=pos, radius=radius)
        scene.add_mesh_light(
            morph=morph,
            # Append an alpha channel of 1.0 to the RGB color.
            color=(*color, 1.0),
            intensity=intensity,
            revert_dir=revert_dir,
            double_sided=double_sided,
            cutoff=cutoff,
        )

    def _render_current_state(self):
        """Perform the actual render for the current state."""
        if self._link is not None:
            # Re-sync the visualizer camera with the link pose before rendering.
            self._camera_obj.move_to_attach()
        rgb_arr, _, _, _ = self._camera_obj.render(
            rgb=True,
            depth=False,
            segmentation=False,
            colorize_seg=False,
            normal=False,
            antialiasing=False,
            force_render=True,
        )
        # Ensure contiguous layout because the rendered array may have negative strides.
        rgb_tensor = torch.from_numpy(np.ascontiguousarray(rgb_arr)).to(dtype=torch.uint8, device=gs.device)
        # Only a single environment is supported, so the frame always fills batch slot 0.
        self._shared_metadata.image_cache[self._idx][0] = rgb_tensor
# ========================== Batch Renderer Camera Sensor ==========================
@register_sensor(BatchRendererCameraOptions, BatchRendererCameraSharedMetadata, CameraData)
class BatchRendererCameraSensor(BaseCameraSensor):
    """
    Batch renderer camera sensor using Madrona GPU batch rendering.
    Note: All batch renderer cameras must have the same resolution.

    Requires the CUDA backend. All sensors of this type share a single
    ``BatchRenderer`` instance, which is built once the last sensor registers.
    """

    def __init__(
        self,
        options: BatchRendererCameraOptions,
        idx: int,
        data_cls: Type[CameraData],
        manager: "gs.SensorManager",
    ):
        super().__init__(options, idx, data_cls, manager)
        # Narrow the type of the options set by the base class for readability.
        self._options: BatchRendererCameraOptions
        # Per-sensor camera wrapper; created in build().
        self._camera_obj = None

    def build(self):
        """Initialize the batch renderer and register this camera."""
        super().build()
        if gs.backend != gs.cuda:
            gs.raise_exception("BatchRendererCameraSensor requires CUDA backend.")
        scene = self._manager._sim.scene
        # The first sensor of this type sets up the shared renderer and metadata.
        if self._shared_metadata.sensors is None:
            self._shared_metadata.sensors = []
            self._shared_metadata.lights = gs.List()
            self._shared_metadata.image_cache = {}
            self._shared_metadata.last_render_timestep = -1
            all_sensors = self._manager._sensors_by_type[type(self)]
            # Madrona renders all cameras in one batch, so resolutions must match.
            resolutions = [s._options.res for s in all_sensors]
            if len(set(resolutions)) > 1:
                gs.raise_exception(
                    f"All BatchRendererCameraSensor instances must have the same resolution. Found: {set(resolutions)}"
                )
            br_options = BatchRendererOptions(
                use_rasterizer=self._options.use_rasterizer,
            )
            vis_options = VisOptions(
                show_world_frame=False,
                show_link_frame=False,
                show_cameras=False,
                rendered_envs_idx=range(max(self._manager._sim._B, 1)),
            )
            self._shared_metadata.visualizer_wrapper = MinimalVisualizerWrapper(scene, all_sensors, vis_options)
            self._shared_metadata.renderer = BatchRenderer(
                self._shared_metadata.visualizer_wrapper, br_options, vis_options
            )
        self._shared_metadata.sensors.append(self)
        # Add lights from options to the renderer
        for light_config in self._options.lights:
            if self._shared_metadata.renderer is not None:
                self._add_light_to_batch_renderer(light_config)
        self._camera_obj = BatchRendererCameraWrapper(self)
        # Build the shared renderer only once every sensor has registered its camera.
        if len(self._shared_metadata.sensors) == len(self._manager._sensors_by_type[type(self)]):
            self._shared_metadata.visualizer_wrapper._cameras = [s._camera_obj for s in self._shared_metadata.sensors]
            self._shared_metadata.renderer.build()
        # Preallocate the (batch, height, width, 3) uint8 image cache for this sensor.
        _B = max(self._manager._sim.n_envs, 1)
        w, h = self._options.res
        self._shared_metadata.image_cache[self._idx] = torch.zeros((_B, h, w, 3), dtype=torch.uint8, device=gs.device)

    def _render_current_state(self):
        """Perform the actual render for the current state (all cameras in one batch)."""
        sensors = self._shared_metadata.sensors or [self]
        # Update every attached camera's pose before rendering the shared batch.
        for sensor in sensors:
            if sensor._link is not None:
                sensor.move_to_attach()
        self._shared_metadata.renderer.update_scene(force_render=True)
        rgb_arr, *_ = self._shared_metadata.renderer.render(
            rgb=True,
            depth=False,
            segmentation=False,
            normal=False,
            antialiasing=False,
            force_render=True,
        )
        # rgb_arr might be a tuple of arrays (one per camera) or a single array
        if isinstance(rgb_arr, (tuple, list)):
            rgb_arrs = [torch.as_tensor(arr).to(dtype=torch.uint8, device=gs.device) for arr in rgb_arr]
        else:
            # NOTE(review): in this branch the zip() below iterates the tensor's first
            # dimension; assumes that dimension indexes cameras - TODO confirm.
            rgb_arrs = torch.as_tensor(rgb_arr).to(dtype=torch.uint8, device=gs.device)
        for sensor, rgb_arr in zip(sensors, rgb_arrs):
            sensor._shared_metadata.image_cache[sensor._idx][:] = rgb_arr
            # Mark fresh so read() does not trigger another render this timestep.
            sensor._stale = False
        self._shared_metadata.last_render_timestep = self._manager._sim.scene.t

    def _apply_camera_transform(self, camera_T: torch.Tensor):
        """Update batch renderer camera from a world transform."""
        # Note: BatchRenderer will pick up the updated transform on next render
        self._camera_obj.transform = camera_T
        self._camera_obj._pos = T_to_trans(camera_T)

    def _add_light_to_batch_renderer(self, light_config):
        """Add a light described by ``light_config`` to the shared batch renderer.

        Recognized keys: "pos", "dir", "color", "intensity", "directional",
        "castshadow", "cutoff", "attenuation"; missing keys fall back to defaults.
        """
        # Default values for batch renderer
        pos = light_config.get("pos", (0.0, 0.0, 5.0))
        dir = light_config.get("dir", (0.0, 0.0, -1.0))
        color = light_config.get("color", (1.0, 1.0, 1.0))
        intensity = light_config.get("intensity", 1.0)
        directional = light_config.get("directional", True)
        castshadow = light_config.get("castshadow", True)
        cutoff = light_config.get("cutoff", 45.0)
        attenuation = light_config.get("attenuation", (1.0, 0.0, 0.0))
        self._shared_metadata.renderer.add_light(
            pos=pos,
            dir=dir,
            color=color,
            intensity=intensity,
            directional=directional,
            castshadow=castshadow,
            cutoff=cutoff,
            attenuation=attenuation,
        )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/sensors/camera.py",
"license": "Apache License 2.0",
"lines": 693,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/options/sensors/camera.py | """
Camera sensor options for Rasterizer, Raytracer, and Batch Renderer backends.
"""
from typing import Any, Optional
import numpy as np
from pydantic import ConfigDict
import genesis as gs
from .options import RigidSensorOptionsMixin, SensorOptions, Vec3FType
class BaseCameraOptions(RigidSensorOptionsMixin, SensorOptions):
    """
    Common configuration shared by every camera sensor backend.

    Parameters
    ----------
    res : tuple[int, int]
        Image resolution as (width, height). Default is (512, 512).
    pos : array-like[float, float, float]
        Camera position. Interpreted relative to the attached link frame when the
        sensor is attached, otherwise relative to the world origin.
        Default is (3.5, 0.0, 1.5).
    lookat : array-like[float, float, float]
        World-frame point the camera is aimed at. Default is (0, 0, 0).
    up : array-like[float, float, float]
        Up direction fixing the camera roll. Default is (0, 0, 1).
    fov : float
        Vertical field of view in degrees, strictly between 0 and 180. Default is 60.0.
    lights : list[dict], optional
        Backend-specific light descriptions added for this camera backend.
        Default is an empty list.
    offset_T : np.ndarray, optional
        Optional 4x4 pose of the camera relative to the attached link; takes priority
        over pos_offset and euler_offset when given. Default is None.
    entity_idx : int
        The global entity index of the RigidEntity to which this sensor is attached. -1 or None for static sensors.
    link_idx_local : int, optional
        The local index of the RigidLink of the RigidEntity to which this sensor is attached.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    res: tuple[int, int] = (512, 512)
    pos: Vec3FType = (3.5, 0.0, 1.5)
    lookat: Vec3FType = (0.0, 0.0, 0.0)
    up: Vec3FType = (0.0, 0.0, 1.0)
    fov: float = 60.0
    lights: list[dict] = []
    offset_T: Optional[np.ndarray] = None

    def model_post_init(self, _):
        # Resolution: a 2-element sequence of strictly positive dimensions.
        if not (isinstance(self.res, (tuple, list)) and len(self.res) == 2):
            gs.raise_exception(f"res must be a tuple of (width, height), got: {self.res}")
        if min(self.res) <= 0:
            gs.raise_exception(f"res must have positive dimensions, got: {self.res}")
        # Field of view must be a proper perspective angle.
        if self.fov <= 0 or self.fov >= 180:
            gs.raise_exception(f"fov must be between 0 and 180 degrees, got: {self.fov}")
        # Lights are backend-specific, so only their container shape is checked here.
        if not isinstance(self.lights, list):
            gs.raise_exception(f"lights must be a list, got: {type(self.lights)}")
        for i, light in enumerate(self.lights):
            if not isinstance(light, dict):
                gs.raise_exception(f"lights[{i}] must be a dict, got: {type(light)}")
        # A link-relative pose override must be a full homogeneous transform.
        if self.offset_T is not None and self.offset_T.shape != (4, 4):
            gs.raise_exception(f"offset_T must be a 4x4 array, got shape: {self.offset_T.shape}")
class RasterizerCameraOptions(BaseCameraOptions):
    """
    Configuration for the OpenGL rasterizer camera sensor.

    Parameters
    ----------
    near : float
        Near clipping plane distance; must be positive. Default is 0.01.
    far : float
        Far clipping plane distance; must exceed ``near``. Default is 100.0.
    """

    near: float = 0.01
    far: float = 100.0
    # Images are produced lazily on read(), so the per-step measured cache is skipped.
    update_ground_truth_only: bool = True

    def model_post_init(self, _):
        super().model_post_init(_)
        # Clipping planes: near must be positive and strictly closer than far.
        if self.near <= 0:
            gs.raise_exception(f"near must be positive, got: {self.near}")
        if self.far <= self.near:
            gs.raise_exception(f"far must be greater than near, got near={self.near}, far={self.far}")
class RaytracerCameraOptions(BaseCameraOptions):
    """
    Configuration for the LuisaRender path-tracing camera sensor.

    Parameters
    ----------
    model : str
        Camera model, either "pinhole" or "thinlens". Default is "pinhole".
    spp : int
        Samples per pixel used by the path tracer; must be positive. Default is 256.
    denoise : bool
        Whether the rendered image is denoised. Default is False.
    aperture : float
        Aperture of the thinlens model, controlling depth of field. Default is 2.8.
    focal_len : float
        Focal length in meters of the thinlens model. Default is 0.05.
    focus_dist : float
        Focus distance in meters of the thinlens model. Default is 3.0.
    env_surface : gs.surfaces.Surface | None
        Surface used for the environment skybox. Default is None.
    env_radius : float
        Radius of the environment sphere. Default is 15.0.
    env_pos : tuple[float, float, float]
        Position of the environment sphere. Default is (0, 0, 0).
    env_quat : tuple[float, float, float, float]
        Orientation of the environment sphere as a (w, x, y, z) quaternion.
        Default is (1, 0, 0, 0).
    """

    model: str = "pinhole"
    spp: int = 256
    denoise: bool = False
    aperture: float = 2.8
    focal_len: float = 0.05
    focus_dist: float = 3.0
    env_surface: Any = None  # gs.surfaces.Surface
    env_radius: float = 15.0
    env_pos: Vec3FType = (0.0, 0.0, 0.0)
    env_quat: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0)
    # Images are produced lazily on read(), so the per-step measured cache is skipped.
    update_ground_truth_only: bool = True

    def model_post_init(self, _):
        super().model_post_init(_)
        # Only the two supported camera models are accepted.
        if self.model not in ("pinhole", "thinlens"):
            gs.raise_exception(f"model must be 'pinhole' or 'thinlens', got: {self.model}")
        # The path tracer needs at least one sample per pixel.
        if self.spp <= 0:
            gs.raise_exception(f"spp must be positive, got: {self.spp}")
class BatchRendererCameraOptions(BaseCameraOptions):
    """
    Options for Batch Renderer camera sensor (Madrona GPU batch rendering).
    Note: All batch renderer cameras must have the same resolution.

    Parameters
    ----------
    model : str
        Camera model. Default is "pinhole".
    near : float
        Near clipping plane distance. Default is 0.01.
    far : float
        Far clipping plane distance. Default is 100.0.
    use_rasterizer : bool
        Whether to use rasterizer mode. Default is True.
    """

    model: str = "pinhole"
    near: float = 0.01
    far: float = 100.0
    use_rasterizer: bool = True
    # Camera images are updated lazily on read(), so skip per-step measured-cache updates.
    update_ground_truth_only: bool = True

    def model_post_init(self, _):
        super().model_post_init(_)
        # Clipping planes: near must be positive and strictly closer than far.
        if self.near <= 0:
            gs.raise_exception(f"near must be positive, got: {self.near}")
        if self.far <= self.near:
            gs.raise_exception(f"far must be greater than near, got near={self.near}, far={self.far}")
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/options/sensors/camera.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Genesis-Embodied-AI/Genesis:tests/test_sensor_camera.py | import sys
import weakref
import numpy as np
import pytest
import torch
import genesis as gs
from genesis.utils.misc import tensor_to_array
from genesis.utils.geom import trans_to_T
from .utils import assert_allclose, assert_equal, rgb_array_to_png_bytes
# Optional rendering backends: tests that need them are skipped when the module is missing.
try:
    import LuisaRenderPy

    ENABLE_RAYTRACER = True
except ImportError:
    ENABLE_RAYTRACER = False

try:
    import gs_madrona

    ENABLE_MADRONA = True
except ImportError:
    ENABLE_MADRONA = False
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 1])
def test_rasterizer_non_batched(n_envs, show_viewer):
    """Smoke-test rasterizer camera sensors: static, attached, and offset_T variants.

    Checks that all cameras produce non-degenerate images and that attached cameras
    track the motion of the link they are attached to.
    """
    scene = gs.Scene(
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(
        morph=gs.morphs.Plane(),
        surface=gs.surfaces.Rough(
            color=(0.4, 0.4, 0.4),
        ),
    )
    # Sphere starts above the ground so it falls and moves the attached cameras.
    sphere = scene.add_entity(
        morph=gs.morphs.Sphere(
            radius=0.5,
            pos=(0.0, 0.0, 2.0),
        ),
        surface=gs.surfaces.Smooth(
            color=(1.0, 0.5, 0.5),
        ),
    )
    scene.add_entity(
        morph=gs.morphs.Box(
            size=(0.3, 0.3, 0.3),
            pos=(1.0, 1.0, 1.0),
        ),
        surface=gs.surfaces.Rough(
            color=(0.5, 1.0, 0.5),
        ),
    )
    # Static camera with an explicit light.
    raster_cam0 = scene.add_sensor(
        gs.sensors.RasterizerCameraOptions(
            res=(512, 512),
            pos=(3.0, 0.0, 2.0),
            lookat=(0.0, 0.0, 1.0),
            up=(0.0, 0.0, 1.0),
            fov=60.0,
            near=0.1,
            far=100.0,
            lights=[
                {
                    "pos": (2.0, 2.0, 5.0),
                    "color": (1.0, 1.0, 1.0),
                    "intensity": 5.0,
                }
            ],
        )
    )
    # Second static camera with a different resolution/pose.
    raster_cam1 = scene.add_sensor(
        gs.sensors.RasterizerCameraOptions(
            res=(256, 256),
            pos=(0.0, 3.0, 2.0),
            lookat=(0.0, 0.0, 1.0),
            up=(0.0, 0.0, 1.0),
            fov=45.0,
        )
    )
    # Camera attached to the sphere via pos-as-offset.
    raster_cam_attached = scene.add_sensor(
        gs.sensors.RasterizerCameraOptions(
            res=(320, 240),
            pos=(0.0, 0.0, 1.0),  # Relative to link when attached
            lookat=(0.0, 0.0, 0.0),
            up=(0.0, 0.0, 1.0),
            fov=70.0,
            entity_idx=sphere.idx,  # Attach to sphere
            link_idx_local=0,
        )
    )
    # Camera attached to the sphere via an explicit 4x4 offset transform
    # equivalent to the pos offset above (translation of 1.0 along z).
    offset_T = np.eye(4)
    offset_T[2, 3] = 1.0
    raster_cam_offset_T = scene.add_sensor(
        gs.sensors.RasterizerCameraOptions(
            res=(320, 240),
            pos=(0.0, 0.0, 1.0),
            lookat=(0.0, 0.0, 0.0),
            up=(0.0, 0.0, 1.0),
            fov=70.0,
            entity_idx=sphere.idx,
            link_idx_local=0,
            offset_T=offset_T,
        )
    )
    scene.build(n_envs=n_envs)
    for _ in range(10):
        scene.step()
    data_cam0 = raster_cam0.read()
    data_cam1 = raster_cam1.read()
    data_attached = raster_cam_attached.read()
    data_offset_T = raster_cam_offset_T.read()
    # Every camera should produce a non-degenerate image: not blank, not saturated,
    # and with some variation across pixels.
    for _cam_name, data in [
        ("cam0", data_cam0),
        ("cam1", data_cam1),
        ("attached", data_attached),
        ("offset_T", data_offset_T),
    ]:
        rgb_np = tensor_to_array(data.rgb)
        mean = np.mean(rgb_np)
        assert 1.0 < mean < 254.0
        variance = np.var(rgb_np)
        assert variance > 1.0
    # Per-environment read returns an unbatched frame.
    data_env0 = raster_cam0.read(envs_idx=0)
    assert data_env0.rgb.shape == (512, 512, 3)

    def _get_camera_world_pos(sensor):
        # Fetch the camera node's world position from the render scene graph.
        renderer = sensor._shared_metadata.renderer
        context = sensor._shared_metadata.context
        node = renderer._camera_nodes[sensor._idx]
        pose = context._scene.get_pose(node)
        if pose.ndim == 3:
            pose = pose[0]
        return pose[:3, 3].copy()

    cam_pos_initial = _get_camera_world_pos(raster_cam_attached)
    cam_pos_initial_offset_T = _get_camera_world_pos(raster_cam_offset_T)
    for _ in range(10):  # Test over multiple steps
        scene.step()
    # Attached cameras must follow the moving sphere...
    raster_cam_attached.read()
    cam_pos_final = _get_camera_world_pos(raster_cam_attached)
    cam_move_dist = np.linalg.norm(cam_pos_final - cam_pos_initial)
    assert cam_move_dist > 1e-2
    raster_cam_offset_T.read()
    cam_pos_final_offset_T = _get_camera_world_pos(raster_cam_offset_T)
    cam_move_dist_offset_T = np.linalg.norm(cam_pos_final_offset_T - cam_pos_initial_offset_T)
    assert cam_move_dist_offset_T > 1e-2
    # ...and both attachment styles (pos offset vs offset_T) should move equally.
    assert_allclose(cam_move_dist_offset_T, cam_move_dist, atol=1e-2)
@pytest.mark.required
@pytest.mark.skipif(sys.platform == "darwin", reason="Not supported on this machine because it requires OpenGL 4.2.")
def test_rasterizer_batched(show_viewer, png_snapshot):
    """Batched rasterizer rendering: per-env frames differ and match PNG snapshots."""
    scene = gs.Scene(
        show_viewer=show_viewer,
    )
    # Add a plane
    scene.add_entity(
        morph=gs.morphs.Plane(),
    )
    # Add a sphere
    sphere = scene.add_entity(
        morph=gs.morphs.Sphere(pos=(0.0, 0.0, 1.0), radius=0.3),
        surface=gs.surfaces.Smooth(color=(1.0, 0.5, 0.5)),
    )
    camera = scene.add_sensor(
        gs.sensors.RasterizerCameraOptions(
            res=(64, 64),
            pos=(3.0, 0.0, 1.5),
            lookat=(0.0, 0.0, 0.5),
            fov=60.0,
            draw_debug=show_viewer,
        )
    )
    scene.build(n_envs=2)
    # Disable shadows systematically for Rasterizer because they are forcibly disabled on CPU backend anyway
    camera._shared_metadata.context.shadow = False
    # Place the sphere at different poses per environment so the frames differ.
    sphere.set_pos([[0.0, 0.0, 1.0], [0.2, 0.0, 0.5]])
    scene.step()
    data = camera.read()
    assert data.rgb.shape == (2, 64, 64, 3)
    assert data.rgb.dtype == torch.uint8
    assert (data.rgb[0] != data.rgb[1]).any(), "We should have different frames"
    for i in range(scene.n_envs):
        assert rgb_array_to_png_bytes(data.rgb[i]) == png_snapshot
@pytest.mark.required
@pytest.mark.skipif(sys.platform == "darwin", reason="Not supported on this machine because it requires OpenGL 4.2.")
def test_rasterizer_attached_batched(show_viewer, png_snapshot):
    """Batched rasterizer rendering with the camera attached to a moving entity."""
    scene = gs.Scene(show_viewer=show_viewer)
    # Add a plane
    scene.add_entity(
        morph=gs.morphs.Plane(),
    )
    # Add a sphere
    sphere = scene.add_entity(
        morph=gs.morphs.Sphere(
            radius=0.3,
            pos=(0.0, 0.0, 1.0),
        ),
        surface=gs.surfaces.Smooth(
            color=(1.0, 0.5, 0.5),
        ),
    )
    # Camera attached to the sphere; pos/lookat are interpreted in the link frame.
    options = gs.sensors.RasterizerCameraOptions(
        res=(64, 64),
        pos=(-0.4, 0.1, 2.0),
        lookat=(-0.6, 0.4, 1.0),
        fov=60.0,
        entity_idx=sphere.idx,
        draw_debug=show_viewer,
    )
    camera = scene.add_sensor(options)
    scene.build(n_envs=2)
    # Disable shadows systematically for Rasterizer because they are forcibly disabled on CPU backend anyway
    camera._shared_metadata.context.shadow = False
    # Distinct sphere poses per environment make the attached views diverge.
    sphere.set_pos([[0.0, 0.0, 1.0], [0.2, 0.0, 0.5]])
    scene.step()
    data = camera.read()
    assert data.rgb.shape == (2, 64, 64, 3)
    assert data.rgb.dtype == torch.uint8
    assert (data.rgb[0] != data.rgb[1]).any(), "We should have different frames"
    for i in range(scene.n_envs):
        assert rgb_array_to_png_bytes(data.rgb[i]) == png_snapshot
@pytest.mark.required
@pytest.mark.parametrize("backend", [gs.cuda])
@pytest.mark.parametrize("n_envs", [0, 2])
@pytest.mark.skipif(not ENABLE_MADRONA, reason="BatchRenderer is not supported because 'gs_madrona' is not available.")
def test_batch_renderer(n_envs, png_snapshot):
    """Batch renderer sensors (static and attached) render frames matching snapshots."""
    CAM_RES = (128, 256)
    scene = gs.Scene(
        show_viewer=False,
    )
    scene.add_entity(
        morph=gs.morphs.Plane(),
    )
    sphere = scene.add_entity(
        morph=gs.morphs.Sphere(
            radius=0.5,
            pos=(0.0, 0.0, 1.0),
        ),
        surface=gs.surfaces.Default(
            color=(1.0, 0.5, 0.5),
        ),
    )
    # Same pose/light configuration for both cameras; only attachment differs.
    camera_common_options = dict(
        res=CAM_RES,
        pos=(-2.0, 0.0, 1.5),
        lookat=(0.0, 0.0, 1.0),
        up=(0.0, 0.0, 1.5),
        fov=70.0,
        lights=[
            dict(
                pos=(2.0, 2.0, 5.0),
                color=(1.0, 0.5, 0.25),
                intensity=1.0,
                directional=False,
            )
        ],
        use_rasterizer=True,
    )
    camera_1 = scene.add_sensor(gs.sensors.BatchRendererCameraOptions(**camera_common_options))
    # Second camera is attached to the sphere with an explicit z-offset transform.
    camera_2 = scene.add_sensor(
        gs.sensors.BatchRendererCameraOptions(
            **camera_common_options,
            entity_idx=sphere.idx,
            link_idx_local=0,
            offset_T=trans_to_T(np.array([0.0, 0.0, 3.0])),
        )
    )
    scene.build(n_envs=n_envs)
    scene.step()
    for camera in (camera_1, camera_2):
        data = camera.read()
        if n_envs > 0:
            for i in range(n_envs):
                assert rgb_array_to_png_bytes(data.rgb[i]) == png_snapshot
        else:
            assert rgb_array_to_png_bytes(data.rgb) == png_snapshot
@pytest.mark.required
def test_destroy_unbuilt_scene_with_camera():
    """Destroying a never-built scene that has cameras must not crash."""
    scene = gs.Scene(show_viewer=False)
    scene.add_entity(morph=gs.morphs.Plane())
    scene.add_sensor(gs.sensors.RasterizerCameraOptions(res=(64, 64)))
    # Scene.__del__ also invokes destroy(); any crash in it would surface as logspam.
    scene.destroy()
@pytest.mark.required
def test_destroy_idempotent_with_camera():
    """destroy() must be safe to call twice on a built scene with cameras."""
    scene = gs.Scene(show_viewer=False)
    sensor = scene.add_sensor(gs.sensors.RasterizerCameraOptions(res=(64, 64)))
    scene.build()
    sensor.read()
    scene.destroy()
    # Scene.__del__ triggers destroy() again later, so a second explicit call
    # exercises the same path; a crash there would show up as logspam.
    scene.destroy()
@pytest.mark.required
def test_rasterizer_destroy():
    """Destroying the scene must release the shared offscreen rasterizer."""
    scene = gs.Scene(show_viewer=False)
    sensors = [
        scene.add_sensor(gs.sensors.RasterizerCameraOptions(res=res))
        for res in ((64, 64), (32, 32))
    ]
    scene.build()
    for sensor in sensors:
        sensor.read()
    # Hold only a weak reference so we can observe the renderer being collected.
    offscreen_renderer_ref = weakref.ref(sensors[0]._shared_metadata.renderer._renderer)
    scene.destroy()
    assert offscreen_renderer_ref() is None
@pytest.mark.required
@pytest.mark.parametrize("backend", [gs.cuda])
@pytest.mark.skipif(not ENABLE_MADRONA, reason="BatchRenderer is not supported because 'gs_madrona' is not available.")
def test_batch_renderer_destroy():
    """Destroying the scene must tear down the state shared by batch renderer sensors."""
    scene = gs.Scene(show_viewer=False)
    # FIXME: This test fails without any entities in the scene.
    scene.add_entity(morph=gs.morphs.Plane())
    sensors = [
        scene.add_sensor(gs.sensors.BatchRendererCameraOptions(res=(64, 64), use_rasterizer=True))
        for _ in range(2)
    ]
    scene.build()
    for sensor in sensors:
        sensor.read()
    metadata = sensors[0]._shared_metadata
    # Both sensors share one metadata object holding the renderer.
    assert sensors[1]._shared_metadata is metadata
    assert len(metadata.sensors) == 2
    assert metadata.renderer is not None
    scene.destroy()
    # destroy() must clear the shared sensor list and drop the renderer.
    assert metadata.sensors is None
    assert metadata.renderer is None
@pytest.mark.required
@pytest.mark.parametrize("backend", [gs.cuda])
@pytest.mark.skipif(not ENABLE_RAYTRACER, reason="RayTracer is not supported because 'LuisaRenderPy' is not available.")
def test_raytracer_destroy():
    """Destroying the scene must tear down the state shared by raytracer sensors."""
    scene = gs.Scene(
        renderer=gs.renderers.RayTracer(
            env_surface=gs.surfaces.Emission(
                emissive_texture=gs.textures.ColorTexture(color=(0.2, 0.3, 0.5)),
            ),
            env_radius=20.0,
        ),
        show_viewer=False,
    )
    sensors = [scene.add_sensor(gs.sensors.RaytracerCameraOptions(res=(64, 64))) for _ in range(2)]
    scene.build()
    for sensor in sensors:
        sensor.read()
    metadata = sensors[0]._shared_metadata
    # Both sensors share one metadata object holding the renderer.
    assert sensors[1]._shared_metadata is metadata
    assert len(metadata.sensors) == 2
    assert metadata.renderer is not None
    scene.destroy()
    # destroy() must clear the shared sensor list and drop the renderer.
    assert metadata.sensors is None
    assert metadata.renderer is None
@pytest.mark.required
@pytest.mark.parametrize("backend", [gs.cuda])
@pytest.mark.skipif(not ENABLE_RAYTRACER, reason="RayTracer is not supported because 'LuisaRenderPy' is not available.")
def test_raytracer_attached_without_offset_T():
    """Test that RaytracerCameraSensor works when attached without explicit offset_T.

    Also checks consistency with a scene-level camera (scene.add_camera) using the same
    pose and attachment, to make sure both camera APIs produce matching output.
    """
    CAM_RES = (128, 64)
    CAM_POS = (0.0, 0.0, 2.0)
    scene = gs.Scene(renderer=gs.renderers.RayTracer())
    scene.add_entity(morph=gs.morphs.Plane())
    sphere = scene.add_entity(morph=gs.morphs.Sphere())
    # Sensor camera attached WITHOUT offset_T - should use pos as offset
    camera_common_options = dict(
        res=CAM_RES,
        lookat=(0.0, 0.0, 0.0),
        up=(0.0, 1.0, 0.0),
        fov=30.0,
        spp=64,
        denoise=False,
    )
    sensor_camera = scene.add_sensor(
        gs.sensors.RaytracerCameraOptions(
            **camera_common_options,
            pos=CAM_POS,
            entity_idx=sphere.idx,
        )
    )
    # Scene-level camera with the same pose, attached with explicit offset_T
    scene_camera = scene.add_camera(
        **camera_common_options,
    )
    scene.build()
    # Attach scene-level camera with equivalent offset_T
    scene_camera.attach(sphere.base_link, offset_T=trans_to_T(np.array(CAM_POS)))
    scene.step()
    sensor_data = sensor_camera.read()
    assert sensor_data.rgb.shape == (CAM_RES[1], CAM_RES[0], 3)
    assert sensor_data.rgb.float().std() > 1.0, "Sensor camera RGB std too low, image may be blank"
    # Render the scene-level camera from the same pose for comparison.
    scene_camera.move_to_attach()
    scene_rgb, *_ = scene_camera.render(rgb=True, force_render=True)
    scene_rgb = tensor_to_array(scene_rgb, dtype=np.int32)
    sensor_rgb = tensor_to_array(sensor_data.rgb, dtype=np.int32)
    # Both cameras should produce the same image
    assert_equal(sensor_rgb, scene_rgb)
@pytest.mark.required
@pytest.mark.parametrize("backend", [gs.cuda])
@pytest.mark.parametrize("n_envs", [0, 1])
@pytest.mark.skipif(not ENABLE_RAYTRACER, reason="RayTracer is not supported because 'LuisaRenderPy' is not available.")
def test_raytracer(n_envs, png_snapshot):
    """Raytracer sensors (static and attached) render frames matching PNG snapshots."""
    # Relax pixel matching because RayTracer is not deterministic between different hardware (eg RTX6000 vs H100), even
    # without denoiser.
    png_snapshot.extension._blurred_kernel_size = 3
    scene = gs.Scene(
        renderer=gs.renderers.RayTracer(
            env_surface=gs.surfaces.Emission(
                emissive_texture=gs.textures.ColorTexture(
                    color=(0.2, 0.3, 0.5),
                ),
            ),
            env_radius=20.0,
        ),
        show_viewer=False,
    )
    scene.add_entity(
        morph=gs.morphs.Plane(),
    )
    sphere = scene.add_entity(
        morph=gs.morphs.Sphere(
            radius=0.5,
            pos=(0.0, 0.0, 1.0),
        ),
        surface=gs.surfaces.Default(
            color=(1.0, 0.5, 0.5),
        ),
    )
    # Same pose/sampling configuration for both cameras; only attachment differs.
    camera_common_options = dict(
        res=(128, 256),
        pos=(-2.0, 0.0, 1.5),
        lookat=(0.0, 0.0, 1.0),
        up=(0.0, 0.0, 1.5),
        fov=70.0,
        model="pinhole",
        spp=64,
        denoise=False,
        lights=[
            dict(
                pos=(2.0, 2.0, 5.0),
                color=(10.0, 10.0, 10.0),
                intensity=1.0,
            )
        ],
    )
    # Static camera with its own environment skybox.
    camera_1 = scene.add_sensor(
        gs.sensors.RaytracerCameraOptions(
            **camera_common_options,
            env_surface=gs.surfaces.Emission(
                emissive_texture=gs.textures.ColorTexture(
                    color=(0.2, 0.3, 0.5),
                ),
            ),
            env_radius=20.0,
        )
    )
    # Camera attached to the sphere with an explicit z-offset transform.
    camera_2 = scene.add_sensor(
        gs.sensors.RaytracerCameraOptions(
            **camera_common_options,
            entity_idx=sphere.idx,
            link_idx_local=0,
            offset_T=trans_to_T(np.array([0.0, 0.0, 3.0])),
        )
    )
    scene.build(n_envs=n_envs)
    scene.step()
    for camera in (camera_1, camera_2):
        data = camera.read()
        if n_envs > 0:
            for i in range(n_envs):
                assert rgb_array_to_png_bytes(data.rgb[i]) == png_snapshot
        else:
            assert rgb_array_to_png_bytes(data.rgb) == png_snapshot
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_sensor_camera.py",
"license": "Apache License 2.0",
"lines": 470,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:examples/rendering/follow_entity.py | import argparse
import genesis as gs
def main():
    """Drop a spinning box and record it with a camera that follows the entity."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--fix", action="store_true", default=False)
    options = parser.parse_args()

    gs.init()

    scene = gs.Scene(
        vis_options=gs.options.VisOptions(
            rendered_envs_idx=(0,),
        ),
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        show_viewer=False,
    )
    scene.add_entity(morph=gs.morphs.Plane())
    box = scene.add_entity(
        gs.morphs.Box(
            size=(0.1, 0.1, 0.1),
            pos=(0.0, -0.9, 1.0),
            euler=(15.0, 30.0, 60.0),
        )
    )
    camera = scene.add_camera(
        res=(640, 480),
        pos=(2.0, 0.0, 1.5),
        lookat=(0, 0, 0.7),
        fov=40,
        GUI=True,
    )
    # Track the box; optionally keep the camera orientation fixed (-f/--fix).
    camera.follow_entity(box, fix_orientation=options.fix)

    scene.build()
    # Launch the box with both linear and angular velocity so the camera has to follow.
    box.set_dofs_velocity([0.0, 5.0, 0.0, 0.0, 0.0, 1.0])
    for _ in range(100):
        scene.step()
        camera.render()
if __name__ == "__main__":
main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/rendering/follow_entity.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/tutorials/position_control_comparison.py | # This example compares the position control accuracy between 'control_dofs_position' and
# 'control_dofs_position_velocity' when tracking a dynamic trajectory.
# While both are equivalent in static, the former lacks the target velocity term of true PD controller in robotics,
# making it underperform compared to 'control_dofs_position_velocity'.
import argparse
import math
import matplotlib.pyplot as plt
import genesis as gs
def main():
    """Compare position-only vs position+velocity PD control on a sinusoid trajectory.

    Runs the same first-joint sinusoid twice on a Franka arm, once with
    ``control_dofs_position`` and once with ``control_dofs_position_velocity``,
    then plots the tracking accuracy of both controllers.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    parser.add_argument("-c", "--cpu", action="store_true", default=False)
    args = parser.parse_args()
    ########################## init ##########################
    gs.init(backend=gs.cpu if args.cpu else gs.gpu)
    ########################## create a scene ##########################
    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(0, -3.5, 2.5),
            camera_lookat=(0.0, 0.0, 0.5),
            camera_fov=30,
            max_FPS=None,
        ),
        sim_options=gs.options.SimOptions(
            dt=0.005,
        ),
        show_viewer=False,
        show_FPS=True,
    )
    ########################## entities ##########################
    plane = scene.add_entity(
        gs.morphs.Plane(),
    )
    franka = scene.add_entity(
        gs.morphs.MJCF(
            file="xml/franka_emika_panda/panda.xml",
        ),
    )
    ########################## build ##########################
    scene.build()
    joints_name = (
        "joint1",
        "joint2",
        "joint3",
        "joint4",
        "joint5",
        "joint6",
        "joint7",
        "finger_joint1",
        "finger_joint2",
    )
    # Map joint names to their first local DOF index for the control calls below.
    motors_dof_idx = [franka.get_joint(name).dofs_idx_local[0] for name in joints_name]
    ############ Optional: set control gains ############
    # set positional gains
    franka.set_dofs_kp(
        kp=[4500, 4500, 3500, 3500, 2000, 2000, 2000, 100, 100],
        dofs_idx_local=motors_dof_idx,
    )
    # set velocity gains
    franka.set_dofs_kv(
        kv=[450, 450, 350, 350, 200, 200, 200, 10, 10],
        dofs_idx_local=motors_dof_idx,
    )
    # set force range for safety
    franka.set_dofs_force_range(
        lower=[-87, -87, -87, -87, -12, -12, -12, -100, -100],
        upper=[87, 87, 87, 87, 12, 12, 12, 100, 100],
        dofs_idx_local=motors_dof_idx,
    )
    # Follow a sinusoid trajectory on the first joint; each run starts from a
    # hard reset of the joint configuration so both controllers see the same state.
    A = 0.5  # motion amplitude, rad
    f = 1.0  # motion frequency, Hz
    # Use control_dofs_position
    pos_simulation_result = []
    franka.set_dofs_position([A, 0, 0, 0, 0, 0, 0, 0, 0], motors_dof_idx)
    t0 = scene.t
    while (t := (scene.t - t0) * scene.dt) < 2.0:
        target_position = A * (1 + math.sin(2 * math.pi * f * t))
        current_position = float(franka.get_qpos()[0])
        # Record (time, measured, target) for plotting.
        pos_simulation_result.append([t, current_position, target_position])
        franka.control_dofs_position([target_position, 0, 0, 0, 0, 0, 0, 0, 0], motors_dof_idx)
        scene.step()
    # Use control_dofs_position_velocity
    pos_vel_simulation_result = []
    franka.set_dofs_position([A, 0, 0, 0, 0, 0, 0, 0, 0], motors_dof_idx)
    t0 = scene.t
    while (t := (scene.t - t0) * scene.dt) < 2.0:
        target_position = A * (1 + math.sin(2 * math.pi * f * t))
        # Feed the analytic derivative of the target as the velocity feedforward term.
        target_velocity = 2 * math.pi * f * A * math.cos(2 * math.pi * f * t)
        current_position = float(franka.get_qpos()[0])
        pos_vel_simulation_result.append([t, current_position, target_position])
        franka.control_dofs_position_velocity(
            [target_position, 0, 0, 0, 0, 0, 0, 0, 0],
            [target_velocity, 0, 0, 0, 0, 0, 0, 0, 0],
            motors_dof_idx,
        )
        scene.step()
    # Plot results
    # Transpose the per-step rows into (times, measured, targets) columns.
    pos_simulation_result = tuple(zip(*pos_simulation_result))
    pos_vel_simulation_result = tuple(zip(*pos_vel_simulation_result))
    plt.plot(pos_simulation_result[0], pos_simulation_result[1], label="control_dofs_position")
    plt.plot(pos_vel_simulation_result[0], pos_vel_simulation_result[1], label="control_dofs_position_velocity")
    plt.plot(pos_vel_simulation_result[0], pos_vel_simulation_result[2], color="black", label="Target position")
    plt.xlabel("Time (s)")
    plt.ylabel("Joint position (rad)")
    plt.title("Comparison of joint position tracking with two different controllers")
    plt.grid()
    plt.legend()
    plt.show()
if __name__ == "__main__":
main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/tutorials/position_control_comparison.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:genesis/engine/materials/FEM/cloth.py | """
Cloth material for IPC-based cloth simulation.
This material is used with FEMEntity and IPCCoupler for shell/membrane simulation.
"""
from .base import Base
class Cloth(Base):
    """
    Thin-shell cloth material simulated through the IPC backend.

    Intended for cloth, fabric, and other thin flexible materials. When attached
    to a FEMEntity, this material signals to IPCCoupler that the entity should be
    simulated as a 2D shell (NeoHookeanShell) rather than a 3D volumetric body.

    Parameters
    ----------
    E : float, optional
        Young's modulus (Pa), controlling stiffness. Default is 1e4 (10 kPa).
    nu : float, optional
        Poisson's ratio. Default is 0.49 (nearly incompressible).
    rho : float, optional
        Material density (kg/m³). Default is 200 (typical fabric).
    thickness : float, optional
        Shell thickness (m). Default is 0.001 (1mm).
    bending_stiffness : float, optional
        Bending resistance coefficient; None disables bending resistance. Default is None.
    model : str, optional
        FEM material model name (unused for cloth, kept for compatibility).
        Default is "stable_neohookean".
    friction_mu : float, optional
        Friction coefficient. Default is 0.1.
    contact_resistance : float | None, optional
        IPC contact resistance/stiffness override; None uses the IPC coupler
        global default. Default is None.

    Notes
    -----
    - Only works with IPCCoupler enabled, and requires a GPU backend.
    - Only accepts surface mesh morphs (Mesh, etc.).

    Examples
    --------
    >>> cloth = scene.add_entity(
    ...     morph=gs.morphs.Mesh(file="cloth.obj"),
    ...     material=gs.materials.FEM.Cloth(
    ...         E=10e3, nu=0.49, rho=200,
    ...         thickness=0.001, bending_stiffness=10.0
    ...     ),
    ... )
    """

    def __init__(
        self,
        E=1e4,
        nu=0.49,
        rho=200.0,
        thickness=0.001,
        bending_stiffness=None,
        model="stable_neohookean",
        friction_mu=0.1,
        contact_resistance=None,
    ):
        # Elastic and contact parameters are handled by the FEM base material.
        super().__init__(E=E, nu=nu, rho=rho, friction_mu=friction_mu, contact_resistance=contact_resistance)

        # Shell-specific parameters consumed by the IPC coupler.
        self._thickness = thickness
        self._bending_stiffness = bending_stiffness
        self._model = model

    @property
    def thickness(self):
        """Shell thickness (m)."""
        return self._thickness

    @property
    def bending_stiffness(self):
        """Bending stiffness coefficient (None when bending resistance is disabled)."""
        return self._bending_stiffness

    @property
    def model(self):
        """FEM material model name (unused for cloth)."""
        return self._model

    def __repr__(self):
        return f"<gs.materials.FEM.Cloth(E={self.E}, nu={self.nu}, rho={self.rho}, thickness={self.thickness})>"
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/materials/FEM/cloth.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Genesis-Embodied-AI/Genesis:genesis/options/recorders.py | from dataclasses import dataclass
from pydantic import Field
import genesis as gs
from .options import Options
# PyAV is an optional dependency; VideoFile.model_post_init checks this flag
# before using `av`, so import failure is deliberately tolerated here.
IS_PYAV_AVAILABLE = False
try:
    import av
    IS_PYAV_AVAILABLE = True
except ImportError:
    pass
class RecorderOptions(Options):
    """
    Options controlling how simulation data is periodically sampled from a data
    source (e.g. a sensor) and recorded.

    Parameters
    ----------
    hz: float, optional
        Sampling frequency in Hz (samples per second). When None, a sample is
        taken on every simulation step.
    buffer_size: int, optional
        Applicable when run_in_thread is True. Size of the data queue buffer;
        0 means unbounded. Defaults to 0.
    buffer_full_wait_time: float, optional
        Applicable when run_in_thread is True. Seconds to wait for space to free
        up when the buffer is full. Defaults to 0.1.
    """

    hz: float | None = None
    buffer_size: int = 0
    buffer_full_wait_time: float = 0.1

    def model_post_init(self, context):
        """Sanity-check option values as soon as the model is constructed."""
        # hz=None means "sample every step" and is always valid.
        if self.hz is not None and self.hz < gs.EPS:
            gs.raise_exception(f"[{type(self).__name__}] recording hz should be greater than 0.")
        if self.buffer_size < 0:
            gs.raise_exception(f"[{type(self).__name__}] buffer size should be 0 (infinite size) or greater.")
        if self.buffer_full_wait_time < gs.EPS:
            gs.raise_exception(f"[{type(self).__name__}] buffer full wait time should be greater than 0.")
class BaseFileWriterOptions(RecorderOptions):
    """
    Common options for recorders that write their data to a file.

    Parameters
    ----------
    filename: str
        The path of the output file.
    save_on_reset: bool, optional
        Whether to save the data on reset; when True, a counter is appended to
        the filename and incremented on each reset. Defaults to False.
    """

    filename: str
    save_on_reset: bool = False
class VideoFile(BaseFileWriterOptions):
    """
    Stream video frames to file using PyAV.

    The PyAV writer streams data directly to the file instead of buffering it in memory. Incoming data should either be
    grayscale [H, W] or color [H, W, RGB] where values are uint8 (0, 255).

    Parameters
    ----------
    filename : str
        The path of the output video file ending in ".mp4".
    name : str
        The name of the video. Note that it may be different from filename. If empty, then filename will be used as a
        fallback. Default to "".
    fps : int, optional
        Frames per second for the video. Defaults to the data collection Hz ("real-time").
    codec : str, optional
        The codec to use for the video file. Defaults to "libx264".
    bitrate: float
        The bitrate of the video. The higher the better the quality of the video.
        Defaults to 1.0.
    codec_options: dict[str, str]
        Additional low-level codec options that will be pass to ffmpeg. Empty by default.
    save_on_reset: bool, optional
        Whether to save the data on reset. If True, a counter will be added to the filename and incremented on each
        reset. Defaults to False.
    """

    fps: int | None = None
    name: str = ""
    codec: str = "libx264"
    bitrate: float = 1.0
    codec_options: dict[str, str] = Field(default_factory=dict)

    def model_post_init(self, context):
        # Fail fast before touching `av` below: the module is only bound when the
        # optional import at the top of the file succeeded.
        if not IS_PYAV_AVAILABLE:
            gs.raise_exception("PyAV is not installed. Please install it with `pip install av`.")
        super().model_post_init(context)
        if self.codec not in av.codecs_available:
            # BUGFIX: was `self._options.codec`, which raised AttributeError
            # (no `_options` attribute on this model) instead of reporting the bad codec.
            gs.raise_exception(f"[{type(self).__name__}] Codec '{self.codec}' not supported.")
        # Case-insensitive extension check, consistent with CSVFile/NPZFile.
        if not self.filename.lower().endswith(".mp4"):
            gs.raise_exception(f"[{type(self).__name__}] Video filename must have '.mp4' extension.")
class CSVFile(BaseFileWriterOptions):
    """
    Recorder that writes samples to a .csv file via `csv.writer`.

    Accepts any array-like or dict[str, array-like] output, e.g. from sensors.
    Values must be N-dimensional tensors, arrays, or scalars (np.generic, int,
    float, str). A dict data/header may not be further nested; values are
    processed in order.

    Parameters
    ----------
    filename : str
        The name of the CSV file to save the data.
    header : tuple[str] | None, optional
        Column headers; one header per scalar value after flattening the data.
        For dict data, the header length must match the total flattened length.
    save_every_write: bool, optional
        Whether to flush the data to disk as soon as new data is received.
        Defaults to False.
    save_on_reset: bool, optional
        Whether to save the data on scene reset; when True, a counter is
        appended to the filename and incremented on each reset. Defaults to False.
    """

    header: tuple[str, ...] | None = None
    save_every_write: bool = False

    def model_post_init(self, context):
        super().model_post_init(context)
        # Extension check is case-insensitive.
        if not self.filename.lower().endswith(".csv"):
            gs.raise_exception(f"[{type(self).__name__}] CSV output must be a .csv file")
class NPZFile(BaseFileWriterOptions):
    """
    Recorder that buffers all samples in memory and dumps them to a .npz archive
    at cleanup.

    Accepts any numeric, array-like, or dict[str, array-like] data, e.g. from sensors.

    Parameters
    ----------
    filename : str
        The name of the .npz file to save the data.
    save_on_reset: bool, optional
        Whether to save the data on reset; when True, a counter is appended to
        the filename and incremented on each reset. Defaults to False.
    """

    def model_post_init(self, context):
        super().model_post_init(context)
        # Extension check is case-insensitive.
        if not self.filename.lower().endswith(".npz"):
            gs.raise_exception(f"[{type(self).__name__}] NPZ output must be an .npz file")
class BasePlotterOptions(RecorderOptions):
    """
    Common options shared by the plot-based recorders.

    Parameters
    ----------
    title: str
        The title of the plot.
    window_size: tuple[int, int]
        The size of the window in pixels.
    save_to_filename: str | None
        If provided, the animation is saved to a file with this filename.
    show_window: bool | None
        Whether to show the window; when None, it is enabled only if a display
        is connected.
    """

    title: str = ""
    window_size: tuple[int, int] = (800, 600)
    save_to_filename: str | None = None
    show_window: bool | None = None
@dataclass
class LinePlotterMixinOptions:
    """
    Shared options for live line-plot visualization of scalar data.

    The recorded data_func should return a single scalar, a tuple of scalars,
    or a dict mapping string keys to scalars or tuples of scalars.

    Parameters
    ----------
    labels: tuple[str] | dict[str, tuple[str]] | None
        Labels for the plotted series; their count must match the data layout.
        When a dict is provided, the data must be a dict as well: keys become
        subplot titles and the tuple values become the labels in each subplot.
    x_label: str, optional
        Label for the horizontal axis.
    y_label: str, optional
        Label for the vertical axis.
    history_length: int
        The maximum number of previous data points to store.
    """

    labels: tuple[str, ...] | dict[str, tuple[str, ...]] | None = None
    x_label: str = ""
    y_label: str = ""
    history_length: int = 100
class PyQtLinePlot(BasePlotterOptions, LinePlotterMixinOptions):
    """
    Live line plot of scalar data rendered with PyQtGraph.

    The recorded data_func should return a single scalar, a tuple of scalars,
    or a dict mapping string keys to scalars or tuples of scalars.

    Parameters
    ----------
    title: str
        The title of the plot.
    window_size: tuple[int, int]
        The size of the window in pixels.
    save_to_filename: str | None
        If provided, the animation is saved to a file with this filename.
    show_window: bool | None
        Whether to show the window; when None, it is enabled only if a display
        is connected.
    labels: tuple[str] | dict[str, tuple[str]] | None
        Labels for the plotted series; their count must match the data layout.
        When a dict is provided, keys become subplot titles and the tuple values
        become the labels in each subplot.
    x_label: str, optional
        Label for the horizontal axis.
    y_label: str, optional
        Label for the vertical axis.
    history_length: int
        The maximum number of previous data points to store.
    """
class MPLLinePlot(BasePlotterOptions, LinePlotterMixinOptions):
    """
    Live line plot of scalar data rendered with matplotlib.

    The recorded data_func should return a single scalar, a tuple of scalars,
    or a dict mapping string keys to scalars or tuples of scalars.

    Parameters
    ----------
    title: str
        The title of the plot.
    window_size: tuple[int, int]
        The size of the window in pixels.
    save_to_filename: str | None
        If provided, the animation is saved to a file with this filename.
    show_window: bool | None
        Whether to show the window; when None, it is enabled only if a display
        is connected.
    labels: tuple[str] | dict[str, tuple[str]] | None
        Labels for the plotted series; their count must match the data layout.
        When a dict is provided, keys become subplot titles and the tuple values
        become the labels in each subplot.
    x_label: str, optional
        Label for the horizontal axis.
    y_label: str, optional
        Label for the vertical axis.
    history_length: int
        The maximum number of previous data points to store.
    """
class MPLImagePlot(BasePlotterOptions):
    """
    Live image visualization rendered with matplotlib.

    The image data should be an array-like object with shape (H, W), (H, W, 1),
    (H, W, 3), or (H, W, 4).

    Parameters
    ----------
    title: str
        The title of the plot.
    window_size: tuple[int, int]
        The size of the window in pixels.
    save_to_filename: str | None
        If provided, the animation is saved to a file with this filename.
    show_window: bool | None
        Whether to show the window; when None, it is enabled only if a display
        is connected.
    """
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/options/recorders.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Genesis-Embodied-AI/Genesis:genesis/options/sensors/options.py | from typing import TYPE_CHECKING
import numpy as np
from pydantic import Field, conlist
import genesis as gs
from ..options import Options
from .raycaster import DepthCameraPattern, RaycastPattern
# Pydantic constrained-list aliases shared by the sensor option models below.
Vec3FType = conlist(float, min_length=3, max_length=3)
Vec4FType = conlist(float, min_length=4, max_length=4)
# Non-empty list of 3-vectors (e.g. a set of probe positions).
Vec3FArrayType = conlist(Vec3FType, min_length=1)
# Non-empty list of floats.
FArrayType = conlist(float, min_length=1)
# Scalar shorthand for a per-axis value, or an explicit 3-vector.
MaybeVec3FType = float | Vec3FType
Matrix3x3Type = conlist(conlist(float, min_length=3, max_length=3), min_length=3, max_length=3)
# Full 3x3 matrix, or a scalar/3-vector shorthand for one.
MaybeMatrix3x3Type = Matrix3x3Type | MaybeVec3FType
# Type-checking-only import; presumably avoids a circular import at runtime.
if TYPE_CHECKING:
    from genesis.engine.scene import Scene
class SensorOptions(Options):
    """
    Base class for all sensor options.

    Each sensor should have their own options class that inherits from this class.
    The options class should be registered with the SensorManager using the @register_sensor decorator.

    Parameters
    ----------
    delay : float
        The read delay time in seconds. Data read will be outdated by this amount. Defaults to 0.0 (no delay).
    update_ground_truth_only : bool
        If True, the sensor will only update the ground truth data, and not the measured data. Defaults to False.
    draw_debug : bool
        If True and visualizer is active, the sensor will draw debug shapes in the scene. Defaults to False.
    """

    delay: float = 0.0
    update_ground_truth_only: bool = False
    draw_debug: bool = False

    def validate(self, scene: "Scene"):
        """
        Validate the sensor options values before the sensor is added to the scene.
        Use pydantic's model_post_init() for validation that does not require scene context.
        """
        # Delay expressed as a (possibly fractional) number of simulation steps.
        delay_steps = self.delay / scene._sim.dt
        if not np.isclose(delay_steps, round(delay_steps), atol=gs.EPS):
            # BUGFIX: the message previously reported `1 / round(delay_hz)`, which is
            # dimensionally wrong (the quantity is a step count, not a frequency) and
            # raised ZeroDivisionError whenever the delay rounded to 0 steps. The
            # effective delay is the rounded step count times the step size.
            gs.logger.warning(
                f"{type(self).__name__}: Read delay should be a multiple of the simulation time step. Got {self.delay}"
                f" and {scene._sim.dt}. Actual read delay will be {round(delay_steps) * scene._sim.dt}."
            )
class RigidSensorOptionsMixin:
    """
    Mixin providing options for sensors mounted on a RigidEntity link.

    Parameters
    ----------
    entity_idx : int
        Global index of the RigidEntity the sensor is attached to; -1 or None
        denotes a static (unattached) sensor.
    link_idx_local : int, optional
        Local index of the RigidLink within that entity.
    pos_offset : array-like[float, float, float], optional
        Positional offset of the sensor relative to the RigidLink.
    euler_offset : array-like[float, float, float], optional
        Rotational offset of the sensor relative to the RigidLink, in degrees.
    """

    entity_idx: int | None = -1
    link_idx_local: int = 0
    pos_offset: Vec3FType = (0.0, 0.0, 0.0)
    euler_offset: Vec3FType = (0.0, 0.0, 0.0)

    def validate(self, scene: "Scene"):
        # Local import; presumably deferred to avoid a circular dependency.
        from genesis.engine.entities import RigidEntity

        super().validate(scene)

        # entity_idx of None marks a static sensor: nothing further to check.
        if self.entity_idx is None:
            return
        if self.entity_idx >= len(scene.entities):
            gs.raise_exception(f"Invalid RigidEntity index {self.entity_idx}.")
        # Negative indices (e.g. -1) also denote a static sensor.
        if self.entity_idx < 0:
            return
        entity = scene.entities[self.entity_idx]
        if not isinstance(entity, RigidEntity):
            gs.raise_exception(f"Entity at index {self.entity_idx} is not a RigidEntity.")
        if self.link_idx_local < 0 or self.link_idx_local >= entity.n_links:
            gs.raise_exception(f"Invalid RigidLink index {self.link_idx_local} for entity {self.entity_idx}.")
class NoisySensorOptionsMixin:
    """
    Mixin providing analog-noise options for sensors attached to a RigidEntity.

    Parameters
    ----------
    resolution : float | array-like[float, ...], optional
        Measurement resolution (smallest increment of change in the reading).
        Default is 0.0, meaning no quantization is applied.
    bias : float | array-like[float, ...], optional
        Constant additive bias of the sensor.
    noise : float | array-like[float, ...], optional
        Standard deviation of the additive white noise.
    random_walk : float | array-like[float, ...], optional
        Standard deviation of the random walk, acting as accumulated bias drift.
    jitter : float, optional
        Jitter in seconds, modeled as a random additive delay sampled from a
        normal distribution. Jitter cannot exceed `delay`, and `interpolate`
        must be True whenever `jitter` is greater than 0.
    interpolate : bool, optional
        If True, sensor data is interpolated between data points for
        delay + jitter; otherwise the closest time step is used. Default False.
    """

    resolution: float | FArrayType = 0.0
    bias: float | FArrayType = 0.0
    noise: float | FArrayType = 0.0
    random_walk: float | FArrayType = 0.0
    jitter: float = 0.0
    interpolate: bool = False

    def model_post_init(self, _):
        # Jittered reads land between stored samples, so interpolation is required.
        if not self.interpolate and self.jitter > 0:
            gs.raise_exception(f"{type(self).__name__}: `interpolate` should be True when `jitter` is greater than 0.")
        # `delay` is provided by SensorOptions in the cooperating class hierarchy.
        if self.jitter > self.delay:
            gs.raise_exception(f"{type(self).__name__}: Jitter must be less than or equal to read delay.")
class Contact(RigidSensorOptionsMixin, SensorOptions):
    """
    Boolean contact sensor: reports whether the associated RigidLink is in contact.

    Parameters
    ----------
    debug_sphere_radius : float, optional
        Radius of the debug sphere. Defaults to 0.05.
    debug_color : array-like[float, float, float, float], optional
        RGBA color of the debug sphere. Defaults to (1.0, 0.0, 1.0, 0.5).
    """

    debug_sphere_radius: float = 0.05
    debug_color: Vec4FType = (1.0, 0.0, 1.0, 0.5)
class ContactForce(RigidSensorOptionsMixin, NoisySensorOptionsMixin, SensorOptions):
    """
    Sensor that returns the total contact force being applied to the associated RigidLink in its local frame.

    Parameters
    ----------
    min_force : float | array-like[float, float, float], optional
        The minimum detectable absolute force per each axis. Values below this will be treated as 0. Default is 0.
    max_force : float | array-like[float, float, float], optional
        The maximum output absolute force per each axis. Values above this will be clipped. Default is infinity.
    debug_color : array-like[float, float, float, float], optional
        The rgba color of the debug arrow. Defaults to (1.0, 0.0, 1.0, 0.5).
    debug_scale : float, optional
        The scale factor for the debug force arrow. Defaults to 0.01.
    """

    min_force: MaybeVec3FType = 0.0
    max_force: MaybeVec3FType = np.inf
    debug_color: Vec4FType = (1.0, 0.0, 1.0, 0.5)
    debug_scale: float = 0.01

    def model_post_init(self, _):
        # BUGFIX: defining model_post_init here shadowed
        # NoisySensorOptionsMixin.model_post_init, silently skipping the
        # jitter/interpolate validation. Run it explicitly first.
        super().model_post_init(_)
        if not (isinstance(self.min_force, float) or len(self.min_force) == 3):
            gs.raise_exception(f"min_force must be a float or array-like of 3 floats, got: {self.min_force}")
        if not (isinstance(self.max_force, float) or len(self.max_force) == 3):
            gs.raise_exception(f"max_force must be a float or array-like of 3 floats, got: {self.max_force}")
        if np.any(np.array(self.min_force) < 0):
            gs.raise_exception(f"min_force must be non-negative, got: {self.min_force}")
        if np.any(np.array(self.max_force) <= np.array(self.min_force)):
            gs.raise_exception(f"min_force should be less than max_force, got: {self.min_force} and {self.max_force}")
        if self.resolution is not None and not (isinstance(self.resolution, float) or len(self.resolution) == 3):
            gs.raise_exception(f"resolution must be a float or array-like of 3 floats, got: {self.resolution}")
class KinematicContactProbe(RigidSensorOptionsMixin, NoisySensorOptionsMixin, SensorOptions):
    """
    A tactile sensor which queries contact depth relative to given probe normals and within the radius of the probe
    positions along a rigid entity link.

    The returned force is an spring-like (kinematic) estimate based on contact depth, computed as
    F = stiffness * penetration * probe_normal, as opposed to the actual impulse force on the link from the contact
    obtained from the physics solver.

    Note
    ----
    If this sensor is attached to a fixed entity, it will not detect contacts with other fixed entities.

    Parameters
    ----------
    probe_local_pos : array-like[array-like[float, float, float]]
        Probe positions in link-local frame. One (x, y, z) per probe.
    probe_local_normal : array-like[array-like[float, float, float]]
        Probe sensing directions in link-local frame. Penetration is measured along this axis.
    radius : float | array-like[float]
        Probe sensing radius in meters. Objects within this distance are detected. Default: 0.005 (5mm)
    stiffness : float
        User-defined coefficient for force estimation. Default: 1000.0.
    debug_sphere_color : array-like[float, float, float, float], optional
        RGBA color of the probe debug spheres. Defaults to (1.0, 0.5, 0.0, 0.4).
    debug_contact_color : array-like[float, float, float, float], optional
        RGBA color of the contact debug markers. Defaults to (1.0, 0.2, 0.0, 0.8).
    """

    probe_local_pos: Vec3FArrayType = [(0.0, 0.0, 0.0)]
    probe_local_normal: Vec3FArrayType = [(0.0, 0.0, 1.0)]
    radius: float | FArrayType = 0.005
    stiffness: float = 1000.0
    debug_sphere_color: Vec4FType = (1.0, 0.5, 0.0, 0.4)
    debug_contact_color: Vec4FType = (1.0, 0.2, 0.0, 0.8)

    def model_post_init(self, _):
        # BUGFIX: defining model_post_init here shadowed
        # NoisySensorOptionsMixin.model_post_init, silently skipping the
        # jitter/interpolate validation. Run it explicitly first.
        super().model_post_init(_)
        if np.any(np.array(self.radius) < 0):
            gs.raise_exception(f"radius must be non-negative, got: {self.radius}")
        if self.stiffness < 0:
            gs.raise_exception(f"stiffness must be non-negative, got: {self.stiffness}")
        probe_local_pos = self._validate_probe_arrays(self.probe_local_pos)
        probe_local_normal = self._validate_probe_arrays(self.probe_local_normal)
        norms = np.linalg.norm(probe_local_normal, axis=1)
        if np.any(norms < gs.EPS):
            gs.raise_exception(f"probe_local_normal must be non-zero vectors, got: {probe_local_normal}")
        if len(probe_local_pos) != len(probe_local_normal):
            gs.raise_exception(
                "probe_local_pos and probe_local_normal must have the same length. "
                f"Got {len(probe_local_pos)} positions and {len(probe_local_normal)} normals."
            )
        if not isinstance(self.radius, float) and len(self.radius) != len(probe_local_pos):
            gs.raise_exception(
                "If radius is array-like, it must have the same length as probe_local_pos. "
                f"Got {len(self.radius)} radii and {len(probe_local_pos)} probe positions."
            )

    def _validate_probe_arrays(self, values: Vec3FArrayType) -> np.ndarray:
        """Coerce a probe position/normal list to an (N, 3) float array, raising on bad shape."""
        array = np.array(values, dtype=float)
        if array.ndim != 2 or array.shape[1] != 3:
            gs.raise_exception(f"Probe locals array must have shape (N, 3), got: {array.shape}")
        if array.shape[0] == 0:
            gs.raise_exception("Probe locals array must have at least one entry")
        return array
class IMU(RigidSensorOptionsMixin, NoisySensorOptionsMixin, SensorOptions):
    """
    IMU sensor returns the linear acceleration (accelerometer) and angular velocity (gyroscope)
    of the associated entity link.

    Parameters
    ----------
    acc_resolution : float, optional
        The measurement resolution of the accelerometer (smallest increment of change in the sensor reading).
        Default is 0.0, which means no quantization is applied.
    acc_cross_axis_coupling : float | array-like[float, float, float] | array-like with shape (3,3)
        Accelerometer axes alignment as a 3x3 rotation matrix, where diagonal elements represent alignment (0.0 to 1.0)
        for each axis, and off-diagonal elements account for cross-axis misalignment effects.
        - If a scalar is provided (float), all off-diagonal elements are set to the scalar value.
        - If a 3-element vector is provided (array-like[float, float, float]), off-diagonal elements are set.
        - If a full 3x3 matrix is provided, it is used directly.
    acc_bias : array-like[float, float, float]
        The constant additive bias for each axis of the accelerometer.
    acc_noise : array-like[float, float, float]
        The standard deviation of the white noise for each axis of the accelerometer.
    acc_random_walk : array-like[float, float, float]
        The standard deviation of the random walk, which acts as accumulated bias drift.
    gyro_resolution : float, optional
        The measurement resolution of the gyroscope (smallest increment of change in the sensor reading).
        Default is 0.0, which means no quantization is applied.
    gyro_cross_axis_coupling : float | array-like[float, float, float] | array-like with shape (3,3)
        Gyroscope axes alignment as a 3x3 rotation matrix, similar to `acc_cross_axis_coupling`.
    gyro_bias : array-like[float, float, float]
        The constant additive bias for each axis of the gyroscope.
    gyro_noise : array-like[float, float, float]
        The standard deviation of the white noise for each axis of the gyroscope.
    gyro_random_walk : array-like[float, float, float]
        The standard deviation of the bias drift for each axis of the gyroscope.
    mag_resolution : float, optional
        The measurement resolution of the magnetometer (smallest increment of change in the sensor reading).
        Default is 0.0, which means no quantization is applied.
    mag_cross_axis_coupling : float | array-like[float, float, float] | array-like with shape (3,3)
        Magnetometer axes alignment as a 3x3 rotation matrix, similar to `acc_cross_axis_coupling`.
    mag_bias : array-like[float, float, float]
        The constant additive bias for each axis of the magnetometer.
    mag_noise : array-like[float, float, float]
        The standard deviation of the white noise for each axis of the gyroscope.
    mag_random_walk : array-like[float, float, float]
        The standard deviation of the bias drift for each axis of the magnetometer.
    magnetic_field : float | array-like[float, float, float], optional
        Reference magnetic field vector. Defaults to (0.0, 0.0, 0.5).
        NOTE(review): presumably expressed in the world frame — verify against the sensor implementation.
    debug_acc_color : array-like[float, float, float, float], optional
        The rgba color of the debug acceleration arrow. Defaults to (1.0, 0.0, 0.0, 0.6).
    debug_acc_scale: float, optional
        The scale factor for the debug acceleration arrow. Defaults to 0.01.
    debug_gyro_color : array-like[float, float, float, float], optional
        The rgba color of the debug gyroscope arrow. Defaults to (0.0, 1.0, 0.0, 0.6).
    debug_gyro_scale: float, optional
        The scale factor for the debug gyroscope arrow. Defaults to 0.01.
    debug_mag_color : array-like[float, float, float, float], optional
        The rgba color of the debug magnetometer arrow. Defaults to (0.0, 0.0, 1.0, 0.6).
    debug_mag_scale: float, optional
        The scale factor for the debug magnetometer arrow. Defaults to 0.5.
    """

    # Accelerometer
    acc_resolution: MaybeVec3FType = 0.0
    acc_cross_axis_coupling: MaybeMatrix3x3Type = 0.0
    acc_noise: MaybeVec3FType = 0.0
    acc_bias: MaybeVec3FType = 0.0
    acc_random_walk: MaybeVec3FType = 0.0

    # Gyroscope
    gyro_resolution: MaybeVec3FType = 0.0
    gyro_cross_axis_coupling: MaybeMatrix3x3Type = 0.0
    gyro_noise: MaybeVec3FType = 0.0
    gyro_bias: MaybeVec3FType = 0.0
    gyro_random_walk: MaybeVec3FType = 0.0

    # Magnetometer (New)
    mag_resolution: MaybeVec3FType = 0.0
    mag_cross_axis_coupling: MaybeMatrix3x3Type = 0.0
    mag_noise: MaybeVec3FType = 0.0
    mag_bias: MaybeVec3FType = 0.0
    mag_random_walk: MaybeVec3FType = 0.0
    magnetic_field: MaybeVec3FType = (0.0, 0.0, 0.5)

    debug_acc_color: Vec4FType = (1.0, 0.0, 0.0, 0.6)
    debug_acc_scale: float = 0.01
    debug_gyro_color: Vec4FType = (0.0, 1.0, 0.0, 0.6)
    debug_gyro_scale: float = 0.01
    debug_mag_color: Vec4FType = (0.0, 0.0, 1.0, 0.6)
    debug_mag_scale: float = 0.5

    def model_post_init(self, _):
        # BUGFIX: defining model_post_init here shadowed
        # NoisySensorOptionsMixin.model_post_init, silently skipping the
        # jitter/interpolate validation. Run it explicitly first.
        super().model_post_init(_)
        self._validate_cross_axis_coupling(self.acc_cross_axis_coupling)
        self._validate_cross_axis_coupling(self.gyro_cross_axis_coupling)
        self._validate_cross_axis_coupling(self.mag_cross_axis_coupling)

    def _validate_cross_axis_coupling(self, cross_axis_coupling):
        """Check that a coupling spec is a scalar, 3-vector, or 3x3 matrix with values in [0, 1]."""
        cross_axis_coupling_np = np.array(cross_axis_coupling)
        if cross_axis_coupling_np.shape not in ((), (3,), (3, 3)):
            gs.raise_exception(
                f"cross_axis_coupling shape should be (), (3,), or (3, 3), got: {cross_axis_coupling_np.shape}"
            )
        if np.any(cross_axis_coupling_np < 0.0) or np.any(cross_axis_coupling_np > 1.0):
            gs.raise_exception(f"cross_axis_coupling values should be between 0.0 and 1.0, got: {cross_axis_coupling}")
class Raycaster(RigidSensorOptionsMixin, SensorOptions):
    """
    Sensor that casts rays into the scene to obtain distance measurements and point clouds.

    Parameters
    ----------
    pattern: RaycastPatternOptions
        The raycasting pattern for the sensor.
    min_range : float, optional
        Minimum sensing range in meters. Defaults to 0.0.
    max_range : float, optional
        Maximum sensing range in meters. Defaults to 20.0.
    no_hit_value : float, optional
        Value returned for rays that hit nothing. Defaults to max_range.
    return_world_frame : bool, optional
        Whether to return points in the world frame instead of the local frame. Defaults to False.
    debug_sphere_radius: float, optional
        Radius of each debug sphere drawn in the scene. Defaults to 0.02.
    debug_ray_start_color: array-like[float, float, float, float], optional
        Color of each debug ray start sphere. Defaults to (0.5, 0.5, 1.0, 1.0).
    debug_ray_hit_color: array-like[float, float, float, float], optional
        Color of each debug ray hit point sphere. Defaults to (1.0, 0.5, 0.5, 1.0).
    """

    pattern: RaycastPattern
    min_range: float = 0.0
    max_range: float = 20.0
    # Defaults to max_range via a factory so the two stay in sync at construction.
    no_hit_value: float = Field(default_factory=lambda data: data["max_range"])
    return_world_frame: bool = False
    debug_sphere_radius: float = 0.02
    debug_ray_start_color: Vec4FType = (0.5, 0.5, 1.0, 1.0)
    debug_ray_hit_color: Vec4FType = (1.0, 0.5, 0.5, 1.0)

    def model_post_init(self, _):
        # The sensing window [min_range, max_range] must be non-empty and non-negative.
        if self.min_range < 0.0:
            gs.raise_exception(f"[{type(self).__name__}] min_range should be non-negative. Got: {self.min_range}.")
        if self.max_range <= self.min_range:
            gs.raise_exception(
                f"[{type(self).__name__}] max_range {self.max_range} should be greater than min_range {self.min_range}."
            )
class DepthCamera(Raycaster):
    """
    Depth camera built on the Raycaster sensor: casts a camera-shaped ray
    pattern to produce depth images.

    Parameters
    ----------
    pattern: DepthCameraPattern
        The raycasting pattern configuration for the sensor.
    """

    pattern: DepthCameraPattern
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/options/sensors/options.py",
"license": "Apache License 2.0",
"lines": 338,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Genesis-Embodied-AI/Genesis:examples/sensors/lidar_teleop.py | import argparse
import os
import numpy as np
import genesis as gs
from genesis.utils.geom import euler_to_quat
from genesis.vis.keybindings import Key, KeyAction, Keybind
# Position and angle increments for keyboard teleop control
KEY_DPOS = 0.1
KEY_DANGLE = 0.1
# Number of obstacles to create in a ring around the robot, and the ring radii
# (cylinders on an inner ring, boxes on an outer ring).
NUM_CYLINDERS = 8
NUM_BOXES = 6
CYLINDER_RING_RADIUS = 3.0
BOX_RING_RADIUS = 5.0
def main() -> None:
    """Run an interactive LiDAR/depth-camera demo with keyboard teleoperation.

    Builds a scene with a ring of obstacles and a robot (Go2 URDF or a simple
    box), attaches the requested raycaster sensor, and lets the user drive the
    robot pose with the keyboard while sensor hits are drawn for debugging.
    """
    parser = argparse.ArgumentParser(description="Genesis LiDAR/Depth Camera Visualization with Keyboard Teleop")
    parser.add_argument("-B", "--n_envs", type=int, default=0, help="Number of environments to replicate")
    parser.add_argument("--cpu", action="store_true", help="Run on CPU instead of GPU")
    parser.add_argument("--use-box", action="store_true", help="Use Box as robot instead of Go2")
    parser.add_argument(
        "--pattern", type=str, default="spherical", choices=("spherical", "depth", "grid"), help="Sensor pattern type"
    )
    args = parser.parse_args()
    gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="32", logging_level="info")
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            gravity=(0.0, 0.0, -1.0),
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(-6.0, 0.0, 4.0),
            camera_lookat=(0.0, 0.0, 0.5),
            max_FPS=60,
        ),
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        show_viewer=True,
    )
    scene.add_entity(gs.morphs.Plane())
    # create ring of obstacles to visualize raycaster sensor hits
    for i in range(NUM_CYLINDERS):
        angle = 2 * np.pi * i / NUM_CYLINDERS
        x = CYLINDER_RING_RADIUS * np.cos(angle)
        y = CYLINDER_RING_RADIUS * np.sin(angle)
        scene.add_entity(
            gs.morphs.Cylinder(
                height=1.5,
                radius=0.3,
                pos=(x, y, 0.75),
                fixed=True,
            )
        )
    # Outer ring of boxes with increasing heights, angularly offset by 30 deg.
    for i in range(NUM_BOXES):
        angle = 2 * np.pi * i / NUM_BOXES + np.pi / 6
        x = BOX_RING_RADIUS * np.cos(angle)
        y = BOX_RING_RADIUS * np.sin(angle)
        scene.add_entity(
            gs.morphs.Box(
                size=(0.5, 0.5, 2.0 * (i + 1) / NUM_BOXES),
                pos=(x, y, 1.0),
                fixed=False,
            )
        )
    entity_kwargs = dict(
        pos=(0.0, 0.0, 0.35),
        quat=(1.0, 0.0, 0.0, 0.0),
        fixed=True,
    )
    if args.use_box:
        robot = scene.add_entity(
            gs.morphs.Box(
                size=(0.1, 0.1, 0.1),
                **entity_kwargs,
            )
        )
        pos_offset = (0.0, 0.0, 0.2)
    else:
        robot = scene.add_entity(
            gs.morphs.URDF(
                file="urdf/go2/urdf/go2.urdf",
                **entity_kwargs,
            )
        )
        pos_offset = (0.3, 0.0, 0.1)
    # Sensor mounted on the robot base with a fixed positional offset.
    sensor_kwargs = dict(
        entity_idx=robot.idx,
        pos_offset=pos_offset,
        euler_offset=(0.0, 0.0, 0.0),
        return_world_frame=True,
        draw_debug=True,
    )
    if args.pattern == "depth":
        sensor = scene.add_sensor(gs.sensors.DepthCamera(pattern=gs.sensors.DepthCameraPattern(), **sensor_kwargs))
        # Live-plot the depth image; with batched envs only the first env is shown.
        scene.start_recording(
            data_func=(lambda: sensor.read_image()[0]) if args.n_envs > 0 else sensor.read_image,
            rec_options=gs.recorders.MPLImagePlot(),
        )
    else:
        if args.pattern == "grid":
            pattern_cfg = gs.sensors.GridPattern()
        else:
            # Defensive fallback; argparse choices should already prevent this.
            if args.pattern != "spherical":
                gs.logger.warning(f"Unrecognized raycaster pattern: {args.pattern}. Using 'spherical' instead.")
            pattern_cfg = gs.sensors.SphericalPattern()
        sensor = scene.add_sensor(gs.sensors.Lidar(pattern=pattern_cfg, **sensor_kwargs))
    scene.build(n_envs=args.n_envs)
    # Initialize pose state
    init_pos = np.array([0.0, 0.0, 0.35], dtype=np.float32)
    init_euler = np.array([0.0, 0.0, 0.0], dtype=np.float32)
    target_pos = init_pos.copy()
    target_euler = init_euler.copy()
    def apply_pose_to_all_envs(pos_np: np.ndarray, quat_np: np.ndarray):
        # Broadcast the single target pose to every environment when batched.
        if args.n_envs > 0:
            pos_np = np.expand_dims(pos_np, axis=0).repeat(args.n_envs, axis=0)
            quat_np = np.expand_dims(quat_np, axis=0).repeat(args.n_envs, axis=0)
        robot.set_pos(pos_np)
        robot.set_quat(quat_np)
    # Define control callbacks
    def reset_pose():
        target_pos[:] = init_pos
        target_euler[:] = init_euler
    def translate(index: int, is_negative: bool):
        target_pos[index] += (-1 if is_negative else 1) * KEY_DPOS
    def rotate(index: int, is_negative: bool):
        target_euler[index] += (-1 if is_negative else 1) * KEY_DANGLE
    # Register keybindings
    scene.viewer.register_keybinds(
        Keybind("move_forward", Key.UP, KeyAction.HOLD, callback=translate, args=(0, False)),
        Keybind("move_backward", Key.DOWN, KeyAction.HOLD, callback=translate, args=(0, True)),
        Keybind("move_right", Key.RIGHT, KeyAction.HOLD, callback=translate, args=(1, True)),
        Keybind("move_left", Key.LEFT, KeyAction.HOLD, callback=translate, args=(1, False)),
        Keybind("move_down", Key.J, KeyAction.HOLD, callback=translate, args=(2, True)),
        Keybind("move_up", Key.K, KeyAction.HOLD, callback=translate, args=(2, False)),
        Keybind("roll_ccw", Key.N, KeyAction.HOLD, callback=rotate, args=(0, False)),
        Keybind("roll_cw", Key.M, KeyAction.HOLD, callback=rotate, args=(0, True)),
        Keybind("pitch_up", Key.COMMA, KeyAction.HOLD, callback=rotate, args=(1, False)),
        Keybind("pitch_down", Key.PERIOD, KeyAction.HOLD, callback=rotate, args=(1, True)),
        Keybind("yaw_ccw", Key.O, KeyAction.HOLD, callback=rotate, args=(2, False)),
        Keybind("yaw_cw", Key.P, KeyAction.HOLD, callback=rotate, args=(2, True)),
        Keybind("reset", Key.BACKSLASH, KeyAction.HOLD, callback=reset_pose),
    )
    # Print controls
    print("Keyboard Controls:")
    print("[↑/↓/←/→]: Move XY")
    print("[j/k]: Down/Up")
    print("[n/m]: Roll CCW/CW")
    print("[,/.]: Pitch Up/Down")
    print("[o/p]: Yaw CCW/CW")
    print("[\\]: Reset")
    apply_pose_to_all_envs(target_pos, euler_to_quat(target_euler))
    try:
        while True:
            # Re-apply the (possibly keyboard-updated) target pose every step.
            apply_pose_to_all_envs(target_pos, euler_to_quat(target_euler))
            scene.step()
            # Under pytest, exit after a single step to keep CI fast.
            if "PYTEST_VERSION" in os.environ:
                break
    except KeyboardInterrupt:
        gs.logger.info("Simulation interrupted, exiting.")
    finally:
        gs.logger.info("Simulation finished.")
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/sensors/lidar_teleop.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:examples/sensors/contact_force_go2.py | import argparse
import os
from tqdm import tqdm
import genesis as gs
from genesis.recorders.plotters import IS_MATPLOTLIB_AVAILABLE, IS_PYQTGRAPH_AVAILABLE
def main() -> None:
    """Simulate a Go2 quadruped with a contact sensor on each foot.

    Attaches either a ``ContactForce`` (3D force) or ``Contact`` (boolean)
    sensor to every foot link and streams readings to a live line plot when a
    plotting backend is available.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-dt", "--timestep", type=float, default=0.01, help="Simulation time step")
    parser.add_argument("-v", "--vis", action="store_true", default=True, help="Show visualization GUI")
    parser.add_argument("-nv", "--no-vis", action="store_false", dest="vis", help="Disable visualization GUI")
    parser.add_argument("-c", "--cpu", action="store_true", help="Use CPU instead of GPU")
    parser.add_argument("-t", "--seconds", type=float, default=2.0, help="Number of seconds to simulate")
    parser.add_argument("-f", "--force", action="store_true", default=True, help="Use ContactForceSensor (xyz float)")
    parser.add_argument("-nf", "--no-force", action="store_false", dest="force", help="Use ContactSensor (boolean)")
    args = parser.parse_args()
    ########################## init ##########################
    gs.init(backend=gs.cpu if args.cpu else gs.gpu, logging_level=None)
    ########################## scene setup ##########################
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=args.timestep,
        ),
        rigid_options=gs.options.RigidOptions(
            # Keep the constraint time constant well-posed for small timesteps.
            constraint_timeconst=max(0.01, 2 * args.timestep),
            use_gjk_collision=True,
        ),
        vis_options=gs.options.VisOptions(
            show_world_frame=True,
        ),
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        show_viewer=args.vis,
    )
    scene.add_entity(gs.morphs.Plane())
    foot_link_names = ("FR_foot", "FL_foot", "RR_foot", "RL_foot")
    go2 = scene.add_entity(
        gs.morphs.URDF(
            file="urdf/go2/urdf/go2.urdf",
            pos=(0.0, 0.0, 0.2),
            links_to_keep=foot_link_names,
        )
    )
    # One sensor (plus optional live plot) per foot link.
    for link_name in foot_link_names:
        if args.force:
            sensor_options = gs.sensors.ContactForce(
                entity_idx=go2.idx,
                link_idx_local=go2.get_link(link_name).idx_local,
                draw_debug=True,
            )
            plot_kwargs = dict(
                title=f"{link_name} Force Sensor Data",
                labels=["force_x", "force_y", "force_z"],
            )
        else:
            sensor_options = gs.sensors.Contact(
                entity_idx=go2.idx,
                link_idx_local=go2.get_link(link_name).idx_local,
                draw_debug=True,
            )
            plot_kwargs = dict(
                title=f"{link_name} Contact Sensor Data",
                labels=["in_contact"],
            )
        sensor = scene.add_sensor(sensor_options)
        # Prefer pyqtgraph for plotting, fall back to matplotlib, else skip.
        if IS_PYQTGRAPH_AVAILABLE:
            sensor.start_recording(gs.recorders.PyQtLinePlot(**plot_kwargs))
        elif IS_MATPLOTLIB_AVAILABLE:
            print("pyqtgraph not found, falling back to matplotlib.")
            sensor.start_recording(gs.recorders.MPLLinePlot(**plot_kwargs))
        else:
            print("matplotlib or pyqtgraph not found, skipping real-time plotting.")
    scene.build()
    try:
        # Run only a handful of steps under pytest to keep CI fast.
        steps = int(args.seconds / args.timestep) if "PYTEST_VERSION" not in os.environ else 5
        for _ in tqdm(range(steps)):
            scene.step()
    except KeyboardInterrupt:
        gs.logger.info("Simulation interrupted, exiting.")
    finally:
        gs.logger.info("Simulation finished.")
        scene.stop_recording()
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/sensors/contact_force_go2.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:tests/test_examples.py | import os
import sys
import subprocess
from pathlib import Path
import pytest
# Root directory containing all runnable example scripts.
EXAMPLES_DIR = Path(__file__).parents[1] / "examples"
# Glob patterns (relative to EXAMPLES_DIR) selecting which examples to test.
ALLOW_PATTERNS = {
    "*.py",
    "collision/**/*.py",
    "coupling/**/*.py",
    "drone/interactive_drone.py",
    "drone/fly_route.py",
    "IPC_Solver/**/*.py",
    "kinematic/**/*.py",
    "rigid/**/*.py",
    "render_async/**/*.py",
    "sap_coupling/**/*.py",
    "sensors/**/*.py",
    "tutorials/**/*.py",
    "usd/**/*.py",
    "viewer_plugins/**/*.py",
}
# Scripts excluded from testing (multi-GPU requirements or known exit crashes).
IGNORE_SCRIPT_NAMES = {
    "ddp_multi_gpu.py",
    "multi_gpu.py",
    "single_franka_batch_render.py",  # FIXME: segfault on exit
    "fem_cube_linked_with_arm.py",  # FIXME: segfault on exit (corrupted double-linked list)
}
if sys.platform != "linux":
    IGNORE_SCRIPT_NAMES |= {
        "cut_dragon.py",
    }
# Map example scripts or directories to their required optional dependencies.
# Directory keys apply recursively to all scripts within that directory.
EXAMPLE_DEPENDENCIES = {
    "import_stage.py": ["pxr"],  # Requires usd-core package (provides pxr module)
    "IPC_Solver": ["uipc"],  # Requires pyuipc package (provides uipc module)
}
# Per-example subprocess timeout, in seconds.
TIMEOUT = 600
# Mark every test in this module as part of the 'examples' suite.
pytestmark = [
    pytest.mark.examples,
]
def _discover_examples():
    """Return the sorted list of example scripts selected by ALLOW_PATTERNS.

    Scripts listed in IGNORE_SCRIPT_NAMES are filtered out. Raises ValueError
    if the examples directory is missing.
    """
    if not EXAMPLES_DIR.exists():
        raise ValueError(f"Example directory '{EXAMPLES_DIR}' does not exist.")
    # Expand every allowed glob pattern, dropping explicitly ignored scripts.
    discovered = [
        script
        for glob_pattern in ALLOW_PATTERNS
        for script in EXAMPLES_DIR.glob(glob_pattern)
        if script.name not in IGNORE_SCRIPT_NAMES
    ]
    return sorted(discovered)
@pytest.mark.examples
@pytest.mark.parametrize("backend", [None])  # Disable genesis initialization at worker level
@pytest.mark.parametrize("file", _discover_examples(), ids=lambda p: p.relative_to(EXAMPLES_DIR).as_posix())
def test_example(file: Path) -> None:
    """Run one example script in a subprocess; fail on non-zero exit or timeout."""
    # Check for required optional dependencies (script-level and inherited from parent dirs)
    rel = file.relative_to(EXAMPLES_DIR)
    module_deps = list(EXAMPLE_DEPENDENCIES.get(rel.name, []))
    for parent in rel.parents:
        if parent != Path("."):
            module_deps.extend(EXAMPLE_DEPENDENCIES.get(parent.as_posix(), []))
    for module_name in module_deps:
        pytest.importorskip(module_name, reason=f"Python module '{module_name}' not installed.")
    # Disable keyboard control and monitoring when running the unit tests
    env = os.environ.copy()
    env["PYNPUT_BACKEND"] = "dummy"
    path_rel = file.relative_to(EXAMPLES_DIR).as_posix()
    try:
        result = subprocess.run(
            [sys.executable, str(file)], env=env, capture_output=True, text=True, check=False, timeout=TIMEOUT
        )
    except subprocess.TimeoutExpired as e:
        # Include whatever output was captured so the timeout is actionable.
        err_msg = f"Timeout running example {path_rel}."
        if e.stdout is not None:
            err_msg += f"\n\n--- STDOUT ---\n{e.stdout.decode()}"
        if e.stderr is not None:
            err_msg += f"\n\n--- STDERR ---\n{e.stderr.decode()}"
        pytest.fail(err_msg)
    if result.returncode != 0:
        pytest.fail(
            f"Failed to run example {path_rel} (Exit Code {result.returncode}).\n\n"
            f"--- STDOUT ---\n{result.stdout}\n\n--- STDERR ---\n{result.stderr}"
        )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_examples.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:examples/coupling/cloth_attached_to_rigid.py | import argparse
import math
import os
import genesis as gs
import genesis.utils.geom as gu
def main() -> None:
    """Attach a PBD cloth to a rigid box, then teleport the box around.

    The first cloth particles are pinned to the box link; partway through the
    teleport sequence they are released so the cloth falls freely.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    parser.add_argument("-c", "--cpu", action="store_true", default=False)
    parser.add_argument("--horizon", type=int, default=100 if "PYTEST_VERSION" not in os.environ else 25)
    parser.add_argument("--num_teleports", type=int, default=5)
    args = parser.parse_args()
    ########################## init ##########################
    gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="32", logging_level="info")
    dt: float = 2e-2
    particle_size: float = 1e-2
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=dt,
            substeps=10,
        ),
        pbd_options=gs.options.PBDOptions(
            particle_size=particle_size,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(3.5, 0.0, 2.5),
            camera_lookat=(0.0, 0.0, 0.5),
            camera_fov=40,
            max_FPS=50,
        ),
        vis_options=gs.options.VisOptions(
            rendered_envs_idx=[0],
        ),
        show_viewer=args.vis,
    )
    ########################## entities ##########################
    rigid_material = gs.materials.Rigid(needs_coup=True, coup_friction=0.0)
    # create ground plane
    scene.add_entity(gs.morphs.Plane(), rigid_material)
    # create box
    box_morph = gs.morphs.Box(pos=[0.25, 0.25, 0.25], size=[0.25, 0.25, 0.25])
    box = scene.add_entity(box_morph, rigid_material)
    # create cloth
    # Placed just above the box top face: half box height plus one particle.
    cloth_pos = (0.25, 0.25, 0.25 + 0.125 + particle_size)
    cloth_scale = 0.5
    cloth_morph = gs.morphs.Mesh(pos=cloth_pos, scale=cloth_scale, file="meshes/cloth.obj")
    cloth_material = gs.materials.PBD.Cloth()
    cloth_surface = gs.surfaces.Default(color=(0.2, 0.4, 0.8, 1.0))  # , vis_mode="particle")
    cloth = scene.add_entity(cloth_morph, cloth_material, cloth_surface)
    ########################## build ##########################
    scene.build(n_envs=0)
    # Pin the first few cloth particles to the box link.
    particles_idx = [0, 1, 2, 3, 4, 5, 6, 7]
    box_link_idx = box.link_start
    cloth.fix_particles_to_link(box_link_idx, particles_idx_local=particles_idx)
    box.set_dofs_velocity([-0.0, 1.0, 0.0], dofs_idx_local=[0, 1, 2])
    for i in range(args.horizon):
        scene.step()
    for j in range(args.num_teleports):
        if j == 2:
            # Release the cloth from the box on the third teleport.
            cloth.release_particle(particles_idx)
        new_pos = (0.2 * math.sin(j), 0.2 * math.cos(j), 0.5)
        new_rot = gu.euler_to_quat((-30 * j, -40 * j, 70 * j))
        box.set_pos(new_pos)
        box.set_quat(new_rot)
        for _ in range(args.horizon):
            scene.step()
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/coupling/cloth_attached_to_rigid.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/sap_coupling/fem_fixed_constraint.py | import argparse
import math
import os
import sys
import torch
import genesis as gs
from huggingface_hub import snapshot_download
def main() -> None:
    """Drive one vertex of a FEM cube along a circular path via vertex constraints."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--cpu", action="store_true", default=(sys.platform == "darwin"))
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    args = parser.parse_args()
    # Shorten the run drastically under pytest.
    n_steps = 150 if "PYTEST_VERSION" not in os.environ else 2
    gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="64")
    fem_material_linear_corotated = gs.materials.FEM.Elastic(
        model="linear_corotated",
    )
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=1 / 60,
            substeps=2,
        ),
        fem_options=gs.options.FEMOptions(
            use_implicit_solver=True,
            enable_vertex_constraints=True,
        ),
        coupler_options=gs.options.SAPCouplerOptions(),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(1.5, -1.5, 1.5),
            camera_lookat=(-0.6, 0.8, 0),
            max_FPS=60,
        ),
        show_viewer=args.vis,
    )
    # Fetch the cube mesh asset (pinned revision for reproducibility).
    asset_path = snapshot_download(
        repo_type="dataset",
        repo_id="Genesis-Intelligence/assets",
        revision="4d96c3512df4421d4dd3d626055d0d1ebdfdd7cc",
        allow_patterns="cube8.obj",
        max_workers=1,
    )
    cube = scene.add_entity(
        morph=gs.morphs.Mesh(
            file=f"{asset_path}/cube8.obj",
            pos=(0.0, 0.0, 0.7),
            scale=0.1,
        ),
        material=fem_material_linear_corotated,
    )
    scene.build()
    verts_idx = [0]
    # Run simulation
    for i in range(n_steps):
        # Constraint target moves on a circle in the XY plane over time.
        target_poss = cube.init_positions[verts_idx] + torch.tensor(
            (0.15 * (math.cos(0.04 * i) - 1.0), 0.15 * math.sin(0.04 * i), 0.0)
        )
        cube.set_vertex_constraints(verts_idx, target_poss)
        scene.step(update_visualizer=False)
        if args.vis:
            # Draw the constraint target, then refresh the viewer manually.
            scene.visualizer.context.draw_debug_sphere(
                pos=target_poss.squeeze(), radius=0.01, color=(1, 0, 1, 0.8), persistent=False
            )
            scene.visualizer.update(force=False, auto=True)
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/sap_coupling/fem_fixed_constraint.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/sap_coupling/fem_sphere_and_cube.py | import argparse
import sys
import genesis as gs
import os
from huggingface_hub import snapshot_download
def main() -> None:
    """Drop a soft FEM sphere and a FEM cube and let them settle (SAP coupler)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--cpu", action="store_true", default=(sys.platform == "darwin"))
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    args = parser.parse_args()
    # Shorten the run drastically under pytest.
    n_steps = 200 if "PYTEST_VERSION" not in os.environ else 2
    gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="64")
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=1 / 60,
            substeps=2,
        ),
        fem_options=gs.options.FEMOptions(
            use_implicit_solver=True,
        ),
        coupler_options=gs.options.SAPCouplerOptions(),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(1.5, -1.5, 1.5),
            camera_lookat=(0, 0, 0),
            max_FPS=60,
        ),
        show_viewer=args.vis,
    )
    sphere = scene.add_entity(
        morph=gs.morphs.Sphere(
            pos=(0.0, 0.0, 0.1),
            radius=0.1,
        ),
        material=gs.materials.FEM.Elastic(
            model="linear_corotated",
            E=1e5,
            nu=0.4,
        ),
    )
    # Fetch the cube mesh asset (pinned revision for reproducibility).
    asset_path = snapshot_download(
        repo_type="dataset",
        repo_id="Genesis-Intelligence/assets",
        revision="4d96c3512df4421d4dd3d626055d0d1ebdfdd7cc",
        allow_patterns="cube8.obj",
        max_workers=1,
    )
    cube = scene.add_entity(
        morph=gs.morphs.Mesh(
            file=f"{asset_path}/cube8.obj",
            pos=(0.0, 0.0, 0.4),
            scale=0.1,
        ),
        material=gs.materials.FEM.Elastic(
            model="linear_corotated",
        ),
    )
    scene.build()
    for _ in range(n_steps):
        scene.step()
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/sap_coupling/fem_sphere_and_cube.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/sap_coupling/franka_grasp_fem_sphere.py | import argparse
import os
import sys
import numpy as np
import genesis as gs
def main() -> None:
    """Grasp and lift a soft FEM sphere with a Franka arm using the SAP coupler.

    Sequence: move the gripper above the sphere (IK), close the fingers with a
    constant force, then lift while keeping the grasp force applied.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--cpu", action="store_true", default=(sys.platform == "darwin"))
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    args = parser.parse_args()
    ########################## init ##########################
    gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="64")
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=1.0 / 60,
            substeps=2,
        ),
        rigid_options=gs.options.RigidOptions(
            enable_self_collision=False,
        ),
        fem_options=gs.options.FEMOptions(
            use_implicit_solver=True,
            pcg_threshold=1e-10,
        ),
        # Tight solver tolerances for a stable, reproducible grasp.
        coupler_options=gs.options.SAPCouplerOptions(
            pcg_threshold=1e-10,
            sap_convergence_atol=1e-10,
            sap_convergence_rtol=1e-10,
            linesearch_ftol=1e-10,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(1.3, 0.0, 0.15),
            camera_lookat=(0.65, 0.0, 0.15),
            max_FPS=60,
        ),
        show_viewer=args.vis,
    )
    ########################## entities ##########################
    franka = scene.add_entity(
        gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
        material=gs.materials.Rigid(
            coup_friction=1.0,
            friction=1.0,
        ),
    )
    sphere = scene.add_entity(
        morph=gs.morphs.Sphere(
            radius=0.02,
            pos=(0.65, 0.0, 0.02),
        ),
        material=gs.materials.FEM.Elastic(
            model="linear_corotated",
            friction_mu=1.0,
            E=1e5,
            nu=0.4,
        ),
    )
    ########################## build ##########################
    scene.build()
    motors_dof = np.arange(7)
    fingers_dof = np.arange(7, 9)
    end_effector = franka.get_link("hand")
    ########################## simulate ##########################
    # init
    franka.set_qpos((-1.0124, 1.5559, 1.3662, -1.6878, -1.5799, 1.7757, 1.4602, 0.04, 0.04))
    # hold
    qpos = franka.inverse_kinematics(link=end_effector, pos=(0.65, 0.0, 0.13), quat=(0, 1, 0, 0))
    franka.control_dofs_position(qpos[motors_dof], motors_dof)
    for i in range(15 if "PYTEST_VERSION" not in os.environ else 1):
        scene.step()
    # grasp
    for i in range(10 if "PYTEST_VERSION" not in os.environ else 1):
        franka.control_dofs_force(np.array([-1.0, -1.0]), fingers_dof)
        scene.step()
    # lift
    qpos = franka.inverse_kinematics(link=end_effector, pos=(0.65, 0.0, 0.3), quat=(0, 1, 0, 0))
    franka.control_dofs_position(qpos[motors_dof], motors_dof)
    for i in range(40 if "PYTEST_VERSION" not in os.environ else 1):
        franka.control_dofs_force(np.array([-1.0, -1.0]), fingers_dof)
        scene.step()
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/sap_coupling/franka_grasp_fem_sphere.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/sap_coupling/franka_grasp_rigid_cube.py | import argparse
import sys
import numpy as np
import genesis as gs
def main() -> None:
    """Grasp and lift a rigid cube with a Franka arm using the SAP coupler.

    Sequence: move the gripper above the cube (IK), close the fingers with a
    constant force, then lift while keeping the grasp force applied.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--cpu", action="store_true", default=(sys.platform == "darwin"))
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    args = parser.parse_args()
    ########################## init ##########################
    gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="64")
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=1.0 / 60,
            substeps=2,
        ),
        rigid_options=gs.options.RigidOptions(
            enable_self_collision=False,
        ),
        # Tight solver tolerances for a stable, reproducible grasp.
        coupler_options=gs.options.SAPCouplerOptions(
            pcg_threshold=1e-10,
            sap_convergence_atol=1e-10,
            sap_convergence_rtol=1e-10,
            linesearch_ftol=1e-10,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(1.3, 0.0, 0.1),
            camera_lookat=(0.65, 0.0, 0.1),
            max_FPS=60,
        ),
        show_viewer=args.vis,
    )
    ########################## entities ##########################
    franka = scene.add_entity(
        gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
        material=gs.materials.Rigid(
            coup_friction=1.0,
            friction=1.0,
        ),
    )
    cube = scene.add_entity(
        morph=gs.morphs.Box(
            size=(0.04, 0.04, 0.04),
            pos=(0.65, 0.0, 0.02),
        ),
        material=gs.materials.Rigid(
            coup_friction=1.0,
            friction=1.0,
        ),
    )
    ########################## build ##########################
    scene.build()
    motors_dof = np.arange(7)
    fingers_dof = np.arange(7, 9)
    end_effector = franka.get_link("hand")
    ########################## simulate ##########################
    # init
    franka.set_qpos((-1.0124, 1.5559, 1.3662, -1.6878, -1.5799, 1.7757, 1.4602, 0.04, 0.04))
    # hold
    qpos = franka.inverse_kinematics(link=end_effector, pos=(0.65, 0.0, 0.13), quat=(0, 1, 0, 0))
    franka.control_dofs_position(qpos[motors_dof], motors_dof)
    for i in range(15):
        scene.step()
    # grasp
    for i in range(10):
        franka.control_dofs_force(np.array([-1.0, -1.0]), fingers_dof)
        scene.step()
    # lift
    qpos = franka.inverse_kinematics(link=end_effector, pos=(0.65, 0.0, 0.3), quat=(0, 1, 0, 0))
    franka.control_dofs_position(qpos[motors_dof], motors_dof)
    for i in range(40):
        franka.control_dofs_force(np.array([-1.0, -1.0]), fingers_dof)
        scene.step()
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/sap_coupling/franka_grasp_rigid_cube.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:tests/test_grad.py | import numpy as np
import pytest
import torch
import genesis as gs
from genesis.utils.geom import R_to_quat
from genesis.utils.misc import qd_to_torch, qd_to_numpy, tensor_to_array
from genesis.utils import set_random_seed
from .utils import assert_allclose
# FIXME: Gradient computation is broken if debug mode is enabled and field is used
# Module-level marks: disable debug mode for every test in this file.
pytestmark = [
    pytest.mark.debug(False),
]
@pytest.mark.required
@pytest.mark.parametrize("backend", [gs.cpu, gs.gpu])
def test_differentiable_push(show_viewer):
    """End-to-end gradient check for a rigid tool pushing an MPM box.

    Builds a differentiable scene, accumulates a distance-to-goal loss at two
    timesteps, backpropagates, and checks which per-step velocity inputs
    received non-trivial gradients.
    """
    HORIZON = 10
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=2e-3,
            substeps=10,
            requires_grad=True,
        ),
        mpm_options=gs.options.MPMOptions(
            lower_bound=(0.0, -1.0, 0.0),
            upper_bound=(1.0, 1.0, 0.55),
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(2.5, -0.15, 2.42),
            camera_lookat=(0.5, 0.5, 0.1),
        ),
        show_viewer=show_viewer,
    )
    plane = scene.add_entity(
        gs.morphs.URDF(
            file="urdf/plane/plane.urdf",
            fixed=True,
        )
    )
    stick = scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/stirrer.obj",
            scale=0.6,
            pos=(0.5, 0.5, 0.05),
            euler=(90.0, 0.0, 0.0),
        ),
        material=gs.materials.Tool(
            friction=8.0,
        ),
    )
    obj = scene.add_entity(
        morph=gs.morphs.Box(
            lower=(0.2, 0.1, 0.05),
            upper=(0.4, 0.3, 0.15),
        ),
        material=gs.materials.MPM.Elastic(
            rho=500,
        ),
    )
    scene.build(n_envs=2)
    # Differentiable initial conditions for the tool and the MPM object.
    init_pos = gs.tensor([[0.3, 0.1, 0.28], [0.3, 0.1, 0.5]], requires_grad=True)
    stick.set_position(init_pos)
    pos_obj_init = gs.tensor([0.3, 0.3, 0.1], requires_grad=True)
    obj.set_position(pos_obj_init)
    v_obj_init = gs.tensor([0.0, -1.0, 0.0], requires_grad=True)
    obj.set_velocity(v_obj_init)
    goal = gs.tensor([0.5, 0.8, 0.05])
    loss = 0.0
    v_list = []
    for i in range(HORIZON):
        # One differentiable velocity command per step; kept for grad checks.
        v_i = gs.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], requires_grad=True)
        stick.set_velocity(vel=v_i)
        v_list.append(v_i)
        scene.step()
        if i == HORIZON // 2:
            # Mid-rollout loss over active MPM particle positions.
            mpm_particles = scene.get_state().solvers_state[scene.solvers.index(scene.mpm_solver)]
            loss += torch.pow(mpm_particles.pos[mpm_particles.active == 1] - goal, 2).sum()
        if i == HORIZON - 2:
            state = obj.get_state()
            loss += torch.pow(state.pos - goal, 2).sum()
    loss.backward()
    # TODO: It would be great to compare the gradient to its analytical or numerical value.
    # Every velocity applied before the last loss term should receive gradient;
    # the final step's velocity is applied after both loss terms, so it gets none.
    for v_i in v_list[:-1]:
        assert (v_i.grad.abs() > gs.EPS).any()
    assert (v_list[-1].grad.abs() < gs.EPS).all()
@pytest.mark.required
@pytest.mark.precision("64")
@pytest.mark.parametrize("backend", [gs.cpu, gs.gpu])
def test_diff_contact():
    """Validate analytical contact-detection gradients against finite differences.

    Two stacked boxes are brought into contact; gradients of a scalar contact
    loss w.r.t. geom positions/quaternions from ``collider.backward`` are
    compared to central finite-difference directional derivatives.
    """
    RTOL = 1e-4
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=0.01,
            # Turn on differentiable mode
            requires_grad=True,
        ),
        show_viewer=False,
    )
    box_size = 0.25
    box_spacing = box_size
    vec_one = np.array([1.0, 1.0, 1.0])
    box_pos_offset = (0.0, 0.0, 0.0) + 0.5 * box_size * vec_one
    box0 = scene.add_entity(
        gs.morphs.Box(size=box_size * vec_one, pos=box_pos_offset),
    )
    # Second box placed slightly less than one box height above -> overlap.
    box1 = scene.add_entity(
        gs.morphs.Box(size=box_size * vec_one, pos=box_pos_offset + 0.8 * box_spacing * np.array([0, 0, 1])),
    )
    scene.build()
    solver = scene.sim.rigid_solver
    collider = solver.collider
    # Set up initial configuration
    x_ang, y_ang, z_ang = 3.0, 3.0, 3.0
    box1.set_quat(R_to_quat(gs.euler_to_R([np.deg2rad(x_ang), np.deg2rad(y_ang), np.deg2rad(z_ang)])))
    box0_init_pos = box0.get_pos().clone()
    box1_init_pos = box1.get_pos().clone()
    box0_init_quat = box0.get_quat().clone()
    box1_init_quat = box1.get_quat().clone()
    ### Compute the initial loss and compute gradients using differentiable contact detection
    # Detect contact
    collider.detection()
    # Get contact outputs and their grads
    contacts = collider.get_contacts(as_tensor=True, to_torch=True, keep_batch_dim=True)
    normal = contacts["normal"].requires_grad_()
    position = contacts["position"].requires_grad_()
    penetration = contacts["penetration"].requires_grad_()
    loss = ((normal * position).sum(dim=-1) * penetration).sum()
    dL_dnormal = torch.autograd.grad(loss, normal, retain_graph=True)[0]
    dL_dposition = torch.autograd.grad(loss, position, retain_graph=True)[0]
    dL_dpenetration = torch.autograd.grad(loss, penetration)[0]
    # Compute analytical gradients of the geoms position and quaternion
    collider.backward(dL_dposition, dL_dnormal, dL_dpenetration)
    dL_dpos = qd_to_torch(solver.geoms_state.pos.grad)
    dL_dquat = qd_to_torch(solver.geoms_state.quat.grad)
    ### Compute directional derivatives along random directions
    FD_EPS = 1e-5
    TRIALS = 100
    def compute_dL_error(dL_dx, x_type):
        # Average relative error between analytical and finite-difference
        # directional derivatives over TRIALS random unit directions.
        dL_error_rel = 0.0
        box0_input_pos = box0_init_pos
        box1_input_pos = box1_init_pos
        box0_input_quat = box0_init_quat
        box1_input_quat = box1_init_quat
        for _ in range(TRIALS):
            rand_dx = torch.randn_like(dL_dx)
            rand_dx = torch.nn.functional.normalize(rand_dx, dim=-1)
            dL = (rand_dx * dL_dx).sum()
            lossPs = []
            for sign in (1, -1):
                # Compute query point
                if x_type == "pos":
                    box0_input_pos = box0_init_pos + sign * rand_dx[0, 0] * FD_EPS
                    box1_input_pos = box1_init_pos + sign * rand_dx[1, 0] * FD_EPS
                else:
                    # FIXME: The quaternion should be normalized
                    box0_input_quat = box0_init_quat + sign * rand_dx[0, 0] * FD_EPS
                    box1_input_quat = box1_init_quat + sign * rand_dx[1, 0] * FD_EPS
                # Update box positions
                box0.set_pos(box0_input_pos)
                box1.set_pos(box1_input_pos)
                box0.set_quat(box0_input_quat)
                box1.set_quat(box1_input_quat)
                # Re-detect contact.
                # We need to manually reset the contact counter as we are not running the whole sim step.
                collider._collider_state.n_contacts.fill(0)
                collider.detection()
                contacts = collider.get_contacts(as_tensor=True, to_torch=True, keep_batch_dim=True)
                normal, position, penetration = contacts["normal"], contacts["position"], contacts["penetration"]
                # Compute loss
                loss = ((normal * position).sum(dim=-1) * penetration).sum()
                lossPs.append(loss)
            # Central finite difference of the loss along the random direction.
            dL_fd = (lossPs[0] - lossPs[1]) / (2 * FD_EPS)
            dL_error_rel += (dL - dL_fd).abs() / max(dL.abs(), dL_fd.abs(), gs.EPS)
        dL_error_rel /= TRIALS
        return dL_error_rel
    dL_dpos_error_rel = compute_dL_error(dL_dpos, "pos")
    assert_allclose(dL_dpos_error_rel, 0.0, atol=RTOL)
    dL_dquat_error_rel = compute_dL_error(dL_dquat, "quat")
    assert_allclose(dL_dquat_error_rel, 0.0, atol=RTOL)
# We need to use 64-bit precision for this test because we need to use sufficiently small perturbation to get reliable
# gradient estimates through finite difference method. This small perturbation is not supported by 32-bit precision in
# stable way.
@pytest.mark.required
@pytest.mark.precision("64")
def test_diff_solver(monkeypatch):
    """Validate the analytical gradients of the rigid constraint solver.

    The solver's backward pass produces gradients of a scalar loss w.r.t. its
    five inputs ([mass], [jac], [aref], [efc_D], [force]). Each gradient is
    checked against a central finite-difference estimate of the directional
    derivative along random unit directions, averaged over many trials.
    """
    from genesis.engine.solvers.rigid.constraint.solver import func_solve_init, func_solve_body
    from genesis.engine.solvers.rigid.rigid_solver import kernel_step_1

    # Tolerance on the trial-averaged relative directional-derivative error.
    RTOL = 1e-4

    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=0.01,
            requires_grad=True,
        ),
        rigid_options=gs.options.RigidOptions(
            # We use Newton's method because it converges faster than CG, and therefore gives better gradient estimation
            # when using finite difference method
            constraint_solver=gs.constraint_solver.Newton,
        ),
        show_viewer=False,
    )
    scene.add_entity(gs.morphs.Plane(pos=(0, 0, 0)))
    # Box placed far away so it produces its own contacts without touching the robot.
    scene.add_entity(gs.morphs.Box(size=(1, 1, 1), pos=(10, 10, 0.49)))
    franka = scene.add_entity(
        gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
    )
    scene.build()
    rigid_solver = scene._sim.rigid_solver
    constraint_solver = rigid_solver.constraint_solver
    franka.set_qpos([-1.0124, 1.5559, 1.3662, -1.6878, -1.5799, 1.7757, 1.4602, 0.04, 0.04])

    # Monkeypatch the constraint resolve function to avoid overwriting the necessary information for computing gradients.
    def constraint_solver_resolve():
        func_solve_init(
            dofs_info=rigid_solver.dofs_info,
            dofs_state=rigid_solver.dofs_state,
            entities_info=rigid_solver.entities_info,
            constraint_state=constraint_solver.constraint_state,
            rigid_global_info=rigid_solver._rigid_global_info,
            static_rigid_sim_config=rigid_solver._static_rigid_sim_config,
        )
        func_solve_body(
            entities_info=rigid_solver.entities_info,
            dofs_state=rigid_solver.dofs_state,
            constraint_state=constraint_solver.constraint_state,
            rigid_global_info=rigid_solver._rigid_global_info,
            static_rigid_sim_config=rigid_solver._static_rigid_sim_config,
        )

    monkeypatch.setattr(constraint_solver, "resolve", constraint_solver_resolve)

    # Step once to compute constraint solver's inputs: [mass], [jac], [aref], [efc_D], [force]. We do not call the
    # entire scene.step() because it will overwrite the necessary information that we need to compute the gradients.
    kernel_step_1(
        links_state=rigid_solver.links_state,
        links_info=rigid_solver.links_info,
        joints_state=rigid_solver.joints_state,
        joints_info=rigid_solver.joints_info,
        dofs_state=rigid_solver.dofs_state,
        dofs_info=rigid_solver.dofs_info,
        geoms_state=rigid_solver.geoms_state,
        geoms_info=rigid_solver.geoms_info,
        entities_state=rigid_solver.entities_state,
        entities_info=rigid_solver.entities_info,
        rigid_global_info=rigid_solver._rigid_global_info,
        static_rigid_sim_config=rigid_solver._static_rigid_sim_config,
        contact_island_state=constraint_solver.contact_island.contact_island_state,
        is_forward_pos_updated=True,
        is_forward_vel_updated=True,
        is_backward=False,
    )
    constraint_solver.add_equality_constraints()
    rigid_solver.collider.detection()
    constraint_solver.add_inequality_constraints()
    constraint_solver.resolve()

    # Loss function to compute gradients using finite difference method
    def compute_loss(input_mass, input_jac, input_aref, input_efc_D, input_force):
        rigid_solver._rigid_global_info.mass_mat.from_numpy(input_mass)
        constraint_solver.constraint_state.jac.from_numpy(input_jac)
        constraint_solver.constraint_state.aref.from_numpy(input_aref)
        constraint_solver.constraint_state.efc_D.from_numpy(input_efc_D)
        rigid_solver.dofs_state.force.from_numpy(input_force)
        # Recompute acc_smooth from the updated input variables
        updated_acc_smooth = np.linalg.solve(input_mass[..., 0], input_force[..., 0])
        rigid_solver.dofs_state.acc_smooth.from_numpy(updated_acc_smooth[..., None])
        constraint_solver.resolve()
        output_qacc = qd_to_torch(constraint_solver.qacc)
        return ((output_qacc - target_qacc) ** 2).mean()

    # Snapshot the solver inputs so each FD evaluation starts from the same state.
    init_input_mass = qd_to_numpy(rigid_solver._rigid_global_info.mass_mat, copy=True)
    init_input_jac = qd_to_numpy(constraint_solver.constraint_state.jac, copy=True)
    init_input_aref = qd_to_numpy(constraint_solver.constraint_state.aref, copy=True)
    init_input_efc_D = qd_to_numpy(constraint_solver.constraint_state.efc_D, copy=True)
    init_input_force = qd_to_numpy(rigid_solver.dofs_state.force, copy=True)

    # Initial output of the constraint solver
    set_random_seed(0)
    init_output_qacc = qd_to_torch(constraint_solver.qacc)
    # Random target scaled to the magnitude of the initial solution.
    target_qacc = torch.from_numpy(np.random.randn(*init_output_qacc.shape)).to(device=gs.device)
    target_qacc = target_qacc * init_output_qacc.abs().mean()

    # Solve the constraint solver and get the output
    output_qacc = qd_to_torch(constraint_solver.qacc, copy=True).requires_grad_(True)
    # Compute loss and gradient of the output
    loss = ((output_qacc - target_qacc) ** 2).mean()
    dL_dqacc = tensor_to_array(torch.autograd.grad(loss, output_qacc)[0])
    # Compute gradients of the input variables: [mass], [jac], [aref], [efc_D], [force]
    constraint_solver.backward(dL_dqacc)
    # Fetch gradients of the input variables
    dL_dM = qd_to_numpy(constraint_solver.constraint_state.dL_dM)
    dL_djac = qd_to_numpy(constraint_solver.constraint_state.dL_djac)
    dL_daref = qd_to_numpy(constraint_solver.constraint_state.dL_daref)
    dL_defc_D = qd_to_numpy(constraint_solver.constraint_state.dL_defc_D)
    dL_dforce = qd_to_numpy(constraint_solver.constraint_state.dL_dforce)

    ### Compute directional derivatives along random directions
    FD_EPS = 1e-3
    TRIALS = 200
    for dL_dx, x_type in (
        (dL_dforce, "force"),
        (dL_daref, "aref"),
        (dL_defc_D, "efc_D"),
        (dL_djac, "jac"),
        (dL_dM, "mass"),
    ):
        dL_error = 0.0
        for _ in range(TRIALS):
            # Random unit perturbation direction (normalized per batch slice).
            rand_dx = np.random.randn(*dL_dx.shape)
            rand_dx = rand_dx / max(
                np.linalg.norm(rand_dx, axis=0 if x_type in ("force", "aref", "efc_D") else (0, 1)), gs.EPS
            )
            if x_type == "mass":
                # Make rand_dx symmetric
                rand_dx = (rand_dx + np.moveaxis(rand_dx, 0, 1)) * 0.5
            # Analytical directional derivative along rand_dx.
            dL = (rand_dx * dL_dx).sum()
            input_force = init_input_force
            input_aref = init_input_aref
            input_efc_D = init_input_efc_D
            input_jac = init_input_jac
            input_mass = init_input_mass
            # 1 * eps
            if x_type == "force":
                input_force = init_input_force + rand_dx * FD_EPS
            elif x_type == "aref":
                input_aref = init_input_aref + rand_dx * FD_EPS
            elif x_type == "efc_D":
                input_efc_D = init_input_efc_D + rand_dx * FD_EPS
            elif x_type == "jac":
                input_jac = init_input_jac + rand_dx * FD_EPS
            elif x_type == "mass":
                input_mass = init_input_mass + rand_dx * FD_EPS
            lossP1 = compute_loss(input_mass, input_jac, input_aref, input_efc_D, input_force)
            # -1 * eps
            if x_type == "force":
                input_force = init_input_force - rand_dx * FD_EPS
            elif x_type == "aref":
                input_aref = init_input_aref - rand_dx * FD_EPS
            elif x_type == "efc_D":
                input_efc_D = init_input_efc_D - rand_dx * FD_EPS
            elif x_type == "jac":
                input_jac = init_input_jac - rand_dx * FD_EPS
            elif x_type == "mass":
                input_mass = init_input_mass - rand_dx * FD_EPS
            lossP2 = compute_loss(input_mass, input_jac, input_aref, input_efc_D, input_force)
            # Central finite difference vs analytical derivative, relative error.
            dL_fd = (lossP1 - lossP2) / (2 * FD_EPS)
            dL_error += (dL - dL_fd).abs() / max(abs(dL), abs(dL_fd), gs.EPS)
        dL_error /= TRIALS
        assert_allclose(dL_error, 0.0, atol=RTOL)
@pytest.mark.slow  # ~250s
@pytest.mark.required
@pytest.mark.parametrize("backend", [gs.cpu, gs.gpu])
def test_differentiable_rigid(show_viewer):
    """Optimize a box's initial pose through a differentiable rigid-body rollout.

    A free box falls for ``horizon`` steps; gradients of the final-pose error
    w.r.t. the *initial* position/quaternion flow back through the whole
    simulation, and Adam drives the final pose toward the goal.
    """
    dt = 1e-2
    horizon = 100
    substeps = 1
    goal_pos = gs.tensor([0.7, 1.0, 0.05])
    goal_quat = gs.tensor([0.3, 0.2, 0.1, 0.9])
    # Normalize the goal quaternion so the orientation error is well defined.
    goal_quat = goal_quat / torch.norm(goal_quat, dim=-1, keepdim=True)
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(dt=dt, substeps=substeps, requires_grad=True, gravity=(0, 0, -1)),
        rigid_options=gs.options.RigidOptions(
            # All constraints disabled: the rollout is pure unconstrained dynamics.
            enable_collision=False,
            enable_self_collision=False,
            enable_joint_limit=False,
            disable_constraint=True,
            use_contact_island=False,
            use_hibernation=False,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(2.5, -0.15, 2.42),
            camera_lookat=(0.5, 0.5, 0.1),
        ),
        show_viewer=show_viewer,
    )
    box = scene.add_entity(
        gs.morphs.Box(
            pos=(0, 0, 0),
            size=(0.1, 0.1, 0.2),
        ),
        surface=gs.surfaces.Default(
            color=(0.9, 0.0, 0.0, 1.0),
        ),
    )
    if show_viewer:
        # Semi-transparent green box visualizing the goal pose.
        target = scene.add_entity(
            gs.morphs.Box(
                pos=goal_pos,
                quat=goal_quat,
                size=(0.1, 0.1, 0.2),
            ),
            surface=gs.surfaces.Default(
                color=(0.0, 0.9, 0.0, 0.5),
            ),
        )
    scene.build()
    num_iter = 200
    lr = 1e-2
    init_pos = gs.tensor([0.3, 0.1, 0.28], requires_grad=True)
    init_quat = gs.tensor([1.0, 0.0, 0.0, 0.0], requires_grad=True)
    optimizer = torch.optim.Adam([init_pos, init_quat], lr=lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_iter, eta_min=1e-3)
    for _ in range(num_iter):
        scene.reset()
        box.set_pos(init_pos)
        box.set_quat(init_quat)
        loss = 0
        for _ in range(horizon):
            scene.step()
            if show_viewer:
                target.set_pos(goal_pos)
                target.set_quat(goal_quat)
        # L1 error between the final pose after the rollout and the goal pose.
        box_state = box.get_state()
        box_pos = box_state.pos
        box_quat = box_state.quat
        loss = torch.abs(box_pos - goal_pos).sum() + torch.abs(box_quat - goal_quat).sum()
        optimizer.zero_grad()
        loss.backward()  # this lets gradient flow all the way back to tensor input
        optimizer.step()
        scheduler.step()
        with torch.no_grad():
            # Re-project onto the unit-quaternion manifold after the gradient step.
            init_quat.data = init_quat / torch.norm(init_quat, dim=-1, keepdim=True)
    assert_allclose(loss, 0.0, atol=1e-2)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_grad.py",
"license": "Apache License 2.0",
"lines": 413,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:tests/test_recorders.py | import csv
import numpy as np
import pytest
import genesis as gs
from genesis.utils.image_exporter import as_grayscale_image
from .utils import assert_allclose, rgb_array_to_png_bytes
@pytest.fixture
def mpl_agg_backend():
    """Switch matplotlib to the Agg backend for the duration of a test.

    The previously active backend is recorded before switching and restored
    during teardown, so the fixture has no lasting effect on other tests.
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt

    # Remember whichever backend is currently active; fall back to "Agg" if the
    # matplotlib version does not expose get_backend().
    try:
        previous_backend = mpl.get_backend()
    except AttributeError:
        previous_backend = "Agg"
    # Headless, deterministic rendering for the test body.
    plt.switch_backend("Agg")
    yield
    # Teardown: put the original backend back in place.
    plt.switch_backend(previous_backend)
@pytest.mark.required
def test_plotter(tmp_path, monkeypatch, mpl_agg_backend, png_snapshot):
    """Test that the matplotlib line-plot recorder works (sampling rate, rolling
    history, rendered image snapshot, and video-frame forwarding)."""
    DT = 0.01
    STEPS = 10
    HISTORY_LENGTH = 5

    # FIXME: Hijack video writer to keep track of all the frames that are being recorded
    buffers = []

    def process(self, data, cur_time):
        nonlocal buffers
        buffers.append((data, cur_time))

    monkeypatch.setattr("genesis.recorders.file_writers.VideoFileWriter.process", process)

    scene = gs.Scene(
        sim_options=gs.options.SimOptions(dt=DT),
        show_viewer=False,
        show_FPS=False,
    )
    scene.add_entity(
        morph=gs.morphs.Box(size=(0.1, 0.1, 0.1), pos=(0.0, 0.0, 0.5)),
        material=gs.materials.Rigid(rho=1000.0),
    )

    # Deterministic data source; call_count lets us verify the sampling rate.
    call_count = 0

    def dummy_data_func():
        nonlocal call_count
        call_count += 1
        return {
            "a": [call_count * 0.1, call_count * 0.2, call_count * 0.3],
            "b": [call_count * 0.01, call_count * 0.02],
        }

    plotter = scene.start_recording(
        data_func=dummy_data_func,
        rec_options=gs.recorders.MPLLinePlot(
            labels={"a": ("x", "y", "z"), "b": ("u", "v")},
            title="Test MPLPlotter",
            history_length=HISTORY_LENGTH,
            window_size=(400, 300),
            hz=1.0 / DT / 2,  # half of the simulation frequency, so every other step
            save_to_filename=str(tmp_path / "video.mp4"),
            show_window=False,
        ),
    )
    scene.build()
    for _ in range(STEPS):
        scene.step()
    if plotter.run_in_thread:
        # Drain the background queue before inspecting the plotter state.
        plotter.sync()
    assert call_count == STEPS // 2 + 1  # one additional call during plot setup
    # Rolling window: only the last HISTORY_LENGTH samples are kept.
    assert len(plotter.line_plot.x_data) == HISTORY_LENGTH
    assert np.isclose(plotter.line_plot.x_data[-1], STEPS * DT, atol=gs.EPS)
    assert rgb_array_to_png_bytes(plotter.get_image_array()) == png_snapshot
    # One video frame per recorded sample (every other step).
    assert len(buffers) == 5
    assert_allclose([cur_time for _, cur_time in buffers], np.arange(STEPS + 1)[::2][1:] * DT, tol=gs.EPS)
    # Consecutive frames must differ visibly since the plotted data changes.
    for rgb_diff in np.diff([data for data, _ in buffers], axis=0):
        assert rgb_diff.max() > 10.0
    # Intentionally do not stop the recording to test the destructor
    # scene.stop_recording()
@pytest.mark.required
def test_file_writers(tmp_path):
    """Test that the CSV and NPZ file writer recorders work end to end."""
    STEPS = 10
    scene = gs.Scene(
        show_viewer=False,
        show_FPS=False,
    )
    scene.add_entity(morph=gs.morphs.Plane())
    # Box starts slightly above the plane and falls into contact during the run.
    box = scene.add_entity(
        morph=gs.morphs.Box(
            size=(0.1, 0.1, 0.1),
            pos=(0.0, 0.0, 0.06),
        ),
    )
    contact_sensor = scene.add_sensor(gs.sensors.Contact(entity_idx=box.idx))

    # CSV writer attached to the contact sensor.
    csv_file = tmp_path / "contact_data.csv"
    csv_writer = gs.recorders.CSVFile(filename=str(csv_file), header=("in_contact",))
    contact_sensor.start_recording(csv_writer)

    # NPZ writer fed by an ad-hoc data function (tensor + plain scalar).
    npz_file = tmp_path / "scene_data.npz"
    scene.start_recording(
        data_func=lambda: {"box_pos": box.get_pos(), "dummy": 1},
        rec_options=gs.recorders.NPZFile(filename=str(npz_file)),
    )
    scene.build()
    for _ in range(STEPS):
        scene.step()
    scene.stop_recording()

    assert csv_file.exists()
    with open(csv_file, "r") as f:
        reader = csv.reader(f)
        rows = list(reader)
    assert len(rows) == STEPS + 1  # header + data rows
    assert rows[1][1] in ("False", "0")  # not in contact initially
    assert rows[-1][1] in ("True", "1")  # in contact after falling

    assert npz_file.exists()
    data = np.load(npz_file)
    assert "timestamp" in data
    assert "box_pos" in data
    assert "dummy" in data
    assert len(data["timestamp"]) == STEPS
@pytest.mark.required
def test_video_writer(tmp_path):
    """Test if the VideoFileWriter works with camera rendering."""
    STEPS = 10
    scene = gs.Scene(
        show_viewer=False,
        show_FPS=False,
    )
    scene.add_entity(morph=gs.morphs.Plane())
    scene.add_entity(
        morph=gs.morphs.Box(
            size=(0.1, 0.1, 0.1),
            pos=(0.0, 0.0, 1.0),
        ),
    )
    camera = scene.add_camera(
        res=(300, 200),  # Using weird resolution to trigger padding
        pos=(2.0, 2.0, 2.0),
        lookat=(0.0, 0.0, 0.2),
        GUI=False,
    )

    # RGB recording with explicit codec options.
    video_rgb_path = tmp_path / "test_rgb.mp4"
    scene.start_recording(
        data_func=lambda: camera.render(rgb=True, depth=False, segmentation=False, normal=False)[0],
        rec_options=gs.recorders.VideoFile(
            filename=str(video_rgb_path),
            codec="libx264",
            codec_options={"preset": "veryfast", "tune": "zerolatency"},
        ),
    )
    # Depth recording: depth map converted to a grayscale image before encoding.
    video_depth_path = tmp_path / "test_depth.mp4"
    scene.start_recording(
        data_func=lambda: as_grayscale_image(camera.render(rgb=False, depth=True, segmentation=False, normal=False)[1]),
        rec_options=gs.recorders.VideoFile(
            filename=str(video_depth_path),
        ),
    )
    scene.build()
    for _ in range(STEPS):
        scene.step()
    scene.stop_recording()
    for video_path in (video_rgb_path, video_depth_path):
        assert video_path.exists(), "Recorded video file should exist"
        assert video_path.stat().st_size > 0, "Recorded video file should not be empty"
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_recorders.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:genesis/recorders/base_recorder.py | import queue
import threading
import time
from typing import TYPE_CHECKING, Callable, Generic, TypeVar
import genesis as gs
from genesis.options.recorders import RecorderOptions
if TYPE_CHECKING:
from .recorder_manager import RecorderManager
T = TypeVar("T")
class Recorder(Generic[T]):
    """
    Base class for all recorders.

    A recorder periodically samples data from a user-supplied ``data_func`` and
    processes each sample (write to file, update a plot, ...), either
    synchronously on the simulation thread or asynchronously via a background
    thread and a bounded queue.

    Note that modifying the signature of this class in recorder implementations should be avoided since instantiation is
    done through the RecorderManager.
    """

    def __init__(self, manager: "RecorderManager", options: RecorderOptions, data_func: Callable[[], T]):
        self._options = options
        self._manager = manager
        self._data_func = data_func
        # Number of simulation steps between two recorded samples (>= 1).
        self._steps_per_sample = 1
        self._is_built = False
        self._is_recording = False
        self._data_queue: queue.Queue | None = None
        self._processor_thread: threading.Thread | None = None
        if options.hz:
            # Convert the requested sampling frequency into a whole number of
            # simulation steps, warning if it cannot be matched exactly.
            steps_per_sample_float = 1.0 / (options.hz * manager._step_dt)
            steps_per_sample = max(1, round(steps_per_sample_float))
            if abs(steps_per_sample_float - steps_per_sample) > gs.EPS:
                # NOTE(review): the message wording "multiple of step size of step dt"
                # looks garbled — presumably it means "integer divisor of the step frequency".
                gs.logger.warning(
                    f"[Recorder] hz={options.hz} is not an integer multiple of step size of step dt. "
                    f"Using hz={1.0 / steps_per_sample / manager._step_dt} instead."
                )
            self._steps_per_sample = steps_per_sample

    # =============================== methods to implement ===============================

    @gs.assert_unbuilt
    def build(self):
        """
        Build the recorder, e.g. by initializing variables and creating widgets or file handles.
        """
        self._is_built = True

    @gs.assert_built
    def process(self, data, cur_time):
        """
        Process each incoming data sample.

        Parameters
        ----------
        data: Any
            The data to be processed.
        cur_time: float
            The current time of the simulation.
        """
        raise NotImplementedError(f"[{type(self).__name__}] process() is not implemented.")

    @gs.assert_built
    def cleanup(self):
        """
        Cleanup all resources, e.g. by closing widgets or files.

        This method is called when recording is stopped by `scene.stop_recording()`.
        """
        raise NotImplementedError(f"[{type(self).__name__}] cleanup() is not implemented.")

    @gs.assert_built
    def reset(self, envs_idx=None):
        """
        Reset the recorder, e.g. by flushing stored data.

        This method is called when the scene is reset by `scene.reset()`.

        Parameters
        ----------
        envs_idx: array_like, optional
            The indices of the environments to reset. If None, all environments are reset.
        """
        if self.run_in_thread:
            # sync the thread to ensure all data is processed
            self.sync()

    @property
    def run_in_thread(self) -> bool:
        """
        Whether to run the recorder in a background thread.

        Running in a background thread allows for processing data without blocking the main thread, so this is
        encouraged for most recorders (simply `return True`), but implementers should check that the recorder is
        thread-safe on all devices (threading on macOS tends to be less supported).
        """
        raise NotImplementedError(f"[{type(self).__name__}] run_in_thread is not implemented.")

    # =============================== recording ===============================

    def _process_data_loop(self):
        """Background thread that processes and outputs data."""
        if self._data_queue is None:
            return
        # Keep draining after recording stops until the queue is empty, so no
        # sample accepted before stop() is lost.
        while self._is_recording or not self._data_queue.empty():
            try:
                data, timestamp = self._data_queue.get(timeout=1.0)
                self.process(data, timestamp)
                self._data_queue.task_done()
            except queue.Empty:
                # Timeout lets the loop re-check _is_recording periodically.
                continue

    @gs.assert_built
    def start(self):
        """Start the recording thread if run_in_thread is True."""
        self._is_recording = True
        if self.run_in_thread:
            self.start_thread()

    @gs.assert_built
    def stop(self):
        """Stop the recording thread and cleanup resources."""
        if self._is_recording:
            self._is_recording = False
            if self.run_in_thread:
                self.join_thread()
            self.cleanup()

    @gs.assert_built
    def join_thread(self):
        """Wait for the processor thread to finish."""
        if self._processor_thread is not None:
            self._processor_thread.join()
            self._processor_thread = None
            self._data_queue = None
        else:
            gs.logger.warning(f"[{type(self).__name__}] join_thread(): No processor thread to join.")

    @gs.assert_built
    def start_thread(self):
        """Start the background processor thread (no-op with a warning if one already exists)."""
        if self._processor_thread is None:
            self._data_queue = queue.Queue(maxsize=self._options.buffer_size)
            self._processor_thread = threading.Thread(target=self._process_data_loop)
            self._processor_thread.start()
        else:
            gs.logger.warning(f"[{type(self).__name__}] start_thread(): Processor thread already exists.")

    @gs.assert_built
    def sync(self, timeout: float | None = None):
        """
        Wait until the data queue is empty.

        Parameters
        ----------
        timeout: float | None
            The maximum time to wait for the data queue to be empty. If None, wait indefinitely.
            If the timeout is reached, an exception is raised.
        """
        # Poll interval: at most 0.1s, never longer than the remaining timeout.
        timestep = min(0.1, timeout) if timeout is not None else 0.1
        if self._data_queue is not None:
            if timeout is not None:
                start_time = time.time()
            while not self._data_queue.empty():
                if timeout is not None and time.time() - start_time > timeout:
                    gs.raise_exception(f"[{type(self).__name__}] sync(): Timeout waiting for data queue to be empty.")
                dt = min(timestep, (start_time + timeout) - time.time()) if timeout is not None else timestep
                if dt > 0.0:
                    time.sleep(dt)

    @gs.assert_built
    def step(self, global_step: int):
        """Process a simulation step, potentially recording data."""
        if not self._is_recording:
            return
        # Subsample: only record every _steps_per_sample steps.
        if global_step % self._steps_per_sample != 0:
            return
        global_time = global_step * self._manager._step_dt
        data = self._data_func()
        if not self.run_in_thread:
            # non-threaded mode: process data synchronously
            self.process(data, global_time)
            return
        # threaded mode: put data in queue
        try:
            self._data_queue.put((data, global_time), block=False)
            return
        except queue.Full:
            # Queue full: give the consumer a short grace period to catch up.
            try:
                self._data_queue.put((data, global_time), timeout=self._options.buffer_full_wait_time)
                return
            except queue.Full:
                pass
        # Still full: drop the oldest sample to make room for the newest one.
        gs.logger.debug("[Recorder] Data queue is full, dropping oldest data sample.")
        try:
            self._data_queue.get_nowait()
        except queue.Empty:
            # Queue became empty between operations, just put the data
            pass
        finally:
            self._data_queue.put_nowait((data, global_time))

    @property
    def is_built(self) -> bool:
        return self._is_built
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/recorders/base_recorder.py",
"license": "Apache License 2.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/recorders/file_writers.py | import csv
import os
from collections import defaultdict
from pathlib import Path
import numpy as np
import torch
import genesis as gs
from genesis.options.recorders import (
VideoFile as VideoFileWriterOptions,
CSVFile as CSVFileWriterOptions,
NPZFile as NPZFileWriterOptions,
)
from genesis.utils import tensor_to_array
from .base_recorder import Recorder
from .recorder_manager import register_recording
try:
import av
except ImportError:
pass
class BaseFileWriter(Recorder):
    """
    Base class for file writers.

    Handles filename counter when save_on_reset is True.
    """

    def build(self):
        super().build()
        self.counter = 0
        # Make sure the destination directory exists before any writer opens a file.
        target_dir = os.path.dirname(self._options.filename)
        os.makedirs(os.path.abspath(target_dir), exist_ok=True)
        self._initialize_writer()

    def reset(self, envs_idx=None):
        super().reset(envs_idx)
        # no envs specific saving supported
        if self._options.save_on_reset:
            # Finalize the current file, then open a fresh one with a bumped suffix.
            self.cleanup()
            self.counter += 1
            self._initialize_writer()

    def _get_filename(self):
        # Without save_on_reset the configured filename is used verbatim;
        # otherwise the reset counter is appended before the extension.
        if not self._options.save_on_reset:
            return self._options.filename
        stem, suffix = os.path.splitext(self._options.filename)
        return f"{stem}_{self.counter}{suffix}"

    def _initialize_writer(self):
        # Hook for subclasses to (re)open their output target.
        pass
@register_recording(VideoFileWriterOptions)
class VideoFileWriter(BaseFileWriter):
    """Recorder that encodes incoming image frames into a video file via PyAV (ffmpeg)."""

    # Encoding state, created lazily: the container on (re)initialization, the
    # stream/frame/buffer on the first processed frame (their shape depends on it).
    video_container: "av.container.OutputContainer | None"
    video_stream: "av.video.stream.VideoStream | None"
    video_frame: "av.video.frame.VideoFrame | None"
    video_buffer: "np.ndarray | None"

    def build(self):
        self.video_container = None
        self.video_stream = None
        self.video_frame = None
        self.video_buffer = None
        # Default the frame rate to the effective sampling rate when no explicit fps is given.
        self.fps = int(
            round(
                1.0 / (self._steps_per_sample * self._manager._step_dt)
                if self._options.fps is None
                else self._options.fps
            )
        )
        super().build()

    def _initialize_writer(self):
        video_path = self._get_filename()
        video_name = self._options.name or Path(video_path).stem
        # Create ffmpeg video container
        self.video_container = av.open(video_path, mode="w")
        self.video_container.metadata["title"] = video_name

    def _initialize_data(self, data):
        """Create the video stream and reusable frame buffer from the first frame.

        Accepts integer-typed grayscale [H, W] or color [H, W, RGB] frames only.
        """
        assert isinstance(data, (np.ndarray, torch.Tensor))
        is_color = data.ndim == 3 and data.shape[-1] == 3
        if isinstance(data, np.ndarray):
            is_dtype_int = np.issubdtype(data.dtype, np.integer)
        else:
            is_dtype_int = not torch.is_floating_point(data)
        if data.ndim != 2 + is_color or not is_dtype_int:
            gs.raise_exception(f"[{type(self).__name__}] Data must be either grayscale [H, W] or color [H, W, RGB]")
        height, width, *_ = data.shape
        # Create ffmpeg video stream
        self.video_stream = self.video_container.add_stream(self._options.codec, rate=self.fps)
        assert isinstance(self.video_stream, av.video.stream.VideoStream)
        self.video_stream.width, self.video_stream.height = (width, height)
        self.video_stream.pix_fmt = "yuv420p"
        self.video_stream.bit_rate = int(self._options.bitrate * (8 * 1024**2))
        self.video_stream.codec_context.options = self._options.codec_options
        # Create frame storage once for efficiency. The numpy buffer is a view into
        # the frame plane; line_size accounts for the encoder's row stride/padding.
        if is_color:
            self.video_frame = av.VideoFrame(width, height, "rgb24")
            frame_plane = self.video_frame.planes[0]
            self.video_buffer = np.asarray(memoryview(frame_plane)).reshape((-1, frame_plane.line_size // 3, 3))
        else:
            self.video_frame = av.VideoFrame(width, height, "gray8")
            frame_plane = self.video_frame.planes[0]
            self.video_buffer = np.asarray(memoryview(frame_plane)).reshape((-1, frame_plane.line_size))

    def process(self, data, cur_time):
        # Lazy setup on the first frame, since stream geometry depends on it.
        if self.video_buffer is None:
            self._initialize_data(data)
        if isinstance(data, torch.Tensor):
            data = tensor_to_array(data)
        data = data.astype(np.uint8)
        # Write frame
        self.video_buffer[: data.shape[0], : data.shape[1]] = data
        for packet in self.video_stream.encode(self.video_frame):
            self.video_container.mux(packet)

    def cleanup(self):
        if self.video_container is not None:
            # Finalize video recording.
            # Note that 'video_stream' may be None if 'process' was never called.
            if self.video_stream is not None:
                # Flush any packets still buffered inside the encoder.
                for packet in self.video_stream.encode(None):
                    self.video_container.mux(packet)
            self.video_container.close()
            gs.logger.info(f'Video saved to "~<{self._options.filename}>~".')
            self.video_container = None
            self.video_stream = None
            self.video_frame = None
            self.video_buffer = None

    @property
    def run_in_thread(self) -> bool:
        # Frames are processed synchronously on the calling thread.
        return False
@register_recording(CSVFileWriterOptions)
class CSVFileWriter(BaseFileWriter):
    """Recorder that appends one CSV row per sample, prefixed with a timestamp column."""

    def _initialize_writer(self):
        # Header is written lazily on the first sample (its width depends on the data).
        self.wrote_data = False
        self.file_handle = open(self._get_filename(), "w", encoding="utf-8", newline="")
        self.csv_writer = csv.writer(self.file_handle)

    def _sanitize_to_list(self, value):
        """Flatten a supported value (tensor/array/scalar/sequence) into a flat list of cells."""
        if isinstance(value, (torch.Tensor, np.ndarray)):
            return value.reshape((-1,)).tolist()
        elif isinstance(value, (int, float, bool)):
            return [value]
        elif isinstance(value, (list, tuple)):
            return value
        else:
            gs.raise_exception(f"[{type(self).__name__}] Unsupported data type: {type(value)}")

    def process(self, data, cur_time):
        # Row layout: timestamp first, then all (flattened) data values.
        row_data = [cur_time]
        if isinstance(data, dict):
            for value in data.values():
                row_data.extend(self._sanitize_to_list(value))
        else:
            row_data.extend(self._sanitize_to_list(data))
        if not self.wrote_data:  # write header
            header = ["timestamp"]
            if self._options.header:
                header.extend(self._options.header)
            else:
                # No user header: derive column names from dict keys / positions.
                if isinstance(data, dict):
                    for key, val in data.items():
                        if hasattr(val, "__len__"):
                            header.extend([f"{key}_{i}" for i in range(len(val))])
                        else:
                            header.append(key)
                else:
                    header.extend([f"data_{i}" for i in range(1, len(row_data))])
            if len(header) != len(row_data):
                gs.raise_exception(f"[{type(self).__name__}] header length does not match data length.")
            self.csv_writer.writerow(header)
            self.wrote_data = True
        self.csv_writer.writerow(row_data)
        if self._options.save_every_write:
            self.file_handle.flush()

    def cleanup(self):
        if self.file_handle:
            if self.wrote_data:
                self.file_handle.close()
                gs.logger.info(f'[CSVFileWriter] Saved to ~<"{self._get_filename()}">~.')
            else:
                # Nothing was ever written: close and remove the empty file.
                self.file_handle.close()
                os.remove(self._get_filename())  # delete empty file

    @property
    def run_in_thread(self) -> bool:
        return True
@register_recording(NPZFileWriterOptions)
class NPZFileWriter(BaseFileWriter):
    """Recorder that accumulates samples in memory and saves them as a compressed ``.npz`` archive on cleanup."""

    def build(self):
        # One list of samples per data key; "timestamp" tracks the sampling times.
        self.all_data: dict[str, list] = defaultdict(list)
        super().build()

    def process(self, data, cur_time):
        """Append one sample (and its timestamp) to the in-memory buffers.

        Parameters
        ----------
        data: Any
            The sampled data. Dict values are stored under their own keys
            (tensors converted to numpy arrays); anything else is stored under
            the "data" key.
        cur_time: float
            The current simulation time, stored under "timestamp".
        """
        self.all_data["timestamp"].append(cur_time)
        if isinstance(data, dict):
            for key, value in data.items():
                if isinstance(value, torch.Tensor):
                    value = tensor_to_array(value)
                assert isinstance(value, (int, float, bool, list, tuple, np.ndarray))
                self.all_data[key].append(value)
        else:
            self.all_data["data"].append(tensor_to_array(data))

    def cleanup(self):
        """Save the accumulated data to disk (if any) and clear the buffers."""
        filename = self._get_filename()
        if self.all_data["timestamp"]:  # at least one data point was collected
            try:
                np.savez_compressed(filename, **self.all_data)
            except ValueError as error:
                # Ragged samples cannot be stacked into regular arrays; fall back to object dtype.
                gs.logger.warning(f"NPZFileWriter: saving as dtype=object due to ValueError: {error}")
                np.savez_compressed(filename, **{k: np.array(v, dtype=object) for k, v in self.all_data.items()})
            # FIX: report the actual output file (the message previously contained a
            # hard-coded "(unknown)" placeholder), and only claim success when
            # something was actually saved.
            gs.logger.info(f'[NPZFileWriter] Saved data with keys {list(self.all_data.keys())} to ~<"{filename}">~.')
        self.all_data.clear()

    @property
    def run_in_thread(self) -> bool:
        return True
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/recorders/file_writers.py",
"license": "Apache License 2.0",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/recorders/plotters.py | import io
import itertools
import sys
import threading
import time
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from typing import Any, Callable, T
import numpy as np
import torch
from PIL import Image
import genesis as gs
from genesis.options.recorders import (
BasePlotterOptions,
LinePlotterMixinOptions,
PyQtLinePlot as PyQtLinePlotterOptions,
MPLLinePlot as MPLLinePlotterOptions,
MPLImagePlot as MPLImagePlotterOptions,
)
from genesis.utils import has_display, tensor_to_array
from .base_recorder import Recorder
from .recorder_manager import RecorderManager, register_recording
IS_PYQTGRAPH_AVAILABLE = False
try:
import pyqtgraph as pg
IS_PYQTGRAPH_AVAILABLE = True
except ImportError:
pass
IS_MATPLOTLIB_AVAILABLE = False
try:
import matplotlib as mpl
IS_MATPLOTLIB_AVAILABLE = tuple(map(int, mpl.__version__.replace("+", ".").split(".")[:3])) >= (3, 7, 0)
except ImportError:
pass
MPL_PLOTTER_RESCALE_MIN_X = 0.5
MPL_PLOTTER_RESCALE_RATIO_X = 0.15
MPL_PLOTTER_RESCALE_RATIO_Y = 0.15
COLORS = itertools.cycle(("r", "g", "b", "c", "m", "y"))
def _data_to_array(data: Sequence) -> np.ndarray:
if isinstance(data, torch.Tensor):
data = tensor_to_array(data)
return np.atleast_1d(data)
class BasePlotter(Recorder):
    """Base class for plotting recorders: updates a plot per sample and can
    optionally forward rendered frames to a chained video-file recorder."""

    def __init__(self, manager: "RecorderManager", options: BasePlotterOptions, data_func: Callable[[], T]):
        # Default window visibility to whether a display is actually available.
        if options.show_window is None:
            options.show_window = has_display()
        super().__init__(manager, options, data_func)
        # Rendered frames pending consumption by the chained video writer.
        self._frames_buffer: list[np.ndarray] = []

    def build(self):
        super().build()
        self.video_writer = None
        if self._options.save_to_filename:

            def _get_video_frame_buffer(plotter):
                # Make sure that all the data in the pipe has been processed before rendering anything
                # NOTE(review): busy-waits only while the plotter's queue is non-empty;
                # if both buffer and queue are empty, pop(0) below would raise — presumably
                # the recorder cadence guarantees a frame exists. TODO confirm.
                if not plotter._frames_buffer:
                    if plotter._data_queue is not None and not plotter._data_queue.empty():
                        while not plotter._frames_buffer:
                            time.sleep(0.1)
                return plotter._frames_buffer.pop(0)

            # Chain a video-file recorder that consumes frames from our buffer
            # at the same sampling frequency.
            self.video_writer = self._manager.add_recorder(
                data_func=partial(_get_video_frame_buffer, self),
                rec_options=gs.recorders.VideoFile(
                    filename=self._options.save_to_filename,
                    hz=self._options.hz,
                ),
            )

    def process(self, data, cur_time):
        # Update plot
        self._update_plot()
        # Render frame if necessary
        if self._options.save_to_filename:
            self._frames_buffer.append(self.get_image_array())

    def cleanup(self):
        if self.video_writer is not None:
            self.video_writer.stop()
            self._frames_buffer.clear()
            self.video_writer = None

    def _update_plot(self):
        """
        Update plot.
        """
        raise NotImplementedError(f"[{type(self).__name__}] _update_plot() is not implemented.")

    def get_image_array(self):
        """
        Capture the plot image as a video frame.

        Returns
        -------
        image_array : np.ndarray
            The RGB image as a numpy array.
        """
        raise NotImplementedError(f"[{type(self).__name__}] get_image_array() is not implemented.")
class LinePlotHelper:
    """
    Helper class that manages line plot data.

    Use composition pattern.

    Maintains a rolling window of (x, y) samples organized as
    subplot key -> channel label -> list of values, with the subplot/channel
    structure fixed from the first data sample (and optional user labels).
    """

    def __init__(self, options: LinePlotterMixinOptions, data: dict[str, Sequence] | Sequence):
        # Shared time axis for all subplots.
        self.x_data: list[float] = []
        # y_data[subplot_key][channel_label] -> list of float samples.
        self.y_data: defaultdict[str, defaultdict[str, list[float]]] = defaultdict(lambda: defaultdict(list))
        self._history_length = options.history_length
        # Note that these attributes will be set during first data processing or initialization
        self._is_dict_data: bool | None = None
        self._subplot_structure: dict[str, tuple[str, ...]] = {}
        if isinstance(data, dict):
            # Dict data: one subplot per key.
            self._is_dict_data = True
            if options.labels is not None:
                assert isinstance(options.labels, dict), (
                    f"[{type(self).__name__}] Labels must be a dict when data is a dict"
                )
                assert set(options.labels.keys()) == set(data.keys()), (
                    f"[{type(self).__name__}] Label keys must match data keys"
                )
                for key in data.keys():
                    data_values = _data_to_array(data[key])
                    label_values = options.labels[key]
                    assert len(label_values) == len(data_values), (
                        f"[{type(self).__name__}] Label count must match data count for key '{key}'"
                    )
                    self._subplot_structure[key] = tuple(label_values)
            else:
                # No labels given: auto-generate "<key>_<i>" channel names.
                self._subplot_structure = {}
                for key, values in data.items():
                    values = _data_to_array(values)
                    self._subplot_structure[key] = tuple(f"{key}_{i}" for i in range(len(values)))
        else:
            # Flat data: a single "main" subplot.
            self._is_dict_data = False
            data = _data_to_array(data)
            if options.labels is not None:
                if not isinstance(options.labels, Sequence):
                    options.labels = (options.labels,)
                assert len(options.labels) == len(data), f"[{type(self).__name__}] Label count must match data count"
                plot_labels = tuple(options.labels)
            else:
                plot_labels = tuple(f"data_{i}" for i in range(len(data)))
            self._subplot_structure = {"main": plot_labels}

    def clear_data(self):
        self.x_data.clear()
        self.y_data.clear()

    def process(self, data, cur_time):
        """Process new data point and update plot."""
        if self._is_dict_data:
            processed_data = {}
            for key, values in data.items():
                if key not in self._subplot_structure:
                    continue  # skip keys not included in subplot structure
                values = _data_to_array(values)
                processed_data[key] = values
        else:
            data = _data_to_array(data)
            processed_data = {"main": data}
        # Update time data
        self.x_data.append(cur_time)
        # Update y data for each subplot
        for subplot_key, subplot_data in processed_data.items():
            channel_labels = self._subplot_structure[subplot_key]
            if len(subplot_data) != len(channel_labels):
                # Shape changed w.r.t. the structure fixed at init: drop the sample.
                gs.logger.warning(
                    f"[{type(self).__name__}] Data length ({len(subplot_data)}) doesn't match "
                    f"expected number of channels ({len(channel_labels)}) for subplot '{subplot_key}', skipping..."
                )
                continue
            for i, channel_label in enumerate(channel_labels):
                if i < len(subplot_data):
                    self.y_data[subplot_key][channel_label].append(float(subplot_data[i]))
        # Maintain rolling history window
        if len(self.x_data) > self._history_length:
            self.x_data.pop(0)
            for subplot_key in self.y_data:
                for channel_label in self.y_data[subplot_key]:
                    try:
                        self.y_data[subplot_key][channel_label].pop(0)
                    except IndexError:
                        break  # empty, nothing to do.

    @property
    def history_length(self):
        return self._history_length

    @property
    def is_dict_data(self):
        return self._is_dict_data

    @property
    def subplot_structure(self):
        return self._subplot_structure
class BasePyQtPlotter(BasePlotter):
    """
    Base class for PyQt based plotters.

    Owns the process-wide Qt application handle and the top-level
    ``GraphicsLayoutWidget`` that concrete plotters add plots to.
    """

    def __init__(self, manager: "RecorderManager", options: BasePlotterOptions, data_func: Callable[[], T]):
        super().__init__(manager, options, data_func)
        # Qt widgets can only be created/driven from the main thread; fail fast otherwise.
        if threading.current_thread() is not threading.main_thread():
            gs.raise_exception("Impossible to run PyQtPlotter in background thread.")

    def build(self):
        # Create (or reuse) the QApplication and the layout widget.
        if not IS_PYQTGRAPH_AVAILABLE:
            gs.raise_exception(
                f"{type(self).__name__} pyqtgraph is not installed. Please install it with `pip install pyqtgraph`."
            )
        super().build()
        self.app: pg.QtWidgets.QApplication | None = None
        self.widget: pg.GraphicsLayoutWidget | None = None
        self.plot_widgets: list[pg.PlotWidget] = []
        # Only one QApplication may exist per process: reuse it when present.
        if not pg.QtWidgets.QApplication.instance():
            self.app = pg.QtWidgets.QApplication([])
        else:
            self.app = pg.QtWidgets.QApplication.instance()
        self.widget = pg.GraphicsLayoutWidget(show=self._options.show_window, title=self._options.title)
        if self._options.show_window:
            gs.logger.info(f"[{type(self).__name__}] created PyQtGraph window")
        self.widget.resize(*self._options.window_size)

    def cleanup(self):
        # Close the window and release all widget references.
        super().cleanup()
        if self.widget:
            try:
                self.widget.close()
                gs.logger.debug(f"[{type(self).__name__}] closed PyQtGraph window")
            except Exception as e:
                gs.logger.warning(f"[{type(self).__name__}] Error closing window: {e}")
            finally:
                self.plot_widgets.clear()
                self.widget = None

    @property
    def run_in_thread(self) -> bool:
        # Qt event processing must stay on the main thread.
        return False

    def get_image_array(self):
        """
        Capture the plot image as a video frame.
        Returns
        -------
        image_array : np.ndarray
            The image as a numpy array in (b,g,r,a) format.
        """
        pixmap = self.widget.grab()
        qimage = pixmap.toImage()
        # pyqtgraph provides imageToArray but it always outputs (b,g,r,a) format
        # https://pyqtgraph.readthedocs.io/en/latest/api_reference/functions.html#pyqtgraph.functions.imageToArray
        return pg.imageToArray(qimage, copy=True, transpose=True)
@register_recording(PyQtLinePlotterOptions)
class PyQtLinePlotter(BasePyQtPlotter):
    """Live line plotter backed by pyqtgraph, one plot row per subplot key."""

    def build(self):
        super().build()
        self.line_plot = LinePlotHelper(options=self._options, data=self._data_func())
        self.curves: dict[str, list[pg.PlotCurveItem]] = {}
        # One plot item per subplot, stacked vertically in the layout.
        for row, (key, labels) in enumerate(self.line_plot.subplot_structure.items()):
            if row:
                self.widget.nextRow()
            title = key if self.line_plot.is_dict_data else self._options.title
            plot_item = self.widget.addPlot(title=title)
            plot_item.setLabel("bottom", self._options.x_label)
            plot_item.setLabel("left", self._options.y_label)
            plot_item.showGrid(x=True, y=True, alpha=0.3)
            # The legend must exist before the curves are created so they get registered in it.
            plot_item.addLegend()
            row_curves = [
                plot_item.plot(pen=pg.mkPen(color=color, width=2), name=label)
                for color, label in zip(COLORS, labels)
            ]
            self.plot_widgets.append(plot_item)
            if self._options.show_window:
                plot_item.show()
            self.curves[key] = row_curves

    def process(self, data, cur_time):
        # Buffer the sample first, then let the base class drive the redraw.
        self.line_plot.process(data, cur_time)
        super().process(data, cur_time)

    def _update_plot(self):
        # Push the buffered series into every curve, then pump the Qt event loop.
        x_series = self.line_plot.x_data
        for key, row_curves in self.curves.items():
            labels = self.line_plot.subplot_structure[key]
            for curve, label in zip(row_curves, labels):
                curve.setData(x=x_series, y=self.line_plot.y_data[key][label])
        if self.app:
            self.app.processEvents()

    def cleanup(self):
        super().cleanup()
        self.line_plot.clear_data()
        self.curves.clear()
class BaseMPLPlotter(BasePlotter):
    """
    Base class for matplotlib based plotters.

    Owns the matplotlib figure and a lock serializing figure access between
    the plot-update path and video-frame capture (``get_image_array``).
    """

    def __init__(self, manager: "RecorderManager", options: BasePlotterOptions, data_func: Callable[[], T]):
        super().__init__(manager, options, data_func)
        # GUI backends are only safe on the main thread; fail fast otherwise.
        if threading.current_thread() is not threading.main_thread():
            gs.raise_exception("Impossible to run MPLPlotter in background thread.")

    def build(self):
        # Check matplotlib availability and derive the figure size from the window size.
        if not IS_MATPLOTLIB_AVAILABLE:
            gs.raise_exception(
                f"{type(self).__name__} matplotlib is not installed. Please install it with `pip install matplotlib>=3.7.0`."
            )
        super().build()
        import matplotlib.pyplot as plt

        self.fig: plt.Figure | None = None
        self._lock = threading.Lock()
        # matplotlib figsize uses inches
        dpi = mpl.rcParams.get("figure.dpi", 100)
        self.figsize = (self._options.window_size[0] / dpi, self._options.window_size[1] / dpi)

    def _show_fig(self):
        # Only open an on-screen window when explicitly requested.
        if self._options.show_window:
            self.fig.show()
            gs.logger.info(f"[{type(self).__name__}] created matplotlib window")

    def cleanup(self):
        """Clean up matplotlib resources."""
        super().cleanup()
        # Logger may not be available anymore
        logger_exists = hasattr(gs, "logger")
        if self.fig is not None:
            try:
                import matplotlib.pyplot as plt

                plt.close(self.fig)
                if logger_exists:
                    gs.logger.debug(f"[{type(self).__name__}] Closed matplotlib window")
            except Exception as e:
                if logger_exists:
                    gs.logger.warning(f"[{type(self).__name__}] Error closing window: {e}")
            finally:
                self.fig = None

    def get_image_array(self):
        """
        Capture the plot image as a video frame.

        Returns
        -------
        image_array : np.ndarray
            The RGB image as a numpy array.
        """
        from matplotlib.backends.backend_agg import FigureCanvasAgg

        # FIX: hold the lock with a 'with' block instead of manual
        # acquire()/release(). Previously, an exception while reading the
        # buffer or rescaling (e.g. in PIL) left the lock permanently held,
        # deadlocking the plot-update thread.
        with self._lock:
            if isinstance(self.fig.canvas, FigureCanvasAgg):
                # Read the renderer's internal RGBA buffer directly (fast path).
                width, height = self.fig.canvas.get_width_height(physical=True)
                rgba_array_flat = np.frombuffer(self.fig.canvas.buffer_rgba(), dtype=np.uint8)
                rgb_array = rgba_array_flat.reshape((height, width, 4))[..., :3]
                # Rescale image if necessary
                if (width, height) != tuple(self._options.window_size):
                    img = Image.fromarray(rgb_array)
                    img = img.resize(self._options.window_size, resample=Image.BILINEAR)
                    rgb_array = np.asarray(img)
                else:
                    # Copy so the returned array does not alias the live canvas buffer.
                    rgb_array = rgb_array.copy()
            else:
                # Slower but more generic fallback only if necessary
                buffer = io.BytesIO()
                self.fig.canvas.print_figure(buffer, format="png", dpi="figure")
                buffer.seek(0)
                img = Image.open(buffer)
                rgb_array = np.asarray(img.convert("RGB"))
        return rgb_array

    @property
    def run_in_thread(self) -> bool:
        from matplotlib.backends.backend_agg import FigureCanvasAgg

        if sys.platform == "darwin":
            # GUI event loops on macOS must run on the main thread.
            return False
        if self._is_built:
            assert self.fig is not None
            # All Agg-based backends derive from the surfaceless Agg backend, so 'isinstance' cannot be used to
            # discriminate the latter from others.
            return type(self.fig.canvas) is FigureCanvasAgg
        return not self._options.show_window
@register_recording(MPLLinePlotterOptions)
class MPLLinePlotter(BaseMPLPlotter):
    """Live line plotter using matplotlib, with cached backgrounds and blitting for speed."""

    def build(self):
        super().build()
        self.line_plot = LinePlotHelper(options=self._options, data=self._data_func())
        import matplotlib.pyplot as plt

        self.axes: list[plt.Axes] = []
        self.lines: dict[str, list[plt.Line2D]] = {}
        self.caches_bbox: list[Any] = []  # per-axis cached backgrounds for blitting
        self.cache_xmax: float = -1  # newest x value covered by the cached backgrounds
        # Create figure and subplots
        n_subplots = len(self.line_plot.subplot_structure)
        if n_subplots == 1:
            self.fig, ax = plt.subplots(figsize=self.figsize)
            self.axes = [ax]
        else:
            self.fig, axes = plt.subplots(n_subplots, 1, figsize=self.figsize, sharex=True, constrained_layout=True)
            self.axes = axes if isinstance(axes, (list, tuple, np.ndarray)) else [axes]
            self.fig.suptitle(self._options.title)
        # Create lines for each subplot
        for subplot_idx, (subplot_key, channel_labels) in enumerate(self.line_plot.subplot_structure.items()):
            ax = self.axes[subplot_idx]
            ax.set_xlabel(self._options.x_label)
            ax.set_ylabel(self._options.y_label)
            ax.grid(True, alpha=0.3)
            if self.line_plot.is_dict_data and n_subplots > 1:
                ax.set_title(subplot_key)
            subplot_lines = []
            for color, channel_label in zip(COLORS, channel_labels):
                (line,) = ax.plot([], [], color=color, label=channel_label, linewidth=2)
                subplot_lines.append(line)
            self.lines[subplot_key] = subplot_lines
        # Legend must be outside, otherwise it will not play well with blitting
        self.fig.legend(ncol=sum(map(len, self.lines.values())), loc="outside lower center")
        self.fig.canvas.draw()
        for ax in self.axes:
            self.caches_bbox.append(self.fig.canvas.copy_from_bbox(ax.bbox))
        self._show_fig()

    def process(self, data, cur_time):
        self.line_plot.process(data, cur_time)
        super().process(data, cur_time)

    def _update_plot(self):
        # FIX: hold the lock with a 'with' block instead of manual acquire()/release();
        # any matplotlib exception in between previously left the lock permanently
        # held, deadlocking get_image_array() on the capture path.
        with self._lock:
            # Update limits for each subplot if necessary
            limits_changed = False
            if len(self.line_plot.x_data) > 1:
                # First, check if the limits on y-axis must be extended to display all the available data
                subplots_ylim_data = []
                must_update_limit_y = False
                for ax, subplot_key in zip(self.axes, self.lines.keys()):
                    subplot_y_data = self.line_plot.y_data[subplot_key]
                    subplot_ylim_data = None
                    if subplot_y_data:
                        all_y_values = list(itertools.chain.from_iterable(subplot_y_data.values()))
                        subplot_ylim_data = y_min_data, y_max_data = min(all_y_values), max(all_y_values)
                        y_min_plot, y_max_plot = ax.get_ylim()
                        if y_min_data < y_min_plot or y_max_plot < y_max_data:
                            must_update_limit_y = True
                    subplots_ylim_data.append(subplot_ylim_data)
                # Next, adjust the limits on x-axis if they must be extended or adjusting y-axis is
                # already planned. NOTE: 'ax' is the last axis from the loop above; with sharex=True
                # (or a single subplot) its x-limits are representative of all axes.
                x_limits_changed = False
                x_min_plot, x_max_plot = ax.get_xlim()
                x_min_data, x_max_data = self.line_plot.x_data[0], self.line_plot.x_data[-1]
                if must_update_limit_y or x_min_plot < 0.0 or x_max_plot < x_max_data:
                    x_min_plot = max(0.0, x_min_data)
                    # Over-extend the upper bound to avoid rescaling on every single sample.
                    x_max_plot = x_max_data + max(
                        MPL_PLOTTER_RESCALE_RATIO_X * (x_max_data - x_min_data), MPL_PLOTTER_RESCALE_MIN_X
                    )
                    ax.set_xlim((x_min_plot - gs.EPS, x_max_plot + gs.EPS))
                    x_limits_changed = True
                # Finally, adjust the limits on y-axis if either x- or y-axis must be extended
                if x_limits_changed or must_update_limit_y:
                    for ax, subplot_ylim_data in zip(self.axes, subplots_ylim_data):
                        if subplot_ylim_data is not None:
                            y_min_data, y_max_data = subplot_ylim_data
                            y_min_plot = y_min_data - MPL_PLOTTER_RESCALE_RATIO_Y * (y_max_data - y_min_data)
                            y_max_plot = y_max_data + MPL_PLOTTER_RESCALE_RATIO_Y * (y_max_data - y_min_data)
                            ax.set_ylim((y_min_plot - gs.EPS, y_max_plot + gs.EPS))
                    limits_changed = True
            # Must redraw the entire figure if the limits have changed
            if limits_changed:
                self.fig.canvas.draw()
            # Update background if the entire figure has been updated, or the buffer size has been exceeded
            if limits_changed or (len(self.line_plot.x_data) > 1 and self.cache_xmax < self.line_plot.x_data[0] + gs.EPS):
                self.caches_bbox = [self.fig.canvas.copy_from_bbox(ax.bbox) for ax in self.axes]
                self.cache_xmax = self.line_plot.x_data[-2]
            # Update lines for each subplot
            for ax, cache_bbox, (subplot_key, subplot_lines) in zip(self.axes, self.caches_bbox, self.lines.items()):
                # Restore the cached background, then redraw only the line artists
                self.fig.canvas.restore_region(cache_bbox)
                channel_labels = self.line_plot.subplot_structure[subplot_key]
                for line, channel_label in zip(subplot_lines, channel_labels):
                    y_data = self.line_plot.y_data[subplot_key][channel_label]
                    line.set_data(self.line_plot.x_data, y_data)
                    ax.draw_artist(line)
                # Blit the updated subplot
                self.fig.canvas.blit(ax.bbox)
            self.fig.canvas.flush_events()

    def cleanup(self):
        super().cleanup()
        self.line_plot.clear_data()
        self.lines.clear()
        self.caches_bbox.clear()
        self.cache_xmax = -1
@register_recording(MPLImagePlotterOptions)
class MPLImagePlotter(BaseMPLPlotter):
    """
    Live image viewer using matplotlib.
    The image data should be an array-like object with shape (H, W), (H, W, 1), (H, W, 3), or (H, W, 4).
    """

    def build(self):
        super().build()
        import matplotlib.pyplot as plt

        self.image_plot = None
        self.background = None  # cached axes background used for blitting
        # Single borderless axes filling the whole figure.
        self.fig, self.ax = plt.subplots(figsize=self.figsize)
        self.fig.tight_layout(pad=0)
        self.ax.set_axis_off()
        self.fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
        # Placeholder 1x1 image; real data arrives in process().
        self.image_plot = self.ax.imshow(np.zeros((1, 1)), cmap="plasma", origin="upper", aspect="auto")
        self._show_fig()

    def process(self, data, cur_time):
        """Process new image data and update display."""
        if isinstance(data, torch.Tensor):
            img_data = tensor_to_array(data)
        else:
            img_data = np.asarray(data)
        # Rescale the colormap (and refresh the cached background) only when the
        # data range changes, since that requires a full canvas redraw.
        vmin, vmax = np.min(img_data), np.max(img_data)
        current_vmin, current_vmax = self.image_plot.get_clim()
        if vmin != current_vmin or vmax != current_vmax:
            self.image_plot.set_clim(vmin, vmax)
            self.fig.canvas.draw()
            self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
        # Blit the new image over the cached background.
        # NOTE(review): assumes the first call always updates the clim so that
        # 'background' is set before restore_region() — confirm for constant inputs.
        self.fig.canvas.restore_region(self.background)
        self.image_plot.set_data(img_data)
        self.ax.draw_artist(self.image_plot)
        self.fig.canvas.blit(self.ax.bbox)
        self.fig.canvas.flush_events()

    def cleanup(self):
        super().cleanup()
        self.ax = None
        self.image_plot = None
        self.background = None
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/recorders/plotters.py",
"license": "Apache License 2.0",
"lines": 502,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/recorders/recorder_manager.py | from typing import TYPE_CHECKING, Any, Callable, Type
import genesis as gs
if TYPE_CHECKING:
from .base_recorder import Recorder, RecorderOptions
class RecorderManager:
    """
    Manage the creation, processing, and cleanup of all data recorders.

    Parameters
    ----------
    step_dt: float
        The simulation time step.
    """

    # Maps RecorderOptions subclasses to their Recorder implementation (filled
    # by the @register_recording decorator).
    RECORDER_TYPES_MAP = {}

    def __init__(self, step_dt: float):
        self._step_dt = step_dt
        self._recorders: list["Recorder"] = []
        self._is_recording = False
        self._is_built = False

    @gs.assert_unbuilt
    def add_recorder(self, data_func: Callable[[], Any], rec_options: "RecorderOptions") -> "Recorder":
        """
        Automatically read and process data. See RecorderOptions for more details.

        Parameters
        ----------
        data_func: Callable[[], Any]
            A function with no arguments that returns the data to be recorded.
        rec_options: RecorderOptions
            The options for the recorder which determines how the data is recorded and processed.

        Returns
        -------
        recorder : Recorder
            The created recorder object.
        """
        recorder_cls = RecorderManager.RECORDER_TYPES_MAP[type(rec_options)]
        new_recorder = recorder_cls(self, rec_options, data_func)
        self._recorders.append(new_recorder)
        return new_recorder

    @gs.assert_unbuilt
    def build(self):
        """Start data recording."""
        for rec in self._recorders:
            rec.build()
            rec.start()
        self._is_recording = True
        self._is_built = True

    @gs.assert_built
    def stop(self):
        """Stop and complete data recording."""
        if not self._is_recording:
            gs.logger.warning("[DataRecorder] Ignoring stop(): data recording is not active.")
            return
        self._is_recording = False
        for rec in self._recorders:
            rec.stop()
        self._recorders.clear()

    @gs.assert_built
    def reset(self, envs_idx=None):
        """Reset every recorder (optionally for a subset of environments) and restart it."""
        for rec in self._recorders:
            rec.reset(envs_idx)
            rec.start()

    @gs.assert_built
    def step(self, global_step: int):
        """
        Increment the step count and process data from each recording configuration.
        In threaded mode, data is put in queues. In non-threaded mode, data is processed synchronously.
        """
        if not self._is_recording:
            return
        for rec in self._recorders:
            rec.step(global_step)

    @property
    def is_recording(self) -> bool:
        return self._is_recording

    @property
    def is_built(self) -> bool:
        return self._is_built
def register_recording(options_cls: Type["RecorderOptions"]):
    """Return a class decorator registering a Recorder class for ``options_cls``."""

    def _decorator(recorder_cls: Type["Recorder"]):
        # Record the mapping so RecorderManager.add_recorder can look it up by options type.
        RecorderManager.RECORDER_TYPES_MAP[options_cls] = recorder_cls
        return recorder_cls

    return _decorator
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/recorders/recorder_manager.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/manipulation/behavior_cloning.py | import os
import time
from collections import deque
from collections.abc import Iterator
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
class BehaviorCloning:
    """Multi-task behavior cloning with action prediction and object pose estimation."""

    def __init__(self, env, cfg: dict, teacher: nn.Module, device: str = "cpu"):
        self._env = env
        self._cfg = cfg
        self._device = device
        self._teacher = teacher
        self._num_steps_per_env = cfg["num_steps_per_env"]
        # Stereo rgb: 6 channels (3 left + 3 right)
        rgb_shape = (6, env.image_height, env.image_width)
        action_dim = env.num_actions
        # Multi-task policy with action and pose heads
        self._policy = Policy(cfg["policy"], action_dim).to(device)
        # Initialize optimizer
        self._optimizer = torch.optim.Adam(self._policy.parameters(), lr=cfg["learning_rate"])
        # Experience buffer with pose data
        self._buffer = ExperienceBuffer(
            num_envs=env.num_envs,
            max_size=self._cfg["buffer_size"],
            img_shape=rgb_shape,
            state_dim=self._cfg["policy"]["action_head"]["state_obs_dim"],
            action_dim=action_dim,
            device=device,
            dtype=self._policy.dtype,
        )
        # Training state
        self._current_iter = 0

    def learn(self, num_learning_iterations: int, log_dir: str) -> None:
        """Run DAgger-style training: collect with the teacher, fit the student policy.

        Parameters
        ----------
        num_learning_iterations: int
            Number of collect+train iterations to run.
        log_dir: str
            Directory for TensorBoard logs and checkpoints.
        """
        self._rewbuffer = deque(maxlen=100)
        self._cur_reward_sum = torch.zeros(self._env.num_envs, dtype=torch.float, device=self._device)
        self._buffer.clear()
        tf_writer = SummaryWriter(log_dir)
        for it in range(num_learning_iterations):
            # Collect experience
            start_time = time.time()
            self._collect_with_rl_teacher()
            forward_time = time.time() - start_time
            # Training steps for both action and pose prediction
            total_action_loss = 0.0
            total_pose_loss = 0.0
            num_batches = 0
            start_time = time.time()
            generator = self._buffer.get_batches(self._cfg.get("num_mini_batches", 4), self._cfg["num_epochs"])
            for batch in generator:
                # Forward pass for both action and pose prediction
                pred_action = self._policy(batch["rgb_obs"], batch["robot_pose"])
                pred_left_pose, pred_right_pose = self._policy.predict_pose(batch["rgb_obs"])
                # Compute action prediction loss (main task)
                action_loss = F.mse_loss(pred_action, batch["actions"])
                # Compute pose estimation loss (auxiliary task), one term per camera view
                pose_left_loss = self._compute_pose_loss(pred_left_pose, batch["object_poses"])
                pose_right_loss = self._compute_pose_loss(pred_right_pose, batch["object_poses"])
                pose_loss = pose_left_loss + pose_right_loss
                # Combined loss with weights
                total_loss = action_loss + pose_loss
                # Backward pass.
                # FIX: gradients must be clipped BEFORE the optimizer consumes them;
                # previously clip_grad_norm_ was called after optimizer.step(), which
                # made the clipping a no-op for the update.
                self._optimizer.zero_grad()
                total_loss.backward()
                torch.nn.utils.clip_grad_norm_(self._policy.parameters(), self._cfg["max_grad_norm"])
                self._optimizer.step()
                # Accumulate plain floats so the autograd graphs of all batches are
                # not kept alive until the end of the iteration.
                total_action_loss += action_loss.item()
                total_pose_loss += pose_loss.item()
                num_batches += 1
            backward_time = time.time() - start_time
            # Compute average losses
            if num_batches == 0:
                raise ValueError("No batches collected")
            avg_action_loss = total_action_loss / num_batches
            avg_pose_loss = total_pose_loss / num_batches
            fps = (self._num_steps_per_env * self._env.num_envs) / (forward_time)
            # Keep the checkpoint iteration counter in sync with training progress.
            self._current_iter = it + 1
            # Logging
            if (it + 1) % self._cfg["log_freq"] == 0:
                current_lr = self._optimizer.param_groups[0]["lr"]
                tf_writer.add_scalar("loss/action_loss", avg_action_loss, it)
                tf_writer.add_scalar("loss/pose_loss", avg_pose_loss, it)
                tf_writer.add_scalar("loss/total_loss", avg_action_loss + avg_pose_loss, it)
                tf_writer.add_scalar("lr", current_lr, it)
                tf_writer.add_scalar("buffer_size", self._buffer.size, it)
                tf_writer.add_scalar("speed/forward", forward_time, it)
                tf_writer.add_scalar("speed/backward", backward_time, it)
                tf_writer.add_scalar("speed/fps", int(fps), it)
                #
                print("--------------------------------")
                info_str = f" | Iteration: {it + 1:04d}\n"
                info_str += f" | Action Loss: {avg_action_loss:.6f}\n"
                info_str += f" | Pose Loss: {avg_pose_loss:.6f}\n"
                info_str += f" | Total Loss: {avg_action_loss + avg_pose_loss:.6f}\n"
                info_str += f" | Learning Rate: {current_lr:.6f}\n"
                info_str += f" | Forward Time: {forward_time:.2f}s\n"
                info_str += f" | Backward Time: {backward_time:.2f}s\n"
                info_str += f" | FPS: {int(fps)}"
                print(info_str)
                if len(self._rewbuffer) > 0:
                    tf_writer.add_scalar("reward/mean", np.mean(self._rewbuffer), it)
            # Save checkpoints periodically
            if (it + 1) % self._cfg["save_freq"] == 0:
                self.save(os.path.join(log_dir, f"checkpoint_{it + 1:04d}.pt"))
        tf_writer.close()

    def _compute_pose_loss(self, pred_poses: torch.Tensor, target_poses: torch.Tensor) -> torch.Tensor:
        """Compute pose loss with separate position and orientation components."""
        # Split into position and orientation
        pred_pos = pred_poses[:, :3]
        pred_quat = pred_poses[:, 3:7]
        target_pos = target_poses[:, :3]
        target_quat = target_poses[:, 3:7]
        # Position loss (MSE)
        pos_loss = F.mse_loss(pred_pos, target_pos)
        # Orientation loss (quaternion distance)
        # Normalize quaternions
        pred_quat = F.normalize(pred_quat, p=2, dim=1)
        target_quat = F.normalize(target_quat, p=2, dim=1)
        # Quaternion distance: 1 - |dot(q1, q2)|
        # Note: we use this as a proxy for the actual distance between two quaternions
        # because the impact of the orientation loss (auxiliary task) is not significant
        # compared to the action loss (main task)
        quat_dot = torch.sum(pred_quat * target_quat, dim=1)
        quat_loss = torch.mean(1.0 - torch.abs(quat_dot))
        return pos_loss + quat_loss

    def _collect_with_rl_teacher(self) -> None:
        """Collect experience from environment using stereo rgb images and object poses."""
        # Get state observation
        obs, _ = self._env.get_observations()
        with torch.inference_mode():
            for _ in range(self._num_steps_per_env):
                # Get stereo rgb images
                rgb_obs = self._env.get_stereo_rgb_images(normalize=True)
                # Get teacher action
                teacher_action = self._teacher(obs).detach()
                # Get end-effector position
                ee_pose = self._env.robot.ee_pose
                # Get object pose (position + quaternion) in world frame
                object_pose = torch.cat(
                    [
                        self._env.object.get_pos(),
                        self._env.object.get_quat(),
                    ],
                    dim=-1,
                )
                # Store in buffer
                self._buffer.add(rgb_obs, ee_pose, object_pose, teacher_action)
                # Step environment with student action
                student_action = self._policy(rgb_obs.float(), ee_pose.float())
                # Simple DAgger: use the student action when its distance to the
                # teacher action is below the threshold (1.0), otherwise fall back
                # to the teacher action.
                action_diff = torch.norm(student_action - teacher_action, dim=-1)
                condition = (action_diff < 1.0).unsqueeze(-1).expand_as(student_action)
                action = torch.where(condition, student_action, teacher_action)
                next_obs, reward, done, _ = self._env.step(action)
                self._cur_reward_sum += reward
                obs = next_obs
                # Track per-episode returns of environments that just terminated.
                new_ids = (done > 0).nonzero(as_tuple=False)
                self._rewbuffer.extend(self._cur_reward_sum[new_ids][:, 0].cpu().numpy().tolist())
                self._cur_reward_sum[new_ids] = 0

    def save(self, path: str) -> None:
        """Save model checkpoint."""
        checkpoint = {
            "model_state_dict": self._policy.state_dict(),
            "optimizer_state_dict": self._optimizer.state_dict(),
            "current_iter": self._current_iter,
            "config": self._cfg,
        }
        torch.save(checkpoint, path)
        print(f"Model saved to {path}")

    def load(self, path: str) -> None:
        """Load model checkpoint."""
        checkpoint = torch.load(path, map_location=self._device, weights_only=False)
        self._policy.load_state_dict(checkpoint["model_state_dict"])
        self._optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        # FIX: previously assigned to 'self.current_iter', creating a new public
        # attribute and leaving '_current_iter' stale.
        self._current_iter = checkpoint["current_iter"]
        print(f"Model loaded from {path}")

    def load_finetuned_model(self, path: str) -> None:
        """Load a fine-tuned model checkpoint."""
        checkpoint = torch.load(path, map_location=self._device, weights_only=False)
        self._policy.load_state_dict(checkpoint["model_state_dict"])
        self._optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        self._current_iter = checkpoint["current_iter"]
        print(f"Fine-tuned model loaded from {path}")
class ExperienceBuffer:
"""A first-in-first-out buffer for experience replay."""
def __init__(
self,
num_envs: int,
max_size: int,
img_shape: tuple[int, int, int],
state_dim: int,
action_dim: int,
device: str = "cpu",
dtype: torch.dtype | None = None,
):
self._num_envs = num_envs
self._max_size = max_size
self._img_shape = img_shape
self._state_dim = state_dim
self._action_dim = action_dim
self._device = device
self._ptr = 0
self._size = 0
# Buffers for data
self._rgb_obs = torch.empty(max_size, num_envs, *img_shape, dtype=dtype, device=device)
self._robot_pose = torch.empty(max_size, num_envs, state_dim, dtype=dtype, device=device)
self._object_poses = torch.empty(max_size, num_envs, 7, dtype=dtype, device=device)
self._actions = torch.empty(max_size, num_envs, action_dim, dtype=dtype, device=device)
def add(
self,
rgb_obs: torch.Tensor,
robot_pose: torch.Tensor,
object_poses: torch.Tensor,
actions: torch.Tensor,
) -> None:
"""Add experience to buffer."""
self._ptr = (self._ptr + 1) % self._max_size
self._rgb_obs[self._ptr] = rgb_obs
self._robot_pose[self._ptr] = robot_pose
self._object_poses[self._ptr] = object_poses
self._actions[self._ptr] = actions
self._size = min(self._size + 1, self._max_size)
def get_batches(self, num_mini_batches: int, num_epochs: int) -> Iterator[dict[str, torch.Tensor]]:
"""Generate batches for training."""
# calculate the size of each mini-batch
batch_size = self._size // num_mini_batches
for _ in range(num_epochs):
indices = torch.randperm(self._size)
for batch_idx in range(0, self._size, batch_size):
batch_indices = indices[batch_idx : batch_idx + batch_size]
# Yield a mini-batch of data
yield {
"rgb_obs": self._rgb_obs[batch_indices].reshape(-1, *self._img_shape),
"robot_pose": self._robot_pose[batch_indices].reshape(-1, self._state_dim),
"object_poses": self._object_poses[batch_indices].reshape(-1, 7),
"actions": self._actions[batch_indices].reshape(-1, self._action_dim),
}
def clear(self) -> None:
"""Clear the buffer."""
self._rgb_obs.zero_()
self._robot_pose.zero_()
self._object_poses.zero_()
self._actions.zero_()
self._ptr = 0
self._size = 0
def is_full(self) -> bool:
"""Check if buffer is full."""
return self._size == self._max_size
@property
def size(self) -> int:
"""Get buffer size."""
return self._size
class Policy(nn.Module):
"""Multi-task behavior cloning policy with shared stereo encoder/decoder."""
def __init__(self, config: dict, action_dim: int):
super().__init__()
# Shared encoder for both left and right cameras
self.shared_encoder = self._build_cnn(config["vision_encoder"])
# Feature fusion layer to combine stereo features
vision_encoder_conv_out_channels = config["vision_encoder"]["conv_layers"][-1]["out_channels"]
vision_encoder_output_dim = vision_encoder_conv_out_channels * 4 * 4
self.feature_fusion = nn.Sequential(
nn.Linear(vision_encoder_output_dim * 2, vision_encoder_output_dim), # 2 cameras
nn.ReLU(),
nn.Dropout(0.1),
)
# MLP for action prediction
mlp_cfg = config["action_head"]
self.state_obs_dim = config["action_head"]["state_obs_dim"]
if self.state_obs_dim is not None:
mlp_cfg["input_dim"] = vision_encoder_output_dim + self.state_obs_dim
else:
mlp_cfg["input_dim"] = vision_encoder_output_dim
mlp_cfg["output_dim"] = action_dim
self.mlp = self._build_mlp(mlp_cfg)
# MLP for pose prediction
pose_mlp_cfg = config["pose_head"]
pose_mlp_cfg["input_dim"] = vision_encoder_output_dim
pose_mlp_cfg["output_dim"] = 7
self.pose_mlp = self._build_mlp(pose_mlp_cfg)
@property
def dtype(self):
"""Get the dtype of the policy's parameters."""
return next(self.parameters()).dtype
@staticmethod
def _build_cnn(config: dict) -> nn.Sequential:
"""Build CNN encoder for grayscale images."""
layers = []
# Build layers from configuration
for conv_config in config["conv_layers"]:
layers.extend(
[
nn.Conv2d(
conv_config["in_channels"],
conv_config["out_channels"],
kernel_size=conv_config["kernel_size"],
stride=conv_config["stride"],
padding=conv_config["padding"],
),
nn.BatchNorm2d(conv_config["out_channels"]),
nn.ReLU(),
]
)
# Add adaptive pooling if specified
if config.get("pooling") == "adaptive_avg":
layers.append(nn.AdaptiveAvgPool2d((4, 4)))
return nn.Sequential(*layers)
@staticmethod
def _build_mlp(config: dict) -> nn.Sequential:
mlp_input_dim = config["input_dim"]
layers = []
for hidden_dim in config["hidden_dims"]:
layers.extend([nn.Linear(mlp_input_dim, hidden_dim), nn.ReLU()])
mlp_input_dim = hidden_dim
layers.append(nn.Linear(mlp_input_dim, config["output_dim"]))
return nn.Sequential(*layers)
def get_features(self, rgb_obs: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
# Split stereo rgb images
left_rgb = rgb_obs[:, 0:3] # First 3 channels (RGB)
right_rgb = rgb_obs[:, 3:6] # Last 3 channels (RGB)
# Use shared encoder for both images
left_features = self.shared_encoder(left_rgb).flatten(start_dim=1)
right_features = self.shared_encoder(right_rgb).flatten(start_dim=1)
return left_features, right_features
def forward(self, rgb_obs: torch.Tensor, state_obs: torch.Tensor | None = None) -> dict:
"""Forward pass with shared stereo encoder for rgb images."""
# Get features
left_features, right_features = self.get_features(rgb_obs)
# Concatenate features (much more efficient than concatenating raw images)
combined_features = torch.cat([left_features, right_features], dim=-1)
# Feature fusion
fused_features = self.feature_fusion(combined_features)
# Add state information if available
if state_obs is not None and self.state_obs_dim is not None:
final_features = torch.cat([fused_features, state_obs], dim=-1)
else:
final_features = fused_features
# Predict actions
return self.mlp(final_features)
def predict_pose(self, rgb_obs: torch.Tensor) -> torch.Tensor:
"""Predict pose from rgb images and state observations."""
left_features, right_features = self.get_features(rgb_obs)
left_pose = self.pose_mlp(left_features)
right_pose = self.pose_mlp(right_features)
return left_pose, right_pose
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/manipulation/behavior_cloning.py",
"license": "Apache License 2.0",
"lines": 351,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:tests/test_integration.py | import numpy as np
import pytest
import genesis as gs
from .utils import assert_allclose, get_hf_dataset
@pytest.mark.slow  # ~120s
@pytest.mark.parametrize("mode", [0, 1, 2])
@pytest.mark.parametrize("backend", [gs.cpu, gs.gpu])
def test_pick_and_place(mode, show_viewer):
    """End-to-end pick-and-place of a cube with a Franka arm, for three
    integrator/substep/armature configurations.

    NOTE(review): ``backend`` is parametrized but not a function parameter —
    presumably consumed by a fixture in conftest; confirm.
    """
    # Add DoF armature to improve numerical stability if not using 'approximate_implicitfast' integrator.
    #
    # This is necessary because the first-order correction term involved in the implicit integration schemes
    # 'implicitfast' and 'Euler' are only able to stabilize each entity independently, from the forces that were
    # obtained from the instable accelerations. As a result, everything is fine as long as the entities are not
    # interacting with each other, but it induces unrealistic motion otherwise. In this case, the acceleration of the
    # cube being lifted is based on the acceleration that the gripper would have without implicit damping.
    #
    # The only way to correct this would be to take into account the derivative of the Jacobian of the constraints in
    # the first-order correction term. Doing this is challenging and would significantly increase the computation cost.
    #
    # In practice, it is more common to just go for a higher order integrator such as RK4.
    if mode == 0:
        integrator = gs.integrator.approximate_implicitfast
        substeps = 1
        armature = 0.0
    elif mode == 1:
        integrator = gs.integrator.implicitfast
        substeps = 4
        armature = 0.0
    elif mode == 2:
        integrator = gs.integrator.Euler
        substeps = 1
        armature = 2.0
    # Create and build the scene
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=0.01,
            substeps=substeps,
        ),
        rigid_options=gs.options.RigidOptions(
            box_box_detection=True,
            integrator=integrator,
        ),
        show_viewer=show_viewer,
        show_FPS=False,
    )
    scene.add_entity(
        gs.morphs.Plane(),
    )
    # Red cube: the object to pick up
    cube = scene.add_entity(
        gs.morphs.Box(
            size=(0.05, 0.05, 0.05),
            pos=(0.65, 0.0, 0.025),
        ),
        surface=gs.surfaces.Plastic(color=(1, 0, 0)),
    )
    # Green fixed cube: the drop-off platform
    scene.add_entity(
        gs.morphs.Box(
            size=(0.05, 0.05, 0.05),
            pos=(0.4, 0.2, 0.025),
            fixed=True,
        ),
        surface=gs.surfaces.Plastic(color=(0, 1, 0)),
    )
    franka = scene.add_entity(
        gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
        vis_mode="collision",
        visualize_contact=True,
    )
    scene.build()
    franka.set_dofs_armature(franka.get_dofs_armature() + armature)
    motors_dof = np.arange(7)  # 7 arm joints
    fingers_dof = np.arange(7, 9)  # 2 gripper fingers
    end_effector = franka.get_link("hand")
    # set control gains
    franka.set_dofs_kp(
        np.array([4500, 4500, 3500, 3500, 2000, 2000, 2000, 100, 100]),
    )
    franka.set_dofs_kv(
        np.array([450, 450, 350, 350, 200, 200, 200, 10, 10]),
    )
    franka.set_dofs_force_range(
        np.array([-87, -87, -87, -87, -12, -12, -12, -100, -100]),
        np.array([87, 87, 87, 87, 12, 12, 12, 100, 100]),
    )
    # move to pre-grasp pose
    qpos = franka.inverse_kinematics(
        link=end_effector,
        pos=np.array([0.65, 0.0, 0.22]),
        quat=np.array([0, 1, 0, 0]),
    )
    # gripper open pos
    qpos[-2:] = 0.04
    path = franka.plan_path(qpos_goal=qpos, num_waypoints=300, resolution=0.05, max_retry=10)
    # execute the planned path
    franka.control_dofs_position(np.array([0.15, 0.15]), fingers_dof)
    for waypoint in path:
        franka.control_dofs_position(waypoint)
        scene.step()
    # Get more time to the robot to reach the last waypoint
    for i in range(120):
        scene.step()
    # reach
    qpos = franka.inverse_kinematics(
        link=end_effector,
        pos=np.array([0.65, 0.0, 0.13]),
        quat=np.array([0, 1, 0, 0]),
    )
    franka.control_dofs_position(qpos[:-2], motors_dof)
    for i in range(60):
        scene.step()
    # grasp: hold arm position while closing the fingers with constant force
    franka.control_dofs_position(qpos[:-2], motors_dof)
    franka.control_dofs_force(np.array([-1.0, -1.0]), fingers_dof)
    for i in range(50):
        scene.step()
    # lift
    qpos = franka.inverse_kinematics(
        link=end_effector,
        pos=np.array([0.65, 0.0, 0.28]),
        quat=np.array([0, 1, 0, 0]),
    )
    franka.control_dofs_position(qpos[:-2], motors_dof)
    for i in range(50):
        scene.step()
    # reach (move above the platform, planning with the grasped cube attached)
    qpos = franka.inverse_kinematics(
        link=end_effector,
        pos=np.array([0.4, 0.2, 0.2]),
        quat=np.array([0, 1, 0, 0]),
    )
    path = franka.plan_path(
        qpos_goal=qpos,
        num_waypoints=100,
        resolution=0.05,
        max_retry=10,
        ee_link_name="hand",
        with_entity=cube,
    )
    for waypoint in path:
        franka.control_dofs_position(waypoint[:-2], motors_dof)
        scene.step()
    # Get more time to the robot to reach the last waypoint
    for i in range(50):
        scene.step()
    # release
    franka.control_dofs_position(np.array([0.15, 0.15]), fingers_dof)
    for i in range(180):
        scene.step()
        if i > 150:
            # Once settled, the cube must be at rest
            qvel = cube.get_dofs_velocity()
            assert_allclose(qvel, 0, atol=0.02)
    # Cube must end up resting on top of the fixed platform cube (0.05 + 0.025)
    qpos = cube.get_dofs_position()
    assert_allclose(qpos[2], 0.075, atol=2e-3)
@pytest.mark.required
@pytest.mark.parametrize("backend", [gs.cpu, gs.gpu])
def test_hanging_rigid_cable(show_viewer, tol):
    """A hanging rigid cable must keep its link poses after re-setting the
    current joint positions and stepping the simulation for a while."""
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=0.002,
        ),
        show_viewer=show_viewer,
        show_FPS=False,
    )
    robot = scene.add_entity(
        gs.morphs.MJCF(
            file="xml/cable.xml",
        ),
    )
    scene.build()
    # Record initial link positions / normalized orientations for env 0
    links_pos_0 = scene.rigid_solver.links_state.pos.to_numpy()[:, 0]
    links_quat_0 = scene.rigid_solver.links_state.quat.to_numpy()[:, 0]
    links_quat_0 /= np.linalg.norm(links_quat_0, axis=-1, keepdims=True)
    # Re-setting the current dof positions should be a no-op
    robot.set_dofs_position(robot.get_dofs_position())
    if show_viewer:
        scene.visualizer.update()
    for _ in range(100):
        scene.step()
    links_pos_f = scene.rigid_solver.links_state.pos.to_numpy()[:, 0]
    links_quat_f = scene.rigid_solver.links_state.quat.to_numpy()[:, 0]
    links_quat_f /= np.linalg.norm(links_quat_f, axis=-1, keepdims=True)
    # Geodesic angle between initial and final orientations (clamped dot product)
    links_quat_err = 2.0 * np.arccos(np.minimum(np.abs(np.sum(links_quat_f * links_quat_0, axis=-1)), 1.0))
    # FIXME: Why it is not possible to achieve better accuracy?
    assert_allclose(links_pos_0, links_pos_f, tol=1e-3)
    assert_allclose(links_quat_err, 0.0, tol=1e-3)
@pytest.mark.slow  # ~150s
@pytest.mark.parametrize("primitive_type", ["box", "sphere"])
@pytest.mark.parametrize("precision", ["64"])
def test_franka_panda_grasp_fem_entity(primitive_type, show_viewer):
    """Grasp and lift a deformable (FEM) object with the Franka gripper and
    check that it follows the gripper motion without slipping.

    NOTE(review): ``precision`` is parametrized but not a function parameter —
    presumably consumed by a fixture; confirm.
    """
    if gs.use_ndarray:
        pytest.skip("SAPCoupler does not support ndarray yet.")
    GRAPPER_POS_START = (0.65, 0.0, 0.13)
    GRAPPER_POS_END = (0.65, 0.0, 0.18)
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=1.0 / 60,
            substeps=2,
        ),
        rigid_options=gs.options.RigidOptions(
            enable_self_collision=False,
        ),
        fem_options=gs.options.FEMOptions(
            use_implicit_solver=True,
            pcg_threshold=1e-10,
        ),
        coupler_options=gs.options.SAPCouplerOptions(
            pcg_threshold=1e-10,
            sap_convergence_atol=1e-10,
            sap_convergence_rtol=1e-10,
            linesearch_ftol=1e-10,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(1.3, 0.0, 0.15),
            camera_lookat=(0.65, 0.0, 0.15),
            max_FPS=60,
        ),
        show_viewer=show_viewer,
        show_FPS=False,
    )
    franka = scene.add_entity(
        gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
        material=gs.materials.Rigid(
            coup_friction=1.0,
            friction=1.0,
        ),
    )
    # Only allow finger contact to accelerate
    for geom in franka.geoms:
        if "finger" not in geom.link.name:
            geom._contype = 0
            geom._conaffinity = 0
    if primitive_type == "sphere":
        obj = scene.add_entity(
            morph=gs.morphs.Sphere(
                pos=(0.65, 0.0, 0.02),
                radius=0.02,
            ),
            material=gs.materials.FEM.Elastic(
                model="linear_corotated",
                friction_mu=1.0,
                E=1e5,
                nu=0.4,
            ),
        )
    else:  # primitive_type == "box":
        asset_path = get_hf_dataset(pattern="meshes/cube8.obj")
        obj = scene.add_entity(
            morph=gs.morphs.Mesh(
                file=f"{asset_path}/meshes/cube8.obj",
                pos=(0.65, 0.0, 0.02),
                scale=0.02,
            ),
            material=gs.materials.FEM.Elastic(
                model="linear_corotated",
                friction_mu=1.0,
            ),
        )
    scene.build()
    motors_dof = np.arange(7)
    fingers_dof = np.arange(7, 9)
    end_effector = franka.get_link("hand")
    # init
    franka.set_qpos((-1.0124, 1.5559, 1.3662, -1.6878, -1.5799, 1.7757, 1.4602, 0.04, 0.04))
    # Track the object by the mean position of its FEM vertices
    box_pos_0 = obj.get_state().pos.mean(dim=-2)
    # hold
    qpos = franka.inverse_kinematics(link=end_effector, pos=GRAPPER_POS_START, quat=(0, 1, 0, 0))
    franka.control_dofs_position(qpos[motors_dof], motors_dof)
    for i in range(15):
        scene.step()
    # grasp
    for i in range(10):
        franka.control_dofs_force(np.array([-1.0, -1.0]), fingers_dof)
        scene.step()
    # lift and wait for while to give enough time for the robot to stop shaking
    qpos = franka.inverse_kinematics(link=end_effector, pos=GRAPPER_POS_END, quat=(0, 1, 0, 0))
    franka.control_dofs_position(qpos[motors_dof], motors_dof)
    for i in range(65):
        franka.control_dofs_force(np.array([-1.0, -1.0]), fingers_dof)
        scene.step()
    # Check that the box has moved by the expected delta, without slipping
    box_pos_f = obj.get_state().pos.mean(dim=-2)
    assert_allclose(box_pos_f - box_pos_0, np.array(GRAPPER_POS_END) - np.array(GRAPPER_POS_START), tol=5e-3)
    # wait for a while
    for i in range(25):
        franka.control_dofs_force(np.array([-1.0, -1.0]), fingers_dof)
        scene.step()
    # Object must stay put while the grip is maintained
    box_pos_post = obj.get_state().pos.mean(dim=-2)
    assert_allclose(box_pos_f, box_pos_post, atol=1e-3)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_integration.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:tests/test_sensors.py | import numpy as np
import pytest
import torch
import genesis as gs
import genesis.utils.geom as gu
from .utils import assert_allclose, assert_equal
# ------------------------------------------------------------------------------------------
# -------------------------------------- IMU Sensors ---------------------------------------
# ------------------------------------------------------------------------------------------
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
def test_imu_sensor(show_viewer, tol, n_envs):
    """Test if the IMU sensor returns the correct data."""
    GRAVITY = -10.0
    DT = 1e-2
    BIAS = (0.1, 0.2, 0.3)
    DELAY_STEPS = 2
    MAG_FIELD = (0.3, 0.1, 0.5)  # arbitrary world magnetic field
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=DT,
            substeps=1,
            gravity=(0.0, 0.0, GRAVITY),
        ),
        profiling_options=gs.options.ProfilingOptions(show_FPS=False),
        show_viewer=show_viewer,
    )
    scene.add_entity(gs.morphs.Plane())
    box = scene.add_entity(
        morph=gs.morphs.Box(
            size=(0.1, 0.1, 0.1),
            pos=(0.0, 0.0, 0.2),
        ),
    )
    # Ideal IMU: no noise, no delay
    imu = scene.add_sensor(
        gs.sensors.IMU(
            entity_idx=box.idx,
            magnetic_field=MAG_FIELD,
        )
    )
    # Same IMU but with a fixed read-out delay of DELAY_STEPS steps
    imu_delayed = scene.add_sensor(
        gs.sensors.IMU(
            entity_idx=box.idx,
            delay=DT * DELAY_STEPS,
            magnetic_field=MAG_FIELD,
        )
    )
    # IMU with cross-axis coupling, noise, random walk, delay and jitter enabled
    imu_noisy = scene.add_sensor(
        gs.sensors.IMU(
            entity_idx=box.idx,
            acc_cross_axis_coupling=0.01,
            gyro_cross_axis_coupling=(0.02, 0.03, 0.04),
            mag_cross_axis_coupling=0.01,
            acc_noise=(0.01, 0.01, 0.01),
            gyro_noise=(0.01, 0.01, 0.01),
            mag_noise=(0.01, 0.01, 0.01),
            acc_random_walk=(0.001, 0.001, 0.001),
            gyro_random_walk=(0.001, 0.001, 0.001),
            mag_random_walk=(0.001, 0.001, 0.001),
            delay=DT,
            magnetic_field=MAG_FIELD,
            jitter=DT * 0.1,
            interpolate=True,
        )
    )
    scene.build(n_envs=n_envs)
    # box is in freefall
    for _ in range(10):
        scene.step()
    # IMU should calculate "classical linear acceleration" using the local frame without accounting for gravity
    # acc_classical_lin_z = - theta_dot ** 2 - cos(theta) * g
    assert_allclose(imu.read().lin_acc, 0.0, tol=tol)
    assert_allclose(imu.read().ang_vel, 0.0, tol=tol)
    assert_allclose(imu.read().mag, MAG_FIELD, tol=tol)
    assert_allclose(imu_noisy.read().lin_acc, 0.0, tol=1e-1)
    assert_allclose(imu_noisy.read().ang_vel, 0.0, tol=1e-1)
    assert_allclose(imu_noisy.read().mag, MAG_FIELD, tol=1e-1)
    # shift COM to induce angular velocity
    box.set_COM_shift([0.05, 0.05, 0.05])
    # update noise and bias for accelerometer, gyroscope and magnetometer
    imu_noisy.set_noise((0.01, 0.01, 0.01, 0.02, 0.02, 0.02, 0.05, 0.05, 0.05))
    imu_noisy.set_bias((0.01, 0.01, 0.01, 0.02, 0.02, 0.02, 0.05, 0.05, 0.05))
    imu_noisy.set_jitter(0.001)
    for _ in range(10 - DELAY_STEPS):
        scene.step()
    # Snapshot ground truth now: after DELAY_STEPS more steps the delayed
    # sensor must report exactly this (older) measurement.
    true_imu_delayed_reading = imu_delayed.read_ground_truth()
    for _ in range(DELAY_STEPS):
        scene.step()
    assert_equal(imu_delayed.read().lin_acc, true_imu_delayed_reading.lin_acc)
    assert_equal(imu_delayed.read().ang_vel, true_imu_delayed_reading.ang_vel)
    assert_equal(imu_delayed.read().mag, true_imu_delayed_reading.mag)
    # check that position offset affects linear acceleration
    imu.set_pos_offset((0.5, 0.0, 0.0))
    lin_acc_no_offset = imu.read().lin_acc
    scene.step()
    lin_acc_with_offset = imu.read().lin_acc
    with np.testing.assert_raises(AssertionError):
        assert_allclose(lin_acc_no_offset, lin_acc_with_offset, atol=0.2)
    imu.set_pos_offset((0.0, 0.0, 0.0))
    # let box collide with ground
    for _ in range(20):
        scene.step()
    assert_equal(imu.read_ground_truth().lin_acc, imu_delayed.read_ground_truth().lin_acc)
    assert_equal(imu.read_ground_truth().ang_vel, imu_delayed.read_ground_truth().ang_vel)
    assert_equal(imu.read_ground_truth().mag, imu_delayed.read_ground_truth().mag)
    with np.testing.assert_raises(AssertionError, msg="Angular velocity should not be zero due to COM shift"):
        assert_allclose(imu.read_ground_truth().ang_vel, 0.0, tol=tol)
    with np.testing.assert_raises(AssertionError, msg="Delayed accl data should not be equal to the ground truth data"):
        assert_equal(imu_delayed.read().lin_acc - imu_delayed.read_ground_truth().lin_acc, 0.0)
    with np.testing.assert_raises(AssertionError, msg="Delayed mag data should not be equal to the ground truth data"):
        assert_equal(imu_delayed.read().mag - imu_delayed.read_ground_truth().mag, 0.0)
    box.set_COM_shift((0.0, 0.0, 0.0))
    box.set_quat((0.0, 0.0, 0.0, 1.0))  # pi rotation around z-axis
    # wait for the box to be stationary on ground
    for _ in range(50):
        scene.step()
    assert_allclose(imu.read().lin_acc, (0.0, 0.0, -GRAVITY), tol=5e-6)
    assert_allclose(imu.read().ang_vel, (0.0, 0.0, 0.0), tol=1e-5)
    # pi yaw flips the sign of the in-plane magnetic field components
    assert_allclose(imu.read().mag, (-MAG_FIELD[0], -MAG_FIELD[1], MAG_FIELD[2]), tol=tol)
    # rotate IMU 90 deg around x axis means gravity should be along -y axis
    imu.set_quat_offset(gu.euler_to_quat((90.0, 0.0, 0.0)))
    scene.step()
    assert_allclose(imu.read().lin_acc, (0.0, GRAVITY, 0.0), tol=5e-6)
    assert_allclose(imu.read().mag, (-MAG_FIELD[0], -MAG_FIELD[2], -MAG_FIELD[1]), tol=tol)
    imu.set_acc_cross_axis_coupling((0.0, 1.0, 0.0))
    scene.step()
    assert_allclose(imu.read().lin_acc, GRAVITY, tol=5e-6)
    scene.reset()
    box.set_dofs_velocity((1.0, 2.0, 3.0), dofs_idx_local=slice(3, None))
    scene.step()
    assert_allclose(imu.read_ground_truth().ang_vel, (1.0, 3.0, -2.0), tol=0.1)
    imu.set_quat_offset((1.0, 0.0, 0.0, 0.0))
    imu.set_acc_cross_axis_coupling((0.0, 0.0, 0.0))
    scene.reset()
    assert_allclose(imu.read().lin_acc, 0.0, tol=gs.EPS)  # biased, but cache hasn't been updated yet
    assert_allclose(imu_delayed.read().lin_acc, 0.0, tol=gs.EPS)
    assert_allclose(imu_noisy.read().ang_vel, 0.0, tol=gs.EPS)
    assert_allclose(imu_noisy.read().mag, 0.0, tol=gs.EPS)  # biased
    imu.set_bias(BIAS + 2 * (0.0, 0.0, 0.0))
    scene.step()
    assert_allclose(imu.read().lin_acc, BIAS, tol=tol)
    assert_allclose(imu.read().mag, MAG_FIELD, tol=tol)
# ------------------------------------------------------------------------------------------
# ------------------------------------ Contact Sensors -------------------------------------
# ------------------------------------------------------------------------------------------
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
def test_rigid_tactile_sensors_gravity_force(n_envs, show_viewer, tol):
    """Test if the sensor will detect the correct forces being applied on a falling box."""
    GRAVITY = -10.0
    BIAS = (0.1, 0.2, 0.3)
    NOISE = 0.01
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            gravity=(0.0, 0.0, GRAVITY),
        ),
        profiling_options=gs.options.ProfilingOptions(show_FPS=False),
        show_viewer=show_viewer,
    )
    floor = scene.add_entity(morph=gs.morphs.Plane())
    # Add duck (with convex decomposition enabled) to offset geom index vs link index
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/duck.obj",
            scale=0.04,
            pos=(0.0, 1.0, 0.2),
            euler=(90, 0, 90),
        ),
    )
    box = scene.add_entity(
        morph=gs.morphs.Box(
            size=(1.0, 1.0, 1.0),  # volume = 1 m^3
            pos=(0.0, 0.0, 0.51),
        ),
        material=gs.materials.Rigid(
            rho=1.0,  # mass = 1.0 kg
        ),
        surface=gs.surfaces.Default(
            color=(1.0, 0.0, 0.0, 1.0),
        ),
    )
    box_2 = scene.add_entity(
        morph=gs.morphs.Box(
            size=(0.2, 0.2, 0.2),  # volume = 0.008 m^3
            pos=(1.0, 0.0, 0.4),
        ),
        material=gs.materials.Rigid(
            rho=100.0,  # mass = 0.8 kg
        ),
        surface=gs.surfaces.Default(
            color=(0.0, 1.0, 0.0, 1.0),
        ),
    )
    box_3 = scene.add_entity(
        morph=gs.morphs.Box(
            size=(0.2, 0.2, 0.2),  # volume = 0.008 m^3
            pos=(1.0, 0.0, 0.61),
        ),
        material=gs.materials.Rigid(
            rho=25.0,  # mass = 0.2 kg
        ),
        surface=gs.surfaces.Default(
            color=(0.0, 0.0, 1.0, 1.0),
        ),
    )
    # Boolean contact sensors
    bool_sensor_floor = scene.add_sensor(
        gs.sensors.Contact(
            entity_idx=floor.idx,
        )
    )
    bool_sensor_box_2 = scene.add_sensor(
        gs.sensors.Contact(
            entity_idx=box_2.idx,
        )
    )
    # Force sensors (ideal)
    force_sensor = scene.add_sensor(
        gs.sensors.ContactForce(
            entity_idx=box.idx,
        )
    )
    force_sensor_box_2 = scene.add_sensor(
        gs.sensors.ContactForce(
            entity_idx=box_2.idx,
        )
    )
    # Force sensor with clipping, noise, bias, random walk, delay and jitter
    force_sensor_noisy = scene.add_sensor(
        gs.sensors.ContactForce(
            entity_idx=box.idx,
            min_force=0.01,
            max_force=(10.0, 20.0, -GRAVITY / 2),
            noise=NOISE,
            bias=BIAS,
            random_walk=(NOISE * 0.01, NOISE * 0.02, NOISE * 0.03),
            delay=0.05,
            jitter=0.01,
            interpolate=True,
        )
    )
    # Adding extra sensor sharing same dtype to force discontinuous memory layout for ground truth when batched
    scene.add_sensor(
        gs.sensors.IMU(
            entity_idx=box.idx,
        )
    )
    scene.build(n_envs=n_envs)
    # Move CoM to get unbalanced forces on each contact points
    box_com_offset = (0.3, 0.1, 0.0)
    box.set_COM_shift(box_com_offset)
    # Rotate the box make sure the force is correctly computed in local frame
    box_2.set_dofs_position((np.pi / 2, np.pi / 4, np.pi / 2), dofs_idx_local=slice(3, None))
    # Add another cube on top of it make sure the forces are correctly aggregated
    box_3.set_dofs_position((-np.pi / 2, -np.pi / 4, -np.pi / 2), dofs_idx_local=slice(3, None))
    # Note that it is necessary to do a first step, because the initial state right after reset is not valid
    scene.step()
    # Make sure that box CoM is valid
    assert_allclose(box.get_links_pos(ref="root_com")[..., :2], box_com_offset[:2], tol=tol)
    assert not bool_sensor_floor.read().any(), "ContactSensor for floor should not detect any contact yet."
    assert not bool_sensor_box_2.read().any(), "ContactSensor for box_2 should not detect any contact yet."
    assert_allclose(force_sensor_noisy.read_ground_truth(), 0.0, tol=gs.EPS)
    assert_allclose(force_sensor.read(), force_sensor_noisy.read_ground_truth(), tol=gs.EPS)
    # With zero true force, the noisy reading must sit near the configured bias
    assert_allclose(force_sensor_noisy.read(), BIAS, tol=NOISE * 3)
    for _ in range(10):
        scene.step()
    assert bool_sensor_floor.read().all(), "ContactSensor for floor should detect contact with the ground"
    assert not bool_sensor_box_2.read().any(), "ContactSensor for box_2 should not detect any contact yet."
    # Two reads without stepping must return the same cached value
    assert_allclose(force_sensor_noisy.read(), force_sensor_noisy.read(), tol=gs.EPS)
    for _ in range(90):
        scene.step()
    assert bool_sensor_box_2.read().all(), "ContactSensor for box_2 should detect contact with the ground"
    # Moving force back in world frame because box is not perfectly flat on the ground due to CoM offset
    with np.testing.assert_raises(AssertionError):
        assert_allclose(box.get_quat(), 0.0, atol=tol)
    assert_allclose(
        gu.transform_by_quat(force_sensor_noisy.read_ground_truth(), box.get_quat()), (0.0, 0.0, -GRAVITY), tol=tol
    )
    # FIXME: Adding CoM offset on box is disturbing contact force computations on box_2 for some reason...
    assert_allclose(force_sensor_box_2.read_ground_truth(), (-0.8 * GRAVITY, 0.0, 0.0), tol=1e-2)
    assert_allclose(force_sensor_noisy.read()[..., :2], BIAS[:2], tol=NOISE * 3)
    # z component must be clipped at the configured max_force
    assert_allclose(force_sensor_noisy.read()[..., 2], -GRAVITY / 2, tol=gs.EPS)
# ------------------------------------------------------------------------------------------
# ------------------------------------ Raycast Sensors -------------------------------------
# ------------------------------------------------------------------------------------------
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
def test_raycaster_hits(show_viewer, n_envs):
    """Test if the Raycaster sensor with GridPattern rays pointing to ground returns the correct distance."""
    NUM_RAYS_XY = (3, 5)
    SPHERE_POS = (2.5, 0.5, 1.0)
    BOX_SIZE = 0.05
    RAYCAST_BOX_SIZE = 0.1
    RAYCAST_GRID_SIZE_X = 1.0
    RAYCAST_HEIGHT = 1.0
    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(-3.0, RAYCAST_GRID_SIZE_X * (NUM_RAYS_XY[1] / NUM_RAYS_XY[0]), 2 * RAYCAST_HEIGHT),
            camera_lookat=(1.5, RAYCAST_GRID_SIZE_X * (NUM_RAYS_XY[1] / NUM_RAYS_XY[0]), RAYCAST_HEIGHT),
        ),
        vis_options=gs.options.VisOptions(
            rendered_envs_idx=(0,),
            env_separate_rigid=False,
        ),
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(gs.morphs.Plane())
    # Sphere mount: the spherical raycaster scans outwards from its center
    spherical_sensor = scene.add_entity(
        gs.morphs.Sphere(
            radius=RAYCAST_HEIGHT,
            pos=SPHERE_POS,
            fixed=True,
        ),
    )
    spherical_raycaster = scene.add_sensor(
        gs.sensors.Raycaster(
            pattern=gs.sensors.raycaster.SphericalPattern(
                n_points=NUM_RAYS_XY,
            ),
            entity_idx=spherical_sensor.idx,
            return_world_frame=False,
            draw_debug=show_viewer,
            debug_ray_start_color=(0.0, 0.0, 0.0, 0.0),
            debug_ray_hit_color=(1.0, 0.0, 0.0, 1.0),
        )
    )
    # Box mount for the downward-looking grid raycaster
    grid_sensor = scene.add_entity(
        gs.morphs.Box(
            size=(RAYCAST_BOX_SIZE, RAYCAST_BOX_SIZE, RAYCAST_BOX_SIZE),
            pos=(0.0, 0.0, RAYCAST_HEIGHT + 0.5 * RAYCAST_BOX_SIZE),
            collision=False,
            fixed=False,
        ),
    )
    grid_res = RAYCAST_GRID_SIZE_X / (NUM_RAYS_XY[0] - 1)
    grid_size_y = grid_res * (NUM_RAYS_XY[1] - 1)
    grid_raycaster = scene.add_sensor(
        gs.sensors.Raycaster(
            pattern=gs.sensors.raycaster.GridPattern(
                resolution=grid_res,
                size=(RAYCAST_GRID_SIZE_X, grid_size_y),
                direction=(0.0, 0.0, -1.0),  # pointing downwards to ground
            ),
            entity_idx=grid_sensor.idx,
            pos_offset=(0.0, 0.0, -0.5 * RAYCAST_BOX_SIZE),
            return_world_frame=True,
            draw_debug=show_viewer,
            debug_ray_start_color=(0.0, 0.0, 0.0, 0.0),
            debug_ray_hit_color=(0.0, 1.0, 0.0, 1.0),
        )
    )
    depth_camera = scene.add_sensor(
        gs.sensors.DepthCamera(
            pattern=gs.sensors.raycaster.DepthCameraPattern(
                res=NUM_RAYS_XY[::-1],
            ),
            entity_idx=spherical_sensor.idx,
            draw_debug=show_viewer,
            debug_ray_start_color=(0.0, 0.0, 0.0, 0.0),
            debug_ray_hit_color=(0.0, 0.0, 1.0, 1.0),
        ),
    )
    obstacle_1 = scene.add_entity(
        gs.morphs.Box(
            size=(BOX_SIZE, BOX_SIZE, BOX_SIZE),
            pos=(grid_res, grid_res, 0.5 * BOX_SIZE),
        ),
    )
    obstacle_2 = scene.add_entity(
        gs.morphs.Box(
            size=(BOX_SIZE, BOX_SIZE, BOX_SIZE),
            pos=(RAYCAST_GRID_SIZE_X, grid_size_y, RAYCAST_HEIGHT + RAYCAST_BOX_SIZE + BOX_SIZE),
            fixed=True,
        ),
    )
    # Build the simulation and do one step
    scene.build(n_envs=n_envs)
    batch_shape = (n_envs,) if n_envs > 0 else ()
    # Validate grid raycast: (obstacle position, sensor position, expected hit cell)
    for obstacle_pos, sensor_pos, hit_ij in (
        (None, None, (-1, -2)),
        ((grid_res, grid_res, BOX_SIZE), None, (-1, -2)),
        (None, (*(grid_res * (e - 2) for e in NUM_RAYS_XY), RAYCAST_HEIGHT + 0.5 * RAYCAST_BOX_SIZE), (1, 0)),
    ):
        # Update obstacle and/or sensor position if necessary
        if obstacle_pos is not None:
            obstacle_1.set_pos(np.tile(obstacle_pos, (*batch_shape, 1)))
        obstacle_pos = obstacle_1.get_pos()
        if sensor_pos is not None:
            grid_sensor.set_pos(np.tile(sensor_pos, (*batch_shape, 1)))
        scene.sim._sensor_manager.step()
        if show_viewer:
            scene.visualizer.update(force=True)
        # Fetch updated sensor data
        grid_hits = grid_raycaster.read().points
        grid_distances = grid_raycaster.read().distances
        assert grid_distances.shape == (*batch_shape, *NUM_RAYS_XY)
        # Check hits
        grid_sensor_origin = grid_sensor.get_pos()
        x = torch.linspace(-0.5, 0.5, NUM_RAYS_XY[0]) * RAYCAST_GRID_SIZE_X + grid_sensor_origin[..., [0]]
        y = torch.linspace(-0.5, 0.5, NUM_RAYS_XY[1]) * grid_size_y + grid_sensor_origin[..., [1]]
        # xg, yg = torch.meshgrid(x, y, indexing="ij")
        xg = x.unsqueeze(-1).expand((*batch_shape, -1, NUM_RAYS_XY[1]))
        yg = y.unsqueeze(-2).expand((*batch_shape, NUM_RAYS_XY[0], -1))
        zg = torch.zeros((*batch_shape, *NUM_RAYS_XY))
        zg[(..., *hit_ij)] = obstacle_pos[..., 2] + 0.5 * BOX_SIZE
        grid_hits_ref = torch.stack([xg, yg, zg], dim=-1)
        assert_allclose(grid_hits, grid_hits_ref, tol=gs.EPS)
        # Check distances
        grid_distances_ref = torch.full((*batch_shape, *NUM_RAYS_XY), RAYCAST_HEIGHT)
        grid_distances_ref[(..., *hit_ij)] = RAYCAST_HEIGHT - obstacle_pos[..., 2] - 0.5 * BOX_SIZE
        assert_allclose(grid_distances, grid_distances_ref, tol=gs.EPS)
    # Validate spherical raycast
    spherical_distances = spherical_raycaster.read().distances
    assert spherical_distances.shape == (*batch_shape, *NUM_RAYS_XY)
    # Note that the tolerance must be large because the sphere geometry is discretized
    assert_allclose(spherical_distances, RAYCAST_HEIGHT, tol=5e-3)
    # Check that we can read image from depth camera
    assert_equal(depth_camera.read_image().shape, batch_shape + NUM_RAYS_XY)
    # Note that the tolerance must be large because the sphere geometry is discretized
    assert_allclose(depth_camera.read_image(), RAYCAST_HEIGHT, tol=5e-3)
    # Simulate for a while and check again that the ray is casted properly
    offset = torch.from_numpy(np.random.rand(*batch_shape, 3)).to(dtype=gs.tc_float, device=gs.device)
    for entity in (grid_sensor, obstacle_1, obstacle_2):
        pos = entity.get_pos() + offset
        if entity is obstacle_2:
            pos[..., 2] = BOX_SIZE / 2
        entity.set_pos(pos)
    if show_viewer:
        scene.visualizer.update(force=True)
    grid_sensor_pos = grid_sensor.get_pos().clone()
    for _ in range(60):
        scene.step()
        # NOTE(review): indentation was lost in the source — assuming the sensor
        # pose is pinned back every step so the non-fixed, collision-free sensor
        # box does not fall during the simulation; confirm against upstream.
        grid_sensor.set_pos(grid_sensor_pos)
    scene.sim._sensor_manager.step()
    if show_viewer:
        scene.visualizer.update(force=True)
    grid_distances = grid_raycaster.read().distances
    grid_distances_ref = torch.full((*batch_shape, *NUM_RAYS_XY), RAYCAST_HEIGHT)
    grid_distances_ref[(..., -1, -2)] = RAYCAST_HEIGHT - BOX_SIZE
    grid_distances_ref[(..., *hit_ij)] = RAYCAST_HEIGHT - BOX_SIZE
    grid_distances_ref += offset[..., 2].reshape((*(-1 for e in batch_shape), 1, 1))
    assert_allclose(grid_distances, grid_distances_ref, tol=1e-3)
@pytest.mark.required
def test_lidar_bvh_parallel_env(show_viewer, tol):
    """Verify each environment receives a different lidar distance when geometries differ."""
    scene = gs.Scene(
        vis_options=gs.options.VisOptions(
            rendered_envs_idx=(1,),
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(1, -5, 3),
            camera_lookat=(1, 0.5, 0),
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(gs.morphs.Plane())
    sensor_mount = scene.add_entity(
        gs.morphs.Box(
            size=(0.1, 0.1, 0.1),
            pos=(0.0, 0.0, 0.5),
            fixed=True,
            collision=False,
        )
    )
    obstacle_1 = scene.add_entity(
        gs.morphs.Box(
            size=(0.2, 0.2, 0.2),
            pos=(1.0, 0.0, 0.5),
            fixed=True,
        ),
    )
    obstacle_2 = scene.add_entity(
        gs.morphs.Box(
            size=(0.05, 0.4, 0.4),
            pos=(1.0, 0.0, 0.5),
            fixed=True,
        ),
    )
    # Single ray (1x1 pattern, zero field of view)
    lidar = scene.add_sensor(
        gs.sensors.Lidar(
            entity_idx=sensor_mount.idx,
            pattern=gs.options.sensors.SphericalPattern(
                n_points=(1, 1),
                fov=(0.0, 0.0),
            ),
            max_range=5.0,
            draw_debug=show_viewer,
            debug_ray_start_color=(0.0, 0.0, 0.0, 0.0),
            debug_ray_hit_color=(1.0, 0.0, 0.0, 1.0),
        )
    )
    scene.build(n_envs=2)
    # Give each environment a different sensor/obstacle layout
    sensor_positions = np.array([[0.0, 0.0, 0.5], [0.0, 1.0, 0.5]], dtype=gs.np_float)
    obstacle_1_positions = np.array([[1.1, 0.0, 0.5], [2.5, 1.0, 0.5]], dtype=gs.np_float)
    obstacle_2_positions = np.array([[1.4, 0.0, 0.5], [2.2, 1.0, 0.5]], dtype=gs.np_float)
    sensor_mount.set_pos(sensor_positions)
    obstacle_1.set_pos(obstacle_1_positions)
    obstacle_2.set_pos(obstacle_2_positions)
    scene.step()
    distances = lidar.read().distances
    assert distances.shape == (2, 1, 1)
    lidar_distances = distances[:, 0, 0]
    # Closest front face among the two obstacles (half-depths 0.1 and 0.025)
    front_positions = np.minimum(obstacle_1_positions[:, 0] - 0.1, obstacle_2_positions[:, 0] - 0.025)
    expected_distances = front_positions - sensor_positions[:, 0]
    assert_allclose(lidar_distances, expected_distances, tol=tol)
@pytest.mark.required
def test_lidar_cache_offset_parallel_env(show_viewer, tol):
    """Three identical raycasters attached to the same entity must all return
    valid (non-zero) hits, i.e. per-sensor cache offsets do not corrupt each
    other's data."""
    scene = gs.Scene(
        show_viewer=show_viewer,
    )
    scene.add_entity(
        morph=gs.morphs.Plane(),
    )
    cube = scene.add_entity(
        morph=gs.morphs.Box(
            size=(0.1, 0.1, 1.0),
            pos=(0.0, 0.0, 0.5),
        ),
    )
    # Three sensors with an identical configuration
    sensors = [
        scene.add_sensor(
            gs.sensors.Raycaster(
                pattern=gs.sensors.raycaster.SphericalPattern(
                    n_points=(2, 2),
                ),
                entity_idx=cube.idx,
                return_world_frame=False,
            )
        ),
        scene.add_sensor(
            gs.sensors.Raycaster(
                pattern=gs.sensors.raycaster.SphericalPattern(
                    n_points=(2, 2),
                ),
                entity_idx=cube.idx,
                return_world_frame=False,
            )
        ),
        scene.add_sensor(
            gs.sensors.Raycaster(
                pattern=gs.sensors.raycaster.SphericalPattern(
                    n_points=(2, 2),
                ),
                entity_idx=cube.idx,
                return_world_frame=False,
            )
        ),
    ]
    scene.build()
    scene.step()
    # Every sensor must have at least one non-trivial hit
    for sensor in sensors:
        sensor_data = sensor.read()
        assert (sensor_data.distances > gs.EPS).any()
        assert (sensor_data.points.abs() > gs.EPS).any()
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
def test_kinematic_contact_probe_box_support(show_viewer, tol, n_envs):
    """Test KinematicContactProbe for a box resting on the ground and a fixed sphere on top of it."""
    BOX_SIZE = 0.5
    PROBE_RADIUS = 0.05
    PENETRATION = 0.02
    STIFFNESS = 100.0
    SPHERE_RADIUS = 0.1
    NOISE = 0.001
    GRAVITY = -10.0
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            gravity=(0.0, 0.0, GRAVITY),
        ),
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(gs.morphs.Plane())
    box = scene.add_entity(
        gs.morphs.Box(
            size=(BOX_SIZE, BOX_SIZE, BOX_SIZE),
            pos=(0.0, 0.0, BOX_SIZE / 2 - PENETRATION),  # box is penetrating ground plane
            fixed=False,  # probe will not detect fixed-fixed contact
        ),
    )
    sphere = scene.add_entity(
        gs.morphs.Sphere(
            radius=SPHERE_RADIUS,
            pos=(0.0, 0.0, BOX_SIZE + SPHERE_RADIUS + 0.2),  # start with sphere above the box
            fixed=True,
        ),
    )
    probe_normals = (
        (0.0, 0.0, 1.0),
        (0.0, 0.0, 1.0),
        (0.0, 0.0, 1.0),
        (0.0, 0.0, -1.0),
    )
    # Four probes on the box: three on the top face, one on the bottom face
    probe = scene.add_sensor(
        gs.sensors.KinematicContactProbe(
            entity_idx=box.idx,
            probe_local_pos=(
                (0.0, 0.0, BOX_SIZE / 2),  # top of box, center
                (BOX_SIZE / 4, BOX_SIZE / 4, BOX_SIZE / 2),  # top of box
                (-BOX_SIZE / 4, -BOX_SIZE / 4, BOX_SIZE / 2),  # top of box
                (0.0, 0.0, -BOX_SIZE / 2),  # bottom of box, center
            ),
            probe_local_normal=probe_normals,
            radius=(
                PROBE_RADIUS,
                PROBE_RADIUS / 10,  # small radius which cannot detect sphere unless it's perfectly on top
                BOX_SIZE / 3,  # large radius that can detect sphere when not aligned
                PROBE_RADIUS,
            ),
            stiffness=STIFFNESS,
            noise=NOISE,
            random_walk=NOISE * 0.1,
            draw_debug=show_viewer,
        )
    )
    # Single noise-free probe at the bottom of the sphere
    sphere_probe = scene.add_sensor(
        gs.sensors.KinematicContactProbe(
            entity_idx=sphere.idx,
            probe_local_pos=[(0.0, 0.0, -SPHERE_RADIUS)],
            probe_local_normal=[(0.0, 0.0, -1.0)],
            radius=PROBE_RADIUS,
            stiffness=STIFFNESS,
            debug_sphere_color=(0.0, 0.0, 1.0, 0.5),
            draw_debug=show_viewer,
        )
    )
    scene.build(n_envs=n_envs)
    scene.step()
    noisy_data = probe.read()
    box_data = probe.read_ground_truth()
    # Noise must actually perturb the readings...
    with np.testing.assert_raises(AssertionError):
        assert_allclose(noisy_data.penetration, box_data.penetration, tol=gs.EPS)
    with np.testing.assert_raises(AssertionError):
        assert_allclose(noisy_data.force, box_data.force, tol=gs.EPS)
    # ... but stay within a few standard deviations of the ground truth
    noise_tol = NOISE * 10.0
    assert_allclose(noisy_data.penetration, box_data.penetration, atol=noise_tol)
    assert_allclose(noisy_data.force, box_data.force, atol=noise_tol)
    # Check that the box's bottom probe (idx 3) detects the ground
    assert (box_data.penetration[..., 3] > tol).all(), "Bottom probe should detect ground contact"
    assert (box_data.force[..., 3, 2] > tol).all(), "Bottom probe should have upward force from ground"
    # Forces should be equivalent to the penetration * stiffness along normal vector
    normals = torch.stack([-torch.tensor(n) for n in probe_normals])
    expected_force = (box_data.penetration * STIFFNESS).unsqueeze(-1) * normals
    assert_allclose(box_data.force, expected_force, tol=tol)
    # Top probes should not detect anything yet
    assert_allclose(box_data.penetration[..., :3], 0.0, tol=gs.EPS)
    assert_allclose(box_data.force[..., :3, :], 0.0, tol=gs.EPS)
    # Now position the sphere to penetrate the top of the box
    sphere.set_pos((0.0, 0.0, BOX_SIZE + SPHERE_RADIUS - PENETRATION))
    scene.step()
    box_data = probe.read_ground_truth()
    sphere_data = sphere_probe.read()
    assert (box_data.penetration[..., 0] > tol).all(), "Top probe should detect sphere contact"
    assert (box_data.force[..., 0, 2] < -tol).all(), "Top probe should have downward force from sphere"
    assert (sphere_data.penetration[..., 0] > tol).all(), "Sphere probe should detect box contact"
    assert_allclose(
        sphere_data.penetration[..., 0],
        box_data.penetration[..., 0],
        tol=2e-3,
        err_msg="Sphere probe penetration should match top box probe penetration",
    )
    assert_equal(
        box_data.penetration[..., 1], 0.0, err_msg="Noncenter probe with small radius should not detect contact"
    )
    assert (box_data.penetration[..., 2] > tol).all(), "Noncenter probe with large radius should detect contact"
    # Move sphere away and check no contact
    sphere.set_pos((0.0, 0.0, BOX_SIZE / 2 + SPHERE_RADIUS + PROBE_RADIUS + 0.2))
    scene.step()
    sphere_data = sphere_probe.read()
    sphere_ground_truth = sphere_probe.read_ground_truth()
    assert_allclose(sphere_data.penetration, sphere_ground_truth.penetration, tol=gs.EPS)
    assert_allclose(sphere_data.force, sphere_ground_truth.force, tol=gs.EPS)
    assert_allclose(sphere_data.penetration, 0.0, tol=gs.EPS)
    assert_allclose(sphere_data.force, 0.0, tol=gs.EPS)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_sensors.py",
"license": "Apache License 2.0",
"lines": 677,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:genesis/utils/ring_buffer.py | import torch
import genesis as gs
class TensorRingBuffer:
    """
    A helper class for storing a buffer of `torch.Tensor`s without allocating new tensors.

    Parameters
    ----------
    N : int
        The number of tensors to store.
    shape : tuple[int, ...]
        The shape of the tensors to store.
    dtype : torch.dtype
        The dtype of the tensors to store.
    buffer : torch.Tensor | None, optional
        The buffer tensor where all the data is stored. If not provided, a new tensor is allocated.
    idx : torch.Tensor, optional
        The index reference to the most recently updated position in the ring buffer as a mutable 0D torch.Tensor of
        integer dtype. If not provided, it is initialized to -1.
    """

    def __init__(
        self,
        N: int,
        shape: tuple[int, ...],
        dtype=torch.float32,
        buffer: torch.Tensor | None = None,
        idx: torch.Tensor | None = None,
    ):
        if buffer is None:
            self.buffer = torch.empty((N, *shape), dtype=dtype, device=gs.device)
        else:
            assert buffer.shape == (N, *shape)
            self.buffer = buffer
        self.N = N
        if idx is None:
            self._idx = torch.tensor(-1, dtype=torch.int64, device=gs.device)
        else:  # torch.Tensor
            assert idx.ndim == 0 and idx.dtype in (torch.int32, torch.int64)
            # '.to()' must be a no-op here (i.e. 'idx' must already live on 'gs.device'): the index tensor is
            # required to be shared rather than copied, so that views created by `__getitem__` keep pointing to
            # the same position counter as their parent buffer.
            self._idx = idx.to(device=gs.device)
            assert self._idx is idx

    def at(
        self, idx: int | torch.Tensor, *others_idx: int | slice | torch.Tensor, copy: bool | None = None
    ) -> torch.Tensor:
        """
        Get the value of the tensor at the given index.

        Parameters
        ----------
        idx : int | torch.Tensor
            Index of the element to get from most recent to least recent (that has not been discarded yet).
            Passing a 1D tensor for advanced (aka fancy) indexing is supported, but this is requiring allocating fresh
            memory instead of returning a view, which is less efficient.
        others_idx : int | slice | torch.Tensor, optional
            Index of the elements to extract from the selected tensor. In case of advanced indexing, this is equivalent
            but significantly more efficient than doing this extraction in a latter stage.
        copy: bool | None, optional
            If `None`, then memory will be allocated only if necessary. If `True`, then memory will be allocated
            systematically instead of returning a view. If `False`, then allocating memory is forbidden and will raise
            an exception if returning a view is impossible.
        """
        # Map the "age" index (0 being the most recently written entry) onto the physical slot in the buffer.
        rel_idx = (self._idx - idx) % self.N
        assert len(others_idx) < self.buffer.ndim
        tensor = self.buffer[(rel_idx, *others_idx)]
        # Basic indexing returns a view sharing the buffer's storage; advanced indexing allocates fresh memory.
        # Enforce the requested copy policy accordingly.
        if tensor.untyped_storage().data_ptr() == self.buffer.untyped_storage().data_ptr():
            if copy:
                tensor = tensor.clone()
        elif copy is False:
            gs.raise_exception("Allocating memory is necessary but 'copy=False'.")
        return tensor

    def get(self, idx: int) -> torch.Tensor:
        """
        Get a clone of the tensor stored at the given physical buffer position.

        Note
        ----
        Unlike `at`, this method indexes the underlying storage directly: `idx` is NOT relative to the most
        recently updated position.

        Parameters
        ----------
        idx : int
            Physical position in the underlying buffer.
        """
        return self.buffer[idx].clone()

    def set(self, tensor: torch.Tensor):
        """
        Overwrite the entry at the current position of the ring buffer.

        Parameters
        ----------
        tensor : torch.Tensor
            The tensor to copy into the ring buffer.
        """
        self.buffer[self._idx] = tensor

    def rotate(self):
        """
        Advance the index pointer to the next position in the ring buffer, wrapping around at the end.
        """
        self._idx[()] = (self._idx + 1) % self.N

    def clone(self) -> "TensorRingBuffer":
        """Return a deep copy of this ring buffer (both the data and the index pointer are duplicated)."""
        return TensorRingBuffer(
            self.N,
            self.buffer.shape[1:],
            dtype=self.buffer.dtype,
            buffer=self.buffer.clone(),
            idx=self._idx.clone(),
        )

    def __getitem__(self, key: int | slice | tuple) -> "TensorRingBuffer":
        """
        Enable slicing of the tensor ring buffer.

        Parameters
        ----------
        key : int | slice | tuple
            Slice object (e.g., 3:6) or integer index or tuple of indices

        Returns
        -------
        TensorRingBuffer
            A new ring buffer containing a view of the sliced data
        """
        if isinstance(key, int):
            # Keep the sliced dimension so that the result is still a valid (batched) ring buffer.
            sliced_buffer = self.buffer[:, key : key + 1]
        elif isinstance(key, slice):
            sliced_buffer = self.buffer[:, key]
        elif isinstance(key, tuple):
            indexes = (slice(None),) + key
            sliced_buffer = self.buffer[indexes]
        else:
            raise TypeError(f"Unsupported key type: {type(key)}")
        # The index pointer is shared with the parent buffer on purpose, so both advance together on `rotate`.
        return TensorRingBuffer(
            self.N,
            sliced_buffer.shape[1:],
            dtype=sliced_buffer.dtype,
            buffer=sliced_buffer,
            idx=self._idx,
        )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/ring_buffer.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Genesis-Embodied-AI/Genesis:examples/rigid/single_franka_batch_render.py | import argparse
import numpy as np
import genesis as gs
from genesis.utils.geom import trans_to_T
from genesis.utils.image_exporter import FrameImageExporter
def main():
    """Batch-rendering example: a Franka Panda arm rendered by multiple cameras across multiple environments.

    Builds a scene with a textured ground plane and a Franka robot, adds one debug camera plus three batch-rendered
    cameras (one of them attached to a robot link), steps the simulation, and exports the rendered frames as PNG
    files through `FrameImageExporter`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    parser.add_argument("-c", "--cpu", action="store_true", default=False)
    parser.add_argument("-b", "--n_envs", type=int, default=3)
    parser.add_argument("-s", "--n_steps", type=int, default=2)
    parser.add_argument("-r", "--render_all_cameras", action="store_true", default=False)
    parser.add_argument("-o", "--output_dir", type=str, default="data/test")
    parser.add_argument("-u", "--use_rasterizer", action="store_true", default=False)
    parser.add_argument("-f", "--use_fisheye", action="store_true", default=False)
    parser.add_argument("-d", "--debug", action="store_true", default=False)
    parser.add_argument("-l", "--seg_level", type=str, default="link")
    args = parser.parse_args()

    ########################## init ##########################
    gs.init(backend=gs.cpu if args.cpu else gs.gpu)

    ########################## create a scene ##########################
    scene = gs.Scene(
        vis_options=gs.options.VisOptions(
            segmentation_level=args.seg_level,
        ),
        renderer=gs.options.renderers.BatchRenderer(
            use_rasterizer=args.use_rasterizer,
        ),
    )

    ########################## entities ##########################
    # Ground plane with a per-environment (batched) diffuse texture loaded from a folder of images.
    plane = scene.add_entity(
        gs.morphs.Plane(),
        surface=gs.surfaces.Default(
            diffuse_texture=gs.textures.BatchTexture.from_images(image_folder="textures"),
        ),
    )
    franka = scene.add_entity(
        gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
        visualize_contact=True,
    )

    ########################## cameras ##########################
    # Debug cameras are excluded from the batch renderer; this one is only used for the optional video recording.
    debug_cam = scene.add_camera(
        res=(720, 1280),
        pos=(1.5, -0.5, 1.0),
        lookat=(0.0, 0.0, 0.5),
        fov=60,
        GUI=args.vis,
        debug=True,
    )
    cam_0 = scene.add_camera(
        res=(512, 512),
        pos=(1.5, 0.5, 1.5),
        lookat=(0.0, 0.0, 0.5),
        fov=45,
        GUI=args.vis,
    )
    # Rigidly attach cam_0 to the robot's 7th link, offset by 0.5m along Y in the link frame.
    cam_0.attach(franka.links[6], trans_to_T(np.array([0.0, 0.5, 0.0])))
    cam_1 = scene.add_camera(
        res=(512, 512),
        pos=(1.5, -0.5, 1.5),
        lookat=(0.0, 0.0, 0.5),
        fov=45,
        GUI=args.vis,
    )
    # Top-down camera, optionally using a fisheye projection model.
    cam_2 = scene.add_camera(
        res=(512, 512),
        pos=(0.0, 0.0, 5.0),
        lookat=(0.0, 0.0, 0.0),
        fov=70,
        model="fisheye" if args.use_fisheye else "pinhole",
        GUI=args.vis,
    )
    # One directional (sun-like) red light and one attenuated point light, both casting shadows.
    scene.add_light(
        pos=(0.0, 0.0, 1.5),
        dir=(1.0, 1.0, -2.0),
        color=(1.0, 0.0, 0.0),
        directional=True,
        castshadow=True,
        cutoff=45.0,
        intensity=0.5,
    )
    scene.add_light(
        pos=(4, -4, 4),
        dir=(0, 0, -1),
        directional=False,
        castshadow=True,
        cutoff=80.0,
        intensity=1.0,
        attenuation=0.1,
    )

    ########################## build ##########################
    scene.build(n_envs=args.n_envs)

    # Create an image exporter
    exporter = FrameImageExporter(args.output_dir)

    if args.debug:
        debug_cam.start_recording()
    for i in range(args.n_steps):
        scene.step()
        if args.debug:
            debug_cam.render()
        if args.render_all_cameras:
            # Depth and segmentation are only rendered every other step, exercising both rendering paths.
            color, depth, seg, normal = scene.render_all_cameras(
                rgb=True, depth=i % 2 == 1, segmentation=i % 2 == 1, normal=True
            )
            exporter.export_frame_all_cameras(i, rgb=color, depth=depth, segmentation=seg, normal=normal)
        else:
            color, depth, seg, normal = cam_1.render(
                rgb=False,
                depth=True,
                segmentation=True,
                colorize_seg=True,
                normal=False,
            )
            # NOTE(review): the colorized segmentation image is deliberately exported through the 'rgb' argument
            # here; 'normal' is None since normal rendering is disabled above — confirm this is intended.
            exporter.export_frame_single_camera(i, cam_1.idx, rgb=seg, depth=depth, segmentation=None, normal=normal)
    if args.debug:
        debug_cam.stop_recording("debug_cam.mp4")


if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/rigid/single_franka_batch_render.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:genesis/utils/image_exporter.py | import os
from collections.abc import Iterable, Sequence
from concurrent.futures import ThreadPoolExecutor, Executor
from functools import partial
import torch
import numpy as np
import genesis as gs
from genesis.constants import IMAGE_TYPE
from genesis.utils.misc import tensor_to_array
def as_grayscale_image(
    data: np.ndarray, clip_max: float | None = None, enable_log_scale: bool = False, black_to_white: bool = False
) -> np.ndarray:
    """Convert a batched 2D array of numeric dtype as 8 bits single channel (grayscale) image array for visualization.

    Internally, this method clips non-finite values, optionally applies log scaling (i.e. `log(1.0 + data)`), then
    normalizes values between 0.0 and 1.0, to finally convert to grayscale.

    Parameters
    ----------
    data : ndarray [(N x) H x W]
        The data to normalize as a batched 2D array with any numeric dtype.
    clip_max : float, optional
        The maximum valid value if any. Default to None.
    enable_log_scale: bool, optional
        Whether to apply log scaling before normalization. Default to False.
    black_to_white: bool, optional
        Whether the color is transitioning from black to white as value increases or conversely. Default to False.
    """
    # Cast data to float32
    data_float = data.astype(np.float32)

    # Clip data, with special handling for non-finite values only if necessary for efficiency
    valid_mask = np.isfinite(data_float)
    if np.all(valid_mask):
        data_min = np.min(data_float, axis=(-2, -1), keepdims=True)
        data_max = np.max(data_float, axis=(-2, -1), keepdims=True)
    else:
        data_min = np.min(data_float, axis=(-2, -1), keepdims=True, initial=float("+inf"), where=valid_mask)
        data_max = np.max(data_float, axis=(-2, -1), keepdims=True, initial=float("-inf"), where=valid_mask)
    data_min = np.maximum(data_min, 0.0)
    if clip_max is not None:
        data_max = np.minimum(data_max, clip_max)
    data_float = np.clip(data_float, data_min, data_max)

    # Apply log scaling if requested
    # NOTE(review): 'data_min'/'data_max' are not log-scaled accordingly, so normalized values may leave [0, 1]
    # when log scaling is enabled — verify whether this is intended.
    if enable_log_scale:
        data_float = np.log(1.0 + data_float)

    # Normalize values between 0.0 and 1.0.
    # FIX: 'out' must be provided whenever 'where' is specified, otherwise the entries for which the condition is
    # False are left uninitialized (arbitrary memory) rather than zeroed.
    data_delta = data_max - data_min
    data_rel = data_float - data_min if black_to_white else data_max - data_float
    data_normalized = np.divide(data_rel, data_delta, out=np.zeros_like(data_rel), where=data_delta > gs.EPS)

    # Discretize as unsigned int8
    return (data_normalized * 255.0).astype(np.uint8)
class FrameImageExporter:
    """
    This class enables exporting images from multiple cameras and environments in batch and in parallel, unlike
    `Camera.(start|stop)_recording` API, which only allows for exporting images from a single camera and environment.
    """

    def __init__(self, export_dir: str, depth_clip_max: float = 100.0, enable_depth_log_scale: bool = False):
        """
        Parameters
        ----------
        export_dir : str
            Directory in which PNG files are written. Created (recursively) if it does not exist.
        depth_clip_max : float, optional
            Maximum valid depth value used when converting depth images to grayscale. Default to 100.0.
        enable_depth_log_scale : bool, optional
            Whether to apply log scaling when converting depth images to grayscale. Default to False.
        """
        self.depth_clip_max = depth_clip_max
        self.enable_depth_log_scale = enable_depth_log_scale
        self.export_dir = export_dir
        os.makedirs(export_dir, exist_ok=True)

    def export_frame_all_cameras(
        self,
        i_step: int,
        cameras_idx: Iterable | None = None,
        rgb: Sequence[np.ndarray] | None = None,
        depth: Sequence[np.ndarray] | None = None,
        segmentation: Sequence[np.ndarray] | None = None,
        normal: Sequence[np.ndarray] | None = None,
    ):
        """
        Export multiple frames from different cameras and environments in parallel as PNG files.

        Note
        ----
        All specified sequences of images must have the same length.

        Parameters
        ----------
        i_step : int
            The current step index.
        cameras_idx: Iterable, optional
            Sequence of indices of cameras to export. If None, all cameras are exported.
        rgb: Sequence[ndarray[np.floating]], optional
            RGB image is a sequence of arrays of shape ([n_envs,] H, W, 3).
        depth: Sequence[ndarray[np.floating]], optional
            Depth image is a sequence of arrays of shape ([n_envs,] H, W).
        segmentation: Sequence[ndarray[np.integer]], optional
            Segmentation image is a sequence of arrays of shape ([n_envs,] H, W).
        normal: Sequence[ndarray[np.floating]], optional
            Normal image is a sequence of arrays of shape ([n_envs,] H, W, 3).
        """
        # Pack frames data for convenience
        frames_data = (rgb, depth, segmentation, normal)

        # Early return if nothing to do
        if all(e is None for e in frames_data):
            gs.logger.debug("No images to export.")
            return

        # Make sure that all image sequences are valid, i.e. all non-None sequences have the same length
        try:
            (num_cameras,) = set(map(len, (e for e in frames_data if e is not None)))
        except ValueError as e:
            for img_type, imgs_data in zip(IMAGE_TYPE, frames_data):
                if imgs_data is not None and len(imgs_data) == 0:
                    gs.raise_exception_from(f"'{img_type}' must be a non-empty sequence of arrays.", e)
            gs.raise_exception_from("Specified image sequences have inconsistent length.", e)

        # Set default camera indices if undefined
        if cameras_idx is None:
            cameras_idx = range(num_cameras)
        if num_cameras != len(cameras_idx):
            gs.raise_exception("Camera indices and image sequences have inconsistent length.")

        # Loop over single camera data asynchronously, sharing one executor across all cameras
        with ThreadPoolExecutor() as executor:
            for i_cam, frame_data in zip(
                cameras_idx, zip(*(e if e is not None else (None,) * num_cameras for e in frames_data))
            ):
                self.export_frame_single_camera(i_step, i_cam, *frame_data, executor=executor)

    def export_frame_single_camera(
        self,
        i_step,
        i_cam,
        rgb=None,
        depth=None,
        segmentation=None,
        normal=None,
        *,
        compress_level: int | None = None,
        executor: Executor | None = None,
    ):
        """
        Export multiple frames from a single camera but different environments in parallel as PNG files.

        Parameters
        ----------
        i_step: int
            The current step index.
        i_cam: int
            The index of the camera.
        rgb: ndarray[np.floating], optional
            RGB image array of shape ([n_envs,] H, W, 3).
        depth: ndarray[np.floating], optional
            Depth image array of shape ([n_envs,] H, W).
        segmentation: ndarray[np.integer], optional
            Segmentation image array of shape ([n_envs,] H, W).
        normal: ndarray[np.floating], optional
            Normal image array of shape ([n_envs,] H, W, 3).
        compress_level: int, optional
            Compression level when exporting images as PNG. If None, OpenCV's default compression level (3) is used.
        executor: Executor, optional
            Executor to which I/O bounded jobs (saving to PNG) will be submitted. A local executor will be instantiated
            if none is provided.
        """
        # Postpone import of OpenCV at runtime to reduce hard system dependencies
        import cv2

        # Pack frames data for convenience
        frame_data = (rgb, depth, segmentation, normal)

        # Early return if nothing to do
        if all(e is None for e in frame_data):
            gs.logger.debug("No images to export.")
            return

        # Instantiate a new executor if none is provided
        is_local_executor = False
        if executor is None:
            is_local_executor = True
            executor = ThreadPoolExecutor()

        # Loop over each image type
        exported_types = []
        for img_type, imgs_data in zip(IMAGE_TYPE, frame_data):
            if imgs_data is None:
                continue

            # Convert data to numpy
            if isinstance(imgs_data, torch.Tensor):
                imgs_data = tensor_to_array(imgs_data)
            else:
                imgs_data = np.asarray(imgs_data)

            # Make sure that image data has shape `(n_env, H, W [, C>1])`
            if imgs_data.shape[-1] == 1:
                imgs_data = imgs_data[..., 0]
            if imgs_data.ndim == (3 if imgs_data.shape[-1] <= 4 else 2):
                imgs_data = imgs_data[None]
            if imgs_data.ndim not in (3, 4):
                # FIX: the original message was missing the f-prefix, so the placeholder was printed verbatim.
                gs.raise_exception(f"'{img_type}' images must be arrays of shape ([n_envs,] H, W [, C>1])")

            # Convert image data to grayscale array if necessary
            if img_type == IMAGE_TYPE.DEPTH:
                imgs_data = as_grayscale_image(
                    imgs_data, self.depth_clip_max, self.enable_depth_log_scale, black_to_white=False
                )
            elif img_type == IMAGE_TYPE.SEGMENTATION:
                # `as_grayscale_image` already returns `np.uint8` data, so no extra cast is needed.
                imgs_data = as_grayscale_image(imgs_data, None, enable_log_scale=False, black_to_white=True)

            # Flip channel order if necessary (cv2 expects BGR(A) rather than RGB(A))
            if imgs_data.ndim == 4:
                imgs_data = np.flip(imgs_data, axis=-1)

            # Export image array as (compressed) PNG file.
            # Note that 'pillow>=11' is now consistently faster than 'cv2' when compression level is explicitly
            # specified, yet slower for (implicit) default compression level, namely 3.
            cv2_params = [cv2.IMWRITE_PNG_COMPRESSION, compress_level] if compress_level is not None else None
            for i_env, img_data in enumerate(imgs_data):
                frame_path = os.path.join(self.export_dir, f"{img_type}_cam{i_cam}_env{i_env}_{i_step:03d}.png")
                executor.submit(partial(cv2.imwrite, params=cv2_params), frame_path, img_data)
            exported_types.append((len(imgs_data), img_type.name.lower()))

        if exported_types:
            types_str = ", ".join(f"{num} {type}" for num, type in exported_types)
            gs.logger.info(
                f"Exported ~<{sum(num for num, _ in exported_types)} frame(s) ({types_str})>~ "
                f"from camera ~<{i_cam}>~ at step ~<{i_step}>~ to ~<{self.export_dir}>~"
            )

        # Shutdown executor if necessary
        if is_local_executor:
            executor.shutdown(wait=True)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/image_exporter.py",
"license": "Apache License 2.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/vis/batch_renderer.py | import math
import numpy as np
import torch
import genesis as gs
from genesis.repr_base import RBC
from genesis.constants import IMAGE_TYPE
from genesis.utils.misc import qd_to_torch
from .rasterizer_context import SegmentationColorMap
# Optional imports for platform-specific functionality
try:
from gs_madrona.renderer_gs import MadronaBatchRendererAdapter
_MADRONA_AVAILABLE = True
except ImportError:
MadronaBatchRendererAdapter = None
_MADRONA_AVAILABLE = False
def _transform_camera_quat(quat):
# quat for Madrona needs to be transformed to y-forward
w, x, y, z = torch.unbind(quat, dim=-1)
return torch.stack([x + w, x - w, y - z, y + z], dim=-1) / math.sqrt(2.0)
def _make_tensor(data, *, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    # Build a tensor on the global Genesis device ('torch.tensor' always copies 'data').
    return torch.tensor(data, dtype=dtype, device=gs.device)
class GenesisGeomRetriever:
    """
    Adapter extracting visual-geometry data (meshes, textures, materials, poses) from the Genesis rigid-body solver
    in the flat-array layout consumed by the Madrona batch renderer, while maintaining the segmentation color map.
    """

    def __init__(self, rigid_solver, seg_level):
        self.rigid_solver = rigid_solver
        self.seg_color_map = SegmentationColorMap(to_torch=True)
        # Segmentation granularity: "geom", "link" or "entity" (see `get_seg_key`).
        self.seg_level = seg_level
        # Per-vgeom segmentation indices; populated as an int32 torch tensor by `build`.
        self.geom_idxc = None
        self.default_geom_group = 2
        self.default_enabled_geom_groups = np.array([self.default_geom_group], dtype=np.int32)

    def build(self):
        # Register one segmentation key per visual geom and generate the segmentation colors.
        self.n_vgeoms = self.rigid_solver.n_vgeoms
        self.geom_idxc = []
        vgeoms = self.rigid_solver.vgeoms
        for vgeom in vgeoms:
            seg_key = self.get_seg_key(vgeom)
            seg_idxc = self.seg_color_map.seg_key_to_idxc(seg_key)
            self.geom_idxc.append(seg_idxc)
        self.geom_idxc = torch.tensor(self.geom_idxc, dtype=torch.int32, device=gs.device)
        self.seg_color_map.generate_seg_colors()

    def get_seg_key(self, vgeom):
        # Segmentation key of a visual geom at the configured granularity level.
        if self.seg_level == "geom":
            return (vgeom.entity.idx, vgeom.link.idx, vgeom.idx)
        elif self.seg_level == "link":
            return (vgeom.entity.idx, vgeom.link.idx)
        elif self.seg_level == "entity":
            return vgeom.entity.idx
        else:
            gs.raise_exception(f"Unsupported segmentation level: {self.seg_level}")

    # FIXME: Use a kernel to do it efficiently
    def retrieve_rigid_meshes_static(self):
        """Collect the static (time-invariant) mesh, texture and material data of all visual geoms.

        Returns a dict of flat numpy arrays plus per-geom offsets, in the layout expected by the renderer.
        """
        args = {}
        vgeoms = self.rigid_solver.vgeoms
        # Retrieve geom data
        mesh_vertices = self.rigid_solver.vverts_info.init_pos.to_numpy()
        mesh_faces = self.rigid_solver.vfaces_info.vverts_idx.to_numpy()
        mesh_vertex_offsets = self.rigid_solver.vgeoms_info.vvert_start.to_numpy()
        mesh_face_starts = self.rigid_solver.vgeoms_info.vface_start.to_numpy()
        mesh_face_ends = self.rigid_solver.vgeoms_info.vface_end.to_numpy()
        total_uv_size = 0
        mesh_uvs = []
        mesh_uv_offsets = []
        # Re-index the faces of each geom relative to the geom's own first vertex.
        for i in range(self.n_vgeoms):
            mesh_faces[mesh_face_starts[i] : mesh_face_ends[i]] -= mesh_vertex_offsets[i]
        # NOTE(review): 'geom_data_ids' computed in this loop is never stored in 'args' (overwritten by
        # 'np.arange' below) — appears to be dead code; verify.
        geom_data_ids = []
        for vgeom in vgeoms:
            seg_key = self.get_seg_key(vgeom)
            seg_id = self.seg_color_map.seg_key_to_idxc(seg_key)
            geom_data_ids.append(seg_id)
            if vgeom.uvs is not None:
                mesh_uvs.append(vgeom.uvs.astype(np.float32))
                mesh_uv_offsets.append(total_uv_size)
                total_uv_size += vgeom.uvs.shape[0]
            else:
                # -1 marks a geom without texture coordinates.
                mesh_uv_offsets.append(-1)
        args["mesh_vertices"] = mesh_vertices
        args["mesh_vertex_offsets"] = mesh_vertex_offsets
        args["mesh_faces"] = mesh_faces
        args["mesh_face_offsets"] = mesh_face_starts
        args["mesh_texcoords"] = np.concatenate(mesh_uvs, axis=0) if mesh_uvs else np.empty((0, 2), np.float32)
        args["mesh_texcoord_offsets"] = np.array(mesh_uv_offsets, np.int32)
        args["geom_types"] = np.full((self.n_vgeoms,), 7, dtype=np.int32)  # 7 stands for mesh
        args["geom_groups"] = np.full((self.n_vgeoms,), self.default_geom_group, dtype=np.int32)
        args["geom_data_ids"] = np.arange(self.n_vgeoms, dtype=np.int32)
        args["geom_sizes"] = np.ones((self.n_vgeoms, 3), dtype=np.float32)
        args["enabled_geom_groups"] = self.default_enabled_geom_groups
        # Retrieve material data
        geom_mat_ids = []
        num_materials = 0
        # De-duplication tables: materials are keyed by packed RGBA, textures by image path.
        materials_indices = {}
        mat_rgbas = []
        mat_texture_indices = []
        mat_texture_offsets = []
        total_mat_textures = 0
        num_textures = 0
        texture_indices = {}
        texture_widths = []
        texture_heights = []
        texture_nchans = []
        texture_offsets = []
        texture_data = []
        total_texture_size = 0
        for vgeom in vgeoms:
            geom_surface = vgeom.surface
            geom_textures = geom_surface.get_rgba(batch=True).textures
            geom_texture_indices = []
            for geom_texture in geom_textures:
                if isinstance(geom_texture, gs.textures.ImageTexture) and geom_texture.image_array is not None:
                    # Textures sharing the same image path are registered only once.
                    texture_id = geom_texture.image_path
                    if texture_id not in texture_indices:
                        texture_idx = num_textures
                        if texture_id is not None:
                            texture_indices[texture_id] = texture_idx
                        texture_widths.append(geom_texture.image_array.shape[1])
                        texture_heights.append(geom_texture.image_array.shape[0])
                        assert geom_texture.channel() == 4
                        texture_nchans.append(geom_texture.channel())
                        texture_offsets.append(total_texture_size)
                        texture_data.append(geom_texture.image_array.flat)
                        num_textures += 1
                        total_texture_size += geom_texture.image_array.size
                    else:
                        texture_idx = texture_indices[texture_id]
                    geom_texture_indices.append(texture_idx)
            # TODO: support batch rgba
            geom_rgbas = [
                geom_texture.image_color if isinstance(geom_texture, gs.textures.ImageTexture) else geom_texture.color
                for geom_texture in geom_textures
            ]
            for i in range(1, len(geom_rgbas)):
                if not np.allclose(geom_rgbas[0], geom_rgbas[i], atol=gs.EPS):
                    gs.logger.warning("Batch Color is not yet supported. Use the first texture's color instead.")
                    break
            geom_rgba = geom_rgbas[0]
            mat_id = None
            if len(geom_texture_indices) == 0:
                # Untextured geoms are de-duplicated by their RGBA color packed into a single 32-bit key.
                geom_rgba_int = (np.array(geom_rgba) * 255.0).astype(np.uint32)
                mat_id = geom_rgba_int[0] << 24 | geom_rgba_int[1] << 16 | geom_rgba_int[2] << 8 | geom_rgba_int[3]
            if mat_id not in materials_indices:
                material_idx = num_materials
                # Textured geoms keep 'mat_id is None' and are therefore never de-duplicated.
                if mat_id is not None:
                    materials_indices[mat_id] = material_idx
                mat_rgbas.append(geom_rgba)
                mat_texture_indices.extend(geom_texture_indices)
                mat_texture_offsets.append(total_mat_textures)
                num_materials += 1
                total_mat_textures += len(geom_texture_indices)
            else:
                material_idx = materials_indices[mat_id]
            geom_mat_ids.append(material_idx)
        args["geom_mat_ids"] = np.array(geom_mat_ids, np.int32)
        args["tex_widths"] = np.array(texture_widths, np.int32)
        args["tex_heights"] = np.array(texture_heights, np.int32)
        args["tex_nchans"] = np.array(texture_nchans, np.int32)
        args["tex_data"] = np.concatenate(texture_data, axis=0) if texture_data else np.array([], np.uint8)
        args["tex_offsets"] = np.array(texture_offsets, np.int64)
        args["mat_rgba"] = np.array(mat_rgbas, np.float32)
        args["mat_tex_ids"] = np.array(mat_texture_indices, np.int32)
        args["mat_tex_offsets"] = np.array(mat_texture_offsets, np.int32)
        return args

    # FIXME: Use a kernel to do it efficiently
    def retrieve_rigid_property_torch(self, num_worlds):
        # Per-world geom properties; material ids of -1 and unit sizes act as "no override" defaults.
        geom_rgb = torch.empty((0, self.n_vgeoms), dtype=torch.uint32, device=gs.device)
        geom_mat_ids = torch.full((num_worlds, self.n_vgeoms), -1, dtype=torch.int32, device=gs.device)
        geom_sizes = torch.ones((self.n_vgeoms, 3), dtype=torch.float32, device=gs.device)
        geom_sizes = geom_sizes[None].repeat(num_worlds, 1, 1)
        return geom_mat_ids, geom_rgb, geom_sizes

    # FIXME: Use a kernel to do it efficiently
    def retrieve_rigid_state_torch(self):
        # Current per-geom poses, transposed to (n_geoms, n_worlds, ...) contiguous layout.
        geom_pos = qd_to_torch(self.rigid_solver.vgeoms_state.pos)
        geom_rot = qd_to_torch(self.rigid_solver.vgeoms_state.quat)
        geom_pos = geom_pos.transpose(0, 1).contiguous()
        geom_rot = geom_rot.transpose(0, 1).contiguous()
        return geom_pos, geom_rot
class Light:
    """Read-only description of a single light source and its rendering attributes."""

    def __init__(self, pos, dir, color, intensity, directional, castshadow, cutoff, attenuation):
        # Normalize the direction vector and freeze it as a tuple.
        self._dir = tuple(dir / np.linalg.norm(dir))
        self._pos = pos
        self._color = color
        self._intensity = intensity
        self._directional = directional
        self._castshadow = castshadow
        self._cutoff = cutoff
        self._attenuation = attenuation

    @property
    def pos(self):
        """Light position."""
        return self._pos

    @property
    def dir(self):
        """Unit-norm light direction, as a tuple."""
        return self._dir

    @property
    def color(self):
        """Light RGB color."""
        return self._color

    @property
    def intensity(self):
        """Light intensity."""
        return self._intensity

    @property
    def directional(self):
        """Whether this is a directional (sun-like) light rather than a point light."""
        return self._directional

    @property
    def castshadow(self):
        """Whether this light casts shadows."""
        return self._castshadow

    @property
    def cutoffRad(self):
        """Cutoff angle, in radians."""
        return math.radians(self._cutoff)

    @property
    def cutoffDeg(self):
        """Cutoff angle, in degrees (as provided at construction)."""
        return self._cutoff

    @property
    def attenuation(self):
        """Light attenuation factor."""
        return self._attenuation
class BatchRenderer(RBC):
    """
    This class is used to manage batch rendering of all (non-debug) cameras of a scene through the Madrona
    renderer, with per-step caching of the rendered images.
    """

    def __init__(self, visualizer, renderer_options, vis_options):
        self._visualizer = visualizer
        self._lights = gs.List()
        self._use_rasterizer = renderer_options.use_rasterizer
        self._renderer = None
        self._geom_retriever = GenesisGeomRetriever(self._visualizer.scene.rigid_solver, vis_options.segmentation_level)
        # Cache mapping (image type, render options) pairs to the last rendered images for the current step.
        self._data_cache = {}
        # Simulation time of the last render. -1 forces a fresh render.
        self._t = -1

    def add_light(self, pos, dir, color, intensity, directional, castshadow, cutoff, attenuation):
        """Register a new light source. Must be called before `build`."""
        self._lights.append(Light(pos, dir, color, intensity, directional, castshadow, cutoff, attenuation))

    def build(self):
        """
        Build all cameras in the batch and initialize Madrona renderer
        """
        if not _MADRONA_AVAILABLE:
            gs.raise_exception("Madrona batch renderer is only supported on Linux x86-64.")
        if gs.backend != gs.cuda:
            gs.raise_exception("BatchRenderer requires CUDA backend.")
        gpu_id = gs.device.index if gs.device.index is not None else 0
        # Extract the complete list of non-debug cameras
        self._cameras = gs.List([camera for camera in self._visualizer._cameras if not camera.debug])
        if not self._cameras:
            gs.raise_exception("Please add at least one camera when using BatchRender.")
        # Build the geometry retriever
        self._geom_retriever.build()
        # Make sure that all cameras have identical resolution
        try:
            ((camera_width, camera_height),) = set(camera.res for camera in self._cameras)
        except ValueError as e:
            gs.raise_exception_from("All cameras must have the exact same resolution when using BatchRender.", e)
        self._renderer = MadronaBatchRendererAdapter(
            geom_retriever=self._geom_retriever,
            gpu_id=gpu_id,
            num_worlds=max(self._visualizer.scene.n_envs, 1),
            num_lights=len(self._lights),
            cam_fovs_tensor=_make_tensor([camera.fov for camera in self._cameras]),
            cam_znears_tensor=_make_tensor([camera.near for camera in self._cameras]),
            cam_zfars_tensor=_make_tensor([camera.far for camera in self._cameras]),
            cam_proj_types_tensor=_make_tensor(
                [camera.model == "fisheye" for camera in self._cameras], dtype=torch.uint32
            ),
            batch_render_view_width=camera_width,
            batch_render_view_height=camera_height,
            add_cam_debug_geo=False,
            use_rasterizer=self._use_rasterizer,
        )
        self._renderer.init(
            cam_pos_tensor=torch.stack([torch.atleast_2d(camera.get_pos()) for camera in self._cameras], dim=1),
            cam_rot_tensor=_transform_camera_quat(
                torch.stack([torch.atleast_2d(camera.get_quat()) for camera in self._cameras], dim=1)
            ),
            lights_pos_tensor=_make_tensor([light.pos for light in self._lights]).reshape((-1, 3)),
            lights_dir_tensor=_make_tensor([light.dir for light in self._lights]).reshape((-1, 3)),
            lights_rgb_tensor=_make_tensor([light.color for light in self._lights]).reshape((-1, 3)),
            lights_directional_tensor=_make_tensor([light.directional for light in self._lights], dtype=torch.bool),
            lights_castshadow_tensor=_make_tensor([light.castshadow for light in self._lights], dtype=torch.bool),
            lights_cutoff_tensor=_make_tensor([light.cutoffRad for light in self._lights]),
            lights_attenuation_tensor=_make_tensor([light.attenuation for light in self._lights]),
            lights_intensity_tensor=_make_tensor([light.intensity for light in self._lights]),
        )

    def update_scene(self, force_render: bool = False):
        """Refresh the visualizer context prior to rendering."""
        self._visualizer._context.update(force_render)

    def render(self, rgb=True, depth=False, segmentation=False, normal=False, antialiasing=False, force_render=False):
        """
        Render all cameras in the batch.

        Parameters
        ----------
        rgb : bool, optional
            Whether to render the rgb image.
        depth : bool, optional
            Whether to render the depth image.
        segmentation : bool, optional
            Whether to render the segmentation image.
        normal : bool, optional
            Whether to render the normal image.
        antialiasing : bool, optional
            Whether to apply anti-aliasing.
        force_render : bool, optional
            Whether to force render the scene.

        Returns
        -------
        rgb_arr : tuple of arrays
            The sequence of rgb images associated with each camera.
        depth_arr : tuple of arrays
            The sequence of depth images associated with each camera.
        segmentation_arr : tuple of arrays
            The sequence of segmentation images associated with each camera.
        normal_arr : tuple of arrays
            The sequence of normal images associated with each camera.
        """
        # Clear cache if requested or necessary
        if force_render or self._t < self._visualizer.scene.t:
            self._data_cache.clear()

        # Fetch available cached data
        request = (rgb, depth, segmentation, normal)
        cache_key = (antialiasing,)
        cached = [self._data_cache.get((img_type, cache_key), None) for img_type in IMAGE_TYPE]

        # Force disabling rendering whenever cached data is already available
        needed = tuple(req and arr is None for req, arr in zip(request, cached))

        # Early return if everything requested is already cached
        if not any(needed):
            return tuple(arr if req else None for req, arr in zip(request, cached))

        # Update scene
        self.update_scene(force_render)

        # Render only what is needed (flags still passed to renderer)
        cameras_pos = torch.stack([torch.atleast_2d(camera.get_pos()) for camera in self._cameras], dim=1)
        cameras_quat = torch.stack([torch.atleast_2d(camera.get_quat()) for camera in self._cameras], dim=1)
        cameras_quat = _transform_camera_quat(cameras_quat)
        render_flags = np.array(
            (
                *(
                    needed[img_type]
                    for img_type in (IMAGE_TYPE.RGB, IMAGE_TYPE.DEPTH, IMAGE_TYPE.NORMAL, IMAGE_TYPE.SEGMENTATION)
                ),
                antialiasing,
            ),
            dtype=np.uint32,
        )
        rendered = list(self._renderer.render(cameras_pos, cameras_quat, render_flags))

        # convert seg geom idx to seg_idxc
        if needed[IMAGE_TYPE.SEGMENTATION]:
            seg_geoms = rendered[IMAGE_TYPE.SEGMENTATION]
            mask = seg_geoms != -1
            seg_geoms[mask] = self._geom_retriever.geom_idxc[seg_geoms[mask]]
            seg_geoms[~mask] = 0

        # Post-processing:
        # * Remove alpha channel from RGBA
        # * Squeeze env and channel dims if necessary
        # * Split along camera dim
        for img_type, data in enumerate(rendered):
            if needed[img_type]:
                data = data.swapaxes(0, 1)
                if self._visualizer.scene.n_envs == 0:
                    data = data.squeeze(1)
                rendered[img_type] = tuple(data[..., :3].squeeze(-1))

        # Convert center distance depth to plane distance
        if not self._use_rasterizer and needed[IMAGE_TYPE.DEPTH]:
            rendered[IMAGE_TYPE.DEPTH] = tuple(
                camera.distance_center_to_plane(depth_data)
                for camera, depth_data in zip(self._cameras, rendered[IMAGE_TYPE.DEPTH])
            )

        # Update cache
        self._t = self._visualizer.scene.t
        for img_type, data in enumerate(rendered):
            if needed[img_type]:
                self._data_cache[(img_type, cache_key)] = rendered[img_type]

        # Return in the required order, or None if not requested.
        # FIX: requested images must be fetched from the cache whether they were freshly rendered or already cached
        # ('needed' is False for the latter) — keying on 'needed' here used to drop requested-but-cached images.
        return tuple(
            self._data_cache[(img_type, cache_key)] if req else None for req, img_type in zip(request, IMAGE_TYPE)
        )

    def colorize_seg_idxc_arr(self, seg_idxc_arr):
        """Map a segmentation index array to its RGB color representation."""
        return self._geom_retriever.seg_color_map.colorize_seg_idxc_arr(seg_idxc_arr)

    def destroy(self):
        """Release lights, cached images and the underlying Madrona renderer."""
        self._lights.clear()
        self._data_cache.clear()
        if self._renderer is not None:
            del self._renderer.madrona
            self._renderer = None

    def reset(self):
        """Invalidate the render cache so that the next `render` call is a fresh one."""
        self._t = -1

    @property
    def lights(self):
        return self._lights

    @property
    def cameras(self):
        return self._cameras

    @property
    def seg_idxc_map(self):
        return self._geom_retriever.seg_color_map.idxc_map
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/vis/batch_renderer.py",
"license": "Apache License 2.0",
"lines": 381,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:examples/manipulation/grasp_env.py | import torch
import math
from typing import Literal
import genesis as gs
from genesis.utils.geom import (
xyz_to_quat,
transform_quat_by_quat,
transform_by_quat,
)
class GraspEnv:
    """Vectorized grasping environment built on a Genesis rigid-body scene.

    A Franka manipulator must reach a box spawned at a randomized pose.
    Observations combine end-effector and object poses; the reward is a
    keypoint-distance term. Stereo cameras provide RGB observations for
    the vision-based (behavior cloning) stage.
    """

    def __init__(
        self,
        env_cfg: dict,
        reward_cfg: dict,
        robot_cfg: dict,
        show_viewer: bool = False,
    ) -> None:
        self.num_envs = env_cfg["num_envs"]
        self.num_obs = env_cfg["num_obs"]
        self.num_privileged_obs = None
        self.num_actions = env_cfg["num_actions"]
        self.image_width = env_cfg["image_resolution"][0]
        self.image_height = env_cfg["image_resolution"][1]
        self.rgb_image_shape = (3, self.image_height, self.image_width)
        self.device = gs.device
        self.ctrl_dt = env_cfg["ctrl_dt"]
        self.max_episode_length = math.ceil(env_cfg["episode_length_s"] / self.ctrl_dt)
        # configs
        self.env_cfg = env_cfg
        self.reward_scales = reward_cfg
        self.action_scales = torch.tensor(env_cfg["action_scales"], device=self.device)
        # == setup scene ==
        self.scene = gs.Scene(
            sim_options=gs.options.SimOptions(dt=self.ctrl_dt, substeps=2),
            rigid_options=gs.options.RigidOptions(
                dt=self.ctrl_dt,
                constraint_solver=gs.constraint_solver.Newton,
                enable_collision=True,
                enable_joint_limit=True,
            ),
            vis_options=gs.options.VisOptions(rendered_envs_idx=list(range(10))),
            viewer_options=gs.options.ViewerOptions(
                max_FPS=int(0.5 / self.ctrl_dt),
                camera_pos=(2.0, 0.0, 2.5),
                camera_lookat=(0.0, 0.0, 0.5),
                camera_fov=40,
            ),
            profiling_options=gs.options.ProfilingOptions(show_FPS=False),
            renderer=gs.options.renderers.BatchRenderer(
                use_rasterizer=env_cfg["use_rasterizer"],
            ),
            show_viewer=show_viewer,
        )
        # == add ground ==
        self.scene.add_entity(gs.morphs.URDF(file="urdf/plane/plane.urdf", fixed=True))
        # == add robot ==
        self.robot = Manipulator(
            num_envs=self.num_envs,
            scene=self.scene,
            args=robot_cfg,
            device=gs.device,
        )
        # == add object ==
        self.object = self.scene.add_entity(
            gs.morphs.Box(
                size=env_cfg["box_size"],
                fixed=env_cfg["box_fixed"],
                collision=env_cfg["box_collision"],
                batch_fixed_verts=True,
            ),
            # material=gs.materials.Rigid(gravity_compensation=1),
            surface=gs.surfaces.Rough(
                diffuse_texture=gs.textures.ColorTexture(
                    color=(1.0, 0.0, 0.0),
                ),
            ),
        )
        if self.env_cfg["visualize_camera"]:
            self.vis_cam = self.scene.add_camera(
                res=(1280, 720),
                pos=(1.5, 0.0, 0.2),
                lookat=(0.0, 0.0, 0.2),
                fov=60,
                GUI=self.env_cfg["visualize_camera"],
                debug=True,
            )
        # == add stereo camera pair ==
        self.left_cam = self.scene.add_camera(
            res=(self.image_width, self.image_height),
            pos=(1.25, 0.3, 0.3),
            lookat=(0.0, 0.0, 0.0),
            fov=60,
            GUI=self.env_cfg["visualize_camera"],
        )
        self.right_cam = self.scene.add_camera(
            res=(self.image_width, self.image_height),
            pos=(1.25, -0.3, 0.3),
            lookat=(0.0, 0.0, 0.0),
            fov=60,
            GUI=self.env_cfg["visualize_camera"],
        )
        # build
        self.scene.build(n_envs=env_cfg["num_envs"])
        # set pd gains (must be called after scene.build)
        self.robot.set_pd_gains()
        # prepare reward functions and multiply reward scales by dt
        self.reward_functions, self.episode_sums = dict(), dict()
        for name in self.reward_scales.keys():
            self.reward_scales[name] *= self.ctrl_dt
            self.reward_functions[name] = getattr(self, "_reward_" + name)
            self.episode_sums[name] = torch.zeros((self.num_envs,), device=gs.device, dtype=gs.tc_float)
        self.keypoints_offset = self.get_keypoint_offsets(batch_size=self.num_envs, device=self.device, unit_length=0.5)
        # == init buffers ==
        self._init_buffers()
        self.reset()

    def _init_buffers(self) -> None:
        """Allocate per-env episode/reset buffers and the extras dict."""
        self.episode_length_buf = torch.zeros((self.num_envs,), device=gs.device, dtype=gs.tc_int)
        self.reset_buf = torch.zeros(self.num_envs, dtype=torch.bool, device=gs.device)
        # [num_envs, 7]: xyz position + wxyz quaternion of the object's goal pose
        self.goal_pose = torch.zeros(self.num_envs, 7, device=gs.device)
        self.extras = dict()
        self.extras["observations"] = dict()

    def reset_idx(self, envs_idx: torch.Tensor) -> None:
        """Reset the selected environments: robot to home, object to a random pose."""
        if len(envs_idx) == 0:
            return
        self.episode_length_buf[envs_idx] = 0
        # reset robot
        self.robot.reset(envs_idx)
        # reset object
        num_reset = len(envs_idx)
        random_x = torch.rand(num_reset, device=self.device) * 0.4 + 0.2  # 0.2 ~ 0.6
        random_y = (torch.rand(num_reset, device=self.device) - 0.5) * 0.5  # -0.25 ~ 0.25
        random_z = torch.ones(num_reset, device=self.device) * 0.025  # fixed spawn height of 0.025 m
        random_pos = torch.stack([random_x, random_y, random_z], dim=-1)
        # downward facing quaternion to align with the hand
        q_downward = torch.tensor([0.0, 1.0, 0.0, 0.0], device=self.device).repeat(num_reset, 1)
        # randomly yaw the object (within +/- pi/4)
        random_yaw = (torch.rand(num_reset, device=self.device) * 2 * math.pi - math.pi) * 0.25
        q_yaw = torch.stack(
            [
                torch.cos(random_yaw / 2),
                torch.zeros(num_reset, device=self.device),
                torch.zeros(num_reset, device=self.device),
                torch.sin(random_yaw / 2),
            ],
            dim=-1,
        )
        goal_yaw = transform_quat_by_quat(q_yaw, q_downward)
        self.goal_pose[envs_idx] = torch.cat([random_pos, goal_yaw], dim=-1)
        self.object.set_pos(random_pos, envs_idx=envs_idx)
        self.object.set_quat(goal_yaw, envs_idx=envs_idx)
        # fill extras: average per-second reward of each term over the finished episodes
        self.extras["episode"] = {}
        for key in self.episode_sums.keys():
            self.extras["episode"]["rew_" + key] = (
                torch.mean(self.episode_sums[key][envs_idx]).item() / self.env_cfg["episode_length_s"]
            )
            self.episode_sums[key][envs_idx] = 0.0

    def reset(self) -> tuple[torch.Tensor, dict]:
        """Reset all environments and return the initial observations and extras."""
        self.reset_buf[:] = True
        self.reset_idx(torch.arange(self.num_envs, device=gs.device))
        obs, self.extras = self.get_observations()
        return obs, self.extras

    def step(self, actions: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, dict]:
        """Advance one control step.

        Applies the (rescaled) action, steps the simulation, resets finished
        environments, accumulates rewards, and returns
        (obs, reward, reset_buf, extras).
        """
        # update time
        self.episode_length_buf += 1
        # apply action based on task
        actions = self.rescale_action(actions)
        self.robot.apply_action(actions, open_gripper=True)
        self.scene.step()
        # check termination
        env_reset_idx = self.is_episode_complete()
        if len(env_reset_idx) > 0:
            self.reset_idx(env_reset_idx)
        # compute reward based on task
        # NOTE(review): rewards are computed after reset_idx above, so freshly
        # reset envs are rewarded on their post-reset state — confirm intended.
        reward = torch.zeros_like(self.reset_buf, device=gs.device, dtype=gs.tc_float)
        for name, reward_func in self.reward_functions.items():
            rew = reward_func() * self.reward_scales[name]
            reward += rew
            self.episode_sums[name] += rew
        # get observations and fill extras
        obs, self.extras = self.get_observations()
        return obs, reward, self.reset_buf, self.extras

    def get_privileged_observations(self) -> None:
        """No privileged observations for this task."""
        return None

    def is_episode_complete(self) -> torch.Tensor:
        """Return indices of environments whose episode has finished (timeout)."""
        time_out_buf = self.episode_length_buf > self.max_episode_length
        # check if the ee is in the valid position
        self.reset_buf = time_out_buf
        # fill time out buffer for reward/value bootstrapping
        time_out_idx = (time_out_buf).nonzero(as_tuple=False).reshape((-1,))
        self.extras["time_outs"] = torch.zeros_like(self.reset_buf, device=gs.device, dtype=gs.tc_float)
        self.extras["time_outs"][time_out_idx] = 1.0
        return self.reset_buf.nonzero(as_tuple=True)[0]

    def get_observations(self) -> tuple[torch.Tensor, dict]:
        """Build the [num_envs, 14] observation tensor and fill extras["observations"]."""
        # Current end-effector pose
        finger_pos, finger_quat = (
            self.robot.center_finger_pose[:, :3],
            self.robot.center_finger_pose[:, 3:7],
        )
        obj_pos, obj_quat = self.object.get_pos(), self.object.get_quat()
        # assemble observation components
        obs_components = [
            finger_pos - obj_pos,  # 3D position difference
            finger_quat,  # current orientation (w, x, y, z)
            obj_pos,  # object position (serves as the goal)
            obj_quat,  # object orientation (w, x, y, z)
        ]
        obs_tensor = torch.cat(obs_components, dim=-1)
        self.extras["observations"]["critic"] = obs_tensor
        return obs_tensor, self.extras

    def rescale_action(self, action: torch.Tensor) -> torch.Tensor:
        """Scale the normalized policy action by the per-dimension action scales."""
        rescaled_action = action * self.action_scales
        return rescaled_action

    def get_stereo_rgb_images(self, normalize: bool = True) -> torch.Tensor:
        """Render both stereo cameras and return a [B, 6, H, W] stacked RGB tensor.

        Channels 0-2 are the left camera, 3-5 the right. Pixel values are
        scaled to [0, 1] when `normalize` is True.
        """
        rgb_left, _, _, _ = self.left_cam.render(rgb=True, depth=False, segmentation=False, normal=False)
        rgb_right, _, _, _ = self.right_cam.render(rgb=True, depth=False, segmentation=False, normal=False)
        # Convert to proper format
        rgb_left = rgb_left.permute(0, 3, 1, 2)[:, :3]  # shape (B, 3, H, W)
        rgb_right = rgb_right.permute(0, 3, 1, 2)[:, :3]  # shape (B, 3, H, W)
        # Normalize if requested
        if normalize:
            rgb_left = torch.clamp(rgb_left, min=0.0, max=255.0) / 255.0
            rgb_right = torch.clamp(rgb_right, min=0.0, max=255.0) / 255.0
        # Concatenate left and right rgb images along channel dimension
        # Result: [B, 6, H, W] — left image channels first, right image channels second
        stereo_rgb = torch.cat([rgb_left, rgb_right], dim=1)
        return stereo_rgb

    # ------------ begin reward functions----------------
    def _reward_keypoints(self) -> torch.Tensor:
        """Reward = exp(-sum of distances between gripper and object keypoints)."""
        keypoints_offset = self.keypoints_offset
        # there is a offset between the finger tip and the finger base frame
        finger_tip_z_offset = torch.tensor(
            [0.0, 0.0, -0.06],
            device=self.device,
            dtype=gs.tc_float,
        ).repeat(self.num_envs, 1)
        finger_pos_keypoints = self._to_world_frame(
            self.robot.center_finger_pose[:, :3] + finger_tip_z_offset,
            self.robot.center_finger_pose[:, 3:7],
            keypoints_offset,
        )
        object_pos_keypoints = self._to_world_frame(self.object.get_pos(), self.object.get_quat(), keypoints_offset)
        dist = torch.norm(finger_pos_keypoints - object_pos_keypoints, p=2, dim=-1).sum(-1)
        return torch.exp(-dist)

    # ------------ end reward functions----------------
    def _to_world_frame(
        self,
        position: torch.Tensor,  # [B, 3]
        quaternion: torch.Tensor,  # [B, 4]
        keypoints_offset: torch.Tensor,  # [B, 7, 3]
    ) -> torch.Tensor:
        """Transform per-body keypoint offsets into world coordinates."""
        world = torch.zeros_like(keypoints_offset)
        for k in range(keypoints_offset.shape[1]):
            world[:, k] = position + transform_by_quat(keypoints_offset[:, k], quaternion)
        return world

    @staticmethod
    def get_keypoint_offsets(batch_size: int, device: str, unit_length: float = 0.5) -> torch.Tensor:
        """
        Get uniformly-spaced keypoints along a line of unit length, centered at body center.

        Returns a [batch_size, 7, 3] tensor: the origin plus one point on each
        side of the three coordinate axes, scaled by `unit_length`.
        """
        keypoint_offsets = (
            torch.tensor(
                [
                    [0, 0, 0],  # origin
                    [-1.0, 0, 0],  # x-negative
                    [1.0, 0, 0],  # x-positive
                    [0, -1.0, 0],  # y-negative
                    [0, 1.0, 0],  # y-positive
                    [0, 0, -1.0],  # z-negative
                    [0, 0, 1.0],  # z-positive
                ],
                device=device,
                dtype=torch.float32,
            )
            * unit_length
        )
        return keypoint_offsets[None].repeat((batch_size, 1, 1))

    def grasp_and_lift_demo(self) -> None:
        """Scripted demo: grasp at the current EE pose, lift, move, then reset home."""
        total_steps = 500
        goal_pose = self.robot.ee_pose.clone()
        # lift pose (above the object)
        lift_height = 0.3
        lift_pose = goal_pose.clone()
        lift_pose[:, 2] += lift_height
        # final pose (above the table)
        final_pose = goal_pose.clone()
        final_pose[:, 0] = 0.3
        final_pose[:, 1] = 0.0
        final_pose[:, 2] = 0.4
        # reset pose (home pose)
        reset_pose = torch.tensor([0.2, 0.0, 0.4, 0.0, 1.0, 0.0, 0.0], device=self.device).repeat(self.num_envs, 1)
        for i in range(total_steps):
            if i < total_steps / 4:  # grasping
                self.robot.go_to_goal(goal_pose, open_gripper=False)
            elif i < total_steps / 2:  # lifting
                self.robot.go_to_goal(lift_pose, open_gripper=False)
            elif i < total_steps * 3 / 4:  # final
                self.robot.go_to_goal(final_pose, open_gripper=False)
            else:  # reset
                self.robot.go_to_goal(reset_pose, open_gripper=True)
            self.scene.step()
## ------------ robot ----------------
class Manipulator:
    """Wrapper around a Franka Panda entity providing IK-based delta-pose control.

    Supports two IK back-ends selected by ``args["ik_method"]``:
    ``"gs_ik"`` (Genesis built-in IK) and ``"dls_ik"`` (damped least squares).
    """

    def __init__(self, num_envs: int, scene: gs.Scene, args: dict, device: str = "cpu"):
        # == set members ==
        self._device = device
        self._scene = scene
        self._num_envs = num_envs
        self._args = args
        # == Genesis configurations ==
        material: gs.materials.Rigid = gs.materials.Rigid()
        morph: gs.morphs.MJCF = gs.morphs.MJCF(
            file="xml/franka_emika_panda/panda.xml",
            pos=(0.0, 0.0, 0.0),
            quat=(1.0, 0.0, 0.0, 0.0),
        )
        self._robot_entity: gs.Entity = scene.add_entity(material=material, morph=morph)
        self._gripper_open_dof = 0.04
        self._gripper_close_dof = 0.00
        # Valid values are "gs_ik" and "dls_ik" — the branches in apply_action.
        self._ik_method: Literal["gs_ik", "dls_ik"] = args["ik_method"]
        # == some buffer initialization ==
        self._init()

    def set_pd_gains(self):
        """Set PD gains and force limits. Must be called after scene.build()."""
        # Note: the following values are tuned for achieving best behavior with Franka
        # Typically, each new robot would have a different set of parameters.
        # Sometimes high-quality URDF or XML file would also provide this and will be parsed.
        self._robot_entity.set_dofs_kp(
            torch.tensor([4500, 4500, 3500, 3500, 2000, 2000, 2000, 100, 100]),
        )
        self._robot_entity.set_dofs_kv(
            torch.tensor([450, 450, 350, 350, 200, 200, 200, 10, 10]),
        )
        self._robot_entity.set_dofs_force_range(
            torch.tensor([-87, -87, -87, -87, -12, -12, -12, -100, -100]),
            torch.tensor([87, 87, 87, 87, 12, 12, 12, 100, 100]),
        )

    def _init(self):
        """Cache dof indices, link handles, and the default joint configuration."""
        self._arm_dof_dim = self._robot_entity.n_dofs - 2  # number of arm joints
        self._gripper_dim = 2  # number of gripper joints
        self._arm_dof_idx = torch.arange(self._arm_dof_dim, device=self._device)
        self._fingers_dof = torch.arange(
            self._arm_dof_dim,
            self._arm_dof_dim + self._gripper_dim,
            device=self._device,
        )
        self._left_finger_dof = self._fingers_dof[0]
        self._right_finger_dof = self._fingers_dof[1]
        self._ee_link = self._robot_entity.get_link(self._args["ee_link_name"])
        self._left_finger_link = self._robot_entity.get_link(self._args["gripper_link_names"][0])
        self._right_finger_link = self._robot_entity.get_link(self._args["gripper_link_names"][1])
        # Copy the config list: using `+=` on args["default_arm_dof"] directly
        # would mutate the shared config in place, appending the gripper dofs
        # again on every Manipulator construction.
        self._default_joint_angles = list(self._args["default_arm_dof"])
        if self._args["default_gripper_dof"] is not None:
            self._default_joint_angles += self._args["default_gripper_dof"]

    def reset(self, envs_idx: torch.IntTensor):
        """Reset the selected environments to the home configuration."""
        if len(envs_idx) == 0:
            return
        self.reset_home(envs_idx)

    def reset_home(self, envs_idx: torch.IntTensor | None = None):
        """Set joint positions to the default configuration (all envs if None)."""
        if envs_idx is None:
            envs_idx = torch.arange(self._num_envs, device=self._device)
        default_joint_angles = torch.tensor(
            self._default_joint_angles, dtype=torch.float32, device=self._device
        ).repeat(len(envs_idx), 1)
        self._robot_entity.set_qpos(default_joint_angles, envs_idx=envs_idx)

    def apply_action(self, action: torch.Tensor, open_gripper: bool) -> None:
        """
        Apply a delta-pose action via IK and command the gripper.

        Parameters
        ----------
        action : torch.Tensor
            [B, 6] delta position (xyz) and delta orientation (rpy).
        open_gripper : bool
            Whether to command the gripper open (True) or closed (False).
        """
        if self._ik_method == "gs_ik":
            q_pos = self._gs_ik(action)
        elif self._ik_method == "dls_ik":
            q_pos = self._dls_ik(action)
        else:
            raise ValueError(f"Invalid control mode: {self._ik_method}")
        # set gripper to open or closed
        if open_gripper:
            q_pos[:, self._fingers_dof] = self._gripper_open_dof
        else:
            q_pos[:, self._fingers_dof] = self._gripper_close_dof
        self._robot_entity.control_dofs_position(position=q_pos)

    def _gs_ik(self, action: torch.Tensor) -> torch.Tensor:
        """
        Genesis inverse kinematics: solve joint positions for the target EE pose.
        """
        delta_position = action[:, :3]
        delta_orientation = action[:, 3:6]
        # compute target pose relative to the current end-effector pose
        target_position = delta_position + self._ee_link.get_pos()
        quat_rel = xyz_to_quat(delta_orientation, rpy=True, degrees=False)
        target_orientation = transform_quat_by_quat(quat_rel, self._ee_link.get_quat())
        q_pos = self._robot_entity.inverse_kinematics(
            link=self._ee_link,
            pos=target_position,
            quat=target_orientation,
            dofs_idx_local=self._arm_dof_idx,
        )
        return q_pos

    def _dls_ik(self, action: torch.Tensor) -> torch.Tensor:
        """
        Damped least squares inverse kinematics: J^T (J J^T + lambda^2 I)^-1 dx.
        """
        delta_pose = action[:, :6]
        lambda_val = 0.01  # damping coefficient
        jacobian = self._robot_entity.get_jacobian(link=self._ee_link)
        jacobian_T = jacobian.transpose(1, 2)
        lambda_matrix = (lambda_val**2) * torch.eye(n=jacobian.shape[1], device=self._device)
        delta_joint_pos = (
            jacobian_T @ torch.inverse(jacobian @ jacobian_T + lambda_matrix) @ delta_pose.unsqueeze(-1)
        ).squeeze(-1)
        return self._robot_entity.get_qpos() + delta_joint_pos

    def go_to_goal(self, goal_pose: torch.Tensor, open_gripper: bool = True):
        """Drive the end-effector to an absolute [B, 7] (pos + wxyz quat) goal pose."""
        q_pos = self._robot_entity.inverse_kinematics(
            link=self._ee_link,
            pos=goal_pose[:, :3],
            quat=goal_pose[:, 3:7],
            dofs_idx_local=self._arm_dof_idx,
        )
        if open_gripper:
            q_pos[:, self._fingers_dof] = self._gripper_open_dof
        else:
            q_pos[:, self._fingers_dof] = self._gripper_close_dof
        self._robot_entity.control_dofs_position(position=q_pos)

    @property
    def base_pos(self):
        """World position of the robot base."""
        return self._robot_entity.get_pos()

    @property
    def ee_pose(self) -> torch.Tensor:
        """
        The end-effector pose (the hand pose) as [B, 7] (pos + wxyz quat).
        """
        pos, quat = self._ee_link.get_pos(), self._ee_link.get_quat()
        return torch.cat([pos, quat], dim=-1)

    @property
    def left_finger_pose(self) -> torch.Tensor:
        """Left finger link pose as [B, 7] (pos + wxyz quat)."""
        pos, quat = self._left_finger_link.get_pos(), self._left_finger_link.get_quat()
        return torch.cat([pos, quat], dim=-1)

    @property
    def right_finger_pose(self) -> torch.Tensor:
        """Right finger link pose as [B, 7] (pos + wxyz quat)."""
        pos, quat = (
            self._right_finger_link.get_pos(),
            self._right_finger_link.get_quat(),
        )
        return torch.cat([pos, quat], dim=-1)

    @property
    def center_finger_pose(self) -> torch.Tensor:
        """
        The center finger pose: average of left/right positions, left orientation.
        """
        left_finger_pose = self.left_finger_pose
        right_finger_pose = self.right_finger_pose
        center_finger_pos = (left_finger_pose[:, :3] + right_finger_pose[:, :3]) / 2
        center_finger_quat = left_finger_pose[:, 3:7]
        return torch.cat([center_finger_pos, center_finger_quat], dim=-1)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/manipulation/grasp_env.py",
"license": "Apache License 2.0",
"lines": 455,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:examples/manipulation/grasp_eval.py | import argparse
import re
import pickle
from importlib import metadata
from pathlib import Path
import torch
try:
try:
if metadata.version("rsl-rl"):
raise ImportError
except metadata.PackageNotFoundError:
if metadata.version("rsl-rl-lib") != "2.2.4":
raise ImportError
except (metadata.PackageNotFoundError, ImportError) as e:
raise ImportError("Please uninstall 'rsl_rl' and install 'rsl-rl-lib==2.2.4'.") from e
from rsl_rl.runners import OnPolicyRunner
import genesis as gs
from grasp_env import GraspEnv
from behavior_cloning import BehaviorCloning
def select_latest_checkpoint(checkpoint_files):
    """Return the checkpoint path with the highest numeric index.

    Lexicographic sorting would rank ``model_9.pt`` after ``model_100.pt``,
    picking the wrong "latest" file; compare on the embedded integer instead.
    """
    return max(checkpoint_files, key=lambda f: int(re.search(r"\d+", f.name).group()))


def load_rl_policy(env, train_cfg, log_dir):
    """Load the latest reinforcement learning policy checkpoint from log_dir.

    Raises FileNotFoundError when no ``model_<N>.pt`` file exists.
    """
    runner = OnPolicyRunner(env, train_cfg, log_dir, device=gs.device)
    # Find the latest checkpoint by its numeric index
    checkpoint_files = [f for f in log_dir.iterdir() if re.match(r"model_\d+\.pt", f.name)]
    if not checkpoint_files:
        raise FileNotFoundError(f"No checkpoint files found in {log_dir}")
    last_ckpt = select_latest_checkpoint(checkpoint_files)
    runner.load(last_ckpt)
    print(f"Loaded RL checkpoint from {last_ckpt}")
    return runner.get_inference_policy(device=gs.device)
def load_bc_policy(env, bc_cfg, log_dir):
    """Load the latest behavior cloning policy checkpoint from log_dir.

    Raises FileNotFoundError when no ``checkpoint_<N>.pt`` file exists.
    """
    # Create behavior cloning instance
    bc_runner = BehaviorCloning(env, bc_cfg, None, device=gs.device)
    # Find the latest checkpoint by its numeric index: plain sorted() is
    # lexicographic and would rank checkpoint_9.pt after checkpoint_100.pt.
    checkpoint_files = [f for f in log_dir.iterdir() if re.match(r"checkpoint_\d+\.pt", f.name)]
    if not checkpoint_files:
        raise FileNotFoundError(f"No checkpoint files found in {log_dir}")
    last_ckpt = max(checkpoint_files, key=lambda f: int(re.search(r"\d+", f.name).group()))
    print(f"Loaded BC checkpoint from {last_ckpt}")
    bc_runner.load(last_ckpt)
    return bc_runner._policy
def main():
    """Evaluate a trained grasping policy (RL or BC), optionally recording video."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--exp_name", type=str, default="grasp")
    parser.add_argument(
        "--stage",
        type=str,
        default="rl",
        choices=["rl", "bc"],
        help="Model type: 'rl' for reinforcement learning, 'bc' for behavior cloning",
    )
    parser.add_argument(
        "--record",
        action="store_true",
        help="Record stereo images as video during evaluation",
    )
    parser.add_argument(
        "--video_path",
        type=str,
        default=None,
        help="Path to save the video file (default: auto-generated)",
    )
    args = parser.parse_args()
    # Set PyTorch default dtype to float32 for better performance
    torch.set_default_dtype(torch.float32)
    gs.init()
    log_dir = Path("logs") / f"{args.exp_name + '_' + args.stage}"
    # Load configurations
    # NOTE(review): both branches below are currently identical — confirm whether
    # the BC path was intended to build a separate config.
    if args.stage == "rl":
        # For RL, load the standard configs
        env_cfg, reward_cfg, robot_cfg, rl_train_cfg, bc_train_cfg = pickle.load(open(log_dir / "cfgs.pkl", "rb"))
    else:
        # For BC, we need to load the configs and create BC config
        env_cfg, reward_cfg, robot_cfg, rl_train_cfg, bc_train_cfg = pickle.load(open(log_dir / "cfgs.pkl", "rb"))
    # set the max FPS for visualization
    env_cfg["max_visualize_FPS"] = 60
    # set the box collision
    env_cfg["box_collision"] = True
    # set the box fixed
    env_cfg["box_fixed"] = False
    # set the number of envs for evaluation
    env_cfg["num_envs"] = 10
    # for video recording
    env_cfg["visualize_camera"] = args.record
    env = GraspEnv(
        env_cfg=env_cfg,
        reward_cfg=reward_cfg,
        robot_cfg=robot_cfg,
        show_viewer=True,
    )
    # Load the appropriate policy based on model type
    if args.stage == "rl":
        policy = load_rl_policy(env, rl_train_cfg, log_dir)
    else:
        policy = load_bc_policy(env, bc_train_cfg, log_dir)
    policy.eval()
    obs, _ = env.reset()
    max_sim_step = int(env_cfg["episode_length_s"] * env_cfg["max_visualize_FPS"])
    with torch.no_grad():
        if args.record:
            print("Recording video...")
            env.vis_cam.start_recording()
            env.left_cam.start_recording()
            env.right_cam.start_recording()
        for step in range(max_sim_step):
            if args.stage == "rl":
                actions = policy(obs)
            else:
                # Get stereo RGB images and ensure float32
                rgb_obs = env.get_stereo_rgb_images(normalize=True).float()
                ee_pose = env.robot.ee_pose.float()
                actions = policy(rgb_obs, ee_pose)
            # Collect frame for video recording
            if args.record:
                env.vis_cam.render()  # render the visualization camera
            obs, rews, dones, infos = env.step(actions)
        env.grasp_and_lift_demo()
        if args.record:
            print("Stopping video recording...")
            env.vis_cam.stop_recording(save_to_filename="video.mp4", fps=env_cfg["max_visualize_FPS"])
            env.left_cam.stop_recording(save_to_filename="left_cam.mp4", fps=env_cfg["max_visualize_FPS"])
            env.right_cam.stop_recording(save_to_filename="right_cam.mp4", fps=env_cfg["max_visualize_FPS"])
# Script entry point.
if __name__ == "__main__":
    main()
"""
# evaluation
# For reinforcement learning model:
python examples/manipulation/grasp_eval.py --stage=rl
# For behavior cloning model:
python examples/manipulation/grasp_eval.py --stage=bc
# With video recording:
python examples/manipulation/grasp_eval.py --stage=bc --record
"""
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/manipulation/grasp_eval.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:examples/manipulation/grasp_train.py | import argparse
import re
import pickle
from importlib import metadata
from pathlib import Path
try:
try:
if metadata.version("rsl-rl"):
raise ImportError
except metadata.PackageNotFoundError:
if metadata.version("rsl-rl-lib") != "2.2.4":
raise ImportError
except (metadata.PackageNotFoundError, ImportError) as e:
raise ImportError("Please uninstall 'rsl_rl' and install 'rsl-rl-lib==2.2.4'.") from e
from rsl_rl.runners import OnPolicyRunner
from behavior_cloning import BehaviorCloning
import genesis as gs
from grasp_env import GraspEnv
def get_train_cfg(exp_name, max_iterations):
    """Assemble configs for both training stages.

    Parameters
    ----------
    exp_name : str
        Experiment name recorded in the RL runner config.
    max_iterations : int
        Iteration budget for the RL runner.

    Returns
    -------
    tuple[dict, dict]
        (rl_cfg_dict, bc_cfg_dict): the stage-1 privileged-RL (PPO) config
        and the stage-2 vision-based behavior-cloning config.
    """
    return _build_rl_train_cfg(exp_name, max_iterations), _build_bc_train_cfg()


def _build_rl_train_cfg(exp_name, max_iterations):
    """Stage 1: privileged reinforcement learning (PPO) config."""
    algorithm = {
        "class_name": "PPO",
        "clip_param": 0.2,
        "desired_kl": 0.01,
        "entropy_coef": 0.0,
        "gamma": 0.99,
        "lam": 0.95,
        "learning_rate": 0.0003,
        "max_grad_norm": 1.0,
        "num_learning_epochs": 5,
        "num_mini_batches": 4,
        "schedule": "adaptive",
        "use_clipped_value_loss": True,
        "value_loss_coef": 1.0,
    }
    policy = {
        "activation": "relu",
        "actor_hidden_dims": [256, 256, 128],
        "critic_hidden_dims": [256, 256, 128],
        "init_noise_std": 1.0,
        "class_name": "ActorCritic",
    }
    runner = {
        "checkpoint": -1,
        "experiment_name": exp_name,
        "load_run": -1,
        "log_interval": 1,
        "max_iterations": max_iterations,
        "record_interval": -1,
        "resume": False,
        "resume_path": None,
        "run_name": "",
    }
    return {
        "algorithm": algorithm,
        "init_member_classes": {},
        "policy": policy,
        "runner": runner,
        "runner_class_name": "OnPolicyRunner",
        "num_steps_per_env": 24,
        "save_interval": 100,
        "empirical_normalization": None,
        "seed": 1,
    }


def _build_bc_train_cfg():
    """Stage 2: vision-based behavior-cloning config."""
    # (in_channels, out_channels, stride) per conv layer; kernel 3, padding 1 throughout.
    conv_specs = ((3, 8, 1), (8, 16, 2), (16, 32, 2))
    conv_layers = [
        {"in_channels": c_in, "out_channels": c_out, "kernel_size": 3, "stride": stride, "padding": 1}
        for c_in, c_out, stride in conv_specs
    ]
    return {
        # Basic training parameters
        "num_steps_per_env": 24,
        "learning_rate": 0.001,
        "num_epochs": 5,
        "num_mini_batches": 10,
        "max_grad_norm": 1.0,
        # Network architecture
        "policy": {
            "vision_encoder": {
                "conv_layers": conv_layers,
                "pooling": "adaptive_avg",
            },
            "action_head": {
                "state_obs_dim": 7,  # end-effector pose as additional state observation
                "hidden_dims": [128, 128, 64],
            },
            "pose_head": {
                "hidden_dims": [64, 64],
            },
        },
        # Training settings
        "buffer_size": 1000,
        "log_freq": 10,
        "save_freq": 50,
        "eval_freq": 50,
    }
def get_task_cfgs():
    """Return (env_cfg, reward_scales, robot_cfg) for the grasping task."""
    env_cfg = dict(
        num_envs=10,
        num_obs=14,
        num_actions=6,
        action_scales=[0.05] * 6,
        episode_length_s=3.0,
        ctrl_dt=0.01,
        box_size=[0.08, 0.03, 0.06],
        box_collision=False,
        box_fixed=True,
        image_resolution=(64, 64),
        use_rasterizer=True,
        visualize_camera=False,
    )
    # Reward term name -> scale; each name must have a matching _reward_<name> method.
    reward_scales = dict(keypoints=1.0)
    # Franka Panda specific settings.
    robot_cfg = dict(
        ee_link_name="hand",
        gripper_link_names=["left_finger", "right_finger"],
        default_arm_dof=[0.0, -0.785, 0.0, -2.356, 0.0, 1.571, 0.785],
        default_gripper_dof=[0.04, 0.04],
        ik_method="dls_ik",
    )
    return env_cfg, reward_scales, robot_cfg
def load_teacher_policy(env, rl_train_cfg, exp_name):
    """Load the stage-1 RL policy to act as the behavior-cloning teacher.

    Raises FileNotFoundError when no ``model_<N>.pt`` checkpoint exists.
    """
    log_dir = Path("logs") / f"{exp_name + '_' + 'rl'}"
    assert log_dir.exists(), f"Log directory {log_dir} does not exist"
    checkpoint_files = [f for f in log_dir.iterdir() if re.match(r"model_\d+\.pt", f.name)]
    if not checkpoint_files:
        raise FileNotFoundError(f"No checkpoint files found in {log_dir}")
    # Select by numeric index: plain sorted() is lexicographic and would rank
    # model_9.pt after model_100.pt, loading a stale checkpoint.
    last_ckpt = max(checkpoint_files, key=lambda f: int(re.search(r"\d+", f.name).group()))
    runner = OnPolicyRunner(env, rl_train_cfg, log_dir, device=gs.device)
    runner.load(last_ckpt)
    print(f"Loaded teacher policy from checkpoint {last_ckpt} from {log_dir}")
    teacher_policy = runner.get_inference_policy(device=gs.device)
    return teacher_policy
def main():
    """Train the grasping policy: stage-1 RL (PPO) or stage-2 behavior cloning."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--exp_name", type=str, default="grasp")
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    parser.add_argument("-B", "--num_envs", type=int, default=2048)
    parser.add_argument("--max_iterations", type=int, default=300)
    parser.add_argument("--stage", type=str, default="rl")
    args = parser.parse_args()
    # === init ===
    gs.init(backend=gs.gpu, precision="32", logging_level="warning", performance_mode=True)
    # === task cfgs and training algo cfgs ===
    env_cfg, reward_scales, robot_cfg = get_task_cfgs()
    rl_train_cfg, bc_train_cfg = get_train_cfg(args.exp_name, args.max_iterations)
    # === log dir ===
    log_dir = Path("logs") / f"{args.exp_name + '_' + args.stage}"
    log_dir.mkdir(parents=True, exist_ok=True)
    with open(log_dir / "cfgs.pkl", "wb") as f:
        pickle.dump((env_cfg, reward_scales, robot_cfg, rl_train_cfg, bc_train_cfg), f)
    # === env ===
    # BC only needs a small number of envs, e.g., 10
    env_cfg["num_envs"] = args.num_envs if args.stage == "rl" else 10
    env = GraspEnv(
        env_cfg=env_cfg,
        reward_cfg=reward_scales,
        robot_cfg=robot_cfg,
        show_viewer=args.vis,
    )
    # === runner ===
    if args.stage == "bc":
        teacher_policy = load_teacher_policy(env, rl_train_cfg, args.exp_name)
        bc_train_cfg["teacher_policy"] = teacher_policy
        runner = BehaviorCloning(env, bc_train_cfg, teacher_policy, device=gs.device)
        runner.learn(num_learning_iterations=args.max_iterations, log_dir=log_dir)
    else:
        runner = OnPolicyRunner(env, rl_train_cfg, log_dir, device=gs.device)
        runner.learn(num_learning_iterations=args.max_iterations, init_at_random_ep_len=True)
# Script entry point.
if __name__ == "__main__":
    main()
"""
# training
# to train the RL policy
python examples/manipulation/grasp_train.py --stage=rl
# to train the BC policy (requires RL policy to be trained first)
python examples/manipulation/grasp_train.py --stage=bc
"""
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/manipulation/grasp_train.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/utils/array_class.py | import dataclasses
import math
from enum import IntEnum
from functools import partial
import quadrants as qd
import numpy as np
from typing_extensions import dataclass_transform # Made it into standard lib from Python 3.12
import genesis as gs
# The aliases below depend on gs.use_ndarray, which is only set after gs.init().
if not gs._initialized:
    gs.raise_exception("Genesis hasn't been initialized. Did you call `gs.init()`?")
# Aliases selecting between quadrants ndarray and field layouts.
V_ANNOTATION = qd.types.ndarray() if gs.use_ndarray else qd.template
V = qd.ndarray if gs.use_ndarray else qd.field
V_VEC = qd.Vector.ndarray if gs.use_ndarray else qd.Vector.field
V_MAT = qd.Matrix.ndarray if gs.use_ndarray else qd.Matrix.field
# Frozen dataclass in ndarray mode; quadrants' data_oriented decorator otherwise.
DATA_ORIENTED = partial(dataclasses.dataclass, frozen=True) if gs.use_ndarray else qd.data_oriented
# 0-d float buffer; presumably a shared dummy slot for unused fields — confirm at call sites.
PLACEHOLDER = V(dtype=gs.qd_float, shape=())
def maybe_shape(shape, is_on):
    """Return `shape` when `is_on` is truthy, otherwise an empty tuple."""
    if is_on:
        return shape
    return ()
@dataclass_transform(eq_default=True, order_default=True, kw_only_default=False, frozen_default=True)
class AutoInitMeta(type):
    """Metaclass that synthesizes an `__init__` from class annotations.

    Annotated attributes become positional-or-keyword constructor arguments
    in declaration order; class-level assignments provide their defaults.
    """

    def __new__(cls, name, bases, namespace):
        # Use .get(): a class body with no annotated attributes has no
        # "__annotations__" key at all, which previously raised KeyError.
        names = tuple(namespace.get("__annotations__", {}).keys())
        defaults = {k: namespace[k] for k in names if k in namespace}

        def __init__(self, *args, **kwargs):
            # Initialize assigned arguments from defaults
            assigned = defaults.copy()
            # Assign positional arguments
            if len(args) > len(names):
                raise TypeError(f"{name}() takes {len(names)} positional arguments but {len(args)} were given")
            for key, value in zip(names, args):
                assigned[key] = value
            # Assign keyword arguments
            for key, value in kwargs.items():
                if key not in names:
                    raise TypeError(f"{name}() got unexpected keyword argument '{key}'")
                if key in names[: len(args)]:
                    raise TypeError(f"{name}() got multiple values for argument '{key}'")
                assigned[key] = value
            # Check for missing arguments
            for key in names:
                if key not in assigned:
                    raise TypeError(f"{name}() missing required argument: '{key}'")
            # Set attributes
            for key, value in assigned.items():
                setattr(self, key, value)

        namespace["__init__"] = __init__
        return super().__new__(cls, name, bases, namespace)
# In ndarray mode structs are plain frozen dataclasses (default metaclass);
# otherwise AutoInitMeta synthesizes the __init__ from the annotations.
BASE_METACLASS = type if gs.use_ndarray else AutoInitMeta
def V_SCALAR_FROM(dtype, value):
    """Allocate a 0-d buffer of `dtype` pre-filled with `value`."""
    scalar = V(dtype=dtype, shape=())
    scalar.fill(value)
    return scalar
# =========================================== ErrorCode ===========================================
class ErrorCode(IntEnum):
    """Bit flags reporting solver failure modes; SUCCESS (0) means no error."""

    SUCCESS = 0
    OVERFLOW_CANDIDATE_CONTACTS = 1 << 0
    OVERFLOW_COLLISION_PAIRS = 1 << 1
    OVERFLOW_HIBERNATION_ISLANDS = 1 << 2
    INVALID_FORCE_NAN = 1 << 3
    INVALID_ACC_NAN = 1 << 4
# =========================================== RigidGlobalInfo ===========================================
@DATA_ORIENTED
class StructRigidGlobalInfo(metaclass=BASE_METACLASS):
    """Global (per-scene) state buffers for the rigid-body solver.

    Fields suffixed with `_bw` cache data for the backward pass.
    """

    # *_bw: Cache for backward pass
    # -- hibernation bookkeeping: counts and index lists of awake dofs/entities/links --
    n_awake_dofs: V_ANNOTATION
    awake_dofs: V_ANNOTATION
    n_awake_entities: V_ANNOTATION
    awake_entities: V_ANNOTATION
    n_awake_links: V_ANNOTATION
    awake_links: V_ANNOTATION
    # -- generalized coordinates --
    qpos0: V_ANNOTATION
    qpos: V_ANNOTATION
    qpos_next: V_ANNOTATION
    links_T: V_ANNOTATION
    envs_offset: V_ANNOTATION
    geoms_init_AABB: V_ANNOTATION
    # -- mass matrix and its factorization --
    mass_mat: V_ANNOTATION
    mass_mat_L: V_ANNOTATION
    mass_mat_L_bw: V_ANNOTATION
    mass_mat_D_inv: V_ANNOTATION
    mass_mat_mask: V_ANNOTATION
    meaninertia: V_ANNOTATION
    mass_parent_mask: V_ANNOTATION
    gravity: V_ANNOTATION
    # Runtime constants
    substep_dt: V_ANNOTATION
    iterations: V_ANNOTATION
    tolerance: V_ANNOTATION
    ls_iterations: V_ANNOTATION
    ls_tolerance: V_ANNOTATION
    noslip_iterations: V_ANNOTATION
    noslip_tolerance: V_ANNOTATION
    n_equalities: V_ANNOTATION
    n_candidate_equalities: V_ANNOTATION
    hibernation_thresh_acc: V_ANNOTATION
    hibernation_thresh_vel: V_ANNOTATION
    EPS: V_ANNOTATION
def get_rigid_global_info(solver, kinematic_only):
    """Allocate a StructRigidGlobalInfo for `solver`.

    Parameters
    ----------
    solver : rigid solver instance providing sizes (n_dofs_, n_qs_, n_links_, ...) and options.
    kinematic_only : bool
        When True, dynamics-related buffers (mass matrix, runtime constants) are allocated
        as 0-d placeholders and only kinematic state gets full-size storage.

    Raises an exception when the (n_dofs, n_dofs, n_envs) mass-matrix element count would
    overflow int32 indexing.
    """
    _B = solver._B
    mass_mat_shape = (solver.n_dofs_, solver.n_dofs_, _B)
    # Guard against int32 index overflow in the dense mass matrix.
    if math.prod(mass_mat_shape) > np.iinfo(np.int32).max:
        gs.raise_exception(
            f"Mass matrix shape (n_dofs={solver.n_dofs_}, n_dofs={solver.n_dofs_}, n_envs={_B}) is too large."
        )
    requires_grad = solver._requires_grad
    # Backward-pass buffer holds two copies of the mass matrix; only allocated when grads are on.
    mass_mat_shape_bw = maybe_shape((2, *mass_mat_shape), requires_grad)
    if math.prod(mass_mat_shape_bw) > np.iinfo(np.int32).max:
        gs.raise_exception(
            f"Mass matrix buffer shape (2, n_dofs={solver.n_dofs_}, n_dofs={solver.n_dofs_}, n_envs={_B}) is too large."
        )
    # FIXME: Add a better split between kinematic and Genesis
    if kinematic_only:
        # Kinematic-only: dynamics buffers shrink to 0-d placeholders; runtime constants are zeroed.
        return StructRigidGlobalInfo(
            envs_offset=V_VEC(3, dtype=gs.qd_float, shape=(_B,)),
            gravity=V_VEC(3, dtype=gs.qd_float, shape=()),
            meaninertia=V(dtype=gs.qd_float, shape=()),
            n_awake_dofs=V(dtype=gs.qd_int, shape=(_B,)),
            n_awake_entities=V(dtype=gs.qd_int, shape=(_B,)),
            n_awake_links=V(dtype=gs.qd_int, shape=(_B,)),
            awake_dofs=V(dtype=gs.qd_int, shape=(solver.n_dofs_, _B)),
            awake_entities=V(dtype=gs.qd_int, shape=(solver.n_entities_, _B)),
            awake_links=V(dtype=gs.qd_int, shape=(solver.n_links_, _B)),
            qpos0=V(dtype=gs.qd_float, shape=(solver.n_qs_, _B)),
            qpos=V(dtype=gs.qd_float, shape=(solver.n_qs_, _B)),
            qpos_next=V(dtype=gs.qd_float, shape=(solver.n_qs_, _B)),
            links_T=V_MAT(n=4, m=4, dtype=gs.qd_float, shape=(solver.n_links_,)),
            geoms_init_AABB=V_VEC(3, dtype=gs.qd_float, shape=()),
            mass_mat=V(dtype=gs.qd_float, shape=()),
            mass_mat_L=V(dtype=gs.qd_float, shape=()),
            mass_mat_L_bw=V(dtype=gs.qd_float, shape=()),
            mass_mat_D_inv=V(dtype=gs.qd_float, shape=()),
            mass_mat_mask=V(dtype=gs.qd_bool, shape=()),
            mass_parent_mask=V(dtype=gs.qd_float, shape=()),
            substep_dt=V_SCALAR_FROM(dtype=gs.qd_float, value=0.0),
            iterations=V_SCALAR_FROM(dtype=gs.qd_int, value=0),
            tolerance=V_SCALAR_FROM(dtype=gs.qd_float, value=0.0),
            ls_iterations=V_SCALAR_FROM(dtype=gs.qd_int, value=0),
            ls_tolerance=V_SCALAR_FROM(dtype=gs.qd_float, value=0.0),
            noslip_iterations=V_SCALAR_FROM(dtype=gs.qd_int, value=0),
            noslip_tolerance=V_SCALAR_FROM(dtype=gs.qd_float, value=0.0),
            n_equalities=V_SCALAR_FROM(dtype=gs.qd_int, value=0),
            n_candidate_equalities=V_SCALAR_FROM(dtype=gs.qd_int, value=0),
            hibernation_thresh_acc=V_SCALAR_FROM(dtype=gs.qd_float, value=0.0),
            hibernation_thresh_vel=V_SCALAR_FROM(dtype=gs.qd_float, value=0.0),
            EPS=V_SCALAR_FROM(dtype=gs.qd_float, value=gs.EPS),
        )
    # Full dynamics allocation; qpos/mass-matrix buffers track gradients when required.
    return StructRigidGlobalInfo(
        envs_offset=V_VEC(3, dtype=gs.qd_float, shape=(_B,)),
        gravity=V_VEC(3, dtype=gs.qd_float, shape=(_B,)),
        meaninertia=V(dtype=gs.qd_float, shape=(_B,)),
        n_awake_dofs=V(dtype=gs.qd_int, shape=(_B,)),
        n_awake_entities=V(dtype=gs.qd_int, shape=(_B,)),
        n_awake_links=V(dtype=gs.qd_int, shape=(_B,)),
        awake_dofs=V(dtype=gs.qd_int, shape=(solver.n_dofs_, _B)),
        awake_entities=V(dtype=gs.qd_int, shape=(solver.n_entities_, _B)),
        awake_links=V(dtype=gs.qd_int, shape=(solver.n_links_, _B)),
        qpos0=V(dtype=gs.qd_float, shape=(solver.n_qs_, _B)),
        qpos=V(dtype=gs.qd_float, shape=(solver.n_qs_, _B), needs_grad=requires_grad),
        qpos_next=V(dtype=gs.qd_float, shape=(solver.n_qs_, _B), needs_grad=requires_grad),
        links_T=V_MAT(n=4, m=4, dtype=gs.qd_float, shape=(solver.n_links_,)),
        geoms_init_AABB=V_VEC(3, dtype=gs.qd_float, shape=(solver.n_geoms_, 8)),
        mass_mat=V(dtype=gs.qd_float, shape=mass_mat_shape, needs_grad=requires_grad),
        mass_mat_L=V(dtype=gs.qd_float, shape=mass_mat_shape, needs_grad=requires_grad),
        mass_mat_L_bw=V(dtype=gs.qd_float, shape=mass_mat_shape_bw, needs_grad=requires_grad),
        mass_mat_D_inv=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B), needs_grad=requires_grad),
        mass_mat_mask=V(dtype=gs.qd_bool, shape=(solver.n_entities_, _B)),
        mass_parent_mask=V(dtype=gs.qd_float, shape=(solver.n_dofs_, solver.n_dofs_)),
        substep_dt=V_SCALAR_FROM(dtype=gs.qd_float, value=solver._substep_dt),
        iterations=V_SCALAR_FROM(dtype=gs.qd_int, value=solver._options.iterations),
        tolerance=V_SCALAR_FROM(dtype=gs.qd_float, value=solver._options.tolerance),
        ls_iterations=V_SCALAR_FROM(dtype=gs.qd_int, value=solver._options.ls_iterations),
        ls_tolerance=V_SCALAR_FROM(dtype=gs.qd_float, value=solver._options.ls_tolerance),
        noslip_iterations=V_SCALAR_FROM(dtype=gs.qd_int, value=solver._options.noslip_iterations),
        noslip_tolerance=V_SCALAR_FROM(dtype=gs.qd_float, value=solver._options.noslip_tolerance),
        n_equalities=V_SCALAR_FROM(dtype=gs.qd_int, value=solver._n_equalities),
        n_candidate_equalities=V_SCALAR_FROM(dtype=gs.qd_int, value=solver.n_candidate_equalities_),
        hibernation_thresh_acc=V_SCALAR_FROM(dtype=gs.qd_float, value=solver._hibernation_thresh_acc),
        hibernation_thresh_vel=V_SCALAR_FROM(dtype=gs.qd_float, value=solver._hibernation_thresh_vel),
        EPS=V_SCALAR_FROM(dtype=gs.qd_float, value=gs.EPS),
    )
# =========================================== Constraint ===========================================
@DATA_ORIENTED
class StructConstraintState(metaclass=BASE_METACLASS):
    """Mutable state of the constraint solver; fields are allocated by get_constraint_state."""

    is_warmstart: V_ANNOTATION
    n_constraints: V_ANNOTATION  # per-env active constraint count
    qd_n_equalities: V_ANNOTATION
    jac: V_ANNOTATION  # constraint Jacobian, (n_constraints, n_dofs, n_envs)
    diag: V_ANNOTATION
    aref: V_ANNOTATION
    # Sparse-solve support: per-constraint list of relevant dof indices and its length.
    jac_relevant_dofs: V_ANNOTATION
    jac_n_relevant_dofs: V_ANNOTATION
    n_constraints_equality: V_ANNOTATION
    n_constraints_frictionloss: V_ANNOTATION
    improved: V_ANNOTATION
    Jaref: V_ANNOTATION
    Ma: V_ANNOTATION
    Ma_ws: V_ANNOTATION
    grad: V_ANNOTATION
    Mgrad: V_ANNOTATION
    search: V_ANNOTATION
    efc_D: V_ANNOTATION
    efc_frictionloss: V_ANNOTATION
    efc_force: V_ANNOTATION
    # efc_b / efc_AR are only allocated when noslip iterations are enabled.
    efc_b: V_ANNOTATION
    efc_AR: V_ANNOTATION
    active: V_ANNOTATION
    prev_active: V_ANNOTATION
    qfrc_constraint: V_ANNOTATION
    qacc: V_ANNOTATION
    qacc_ws: V_ANNOTATION  # warm-start acceleration
    qacc_prev: V_ANNOTATION
    cost_ws: V_ANNOTATION
    gauss: V_ANNOTATION
    cost: V_ANNOTATION
    prev_cost: V_ANNOTATION
    gtol: V_ANNOTATION
    mv: V_ANNOTATION
    jv: V_ANNOTATION
    quad_gauss: V_ANNOTATION
    candidates: V_ANNOTATION
    eq_sum: V_ANNOTATION
    ls_it: V_ANNOTATION  # line-search iteration counter
    ls_result: V_ANNOTATION  # line-search status
    # Optional CG fields
    cg_prev_grad: V_ANNOTATION
    cg_prev_Mgrad: V_ANNOTATION
    cg_beta: V_ANNOTATION
    cg_pg_dot_pMg: V_ANNOTATION
    # Optional Newton fields
    # Hessian matrix of the optimization problem as a dense 2D tensor.
    # Note that only the lower triangular part is updated for efficiency because this matrix is symmetric by definition.
    # As a result, the values of the strictly upper triangular part is undefined.
    # In practice, this variable is re-purposed to store the Cholesky factor L st H = L @ L.T to spare memory resources.
    # TODO: Optimize storage to only allocate memory half of the Hessian matrix to sparse memory resources.
    nt_H: V_ANNOTATION
    nt_vec: V_ANNOTATION
    # Compacted list of constraints whose active state changed, used by incremental Cholesky update
    # to reduce GPU thread divergence by iterating only over constraints that need processing.
    incr_changed_idx: V_ANNOTATION
    incr_n_changed: V_ANNOTATION
    # Backward gradients (only allocated when the solver requires grad)
    dL_dqacc: V_ANNOTATION
    dL_dM: V_ANNOTATION
    dL_djac: V_ANNOTATION
    dL_daref: V_ANNOTATION
    dL_defc_D: V_ANNOTATION
    dL_dforce: V_ANNOTATION
    # Backward buffers for linear system solver
    bw_u: V_ANNOTATION
    bw_r: V_ANNOTATION
    bw_p: V_ANNOTATION
    bw_Ap: V_ANNOTATION
    bw_Ju: V_ANNOTATION
    bw_y: V_ANNOTATION
    bw_w: V_ANNOTATION
    # Timers for profiling
    timers: V_ANNOTATION
def get_constraint_state(constraint_solver, solver):
    """Allocate a StructConstraintState sized for `constraint_solver` and `solver`.

    Raises an exception when the constraint Jacobian element count would overflow int32
    indexing, and warns (without failing) when the optional efc_AR buffer is very large.

    Fixed: the efc_AR size warning previously reported the second dimension as
    `solver.n_dofs_`, but efc_AR is (n_constraints, n_constraints, n_envs) — the message
    now reports `len_constraints_` for both constraint dimensions.
    """
    _B = solver._B
    len_constraints_ = constraint_solver.len_constraints_
    jac_shape = (len_constraints_, solver.n_dofs_, _B)
    # Optional buffers: only materialized when the corresponding feature is enabled.
    efc_AR_shape = maybe_shape((len_constraints_, len_constraints_, _B), solver._options.noslip_iterations > 0)
    efc_b_shape = maybe_shape((len_constraints_, _B), solver._options.noslip_iterations > 0)
    jac_relevant_dofs_shape = maybe_shape((len_constraints_, solver.n_dofs_, _B), constraint_solver.sparse_solve)
    jac_n_relevant_dofs_shape = maybe_shape((len_constraints_, _B), constraint_solver.sparse_solve)
    if math.prod(jac_shape) > np.iinfo(np.int32).max:
        gs.raise_exception(
            f"Jacobian shape (n_constraints={len_constraints_}, n_dofs={solver.n_dofs_}, n_envs={_B}) is too large."
        )
    if math.prod(efc_AR_shape) > np.iinfo(np.int32).max:
        gs.logger.warning(
            f"efc_AR shape (n_constraints={len_constraints_}, n_constraints={len_constraints_}, n_envs={_B}) is too "
            "large. Consider manually setting a smaller 'max_collision_pairs' in RigidOptions to reduce the size of "
            "reserved memory. "
        )
    # /!\ Changing allocation order of these tensors may reduce runtime speed by >10% /!\
    return StructConstraintState(
        n_constraints=V(dtype=gs.qd_int, shape=(_B,)),
        qd_n_equalities=V(dtype=gs.qd_int, shape=(_B,)),
        n_constraints_equality=V(dtype=gs.qd_int, shape=(_B,)),
        n_constraints_frictionloss=V(dtype=gs.qd_int, shape=(_B,)),
        is_warmstart=V(dtype=gs.qd_bool, shape=(_B,)),
        improved=V(dtype=gs.qd_bool, shape=(_B,)),
        cost_ws=V(dtype=gs.qd_float, shape=(_B,)),
        gauss=V(dtype=gs.qd_float, shape=(_B,)),
        cost=V(dtype=gs.qd_float, shape=(_B,)),
        prev_cost=V(dtype=gs.qd_float, shape=(_B,)),
        gtol=V(dtype=gs.qd_float, shape=(_B,)),
        ls_it=V(dtype=gs.qd_int, shape=(_B,)),
        ls_result=V(dtype=gs.qd_int, shape=(_B,)),
        cg_beta=V(dtype=gs.qd_float, shape=(_B,)),
        cg_pg_dot_pMg=V(dtype=gs.qd_float, shape=(_B,)),
        quad_gauss=V(dtype=gs.qd_float, shape=(3, _B)),
        candidates=V(dtype=gs.qd_float, shape=(12, _B)),
        eq_sum=V(dtype=gs.qd_float, shape=(3, _B)),
        Ma=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        Ma_ws=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        grad=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        Mgrad=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        search=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        qfrc_constraint=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        qacc=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        qacc_ws=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        qacc_prev=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        mv=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        cg_prev_grad=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        cg_prev_Mgrad=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        nt_vec=V(dtype=gs.qd_float, shape=(solver.n_dofs_, _B)),
        nt_H=V(dtype=gs.qd_float, shape=(_B, solver.n_dofs_, solver.n_dofs_)),
        incr_changed_idx=V(dtype=gs.qd_int, shape=(len_constraints_, _B)),
        incr_n_changed=V(dtype=gs.qd_int, shape=(_B,)),
        efc_b=V(dtype=gs.qd_float, shape=efc_b_shape),
        efc_AR=V(dtype=gs.qd_float, shape=efc_AR_shape),
        active=V(dtype=gs.qd_bool, shape=(len_constraints_, _B)),
        prev_active=V(dtype=gs.qd_bool, shape=(len_constraints_, _B)),
        diag=V(dtype=gs.qd_float, shape=(len_constraints_, _B)),
        aref=V(dtype=gs.qd_float, shape=(len_constraints_, _B)),
        Jaref=V(dtype=gs.qd_float, shape=(len_constraints_, _B)),
        efc_frictionloss=V(dtype=gs.qd_float, shape=(len_constraints_, _B)),
        efc_force=V(dtype=gs.qd_float, shape=(len_constraints_, _B)),
        efc_D=V(dtype=gs.qd_float, shape=(len_constraints_, _B)),
        jv=V(dtype=gs.qd_float, shape=(len_constraints_, _B)),
        jac=V(dtype=gs.qd_float, shape=jac_shape),
        jac_relevant_dofs=V(dtype=gs.qd_int, shape=jac_relevant_dofs_shape),
        jac_n_relevant_dofs=V(dtype=gs.qd_int, shape=jac_n_relevant_dofs_shape),
        # Backward gradients
        dL_dqacc=V(dtype=gs.qd_float, shape=maybe_shape((solver.n_dofs_, _B), solver._requires_grad)),
        dL_dM=V(dtype=gs.qd_float, shape=maybe_shape((solver.n_dofs_, solver.n_dofs_, _B), solver._requires_grad)),
        dL_djac=V(dtype=gs.qd_float, shape=maybe_shape((len_constraints_, solver.n_dofs_, _B), solver._requires_grad)),
        dL_daref=V(dtype=gs.qd_float, shape=maybe_shape((len_constraints_, _B), solver._requires_grad)),
        dL_defc_D=V(dtype=gs.qd_float, shape=maybe_shape((len_constraints_, _B), solver._requires_grad)),
        dL_dforce=V(dtype=gs.qd_float, shape=maybe_shape((solver.n_dofs_, _B), solver._requires_grad)),
        bw_u=V(dtype=gs.qd_float, shape=maybe_shape((solver.n_dofs_, _B), solver._requires_grad)),
        bw_r=V(dtype=gs.qd_float, shape=maybe_shape((solver.n_dofs_, _B), solver._requires_grad)),
        bw_p=V(dtype=gs.qd_float, shape=maybe_shape((solver.n_dofs_, _B), solver._requires_grad)),
        bw_Ap=V(dtype=gs.qd_float, shape=maybe_shape((solver.n_dofs_, _B), solver._requires_grad)),
        bw_Ju=V(dtype=gs.qd_float, shape=maybe_shape((len_constraints_, _B), solver._requires_grad)),
        bw_y=V(dtype=gs.qd_float, shape=maybe_shape((len_constraints_, _B), solver._requires_grad)),
        bw_w=V(dtype=gs.qd_float, shape=maybe_shape((len_constraints_, _B), solver._requires_grad)),
        # Timers (Metal has no 64-bit atomics, hence the narrower dtype there)
        timers=V(dtype=qd.i64 if gs.backend != gs.metal else qd.i32, shape=(10, _B)),
    )
# =========================================== Collider ===========================================
@DATA_ORIENTED
class StructContactData(metaclass=BASE_METACLASS):
    """Per-contact buffers produced by the collider; allocated by get_contact_data."""

    geom_a: V_ANNOTATION  # index of the first geometry of the pair
    geom_b: V_ANNOTATION  # index of the second geometry of the pair
    penetration: V_ANNOTATION
    normal: V_ANNOTATION
    pos: V_ANNOTATION
    friction: V_ANNOTATION
    sol_params: V_ANNOTATION  # 7-component solver parameter vector per contact
    force: V_ANNOTATION
    link_a: V_ANNOTATION  # link owning geom_a
    link_b: V_ANNOTATION  # link owning geom_b
def get_contact_data(solver, max_contact_pairs, requires_grad):
    """Allocate per-contact buffers sized (max(max_contact_pairs, 1), n_envs)."""
    n_envs = solver._B
    # Always allocate at least one slot so downstream kernels have a valid buffer.
    pair_shape = (max(max_contact_pairs, 1), n_envs)
    return StructContactData(
        geom_a=V(dtype=gs.qd_int, shape=pair_shape),
        geom_b=V(dtype=gs.qd_int, shape=pair_shape),
        normal=V(dtype=gs.qd_vec3, shape=pair_shape, needs_grad=requires_grad),
        pos=V(dtype=gs.qd_vec3, shape=pair_shape, needs_grad=requires_grad),
        penetration=V(dtype=gs.qd_float, shape=pair_shape, needs_grad=requires_grad),
        friction=V(dtype=gs.qd_float, shape=pair_shape),
        sol_params=V_VEC(7, dtype=gs.qd_float, shape=pair_shape),
        force=V(dtype=gs.qd_vec3, shape=pair_shape),
        link_a=V(dtype=gs.qd_int, shape=pair_shape),
        link_b=V(dtype=gs.qd_int, shape=pair_shape),
    )
@DATA_ORIENTED
class StructDiffContactInput(metaclass=BASE_METACLASS):
    """Inputs captured during contact detection for the differentiable backward pass."""

    ### Non-differentiable input data
    # Geom id of the two geometries
    geom_a: V_ANNOTATION
    geom_b: V_ANNOTATION
    # Local positions of the 3 vertices from the two geometries that define the face on the Minkowski difference
    local_pos1_a: V_ANNOTATION
    local_pos1_b: V_ANNOTATION
    local_pos1_c: V_ANNOTATION
    local_pos2_a: V_ANNOTATION
    local_pos2_b: V_ANNOTATION
    local_pos2_c: V_ANNOTATION
    # Local positions of the 1 vertex from the two geometries that define the support point for the face above
    w_local_pos1: V_ANNOTATION
    w_local_pos2: V_ANNOTATION
    # Reference id of the contact point, which is needed for the backward pass
    ref_id: V_ANNOTATION
    # Flag whether the contact data can be computed in numerically stable way in both the forward and backward passes
    valid: V_ANNOTATION
    ### Differentiable input data
    # Reference penetration depth, which is needed for computing the weight of the contact point
    ref_penetration: V_ANNOTATION
def get_diff_contact_input(solver, max_contacts_per_pair, is_active):
    """Allocate differentiable-contact input buffers.

    Buffers are only materialized when `is_active` is True AND the solver requires
    gradients; otherwise maybe_shape collapses them to placeholders.
    """
    buf_shape = maybe_shape((solver._B, max_contacts_per_pair), is_active and solver._requires_grad)
    return StructDiffContactInput(
        geom_a=V(dtype=gs.qd_int, shape=buf_shape),
        geom_b=V(dtype=gs.qd_int, shape=buf_shape),
        local_pos1_a=V_VEC(3, dtype=gs.qd_float, shape=buf_shape),
        local_pos1_b=V_VEC(3, dtype=gs.qd_float, shape=buf_shape),
        local_pos1_c=V_VEC(3, dtype=gs.qd_float, shape=buf_shape),
        local_pos2_a=V_VEC(3, dtype=gs.qd_float, shape=buf_shape),
        local_pos2_b=V_VEC(3, dtype=gs.qd_float, shape=buf_shape),
        local_pos2_c=V_VEC(3, dtype=gs.qd_float, shape=buf_shape),
        w_local_pos1=V_VEC(3, dtype=gs.qd_float, shape=buf_shape),
        w_local_pos2=V_VEC(3, dtype=gs.qd_float, shape=buf_shape),
        ref_id=V(dtype=gs.qd_int, shape=buf_shape),
        valid=V(dtype=gs.qd_int, shape=buf_shape),
        ref_penetration=V(dtype=gs.qd_float, shape=buf_shape, needs_grad=True),
    )
@DATA_ORIENTED
class StructSortBuffer(metaclass=BASE_METACLASS):
    """Endpoint sort buffer with two entries per geometry per env (see get_sort_buffer)."""

    value: V_ANNOTATION  # endpoint coordinate used as the sort key
    is_max: V_ANNOTATION  # whether this entry is the upper endpoint of the interval
    i_g: V_ANNOTATION  # geometry index that owns this endpoint
def get_sort_buffer(solver):
    """Allocate the endpoint sort buffer: two endpoints per geom per env."""
    endpoint_shape = (2 * solver.n_geoms_, solver._B)
    return StructSortBuffer(
        value=V(dtype=gs.qd_float, shape=endpoint_shape),
        i_g=V(dtype=gs.qd_int, shape=endpoint_shape),
        is_max=V(dtype=gs.qd_bool, shape=endpoint_shape),
    )
@DATA_ORIENTED
class StructContactCache(metaclass=BASE_METACLASS):
    """Cached contact normal per candidate collision pair (see get_contact_cache)."""

    normal: V_ANNOTATION  # vec3 per (possible pair, env)
def get_contact_cache(solver, n_possible_pairs):
    """Allocate one cached vec3 normal per possible collision pair per env."""
    return StructContactCache(
        normal=V_VEC(3, dtype=gs.qd_float, shape=(n_possible_pairs, solver._B)),
    )
@DATA_ORIENTED
class StructAggList(metaclass=BASE_METACLASS):
    """Per-entity aggregation list: write cursor, item count and start offset (see get_agg_list)."""

    curr: V_ANNOTATION  # current write cursor
    n: V_ANNOTATION  # number of items
    start: V_ANNOTATION  # start offset of the entity's slice
def get_agg_list(solver):
    """Allocate a StructAggList with one slot per entity per env (at least one entity)."""
    agg_shape = (max(solver.n_entities, 1), solver._B)
    return StructAggList(
        curr=V(dtype=gs.qd_int, shape=agg_shape),
        n=V(dtype=gs.qd_int, shape=agg_shape),
        start=V(dtype=gs.qd_int, shape=agg_shape),
    )
@DATA_ORIENTED
class StructContactIslandState(metaclass=BASE_METACLASS):
    """State used to build contact islands; allocated by get_contact_island_state."""

    ci_edges: V_ANNOTATION  # (entity, entity) index pairs per edge
    edge_id: V_ANNOTATION
    constraint_list: V_ANNOTATION
    constraint_id: V_ANNOTATION
    entity_edge: StructAggList
    island_col: StructAggList
    island_hibernated: V_ANNOTATION
    island_entity: StructAggList
    entity_id: V_ANNOTATION
    n_edges: V_ANNOTATION  # per-env edge count
    n_islands: V_ANNOTATION  # per-env island count
    n_stack: V_ANNOTATION  # per-env traversal stack size
    entity_island: V_ANNOTATION  # island index per entity
    stack: V_ANNOTATION  # traversal stack of entity indices
    entity_idx_to_next_entity_idx_in_hibernated_island: V_ANNOTATION
def get_contact_island_state(solver, collider):
    """Allocate contact-island buffers sized for contact edges plus hibernation edges."""
    n_envs = solver._B
    n_contact_pairs = max(collider._collider_info.max_contact_pairs[None], 1)
    n_entities = max(solver.n_entities, 1)
    # With hibernation enabled, island construction also inserts edges for hibernated
    # entity chains. The chain is cyclic (last entity links back to the first), so in
    # the worst case every entity contributes one extra edge.
    n_hibernation_edges = n_entities if solver._use_hibernation else 0
    n_edges = n_contact_pairs + n_hibernation_edges
    per_entity = (n_entities, n_envs)
    per_env = (n_envs,)
    return StructContactIslandState(
        ci_edges=V(dtype=gs.qd_int, shape=(n_edges, 2, n_envs)),
        edge_id=V(dtype=gs.qd_int, shape=(n_edges * 2, n_envs)),
        constraint_list=V(dtype=gs.qd_int, shape=(n_contact_pairs, n_envs)),
        constraint_id=V(dtype=gs.qd_int, shape=(n_contact_pairs * 2, n_envs)),
        entity_edge=get_agg_list(solver),
        island_col=get_agg_list(solver),
        island_hibernated=V(dtype=gs.qd_int, shape=per_entity),
        island_entity=get_agg_list(solver),
        entity_id=V(dtype=gs.qd_int, shape=per_entity),
        n_edges=V(dtype=gs.qd_int, shape=per_env),
        n_islands=V(dtype=gs.qd_int, shape=per_env),
        n_stack=V(dtype=gs.qd_int, shape=per_env),
        entity_island=V(dtype=gs.qd_int, shape=per_entity),
        stack=V(dtype=gs.qd_int, shape=per_entity),
        entity_idx_to_next_entity_idx_in_hibernated_island=V(dtype=gs.qd_int, shape=per_entity),
    )
@DATA_ORIENTED
class StructColliderState(metaclass=BASE_METACLASS):
    """Mutable collider state; allocated by get_collider_state."""

    sort_buffer: StructSortBuffer
    contact_data: StructContactData
    active_buffer: V_ANNOTATION
    n_broad_pairs: V_ANNOTATION  # per-env broad-phase pair count
    broad_collision_pairs: V_ANNOTATION  # (geom, geom) candidate pairs from the broad phase
    active_buffer_awake: V_ANNOTATION
    active_buffer_hib: V_ANNOTATION
    # Scratch buffers for box-box narrow phase (only allocated when box_box_detection is on).
    box_depth: V_ANNOTATION
    box_points: V_ANNOTATION
    box_pts: V_ANNOTATION
    box_lines: V_ANNOTATION
    box_linesu: V_ANNOTATION
    box_axi: V_ANNOTATION
    box_ppts2: V_ANNOTATION
    box_pu: V_ANNOTATION
    xyz_max_min: V_ANNOTATION
    prism: V_ANNOTATION
    n_contacts: V_ANNOTATION  # per-env contact count
    n_contacts_hibernated: V_ANNOTATION
    first_time: V_ANNOTATION
    contact_cache: StructContactCache
    # Input data for differentiable contact detection used in the backward pass
    diff_contact_input: StructDiffContactInput
def get_collider_state(
    solver,
    static_rigid_sim_config,
    n_possible_pairs,
    max_collision_pairs_broad_k,
    collider_info,
    collider_static_config,
):
    """Allocate all mutable collider buffers (broad phase, box-box scratch, contacts)."""
    n_envs = solver._B
    n_geoms = solver.n_geoms_
    n_pairs = min(solver.max_collision_pairs, n_possible_pairs)
    n_pairs_broad = n_pairs * max_collision_pairs_broad_k
    n_contact_pairs = n_pairs * collider_static_config.n_contacts_per_pair
    needs_grad = static_rigid_sim_config.requires_grad
    box_box_on = static_rigid_sim_config.box_box_detection
    n_box_contacts = collider_static_config.n_contacts_per_pair

    def _box_shape(*dims):
        # Box-box scratch buffers are only materialized when box-box detection is enabled.
        return maybe_shape((*dims, n_envs), box_box_on)

    return StructColliderState(
        sort_buffer=get_sort_buffer(solver),
        active_buffer=V(dtype=gs.qd_int, shape=(n_geoms, n_envs)),
        n_broad_pairs=V(dtype=gs.qd_int, shape=(n_envs,)),
        active_buffer_awake=V(dtype=gs.qd_int, shape=(n_geoms, n_envs)),
        active_buffer_hib=V(dtype=gs.qd_int, shape=(n_geoms, n_envs)),
        box_depth=V(dtype=gs.qd_float, shape=_box_shape(n_box_contacts)),
        box_points=V_VEC(3, dtype=gs.qd_float, shape=_box_shape(n_box_contacts)),
        box_pts=V_VEC(3, dtype=gs.qd_float, shape=_box_shape(6)),
        box_lines=V_VEC(6, dtype=gs.qd_float, shape=_box_shape(4)),
        box_linesu=V_VEC(6, dtype=gs.qd_float, shape=_box_shape(4)),
        box_axi=V_VEC(3, dtype=gs.qd_float, shape=_box_shape(3)),
        box_ppts2=V(dtype=gs.qd_float, shape=_box_shape(4, 2)),
        box_pu=V_VEC(3, dtype=gs.qd_float, shape=_box_shape(4)),
        xyz_max_min=V(dtype=gs.qd_float, shape=(6, n_envs)),
        prism=V_VEC(3, dtype=gs.qd_float, shape=(6, n_envs)),
        n_contacts=V(dtype=gs.qd_int, shape=(n_envs,)),
        n_contacts_hibernated=V(dtype=gs.qd_int, shape=(n_envs,)),
        first_time=V(dtype=gs.qd_bool, shape=(n_envs,)),
        contact_cache=get_contact_cache(solver, n_possible_pairs),
        broad_collision_pairs=V_VEC(2, dtype=gs.qd_int, shape=(max(n_pairs_broad, 1), n_envs)),
        contact_data=get_contact_data(solver, n_contact_pairs, needs_grad),
        diff_contact_input=get_diff_contact_input(solver, max(n_contact_pairs, 1), is_active=True),
    )
@DATA_ORIENTED
class StructColliderInfo(metaclass=BASE_METACLASS):
    """Static collider metadata; allocated once by get_collider_info."""

    # Vertex adjacency stored in compressed form: flat neighbor list plus per-vertex start/count.
    vert_neighbors: V_ANNOTATION
    vert_neighbor_start: V_ANNOTATION
    vert_n_neighbors: V_ANNOTATION
    collision_pair_idx: V_ANNOTATION  # (n_geoms, n_geoms) pair index table
    max_possible_pairs: V_ANNOTATION
    max_collision_pairs: V_ANNOTATION
    max_contact_pairs: V_ANNOTATION
    max_collision_pairs_broad: V_ANNOTATION
    # Terrain fields
    terrain_hf: V_ANNOTATION  # height-field samples (scalar placeholder when no terrain)
    terrain_rc: V_ANNOTATION
    terrain_scale: V_ANNOTATION
    terrain_xyz_maxmin: V_ANNOTATION
    # multi contact perturbation and tolerance
    mc_perturbation: V_ANNOTATION
    mc_tolerance: V_ANNOTATION
    mpr_to_gjk_overlap_ratio: V_ANNOTATION
    # differentiable contact tolerance
    diff_pos_tolerance: V_ANNOTATION
    diff_normal_tolerance: V_ANNOTATION
def get_collider_info(solver, n_vert_neighbors, collider_static_config, **kwargs):
    """Allocate static collider metadata (adjacency, pair bookkeeping, terrain, tolerances)."""
    # Use the height-field shape of the first terrain geom, or a scalar placeholder
    # when the scene contains no terrain.
    terrain_hf_shape = next(
        (geom.entity.terrain_hf.shape for geom in solver.geoms if geom.type == gs.GEOM_TYPE.TERRAIN),
        1,
    )
    return StructColliderInfo(
        vert_neighbors=V(dtype=gs.qd_int, shape=(max(n_vert_neighbors, 1),)),
        vert_neighbor_start=V(dtype=gs.qd_int, shape=(solver.n_verts_,)),
        vert_n_neighbors=V(dtype=gs.qd_int, shape=(solver.n_verts_,)),
        collision_pair_idx=V(dtype=gs.qd_int, shape=(solver.n_geoms_, solver.n_geoms_)),
        max_possible_pairs=V(dtype=gs.qd_int, shape=()),
        max_collision_pairs=V(dtype=gs.qd_int, shape=()),
        max_contact_pairs=V(dtype=gs.qd_int, shape=()),
        max_collision_pairs_broad=V(dtype=gs.qd_int, shape=()),
        terrain_hf=V(dtype=gs.qd_float, shape=terrain_hf_shape),
        terrain_rc=V(dtype=gs.qd_int, shape=(2,)),
        terrain_scale=V(dtype=gs.qd_float, shape=(2,)),
        terrain_xyz_maxmin=V(dtype=gs.qd_float, shape=(6,)),
        mc_perturbation=V_SCALAR_FROM(dtype=gs.qd_float, value=kwargs["mc_perturbation"]),
        mc_tolerance=V_SCALAR_FROM(dtype=gs.qd_float, value=kwargs["mc_tolerance"]),
        mpr_to_gjk_overlap_ratio=V_SCALAR_FROM(dtype=gs.qd_float, value=kwargs["mpr_to_gjk_overlap_ratio"]),
        diff_pos_tolerance=V_SCALAR_FROM(dtype=gs.qd_float, value=kwargs["diff_pos_tolerance"]),
        diff_normal_tolerance=V_SCALAR_FROM(dtype=gs.qd_float, value=kwargs["diff_normal_tolerance"]),
    )
@qd.data_oriented
class StructColliderStaticConfig(metaclass=AutoInitMeta):
    """Compile-time collider configuration held as plain Python values (not device fields)."""

    # Presence flags — presumably select which collision code paths are compiled; confirm against callers.
    has_terrain: bool
    has_convex_convex: bool
    has_convex_specialization: bool
    has_nonconvex_nonterrain: bool
    # maximum number of contact pairs per collision pair
    n_contacts_per_pair: int
    # ccd algorithm
    ccd_algorithm: int
# =========================================== MPR ===========================================
@DATA_ORIENTED
class StructMPRSimplexSupport(metaclass=BASE_METACLASS):
    """MPR simplex support points: 4 vertices per env (see get_mpr_simplex_support)."""

    v1: V_ANNOTATION  # support point on the first object
    v2: V_ANNOTATION  # support point on the second object
    v: V_ANNOTATION  # combined support point
def get_mpr_simplex_support(B_):
    """Allocate the 4-vertex MPR simplex support buffers for B_ envs."""
    simplex_shape = (4, B_)
    return StructMPRSimplexSupport(
        v1=V_VEC(3, dtype=gs.qd_float, shape=simplex_shape),
        v2=V_VEC(3, dtype=gs.qd_float, shape=simplex_shape),
        v=V_VEC(3, dtype=gs.qd_float, shape=simplex_shape),
    )
@DATA_ORIENTED
class StructMPRState(metaclass=BASE_METACLASS):
    """Per-env MPR state: the simplex support points and the current simplex size."""

    simplex_support: StructMPRSimplexSupport
    simplex_size: V_ANNOTATION  # number of valid simplex vertices per env
def get_mpr_state(B_):
    """Allocate per-env MPR state: the 4-vertex simplex and its current size."""
    fields = {
        "simplex_support": get_mpr_simplex_support(B_),
        "simplex_size": V(dtype=gs.qd_int, shape=(B_,)),
    }
    return StructMPRState(**fields)
@DATA_ORIENTED
class StructMPRInfo(metaclass=BASE_METACLASS):
    """MPR numerical constants, stored as 0-d device fields (see get_mpr_info)."""

    CCD_EPS: V_ANNOTATION
    CCD_TOLERANCE: V_ANNOTATION
    CCD_ITERATIONS: V_ANNOTATION
def get_mpr_info(**kwargs):
    """Build MPR constants from keyword values (CCD_EPS, CCD_TOLERANCE, CCD_ITERATIONS)."""
    # NOTE(review): CCD_ITERATIONS is stored with a float dtype like the other two
    # constants — confirm this is intentional rather than gs.qd_int.
    constants = {
        name: V_SCALAR_FROM(dtype=gs.qd_float, value=kwargs[name])
        for name in ("CCD_EPS", "CCD_TOLERANCE", "CCD_ITERATIONS")
    }
    return StructMPRInfo(**constants)
# =========================================== GJK ===========================================
@DATA_ORIENTED
class StructMDVertex(metaclass=BASE_METACLASS):
    """Vertex of the Minkowski difference, used by both the GJK simplex and EPA polytope."""

    obj1: V_ANNOTATION  # world-frame support point on the first object
    obj2: V_ANNOTATION  # world-frame support point on the second object
    local_obj1: V_ANNOTATION  # same point in the first object's local frame
    local_obj2: V_ANNOTATION  # same point in the second object's local frame
    id1: V_ANNOTATION  # support vertex id on the first object
    id2: V_ANNOTATION  # support vertex id on the second object
    mink: V_ANNOTATION  # obj1 - obj2 point on the Minkowski difference
def get_gjk_simplex_vertex(solver, is_active):
    """Allocate the 4-vertex GJK simplex in Minkowski-difference form."""
    simplex_shape = maybe_shape((solver._B, 4), is_active)
    return StructMDVertex(
        obj1=V_VEC(3, dtype=gs.qd_float, shape=simplex_shape),
        obj2=V_VEC(3, dtype=gs.qd_float, shape=simplex_shape),
        local_obj1=V_VEC(3, dtype=gs.qd_float, shape=simplex_shape),
        local_obj2=V_VEC(3, dtype=gs.qd_float, shape=simplex_shape),
        id1=V(dtype=gs.qd_int, shape=simplex_shape),
        id2=V(dtype=gs.qd_int, shape=simplex_shape),
        mink=V_VEC(3, dtype=gs.qd_float, shape=simplex_shape),
    )
def get_epa_polytope_vertex(solver, gjk_info, is_active):
    """Allocate EPA polytope vertices: 5 initial vertices plus one per EPA iteration."""
    vert_capacity = 5 + gjk_info.epa_max_iterations[None]
    vert_shape = maybe_shape((solver._B, vert_capacity), is_active)
    return StructMDVertex(
        obj1=V_VEC(3, dtype=gs.qd_float, shape=vert_shape),
        obj2=V_VEC(3, dtype=gs.qd_float, shape=vert_shape),
        local_obj1=V_VEC(3, dtype=gs.qd_float, shape=vert_shape),
        local_obj2=V_VEC(3, dtype=gs.qd_float, shape=vert_shape),
        id1=V(dtype=gs.qd_int, shape=vert_shape),
        id2=V(dtype=gs.qd_int, shape=vert_shape),
        mink=V_VEC(3, dtype=gs.qd_float, shape=vert_shape),
    )
@DATA_ORIENTED
class StructGJKSimplex(metaclass=BASE_METACLASS):
    """Per-env GJK simplex metadata (see get_gjk_simplex)."""

    nverts: V_ANNOTATION  # number of valid simplex vertices
    dist: V_ANNOTATION
def get_gjk_simplex(solver, is_active):
    """Allocate per-env GJK simplex metadata (vertex count and distance)."""
    env_shape = maybe_shape((solver._B,), is_active)
    return StructGJKSimplex(
        nverts=V(dtype=gs.qd_int, shape=env_shape),
        dist=V(dtype=gs.qd_float, shape=env_shape),
    )
@DATA_ORIENTED
class StructGJKSimplexBuffer(metaclass=BASE_METACLASS):
    """Per-vertex scratch for the GJK simplex: face normal and signed distance."""

    normal: V_ANNOTATION
    sdist: V_ANNOTATION
def get_gjk_simplex_buffer(solver, is_active):
    """Allocate GJK simplex scratch: one normal and signed distance per simplex vertex."""
    buf_shape = maybe_shape((solver._B, 4), is_active)
    return StructGJKSimplexBuffer(
        normal=V_VEC(3, dtype=gs.qd_float, shape=buf_shape),
        sdist=V(dtype=gs.qd_float, shape=buf_shape),
    )
@DATA_ORIENTED
class StructEPAPolytope(metaclass=BASE_METACLASS):
    """Per-env EPA polytope counters and horizon scratch (see get_epa_polytope)."""

    nverts: V_ANNOTATION
    nfaces: V_ANNOTATION
    nfaces_map: V_ANNOTATION
    horizon_nedges: V_ANNOTATION
    horizon_w: V_ANNOTATION  # vec3 reference point for the horizon walk
def get_epa_polytope(solver, is_active):
    """Allocate per-env EPA polytope counters and the horizon reference point."""
    env_shape = maybe_shape((solver._B,), is_active)
    return StructEPAPolytope(
        nverts=V(dtype=gs.qd_int, shape=env_shape),
        nfaces=V(dtype=gs.qd_int, shape=env_shape),
        nfaces_map=V(dtype=gs.qd_int, shape=env_shape),
        horizon_nedges=V(dtype=gs.qd_int, shape=env_shape),
        horizon_w=V_VEC(3, dtype=gs.qd_float, shape=env_shape),
    )
@DATA_ORIENTED
class StructEPAPolytopeFace(metaclass=BASE_METACLASS):
    """EPA polytope face storage (see get_epa_polytope_face)."""

    verts_idx: V_ANNOTATION  # 3 vertex indices per face
    adj_idx: V_ANNOTATION  # 3 adjacent face indices
    normal: V_ANNOTATION
    dist2: V_ANNOTATION  # squared distance of the face from the origin
    map_idx: V_ANNOTATION
    visited: V_ANNOTATION
def get_epa_polytope_face(solver, polytope_max_faces, is_active):
    """Allocate EPA face storage: vertex/adjacency indices, normals, and bookkeeping."""
    face_shape = maybe_shape((solver._B, polytope_max_faces), is_active)
    return StructEPAPolytopeFace(
        verts_idx=V_VEC(3, dtype=gs.qd_int, shape=face_shape),
        adj_idx=V_VEC(3, dtype=gs.qd_int, shape=face_shape),
        normal=V_VEC(3, dtype=gs.qd_float, shape=face_shape),
        dist2=V(dtype=gs.qd_float, shape=face_shape),
        map_idx=V(dtype=gs.qd_int, shape=face_shape),
        visited=V(dtype=gs.qd_int, shape=face_shape),
    )
@DATA_ORIENTED
class StructEPAPolytopeHorizonData(metaclass=BASE_METACLASS):
    """(face, edge) index pairs used during the EPA horizon walk."""

    face_idx: V_ANNOTATION
    edge_idx: V_ANNOTATION
def get_epa_polytope_horizon_data(solver, polytope_max_horizons, is_active):
    """Allocate (face, edge) index pairs for the EPA horizon walk."""
    horizon_shape = maybe_shape((solver._B, polytope_max_horizons), is_active)
    return StructEPAPolytopeHorizonData(
        face_idx=V(dtype=gs.qd_int, shape=horizon_shape),
        edge_idx=V(dtype=gs.qd_int, shape=horizon_shape),
    )
@DATA_ORIENTED
class StructContactFace(metaclass=BASE_METACLASS):
    """Paired face data for multi-contact detection (see get_contact_face)."""

    vert1: V_ANNOTATION
    vert2: V_ANNOTATION
    endverts: V_ANNOTATION
    normal1: V_ANNOTATION
    normal2: V_ANNOTATION
    id1: V_ANNOTATION
    id2: V_ANNOTATION
def get_contact_face(solver, max_contact_polygon_verts, is_active):
    """Allocate paired face buffers for multi-contact polygon processing."""
    poly_shape = maybe_shape((solver._B, max_contact_polygon_verts), is_active)
    return StructContactFace(
        vert1=V_VEC(3, dtype=gs.qd_float, shape=poly_shape),
        vert2=V_VEC(3, dtype=gs.qd_float, shape=poly_shape),
        endverts=V_VEC(3, dtype=gs.qd_float, shape=poly_shape),
        normal1=V_VEC(3, dtype=gs.qd_float, shape=poly_shape),
        normal2=V_VEC(3, dtype=gs.qd_float, shape=poly_shape),
        id1=V(dtype=gs.qd_int, shape=poly_shape),
        id2=V(dtype=gs.qd_int, shape=poly_shape),
    )
@DATA_ORIENTED
class StructContactNormal(metaclass=BASE_METACLASS):
    """Contact-normal records for multi-contact detection (see get_contact_normal)."""

    endverts: V_ANNOTATION
    normal: V_ANNOTATION
    id: V_ANNOTATION
def get_contact_normal(solver, max_contact_polygon_verts, is_active):
    """Allocate contact-normal records for multi-contact detection."""
    poly_shape = maybe_shape((solver._B, max_contact_polygon_verts), is_active)
    return StructContactNormal(
        endverts=V_VEC(3, dtype=gs.qd_float, shape=poly_shape),
        normal=V_VEC(3, dtype=gs.qd_float, shape=poly_shape),
        id=V(dtype=gs.qd_int, shape=poly_shape),
    )
@DATA_ORIENTED
class StructContactHalfspace(metaclass=BASE_METACLASS):
    """Half-spaces (normal + offset) used for contact polygon clipping."""

    normal: V_ANNOTATION
    dist: V_ANNOTATION
def get_contact_halfspace(solver, max_contact_polygon_verts, is_active):
    """Allocate clipping half-spaces (normal and offset) for multi-contact detection."""
    poly_shape = maybe_shape((solver._B, max_contact_polygon_verts), is_active)
    return StructContactHalfspace(
        normal=V_VEC(3, dtype=gs.qd_float, shape=poly_shape),
        dist=V(dtype=gs.qd_float, shape=poly_shape),
    )
@DATA_ORIENTED
class StructWitness(metaclass=BASE_METACLASS):
    """Witness point pair: one point on each object per contact (see get_witness)."""

    point_obj1: V_ANNOTATION
    point_obj2: V_ANNOTATION
def get_witness(solver, max_contacts_per_pair, is_active):
    """Allocate witness point pairs, one vec3 per object per contact slot."""
    witness_shape = maybe_shape((solver._B, max_contacts_per_pair), is_active)
    return StructWitness(
        point_obj1=V_VEC(3, dtype=gs.qd_float, shape=witness_shape),
        point_obj2=V_VEC(3, dtype=gs.qd_float, shape=witness_shape),
    )
@DATA_ORIENTED
class StructGJKState(metaclass=BASE_METACLASS):
    """Full GJK/EPA narrow-phase state; allocated by get_gjk_state."""

    support_mesh_prev_vertex_id: V_ANNOTATION
    # GJK simplex buffers
    simplex_vertex: StructMDVertex
    simplex_buffer: StructGJKSimplexBuffer
    simplex: StructGJKSimplex
    # Intersection-test variants of the simplex buffers
    simplex_vertex_intersect: StructMDVertex
    simplex_buffer_intersect: StructGJKSimplexBuffer
    nsimplex: V_ANNOTATION
    last_searched_simplex_vertex_id: V_ANNOTATION
    # EPA polytope buffers
    polytope: StructEPAPolytope
    polytope_verts: StructMDVertex
    polytope_faces: StructEPAPolytopeFace
    polytope_faces_map: V_ANNOTATION
    polytope_horizon_data: StructEPAPolytopeHorizonData
    polytope_horizon_stack: StructEPAPolytopeHorizonData
    # Multi-contact detection buffers
    contact_faces: StructContactFace
    contact_normals: StructContactNormal
    contact_halfspaces: StructContactHalfspace
    contact_clipped_polygons: V_ANNOTATION
    multi_contact_flag: V_ANNOTATION
    # Final per-pair results
    witness: StructWitness
    n_witness: V_ANNOTATION
    n_contacts: V_ANNOTATION
    contact_pos: V_ANNOTATION
    normal: V_ANNOTATION
    is_col: V_ANNOTATION
    penetration: V_ANNOTATION
    distance: V_ANNOTATION
    # Differentiable contact detection
    diff_contact_input: StructDiffContactInput
    n_diff_contact_input: V_ANNOTATION
    diff_penetration: V_ANNOTATION
def get_gjk_state(solver, static_rigid_sim_config, gjk_info, is_active):
    """Allocate all per-batch buffers used by the GJK/EPA collision pipeline.

    Buffer sizes are read from the ``gjk_info`` scalar fields; the batch
    dimension comes from ``solver._B``.
    """
    _B = solver._B
    # NOTE(review): `static_rigid_sim_config` is kept in the signature for callers, but
    # `requires_grad` below is read from `solver._static_rigid_sim_config` — confirm both
    # refer to the same config object.
    polytope_max_faces = gjk_info.polytope_max_faces[None]
    max_contacts_per_pair = gjk_info.max_contacts_per_pair[None]
    max_contact_polygon_verts = gjk_info.max_contact_polygon_verts[None]
    requires_grad = solver._static_rigid_sim_config.requires_grad
    # FIXME: Define GJKState and MujocoCompatGJKState that derives from the former but defines additional attributes
    return StructGJKState(
        # GJK simplex
        support_mesh_prev_vertex_id=V(dtype=gs.qd_int, shape=(_B, 2)),
        simplex_vertex=get_gjk_simplex_vertex(solver, is_active),
        simplex_buffer=get_gjk_simplex_buffer(solver, is_active),
        simplex=get_gjk_simplex(solver, is_active),
        last_searched_simplex_vertex_id=V(dtype=gs.qd_int, shape=(_B,)),
        simplex_vertex_intersect=get_gjk_simplex_vertex(solver, is_active),
        simplex_buffer_intersect=get_gjk_simplex_buffer(solver, is_active),
        nsimplex=V(dtype=gs.qd_int, shape=(_B,)),
        # EPA polytope
        polytope=get_epa_polytope(solver, is_active),
        polytope_verts=get_epa_polytope_vertex(solver, gjk_info, is_active),
        polytope_faces=get_epa_polytope_face(solver, polytope_max_faces, is_active),
        polytope_faces_map=V(dtype=gs.qd_int, shape=(_B, polytope_max_faces)),
        polytope_horizon_data=get_epa_polytope_horizon_data(solver, 6 + gjk_info.epa_max_iterations[None], is_active),
        polytope_horizon_stack=get_epa_polytope_horizon_data(solver, polytope_max_faces * 3, is_active),
        # Multi-contact detection (MuJoCo compatibility)
        contact_faces=get_contact_face(solver, max_contact_polygon_verts, is_active),
        contact_normals=get_contact_normal(solver, max_contact_polygon_verts, is_active),
        contact_halfspaces=get_contact_halfspace(solver, max_contact_polygon_verts, is_active),
        contact_clipped_polygons=V_VEC(3, dtype=gs.qd_float, shape=(_B, 2, max_contact_polygon_verts)),
        multi_contact_flag=V(dtype=gs.qd_bool, shape=(_B,)),
        # Final results
        witness=get_witness(solver, max_contacts_per_pair, is_active),
        n_witness=V(dtype=gs.qd_int, shape=(_B,)),
        n_contacts=V(dtype=gs.qd_int, shape=(_B,)),
        contact_pos=V_VEC(3, dtype=gs.qd_float, shape=(_B, max_contacts_per_pair)),
        normal=V_VEC(3, dtype=gs.qd_float, shape=(_B, max_contacts_per_pair)),
        is_col=V(dtype=gs.qd_bool, shape=(_B,)),
        penetration=V(dtype=gs.qd_float, shape=(_B,)),
        distance=V(dtype=gs.qd_float, shape=(_B,)),
        # `max(..., 1)` keeps the allocation non-empty even when no contacts are requested.
        diff_contact_input=get_diff_contact_input(solver, max(max_contacts_per_pair, 1), is_active),
        n_diff_contact_input=V(dtype=gs.qd_int, shape=(_B,)),
        diff_penetration=V(dtype=gs.qd_float, shape=maybe_shape((_B, max_contacts_per_pair), requires_grad)),
    )
@DATA_ORIENTED
class StructGJKInfo(metaclass=BASE_METACLASS):
    """Scalar configuration/constants for the GJK/EPA algorithms (filled by get_gjk_info)."""
    max_contacts_per_pair: V_ANNOTATION
    max_contact_polygon_verts: V_ANNOTATION
    # Maximum number of iterations for GJK and EPA algorithms
    gjk_max_iterations: V_ANNOTATION
    epa_max_iterations: V_ANNOTATION
    # Numeric sentinels and their squares (squares precomputed in get_gjk_info)
    FLOAT_MIN: V_ANNOTATION
    FLOAT_MIN_SQ: V_ANNOTATION
    FLOAT_MAX: V_ANNOTATION
    FLOAT_MAX_SQ: V_ANNOTATION
    # Tolerance for stopping GJK and EPA algorithms when they converge (only for non-discrete geometries).
    tolerance: V_ANNOTATION
    # If the distance between two objects is smaller than this value, we consider them colliding.
    collision_eps: V_ANNOTATION
    # In safe GJK, we do not allow degenerate simplex to happen, because it becomes the main reason of EPA errors.
    # To prevent degeneracy, we throw away the simplex that has smaller degeneracy measure (e.g. colinearity,
    # coplanarity) than this threshold.
    simplex_max_degeneracy_sq: V_ANNOTATION
    polytope_max_faces: V_ANNOTATION
    # Threshold for reprojection error when we compute the witness points from the polytope. In computing the
    # witness points, we project the origin onto the polytope faces and compute the barycentric coordinates of the
    # projected point. To confirm the projection is valid, we compute the projected point using the barycentric
    # coordinates and compare it with the original projected point. If the difference is larger than this threshold,
    # we consider the projection invalid, because it means numerical errors are too large.
    polytope_max_reprojection_error: V_ANNOTATION
    # Tolerance for normal alignment between (face-face) or (edge-face). The normals should align within this
    # tolerance to be considered as a valid parallel contact.
    contact_face_tol: V_ANNOTATION
    contact_edge_tol: V_ANNOTATION
    # Epsilon values for differentiable contact. [eps_boundary] denotes the maximum distance between the face
    # and the support point in the direction of the face normal. If this distance is 0, the face is on the
    # boundary of the Minkowski difference. For [eps_distance], the distance between the origin and the face
    # should not exceed this eps value plus the default EPA depth. For [eps_affine], the affine coordinates
    # of the origin's projection onto the face should not violate [0, 1] range by this eps value.
    # FIXME: Adjust these values based on the case study.
    diff_contact_eps_boundary: V_ANNOTATION
    diff_contact_eps_distance: V_ANNOTATION
    diff_contact_eps_affine: V_ANNOTATION
    # The minimum norm of the normal to be considered as a valid normal in the differentiable formulation.
    diff_contact_min_normal_norm: V_ANNOTATION
    # The minimum penetration depth to be considered as a valid contact in the differentiable formulation.
    # The contact with penetration depth smaller than this value is ignored in the differentiable formulation.
    # This should be large enough to be safe from numerical errors, because in the backward pass, the computed
    # penetration depth could be different from the forward pass due to the numerical errors. If this value is
    # too small, the non-zero penetration depth could be falsely computed to 0 in the backward pass and thus
    # produce nan values for the contact normal.
    diff_contact_min_penetration: V_ANNOTATION
def get_gjk_info(**kwargs):
    """Build a StructGJKInfo from keyword configuration values.

    Squared sentinels (FLOAT_MIN_SQ / FLOAT_MAX_SQ) are derived here from
    their base values rather than passed in.
    """
    def int_scalar(key):
        # Integer-typed configuration scalar read from kwargs.
        return V_SCALAR_FROM(dtype=gs.qd_int, value=kwargs[key])
    def float_scalar(key):
        # Float-typed configuration scalar read from kwargs.
        return V_SCALAR_FROM(dtype=gs.qd_float, value=kwargs[key])
    return StructGJKInfo(
        max_contacts_per_pair=int_scalar("max_contacts_per_pair"),
        max_contact_polygon_verts=int_scalar("max_contact_polygon_verts"),
        gjk_max_iterations=int_scalar("gjk_max_iterations"),
        epa_max_iterations=int_scalar("epa_max_iterations"),
        FLOAT_MIN=float_scalar("FLOAT_MIN"),
        FLOAT_MIN_SQ=V_SCALAR_FROM(dtype=gs.qd_float, value=kwargs["FLOAT_MIN"] ** 2),
        FLOAT_MAX=float_scalar("FLOAT_MAX"),
        FLOAT_MAX_SQ=V_SCALAR_FROM(dtype=gs.qd_float, value=kwargs["FLOAT_MAX"] ** 2),
        tolerance=float_scalar("tolerance"),
        collision_eps=float_scalar("collision_eps"),
        simplex_max_degeneracy_sq=float_scalar("simplex_max_degeneracy_sq"),
        polytope_max_faces=int_scalar("polytope_max_faces"),
        polytope_max_reprojection_error=float_scalar("polytope_max_reprojection_error"),
        contact_face_tol=float_scalar("contact_face_tol"),
        contact_edge_tol=float_scalar("contact_edge_tol"),
        diff_contact_eps_boundary=float_scalar("diff_contact_eps_boundary"),
        diff_contact_eps_distance=float_scalar("diff_contact_eps_distance"),
        diff_contact_eps_affine=float_scalar("diff_contact_eps_affine"),
        diff_contact_min_normal_norm=float_scalar("diff_contact_min_normal_norm"),
        diff_contact_min_penetration=float_scalar("diff_contact_min_penetration"),
    )
@qd.data_oriented
class StructGJKStaticConfig(metaclass=AutoInitMeta):
    """Compile-time (static) switches for the GJK collision module."""
    # This is disabled by default, because it is often less stable than the other multi-contact detection algorithm.
    # However, we keep the code here for compatibility with MuJoCo and for possible future use.
    enable_mujoco_multi_contact: bool
# =========================================== SupportField ===========================================
@DATA_ORIENTED
class StructSupportFieldInfo(metaclass=BASE_METACLASS):
    """Precomputed support-field lookup tables for fast support queries."""
    support_cell_start: V_ANNOTATION  # per-geom offset into the flat support arrays
    support_v: V_ANNOTATION  # support vertex positions (vec3)
    support_vid: V_ANNOTATION  # support vertex indices
    support_res: V_ANNOTATION  # scalar resolution of the support grid
def get_support_field_info(n_geoms, n_support_cells, support_res):
    """Allocate support-field tables; sizes are clamped to at least one element."""
    n_geoms_padded = max(n_geoms, 1)
    n_cells_padded = max(n_support_cells, 1)
    return StructSupportFieldInfo(
        support_cell_start=V(dtype=gs.qd_int, shape=(n_geoms_padded,)),
        support_v=V_VEC(3, dtype=gs.qd_float, shape=(n_cells_padded,)),
        support_vid=V(dtype=gs.qd_int, shape=(n_cells_padded,)),
        support_res=V_SCALAR_FROM(dtype=gs.qd_int, value=support_res),
    )
# =========================================== SDF ===========================================
@DATA_ORIENTED
class StructSDFGeomInfo(metaclass=BASE_METACLASS):
    """Per-geometry metadata for sampled signed-distance fields."""
    T_mesh_to_sdf: V_ANNOTATION  # 4x4 transform from mesh frame to SDF grid frame
    sdf_res: V_ANNOTATION  # grid resolution per axis (ivec3)
    sdf_max: V_ANNOTATION  # maximum SDF value stored for the geometry
    sdf_cell_size: V_ANNOTATION  # edge length of one SDF cell
    sdf_cell_start: V_ANNOTATION  # offset of this geometry's cells in the flat SDF arrays
def get_sdf_geom_info(n_geoms):
    """Allocate per-geometry SDF metadata fields for `n_geoms` geometries."""
    geom_shape = (n_geoms,)
    return StructSDFGeomInfo(
        T_mesh_to_sdf=V_MAT(n=4, m=4, dtype=gs.qd_float, shape=geom_shape),
        sdf_res=V_VEC(3, dtype=gs.qd_int, shape=geom_shape),
        sdf_max=V(dtype=gs.qd_float, shape=geom_shape),
        sdf_cell_size=V(dtype=gs.qd_float, shape=geom_shape),
        sdf_cell_start=V(dtype=gs.qd_int, shape=geom_shape),
    )
@DATA_ORIENTED
class StructSDFInfo(metaclass=BASE_METACLASS):
    """Flat storage for all geometries' sampled SDF grids plus per-geom metadata."""
    geoms_info: StructSDFGeomInfo
    geoms_sdf_start: V_ANNOTATION  # per-geom start offset into the flat cell arrays
    geoms_sdf_val: V_ANNOTATION  # SDF values, one per cell
    geoms_sdf_grad: V_ANNOTATION  # SDF gradients (vec3), one per cell
    geoms_sdf_closest_vert: V_ANNOTATION  # closest mesh vertex index, one per cell
def get_sdf_info(n_geoms, n_cells):
    """Allocate the flat SDF storage.

    Raises via gs.raise_exception when the gradient buffer (n_cells * 3 entries)
    would exceed int32 indexing.
    """
    if n_cells * 3 > np.iinfo(np.int32).max:
        gs.raise_exception(
            f"SDF Gradient shape (n_cells={n_cells}, 3) is too large. Consider manually setting larger "
            "'sdf_cell_size' in 'gs.materials.Rigid' options."
        )
    n_geoms_padded = max(n_geoms, 1)
    n_cells_padded = max(n_cells, 1)
    return StructSDFInfo(
        geoms_info=get_sdf_geom_info(n_geoms_padded),
        geoms_sdf_start=V(dtype=gs.qd_int, shape=(n_geoms_padded,)),
        geoms_sdf_val=V(dtype=gs.qd_float, shape=(n_cells_padded,)),
        geoms_sdf_grad=V_VEC(3, dtype=gs.qd_float, shape=(n_cells_padded,)),
        geoms_sdf_closest_vert=V(dtype=gs.qd_int, shape=(n_cells_padded,)),
    )
# =========================================== DofsInfo and DofsState ===========================================
@DATA_ORIENTED
class StructDofsInfo(metaclass=BASE_METACLASS):
    """Static per-DOF properties (optionally batched; see get_dofs_info)."""
    entity_idx: V_ANNOTATION  # owning entity index
    stiffness: V_ANNOTATION
    invweight: V_ANNOTATION
    armature: V_ANNOTATION
    damping: V_ANNOTATION
    frictionloss: V_ANNOTATION
    motion_ang: V_ANNOTATION  # angular motion axis (vec3)
    motion_vel: V_ANNOTATION  # linear motion axis (vec3)
    limit: V_ANNOTATION  # (lower, upper) position limit
    kp: V_ANNOTATION  # position-control gain
    kv: V_ANNOTATION  # velocity-control gain
    force_range: V_ANNOTATION  # (min, max) actuator force
def get_dofs_info(solver):
    """Allocate per-DOF property fields; batched over envs when batch_dofs_info is set."""
    if solver._options.batch_dofs_info:
        info_shape = (solver.n_dofs_, solver._B)
    else:
        info_shape = (solver.n_dofs_,)
    def field(dtype):
        # One property field shared by all entries below.
        return V(dtype=dtype, shape=info_shape)
    return StructDofsInfo(
        entity_idx=field(gs.qd_int),
        stiffness=field(gs.qd_float),
        invweight=field(gs.qd_float),
        armature=field(gs.qd_float),
        damping=field(gs.qd_float),
        frictionloss=field(gs.qd_float),
        motion_ang=field(gs.qd_vec3),
        motion_vel=field(gs.qd_vec3),
        limit=field(gs.qd_vec2),
        kp=field(gs.qd_float),
        kv=field(gs.qd_float),
        force_range=field(gs.qd_vec2),
    )
@DATA_ORIENTED
class StructDofsState(metaclass=BASE_METACLASS):
    """Per-DOF dynamic state, batched over environments."""
    # *_bw: Cache to avoid overwriting for backward pass
    force: V_ANNOTATION
    qf_bias: V_ANNOTATION
    qf_passive: V_ANNOTATION
    qf_actuator: V_ANNOTATION
    qf_applied: V_ANNOTATION
    act_length: V_ANNOTATION
    pos: V_ANNOTATION
    vel: V_ANNOTATION
    vel_prev: V_ANNOTATION
    vel_next: V_ANNOTATION
    acc: V_ANNOTATION
    acc_bw: V_ANNOTATION
    acc_smooth: V_ANNOTATION
    acc_smooth_bw: V_ANNOTATION
    qf_smooth: V_ANNOTATION
    qf_constraint: V_ANNOTATION
    # Composite DOF motion/force vectors (vec3 pairs)
    cdof_ang: V_ANNOTATION
    cdof_vel: V_ANNOTATION
    cdofvel_ang: V_ANNOTATION
    cdofvel_vel: V_ANNOTATION
    cdofd_ang: V_ANNOTATION
    cdofd_vel: V_ANNOTATION
    f_vel: V_ANNOTATION
    f_ang: V_ANNOTATION
    # Control targets and mode
    ctrl_force: V_ANNOTATION
    ctrl_pos: V_ANNOTATION
    ctrl_vel: V_ANNOTATION
    ctrl_mode: V_ANNOTATION
    hibernated: V_ANNOTATION
def get_dofs_state(solver):
    """Allocate per-DOF dynamic state, with gradients enabled when the solver requires them."""
    state_shape = (solver.n_dofs_, solver._B)
    grad = solver._requires_grad
    # Backward-pass cache gets a leading size-2 axis (only materialized when grad is on).
    cache_shape = maybe_shape((2, *state_shape), grad)
    def g(dtype, shp=state_shape):
        return V(dtype=dtype, shape=shp, needs_grad=grad)
    return StructDofsState(
        force=g(gs.qd_float),
        qf_bias=g(gs.qd_float),
        qf_passive=g(gs.qd_float),
        qf_actuator=g(gs.qd_float),
        qf_applied=g(gs.qd_float),
        act_length=g(gs.qd_float),
        pos=g(gs.qd_float),
        vel=g(gs.qd_float),
        vel_prev=g(gs.qd_float),
        vel_next=g(gs.qd_float),
        acc=g(gs.qd_float),
        acc_bw=g(gs.qd_float, cache_shape),
        acc_smooth=g(gs.qd_float),
        acc_smooth_bw=g(gs.qd_float, cache_shape),
        qf_smooth=g(gs.qd_float),
        qf_constraint=g(gs.qd_float),
        cdof_ang=g(gs.qd_vec3),
        cdof_vel=g(gs.qd_vec3),
        cdofvel_ang=g(gs.qd_vec3),
        cdofvel_vel=g(gs.qd_vec3),
        cdofd_ang=g(gs.qd_vec3),
        cdofd_vel=g(gs.qd_vec3),
        f_vel=g(gs.qd_vec3),
        f_ang=g(gs.qd_vec3),
        ctrl_force=g(gs.qd_float),
        ctrl_pos=g(gs.qd_float),
        ctrl_vel=g(gs.qd_float),
        # Integer bookkeeping fields never carry gradients.
        ctrl_mode=V(dtype=gs.qd_int, shape=state_shape),
        hibernated=V(dtype=gs.qd_int, shape=state_shape),
    )
# =========================================== LinksState and LinksInfo ===========================================
@DATA_ORIENTED
class StructLinksState(metaclass=BASE_METACLASS):
    """Per-link dynamic state, batched over environments."""
    # *_bw: Cache to avoid overwriting for backward pass
    cinr_inertial: V_ANNOTATION
    cinr_pos: V_ANNOTATION
    cinr_quat: V_ANNOTATION
    cinr_mass: V_ANNOTATION
    crb_inertial: V_ANNOTATION
    crb_pos: V_ANNOTATION
    crb_quat: V_ANNOTATION
    crb_mass: V_ANNOTATION
    cdd_vel: V_ANNOTATION
    cdd_ang: V_ANNOTATION
    pos: V_ANNOTATION
    quat: V_ANNOTATION
    pos_bw: V_ANNOTATION
    quat_bw: V_ANNOTATION
    i_pos: V_ANNOTATION
    i_pos_bw: V_ANNOTATION
    i_quat: V_ANNOTATION
    j_pos: V_ANNOTATION
    j_quat: V_ANNOTATION
    j_pos_bw: V_ANNOTATION
    j_quat_bw: V_ANNOTATION
    j_vel: V_ANNOTATION
    j_ang: V_ANNOTATION
    cd_ang: V_ANNOTATION
    cd_vel: V_ANNOTATION
    cd_ang_bw: V_ANNOTATION
    cd_vel_bw: V_ANNOTATION
    mass_sum: V_ANNOTATION
    root_COM: V_ANNOTATION  # COM of the kinematic tree
    root_COM_bw: V_ANNOTATION
    mass_shift: V_ANNOTATION
    i_pos_shift: V_ANNOTATION
    # Composite accelerations and forces
    cacc_ang: V_ANNOTATION
    cacc_lin: V_ANNOTATION
    cfrc_ang: V_ANNOTATION
    cfrc_vel: V_ANNOTATION
    cfrc_applied_ang: V_ANNOTATION
    cfrc_applied_vel: V_ANNOTATION
    cfrc_coupling_ang: V_ANNOTATION
    cfrc_coupling_vel: V_ANNOTATION
    contact_force: V_ANNOTATION
    hibernated: V_ANNOTATION
def get_links_state(solver):
    """Allocate per-link dynamic state; *_bw caches get an extra per-joint axis."""
    max_n_joints_per_link = solver._static_rigid_sim_config.max_n_joints_per_link
    state_shape = (solver.n_links_, solver._B)
    grad = solver._requires_grad
    cache_shape = (solver.n_links_, max(max_n_joints_per_link + 1, 1), solver._B)
    def g(dtype, shp=state_shape):
        return V(dtype=dtype, shape=shp, needs_grad=grad)
    return StructLinksState(
        cinr_inertial=g(gs.qd_mat3),
        cinr_pos=g(gs.qd_vec3),
        cinr_quat=g(gs.qd_vec4),
        cinr_mass=g(gs.qd_float),
        crb_inertial=g(gs.qd_mat3),
        crb_pos=g(gs.qd_vec3),
        crb_quat=g(gs.qd_vec4),
        crb_mass=g(gs.qd_float),
        cdd_vel=g(gs.qd_vec3),
        cdd_ang=g(gs.qd_vec3),
        pos=g(gs.qd_vec3),
        quat=g(gs.qd_vec4),
        pos_bw=g(gs.qd_vec3, cache_shape),
        quat_bw=g(gs.qd_vec4, cache_shape),
        i_pos=g(gs.qd_vec3),
        # NOTE(review): i_pos_bw and root_COM_bw intentionally use the plain state shape
        # (not the per-joint cache shape) — mirrors the original allocation.
        i_pos_bw=g(gs.qd_vec3),
        i_quat=g(gs.qd_vec4),
        j_pos=g(gs.qd_vec3),
        j_quat=g(gs.qd_vec4),
        j_pos_bw=g(gs.qd_vec3, cache_shape),
        j_quat_bw=g(gs.qd_vec4, cache_shape),
        j_vel=g(gs.qd_vec3),
        j_ang=g(gs.qd_vec3),
        cd_ang=g(gs.qd_vec3),
        cd_vel=g(gs.qd_vec3),
        cd_ang_bw=g(gs.qd_vec3, cache_shape),
        cd_vel_bw=g(gs.qd_vec3, cache_shape),
        mass_sum=g(gs.qd_float),
        root_COM=g(gs.qd_vec3),
        root_COM_bw=g(gs.qd_vec3),
        mass_shift=g(gs.qd_float),
        i_pos_shift=g(gs.qd_vec3),
        cacc_ang=g(gs.qd_vec3),
        cacc_lin=g(gs.qd_vec3),
        cfrc_ang=g(gs.qd_vec3),
        cfrc_vel=g(gs.qd_vec3),
        cfrc_applied_ang=g(gs.qd_vec3),
        cfrc_applied_vel=g(gs.qd_vec3),
        cfrc_coupling_ang=g(gs.qd_vec3),
        cfrc_coupling_vel=g(gs.qd_vec3),
        contact_force=g(gs.qd_vec3),
        hibernated=V(dtype=gs.qd_int, shape=state_shape),
    )
@DATA_ORIENTED
class StructLinksInfo(metaclass=BASE_METACLASS):
    """Static per-link properties (optionally batched; see get_links_info)."""
    parent_idx: V_ANNOTATION  # parent link index in the kinematic tree
    root_idx: V_ANNOTATION  # root link of this link's tree
    q_start: V_ANNOTATION
    dof_start: V_ANNOTATION
    joint_start: V_ANNOTATION
    q_end: V_ANNOTATION
    dof_end: V_ANNOTATION
    joint_end: V_ANNOTATION
    n_dofs: V_ANNOTATION
    pos: V_ANNOTATION
    quat: V_ANNOTATION
    invweight: V_ANNOTATION
    is_fixed: V_ANNOTATION
    inertial_pos: V_ANNOTATION
    inertial_quat: V_ANNOTATION
    inertial_i: V_ANNOTATION  # inertia tensor (mat3)
    inertial_mass: V_ANNOTATION
    entity_idx: V_ANNOTATION
    # Heterogeneous simulation support: per-link geom/vgeom index ranges
    geom_start: V_ANNOTATION
    geom_end: V_ANNOTATION
    vgeom_start: V_ANNOTATION
    vgeom_end: V_ANNOTATION
def get_links_info(solver):
    """Allocate per-link property fields; batched over envs when batch_links_info is set."""
    if solver._options.batch_links_info:
        info_shape = (solver.n_links_, solver._B)
    else:
        info_shape = solver.n_links_
    def field(dtype):
        return V(dtype=dtype, shape=info_shape)
    return StructLinksInfo(
        parent_idx=field(gs.qd_int),
        root_idx=field(gs.qd_int),
        q_start=field(gs.qd_int),
        dof_start=field(gs.qd_int),
        joint_start=field(gs.qd_int),
        q_end=field(gs.qd_int),
        dof_end=field(gs.qd_int),
        joint_end=field(gs.qd_int),
        n_dofs=field(gs.qd_int),
        pos=field(gs.qd_vec3),
        quat=field(gs.qd_vec4),
        invweight=field(gs.qd_vec2),
        is_fixed=field(gs.qd_bool),
        inertial_pos=field(gs.qd_vec3),
        inertial_quat=field(gs.qd_vec4),
        inertial_i=field(gs.qd_mat3),
        inertial_mass=field(gs.qd_float),
        entity_idx=field(gs.qd_int),
        # Heterogeneous simulation support: per-link geom/vgeom index ranges
        geom_start=field(gs.qd_int),
        geom_end=field(gs.qd_int),
        vgeom_start=field(gs.qd_int),
        vgeom_end=field(gs.qd_int),
    )
# =========================================== JointsInfo and JointsState ===========================================
@DATA_ORIENTED
class StructJointsInfo(metaclass=BASE_METACLASS):
    """Static per-joint properties (optionally batched; see get_joints_info)."""
    type: V_ANNOTATION  # joint type enum value
    sol_params: V_ANNOTATION  # solver parameters (vec7)
    q_start: V_ANNOTATION
    dof_start: V_ANNOTATION
    q_end: V_ANNOTATION
    dof_end: V_ANNOTATION
    n_dofs: V_ANNOTATION
    pos: V_ANNOTATION  # joint anchor position (vec3)
def get_joints_info(solver):
    """Allocate per-joint property fields; batched over envs when batch_joints_info is set."""
    if solver._options.batch_joints_info:
        info_shape = (solver.n_joints_, solver._B)
    else:
        info_shape = (solver.n_joints_,)
    def field(dtype):
        return V(dtype=dtype, shape=info_shape)
    return StructJointsInfo(
        type=field(gs.qd_int),
        sol_params=field(gs.qd_vec7),
        q_start=field(gs.qd_int),
        dof_start=field(gs.qd_int),
        q_end=field(gs.qd_int),
        dof_end=field(gs.qd_int),
        n_dofs=field(gs.qd_int),
        pos=field(gs.qd_vec3),
    )
@DATA_ORIENTED
class StructJointsState(metaclass=BASE_METACLASS):
    """Per-joint dynamic state, batched over environments."""
    xanchor: V_ANNOTATION  # joint anchor in world coordinates (vec3)
    xaxis: V_ANNOTATION  # joint axis in world coordinates (vec3)
def get_joints_state(solver):
    """Allocate per-joint world-frame anchor/axis state."""
    state_shape = (solver.n_joints_, solver._B)
    grad = solver._requires_grad
    return StructJointsState(
        xanchor=V(dtype=gs.qd_vec3, shape=state_shape, needs_grad=grad),
        xaxis=V(dtype=gs.qd_vec3, shape=state_shape, needs_grad=grad),
    )
# =========================================== GeomsInfo and GeomsState ===========================================
@DATA_ORIENTED
class StructGeomsInfo(metaclass=BASE_METACLASS):
    """Static per-collision-geometry properties."""
    pos: V_ANNOTATION  # geometry offset in the link frame (vec3)
    center: V_ANNOTATION
    quat: V_ANNOTATION
    data: V_ANNOTATION  # type-specific geometry parameters (vec7)
    link_idx: V_ANNOTATION
    type: V_ANNOTATION  # geometry type enum value
    friction: V_ANNOTATION
    sol_params: V_ANNOTATION
    # Index ranges into the flat vertex/face/edge arrays
    vert_num: V_ANNOTATION
    vert_start: V_ANNOTATION
    vert_end: V_ANNOTATION
    verts_state_start: V_ANNOTATION
    verts_state_end: V_ANNOTATION
    face_num: V_ANNOTATION
    face_start: V_ANNOTATION
    face_end: V_ANNOTATION
    edge_num: V_ANNOTATION
    edge_start: V_ANNOTATION
    edge_end: V_ANNOTATION
    # Collision filtering / classification flags
    is_convex: V_ANNOTATION
    contype: V_ANNOTATION
    conaffinity: V_ANNOTATION
    is_fixed: V_ANNOTATION
    is_decomposed: V_ANNOTATION
    # Coupling (multi-solver interaction) parameters
    needs_coup: V_ANNOTATION
    coup_friction: V_ANNOTATION
    coup_softness: V_ANNOTATION
    coup_restitution: V_ANNOTATION
def get_geoms_info(solver):
    """Allocate static per-geometry property fields (unbatched)."""
    geom_shape = (solver.n_geoms_,)
    def field(dtype):
        return V(dtype=dtype, shape=geom_shape)
    return StructGeomsInfo(
        pos=field(gs.qd_vec3),
        center=field(gs.qd_vec3),
        quat=field(gs.qd_vec4),
        data=field(gs.qd_vec7),
        link_idx=field(gs.qd_int),
        type=field(gs.qd_int),
        friction=field(gs.qd_float),
        sol_params=field(gs.qd_vec7),
        vert_num=field(gs.qd_int),
        vert_start=field(gs.qd_int),
        vert_end=field(gs.qd_int),
        verts_state_start=field(gs.qd_int),
        verts_state_end=field(gs.qd_int),
        face_num=field(gs.qd_int),
        face_start=field(gs.qd_int),
        face_end=field(gs.qd_int),
        edge_num=field(gs.qd_int),
        edge_start=field(gs.qd_int),
        edge_end=field(gs.qd_int),
        is_convex=field(gs.qd_bool),
        contype=field(gs.qd_int),
        conaffinity=field(gs.qd_int),
        is_fixed=field(gs.qd_bool),
        is_decomposed=field(gs.qd_bool),
        needs_coup=field(gs.qd_int),
        coup_friction=field(gs.qd_float),
        coup_softness=field(gs.qd_float),
        coup_restitution=field(gs.qd_float),
    )
@DATA_ORIENTED
class StructGeomsState(metaclass=BASE_METACLASS):
    """Per-geometry dynamic state, batched over environments."""
    pos: V_ANNOTATION  # world-frame position (vec3)
    quat: V_ANNOTATION  # world-frame orientation (vec4)
    aabb_min: V_ANNOTATION  # axis-aligned bounding-box corners
    aabb_max: V_ANNOTATION
    verts_updated: V_ANNOTATION  # whether the cached vertex positions are current
    min_buffer_idx: V_ANNOTATION
    max_buffer_idx: V_ANNOTATION
    hibernated: V_ANNOTATION
    friction_ratio: V_ANNOTATION
def get_geoms_state(solver):
    """Allocate per-geometry dynamic state; only pose fields carry gradients."""
    state_shape = (solver.n_geoms_, solver._B)
    grad = solver._static_rigid_sim_config.requires_grad
    return StructGeomsState(
        pos=V(dtype=gs.qd_vec3, shape=state_shape, needs_grad=grad),
        quat=V(dtype=gs.qd_vec4, shape=state_shape, needs_grad=grad),
        aabb_min=V(dtype=gs.qd_vec3, shape=state_shape),
        aabb_max=V(dtype=gs.qd_vec3, shape=state_shape),
        verts_updated=V(dtype=gs.qd_bool, shape=state_shape),
        min_buffer_idx=V(dtype=gs.qd_int, shape=state_shape),
        max_buffer_idx=V(dtype=gs.qd_int, shape=state_shape),
        hibernated=V(dtype=gs.qd_int, shape=state_shape),
        friction_ratio=V(dtype=gs.qd_float, shape=state_shape),
    )
# =========================================== VertsInfo ===========================================
@DATA_ORIENTED
class StructVertsInfo(metaclass=BASE_METACLASS):
    """Static per-collision-vertex data."""
    init_pos: V_ANNOTATION  # rest position (vec3)
    init_normal: V_ANNOTATION  # rest normal (vec3)
    geom_idx: V_ANNOTATION  # owning geometry index
    init_center_pos: V_ANNOTATION
    verts_state_idx: V_ANNOTATION  # index into the free/fixed verts state arrays
    is_fixed: V_ANNOTATION
def get_verts_info(solver):
    """Allocate static per-vertex fields (unbatched)."""
    vert_shape = (solver.n_verts_,)
    return StructVertsInfo(
        init_pos=V(dtype=gs.qd_vec3, shape=vert_shape),
        init_normal=V(dtype=gs.qd_vec3, shape=vert_shape),
        geom_idx=V(dtype=gs.qd_int, shape=vert_shape),
        init_center_pos=V(dtype=gs.qd_vec3, shape=vert_shape),
        verts_state_idx=V(dtype=gs.qd_int, shape=vert_shape),
        is_fixed=V(dtype=gs.qd_bool, shape=vert_shape),
    )
# =========================================== FacesInfo ===========================================
@DATA_ORIENTED
class StructFacesInfo(metaclass=BASE_METACLASS):
    """Static per-collision-face data."""
    verts_idx: V_ANNOTATION  # triangle vertex indices (ivec3)
    geom_idx: V_ANNOTATION  # owning geometry index
def get_faces_info(solver):
    """Allocate static per-face fields (unbatched)."""
    face_shape = (solver.n_faces_,)
    return StructFacesInfo(
        verts_idx=V(dtype=gs.qd_ivec3, shape=face_shape),
        geom_idx=V(dtype=gs.qd_int, shape=face_shape),
    )
# =========================================== EdgesInfo ===========================================
@DATA_ORIENTED
class StructEdgesInfo(metaclass=BASE_METACLASS):
    """Static per-collision-edge data."""
    v0: V_ANNOTATION  # first endpoint vertex index
    v1: V_ANNOTATION  # second endpoint vertex index
    length: V_ANNOTATION  # rest length of the edge
def get_edges_info(solver):
    """Allocate static per-edge fields (unbatched)."""
    edge_shape = (solver.n_edges_,)
    return StructEdgesInfo(
        v0=V(dtype=gs.qd_int, shape=edge_shape),
        v1=V(dtype=gs.qd_int, shape=edge_shape),
        length=V(dtype=gs.qd_float, shape=edge_shape),
    )
# =========================================== VertsState ===========================================
@DATA_ORIENTED
class StructVertsState(metaclass=BASE_METACLASS):
    """Vertex positions; batched for free vertices, unbatched for fixed ones."""
    pos: V_ANNOTATION  # vertex position (vec3)
def get_free_verts_state(solver):
    """Allocate positions for free (movable) vertices, batched over environments."""
    free_shape = (solver.n_free_verts_, solver._B)
    return StructVertsState(pos=V(dtype=gs.qd_vec3, shape=free_shape))
def get_fixed_verts_state(solver):
    """Allocate positions for fixed vertices; shared across environments, so no batch axis."""
    fixed_shape = (solver.n_fixed_verts_,)
    return StructVertsState(pos=V(dtype=gs.qd_vec3, shape=fixed_shape))
# =========================================== VvertsInfo ===========================================
@DATA_ORIENTED
class StructVvertsInfo(metaclass=BASE_METACLASS):
    """Static per-visual-vertex data."""
    init_pos: V_ANNOTATION  # rest position (vec3)
    init_vnormal: V_ANNOTATION  # rest normal (vec3)
    vgeom_idx: V_ANNOTATION  # owning visual-geometry index
def get_vverts_info(solver):
    """Allocate static per-visual-vertex fields (unbatched)."""
    vvert_shape = (solver.n_vverts_,)
    return StructVvertsInfo(
        init_pos=V(dtype=gs.qd_vec3, shape=vvert_shape),
        init_vnormal=V(dtype=gs.qd_vec3, shape=vvert_shape),
        vgeom_idx=V(dtype=gs.qd_int, shape=vvert_shape),
    )
# =========================================== VfacesInfo ===========================================
@DATA_ORIENTED
class StructVfacesInfo(metaclass=BASE_METACLASS):
    """Static per-visual-face data."""
    vverts_idx: V_ANNOTATION  # triangle visual-vertex indices (ivec3)
    vgeom_idx: V_ANNOTATION  # owning visual-geometry index
def get_vfaces_info(solver):
    """Allocate static per-visual-face fields (unbatched)."""
    vface_shape = (solver.n_vfaces_,)
    return StructVfacesInfo(
        vverts_idx=V(dtype=gs.qd_ivec3, shape=vface_shape),
        vgeom_idx=V(dtype=gs.qd_int, shape=vface_shape),
    )
# =========================================== VgeomsInfo ===========================================
@DATA_ORIENTED
class StructVgeomsInfo(metaclass=BASE_METACLASS):
    """Static per-visual-geometry properties."""
    pos: V_ANNOTATION  # offset in the link frame (vec3)
    quat: V_ANNOTATION
    link_idx: V_ANNOTATION
    # Index ranges into the flat visual vertex/face arrays
    vvert_num: V_ANNOTATION
    vvert_start: V_ANNOTATION
    vvert_end: V_ANNOTATION
    vface_num: V_ANNOTATION
    vface_start: V_ANNOTATION
    vface_end: V_ANNOTATION
    color: V_ANNOTATION  # RGBA color (vec4)
def get_vgeoms_info(solver):
    """Allocate static per-visual-geometry fields (unbatched)."""
    vgeom_shape = (solver.n_vgeoms_,)
    def field(dtype):
        return V(dtype=dtype, shape=vgeom_shape)
    return StructVgeomsInfo(
        pos=field(gs.qd_vec3),
        quat=field(gs.qd_vec4),
        link_idx=field(gs.qd_int),
        vvert_num=field(gs.qd_int),
        vvert_start=field(gs.qd_int),
        vvert_end=field(gs.qd_int),
        vface_num=field(gs.qd_int),
        vface_start=field(gs.qd_int),
        vface_end=field(gs.qd_int),
        color=field(gs.qd_vec4),
    )
# =========================================== VGeomsState ===========================================
@DATA_ORIENTED
class StructVgeomsState(metaclass=BASE_METACLASS):
    """Per-visual-geometry world pose, batched over environments."""
    pos: V_ANNOTATION  # world-frame position (vec3)
    quat: V_ANNOTATION  # world-frame orientation (vec4)
def get_vgeoms_state(solver):
    """Allocate batched world poses for visual geometries."""
    state_shape = (solver.n_vgeoms_, solver._B)
    return StructVgeomsState(
        pos=V(dtype=gs.qd_vec3, shape=state_shape),
        quat=V(dtype=gs.qd_vec4, shape=state_shape),
    )
# =========================================== EqualitiesInfo ===========================================
@DATA_ORIENTED
class StructEqualitiesInfo(metaclass=BASE_METACLASS):
    """Per-equality-constraint data, batched over environments."""
    eq_obj1id: V_ANNOTATION  # first constrained object id
    eq_obj2id: V_ANNOTATION  # second constrained object id
    eq_data: V_ANNOTATION  # constraint parameters (vec11)
    eq_type: V_ANNOTATION  # equality type enum value
    sol_params: V_ANNOTATION  # solver parameters (vec7)
def get_equalities_info(solver):
    """Allocate batched equality-constraint fields."""
    eq_shape = (solver.n_candidate_equalities_, solver._B)
    return StructEqualitiesInfo(
        eq_obj1id=V(dtype=gs.qd_int, shape=eq_shape),
        eq_obj2id=V(dtype=gs.qd_int, shape=eq_shape),
        eq_data=V(dtype=gs.qd_vec11, shape=eq_shape),
        eq_type=V(dtype=gs.qd_int, shape=eq_shape),
        sol_params=V(dtype=gs.qd_vec7, shape=eq_shape),
    )
# =========================================== EntitiesInfo ===========================================
@DATA_ORIENTED
class StructEntitiesInfo(metaclass=BASE_METACLASS):
    """Static per-entity index ranges and options."""
    dof_start: V_ANNOTATION
    dof_end: V_ANNOTATION
    n_dofs: V_ANNOTATION
    link_start: V_ANNOTATION
    link_end: V_ANNOTATION
    n_links: V_ANNOTATION
    geom_start: V_ANNOTATION
    geom_end: V_ANNOTATION
    n_geoms: V_ANNOTATION
    gravity_compensation: V_ANNOTATION  # fraction of gravity compensated for the entity
    is_local_collision_mask: V_ANNOTATION
def get_entities_info(solver):
    """Allocate static per-entity fields (unbatched)."""
    entity_shape = (solver.n_entities_,)
    def field(dtype):
        return V(dtype=dtype, shape=entity_shape)
    return StructEntitiesInfo(
        dof_start=field(gs.qd_int),
        dof_end=field(gs.qd_int),
        n_dofs=field(gs.qd_int),
        link_start=field(gs.qd_int),
        link_end=field(gs.qd_int),
        n_links=field(gs.qd_int),
        geom_start=field(gs.qd_int),
        geom_end=field(gs.qd_int),
        n_geoms=field(gs.qd_int),
        gravity_compensation=field(gs.qd_float),
        is_local_collision_mask=field(gs.qd_bool),
    )
# =========================================== EntitiesState ===========================================
@DATA_ORIENTED
class StructEntitiesState(metaclass=BASE_METACLASS):
    """Per-entity dynamic state, batched over environments."""
    hibernated: V_ANNOTATION  # whether the entity is asleep in each environment
def get_entities_state(solver):
    """Allocate batched per-entity state."""
    entity_shape = (solver.n_entities_, solver._B)
    return StructEntitiesState(hibernated=V(dtype=gs.qd_int, shape=entity_shape))
# =========================================== RigidAdjointCache ===========================================
@DATA_ORIENTED
class StructRigidAdjointCache(metaclass=BASE_METACLASS):
    # This cache stores intermediate values during rigid body simulation to use Quadrants's AD. Quadrants's AD requires
    # us not to overwrite the values that have been read during the forward pass, so we need to store the intemediate
    # values in this cache to avoid overwriting them. Specifically, after we compute next frame's qpos, dofs_vel, and
    # dofs_acc, we need to store them in this cache because we overwrite the values in the next frame. See how
    # [kernel_save_adjoint_cache] is used in [rigid_solver.py] to store the values in this cache.
    qpos: V_ANNOTATION  # per-substep generalized positions
    dofs_vel: V_ANNOTATION  # per-substep DOF velocities
    dofs_acc: V_ANNOTATION  # per-substep DOF accelerations
def get_rigid_adjoint_cache(solver):
    """Allocate the adjoint cache; one extra slot beyond the local substep count."""
    n_slots = solver._sim.substeps_local + 1
    grad = solver._requires_grad
    return StructRigidAdjointCache(
        qpos=V(dtype=gs.qd_float, shape=(n_slots, solver.n_qs_, solver._B), needs_grad=grad),
        dofs_vel=V(dtype=gs.qd_float, shape=(n_slots, solver.n_dofs_, solver._B), needs_grad=grad),
        dofs_acc=V(dtype=gs.qd_float, shape=(n_slots, solver.n_dofs_, solver._B), needs_grad=grad),
    )
# =================================== StructRigidSimStaticConfig ===================================
@qd.data_oriented
class StructRigidSimStaticConfig(metaclass=AutoInitMeta):
    """Static configuration of the rigid simulator.

    Plain Python values (not per-env arrays). Integer fields defaulting to -1 are
    size bounds meaning "unset" until filled in by the solver.
    """

    # --- backend / solver selection and feature toggles ---
    backend: int
    para_level: int
    enable_collision: bool
    use_hibernation: bool
    batch_links_info: bool
    batch_dofs_info: bool
    batch_joints_info: bool
    enable_heterogeneous: bool
    enable_mujoco_compatibility: bool
    enable_multi_contact: bool
    enable_joint_limit: bool
    box_box_detection: bool
    sparse_solve: bool
    integrator: int
    solver_type: int
    requires_grad: bool
    # --- tiled-Cholesky options (off unless explicitly enabled) ---
    enable_tiled_cholesky_mass_matrix: bool = False
    enable_tiled_cholesky_hessian: bool = False
    tiled_n_dofs_per_entity: int = -1
    tiled_n_dofs: int = -1
    # --- model size bounds (-1 = unset) ---
    max_n_links_per_entity: int = -1
    max_n_joints_per_link: int = -1
    max_n_dofs_per_joint: int = -1
    max_n_qs_per_link: int = -1
    max_n_dofs_per_entity: int = -1
    max_n_dofs_per_link: int = -1
    max_n_geoms_per_entity: int = -1
    n_entities: int = -1
    n_links: int = -1
    n_geoms: int = -1
# =========================================== DataManager ===========================================
@qd.data_oriented
class DataManager:
    """Allocates and owns every solver state/info struct in one place."""

    def __init__(self, solver, kinematic_only):
        # Structs needed in both kinematic-only and full-dynamics modes.
        self.rigid_global_info = get_rigid_global_info(solver, kinematic_only)
        self.dofs_info = get_dofs_info(solver)
        self.dofs_state = get_dofs_state(solver)
        self.links_info = get_links_info(solver)
        self.links_state = get_links_state(solver)
        self.joints_info = get_joints_info(solver)
        self.joints_state = get_joints_state(solver)
        self.entities_info = get_entities_info(solver)
        self.entities_state = get_entities_state(solver)
        self.vverts_info = get_vverts_info(solver)
        self.vfaces_info = get_vfaces_info(solver)
        self.vgeoms_info = get_vgeoms_info(solver)
        self.vgeoms_state = get_vgeoms_state(solver)
        if not kinematic_only:
            # Collision/dynamics structs are skipped for kinematic-only solvers.
            self.geoms_info = get_geoms_info(solver)
            self.geoms_state = get_geoms_state(solver)
            self.verts_info = get_verts_info(solver)
            self.faces_info = get_faces_info(solver)
            self.edges_info = get_edges_info(solver)
            self.free_verts_state = get_free_verts_state(solver)
            self.fixed_verts_state = get_fixed_verts_state(solver)
            self.equalities_info = get_equalities_info(solver)
        if solver._static_rigid_sim_config.requires_grad:
            # Data structures required for backward pass
            self.dofs_state_adjoint_cache = get_dofs_state(solver)
            self.links_state_adjoint_cache = get_links_state(solver)
            self.joints_state_adjoint_cache = get_joints_state(solver)
            self.geoms_state_adjoint_cache = get_geoms_state(solver)
            self.rigid_adjoint_cache = get_rigid_adjoint_cache(solver)
        # One int error code per env.
        self.errno = V(dtype=gs.qd_int, shape=(solver._B,))
# =========================================== ViewerRaycastResult ===========================================
@DATA_ORIENTED
class StructViewerRaycastResult(metaclass=BASE_METACLASS):
    # Result of a viewer ray cast; dtypes/shapes per get_viewer_raycast_result.
    distance: V_ANNOTATION  # scalar float
    geom_idx: V_ANNOTATION  # scalar int: index of the hit geom
    hit_point: V_ANNOTATION  # float vec3
    normal: V_ANNOTATION  # float vec3
    env_idx: V_ANNOTATION  # scalar int: environment of the hit
def get_viewer_raycast_result():
    """Allocate a single (unbatched) raycast-result struct for the viewer."""
    scalar_f = dict(dtype=gs.qd_float, shape=())
    scalar_i = dict(dtype=gs.qd_int, shape=())
    return StructViewerRaycastResult(
        distance=V(**scalar_f),
        geom_idx=V(**scalar_i),
        hit_point=V_VEC(3, **scalar_f),
        normal=V_VEC(3, **scalar_f),
        env_idx=V(**scalar_i),
    )
# Exported type aliases: the concrete struct classes when gs.use_ndarray is set,
# otherwise qd.template() placeholders so kernels accept quadrants struct arguments.
DofsState = StructDofsState if gs.use_ndarray else qd.template()
DofsInfo = StructDofsInfo if gs.use_ndarray else qd.template()
GeomsState = StructGeomsState if gs.use_ndarray else qd.template()
GeomsInfo = StructGeomsInfo if gs.use_ndarray else qd.template()
GeomsInitAABB = V_ANNOTATION  # plain array annotation, not a struct
LinksState = StructLinksState if gs.use_ndarray else qd.template()
LinksInfo = StructLinksInfo if gs.use_ndarray else qd.template()
JointsInfo = StructJointsInfo if gs.use_ndarray else qd.template()
JointsState = StructJointsState if gs.use_ndarray else qd.template()
VertsState = StructVertsState if gs.use_ndarray else qd.template()
VertsInfo = StructVertsInfo if gs.use_ndarray else qd.template()
EdgesInfo = StructEdgesInfo if gs.use_ndarray else qd.template()
FacesInfo = StructFacesInfo if gs.use_ndarray else qd.template()
VVertsInfo = StructVvertsInfo if gs.use_ndarray else qd.template()
VFacesInfo = StructVfacesInfo if gs.use_ndarray else qd.template()
VGeomsInfo = StructVgeomsInfo if gs.use_ndarray else qd.template()
VGeomsState = StructVgeomsState if gs.use_ndarray else qd.template()
EntitiesState = StructEntitiesState if gs.use_ndarray else qd.template()
EntitiesInfo = StructEntitiesInfo if gs.use_ndarray else qd.template()
EqualitiesInfo = StructEqualitiesInfo if gs.use_ndarray else qd.template()
RigidGlobalInfo = StructRigidGlobalInfo if gs.use_ndarray else qd.template()
ColliderState = StructColliderState if gs.use_ndarray else qd.template()
ColliderInfo = StructColliderInfo if gs.use_ndarray else qd.template()
MPRState = StructMPRState if gs.use_ndarray else qd.template()
MPRInfo = StructMPRInfo if gs.use_ndarray else qd.template()
SupportFieldInfo = StructSupportFieldInfo if gs.use_ndarray else qd.template()
ConstraintState = StructConstraintState if gs.use_ndarray else qd.template()
GJKState = StructGJKState if gs.use_ndarray else qd.template()
GJKInfo = StructGJKInfo if gs.use_ndarray else qd.template()
SDFInfo = StructSDFInfo if gs.use_ndarray else qd.template()
ContactIslandState = StructContactIslandState if gs.use_ndarray else qd.template()
DiffContactInput = StructDiffContactInput if gs.use_ndarray else qd.template()
RigidAdjointCache = StructRigidAdjointCache if gs.use_ndarray else qd.template()
RaycastResult = StructViewerRaycastResult if gs.use_ndarray else qd.template()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/array_class.py",
"license": "Apache License 2.0",
"lines": 1692,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:examples/collision/contype.py | """
NOTE: contype and conaffinity are 32-bit integer bitmasks used for contact filtering of contact pairs.
When the contype of one geom and the conaffinity of the other geom share a common bit set to 1, two geoms can collide.
Plane: contype=0xFFFF, conaffinity=0xFFFF (1111 1111 1111 1111)
Red Cube: contype=1, conaffinity=1 (0001) -> collide with Plane and Blue Cube
Green Cube: contype=2, conaffinity=2 (0010) -> collide with Plane and Blue Cube
Blue Cube: contype=3, conaffinity=3 (0011) -> collide with Plane, Red Cube, and Green Cube
Dragon: contype=4, conaffinity=4 (0100) -> collide with Plane only
"""
import argparse
import genesis as gs
def main():
    """Demo of contype/conaffinity bitmask contact filtering (see module docstring)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    args = parser.parse_args()

    gs.init()

    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(0.0, -2, 1.5),
            camera_lookat=(0.0, 0.0, 0.5),
            camera_fov=40,
            max_FPS=200,
        ),
        show_viewer=args.vis,
    )

    # The plane keeps the default contype/conaffinity (0xFFFF), so it collides
    # with everything below.
    scene.add_entity(gs.morphs.Plane())

    def _add_cube(pos, mask, rgba):
        # Every cube uses contype == conaffinity, so a single mask determines
        # which other cubes it can touch (shared bit => collision allowed).
        scene.add_entity(
            gs.morphs.Box(
                pos=pos,
                quat=(0, 0, 0, 1),
                size=(0.1, 0.1, 0.1),
                contype=mask,
                conaffinity=mask,
            ),
            surface=gs.surfaces.Default(
                color=rgba,
            ),
        )

    _add_cube((0.025, 0, 0.5), 0b001, (1.0, 0.0, 0.0, 1.0))  # red
    _add_cube((-0.025, 0, 1.0), 0b010, (0.0, 1.0, 0.0, 1.0))  # green
    _add_cube((0.0, 0, 1.5), 0b011, (0.0, 0.0, 1.0, 1.0))  # blue

    # The dragon's mask (0b100) shares no bit with any cube, so it only hits the plane.
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/dragon/dragon.obj",
            scale=0.004,
            euler=(0, 0, 90),
            pos=(-0.1, 0.0, 1.0),
            contype=0b100,
            conaffinity=0b100,
        ),
    )

    scene.build()
    for _ in range(1000):
        scene.step()
# Script entry point (no-op on import).
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/collision/contype.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/coupling/fem_cube_linked_with_arm.py | import os
import argparse
import numpy as np
from tqdm import tqdm
import genesis as gs
def main():
    """Attach two vertices of an FEM cube to a Franka arm's end link, move the arm
    along a planned path, then release the cube. The run is recorded to an mp4.

    Fix: the ``--solver`` help text previously claimed the default was "explicit",
    contradicting ``default="implicit"``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--solver", choices=["explicit", "implicit"], default="implicit", help="FEM solver type (default: implicit)"
    )
    parser.add_argument("--dt", type=float, help="Time step (auto-selected based on solver if not specified)")
    parser.add_argument(
        "--substeps", type=int, help="Number of substeps (auto-selected based on solver if not specified)"
    )
    parser.add_argument("--vis", "-v", action="store_true", help="Show visualization GUI")
    parser.add_argument("-c", "--cpu", action="store_true", default="PYTEST_VERSION" in os.environ)
    args = parser.parse_args()
    gs.init(backend=gs.cpu if args.cpu else gs.gpu, logging_level=None)
    # Explicit integration needs a much smaller step and more substeps than implicit.
    if args.solver == "explicit":
        dt = args.dt if args.dt is not None else 1e-4
        substeps = args.substeps if args.substeps is not None else 5
    else:  # implicit
        dt = args.dt if args.dt is not None else 1e-3
        substeps = args.substeps if args.substeps is not None else 1
    # Under pytest, run only a handful of steps to keep the test fast.
    steps = int(1.0 / dt if "PYTEST_VERSION" not in os.environ else 5)
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=dt,
            substeps=substeps,
            gravity=(0, 0, -9.81),
        ),
        fem_options=gs.options.FEMOptions(
            use_implicit_solver=args.solver == "implicit",
            enable_vertex_constraints=True,
        ),
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        show_viewer=args.vis,
    )
    # Setup scene entities
    scene.add_entity(gs.morphs.Plane())
    cube = scene.add_entity(
        morph=gs.morphs.Box(
            pos=(0.5, 0.0, 0.05),
            size=(0.2, 0.2, 0.2),
        ),
        material=gs.materials.FEM.Elastic(
            E=1.0e4,  # stiffness
            nu=0.45,  # compressibility (0 to 0.5)
            rho=1000.0,  # density
            model="linear_corotated",
        ),
    )
    arm = scene.add_entity(
        morph=gs.morphs.MJCF(
            file="xml/franka_emika_panda/panda.xml",
            pos=(0, 0, 0),
        ),
    )
    # Setup camera for recording; only render every frame_interval-th step so the
    # video stays at or below max_fps.
    video_fps = 1 / dt
    max_fps = 100
    frame_interval = max(1, int(video_fps / max_fps)) if max_fps > 0 else 1
    print("video_fps:", video_fps, "frame_interval:", frame_interval)
    cam = scene.add_camera(
        res=(640, 480),
        pos=(-2.0, 3.0, 2.0),
        lookat=(0.5, 0.5, 0.5),
        fov=30,
    )
    scene.build()
    cam.start_recording()
    try:
        joint_names = [j.name for j in arm.joints]
        dofs_idx_local = []
        for j in arm.joints:
            # print("joint name:", j.name, "dofs_idx_local:", j.dofs_idx_local)
            dofs_idx_local += j.dofs_idx_local
        end_joint = arm.get_joint(joint_names[-1])
        # PD gains and force limits for the 7 arm DOFs + 2 gripper DOFs.
        arm.set_dofs_kp(
            np.array([4500, 4500, 3500, 3500, 2000, 2000, 2000, 100, 100]),
        )
        arm.set_dofs_kv(
            np.array([450, 450, 350, 350, 200, 200, 200, 10, 10]),
        )
        arm.set_dofs_force_range(
            np.array([-87, -87, -87, -87, -12, -12, -12, -100, -100]),
            np.array([87, 87, 87, 87, 12, 12, 12, 100, 100]),
        )
        # Hold the arm at a fixed pose for a short while so the cube settles.
        for i in range(100):
            arm.set_dofs_position(
                np.array([0.9643, -0.3213, -0.6685, -2.3139, -0.2890, 2.0335, -1.6014, 0.0306, 0.0306]), dofs_idx_local
            )
            scene.step()
            if i % frame_interval == 0:
                cam.render()
        print("cube init pos", cube.init_positions)
        # Pin two cube vertices to the arm's end link.
        pin_idx = [1, 5]
        cube.set_vertex_constraints(verts_idx_local=pin_idx, link=end_joint.link)
        print("Cube initial positions:", cube.init_positions[pin_idx])
        scene.draw_debug_spheres(poss=cube.init_positions[pin_idx], radius=0.02, color=(1.0, 0.0, 1.0, 0.8))
        arm_target_pos = (0.3, 0.2, 0.8)
        scene.draw_debug_spheres(poss=[arm_target_pos], radius=0.02, color=(0.0, 1.0, 0.0, 0.8))
        # Plan a joint-space path to the IK solution for the target pose.
        qpos = arm.inverse_kinematics(
            link=end_joint.link,
            pos=np.array(arm_target_pos, gs.np_float),
            quat=np.array((0.0, 1.0, 0.0, 0.0), gs.np_float),
        )
        arm_path_waypoints = arm.plan_path(qpos_goal=qpos, num_waypoints=steps)
        for i, waypoint in tqdm(enumerate(arm_path_waypoints), total=len(arm_path_waypoints)):
            arm.control_dofs_position(waypoint)
            scene.step()
            if i % frame_interval == 0:
                cam.render()
        print("Now dropping the cube")
        cube.remove_vertex_constraints()
        # Keep commanding the final waypoint while the freed cube falls.
        for i in tqdm(range(steps), total=steps):
            arm.control_dofs_position(arm_path_waypoints[-1])
            scene.step()
            if i % frame_interval == 0:
                cam.render()
    except KeyboardInterrupt:
        gs.logger.info("Simulation interrupted, exiting.")
    finally:
        gs.logger.info("Simulation finished.")
        actual_fps = video_fps / frame_interval
        video_filename = f"cube_link_arm_{args.solver}_dt={dt}_substeps={substeps}.mp4"
        cam.stop_recording(save_to_filename=video_filename, fps=actual_fps)
        gs.logger.info(f"Saved video to {video_filename}")
# Script entry point (no-op on import).
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/coupling/fem_cube_linked_with_arm.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:examples/fem_hard_and_soft_constraint.py | import os
import argparse
import numpy as np
import torch
from tqdm import tqdm
import genesis as gs
SCENE_POS = np.array([0.5, 0.5, 1.0])
def main():
    """Demonstrate hard vs. soft FEM vertex constraints.

    A soft blob gets a soft (spring-like) constraint pinning one vertex in place,
    while a stiff cube gets a hard constraint whose target is dragged along a
    circular path. The run is recorded to an mp4.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--solver", choices=["explicit", "implicit"], default="implicit", help="FEM solver type (default: implicit)"
    )
    parser.add_argument("--dt", type=float)
    parser.add_argument("--substeps", type=int)
    parser.add_argument("--seconds", type=float, default=5)
    parser.add_argument("--vis", "-v", action="store_true", default=False)
    args = parser.parse_args()
    # Under pytest, shrink the run so the test stays fast.
    args.seconds = 0.01 if "PYTEST_VERSION" in os.environ else args.seconds
    # Explicit integration needs a much smaller step and more substeps than implicit.
    if args.solver == "explicit":
        dt = args.dt if args.dt is not None else 1e-4
        substeps = args.substeps if args.substeps is not None else 5
    else:  # implicit
        dt = args.dt if args.dt is not None else 1e-3
        substeps = args.substeps if args.substeps is not None else 1
    gs.init(backend=gs.gpu)
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=dt,
            substeps=substeps,
            gravity=(0, 0, -9.81),
        ),
        fem_options=gs.options.FEMOptions(
            use_implicit_solver=args.solver == "implicit",
            enable_vertex_constraints=True,
        ),
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        show_viewer=args.vis,
    )
    scene.add_entity(gs.morphs.Plane())
    # Soft sphere (low stiffness) — gets the soft constraint below.
    blob = scene.add_entity(
        morph=gs.morphs.Sphere(pos=SCENE_POS + np.array([-0.3, -0.3, 0]), radius=0.1),
        material=gs.materials.FEM.Elastic(E=1.0e4, nu=0.45, rho=1000.0, model="linear_corotated"),
    )
    # Stiff cube (100x stiffer) — gets the hard, moving constraint below.
    cube = scene.add_entity(
        morph=gs.morphs.Box(pos=SCENE_POS + np.array([0.3, 0.3, 0]), size=(0.2, 0.2, 0.2)),
        material=gs.materials.FEM.Elastic(E=1.0e6, nu=0.45, rho=1000.0, model="linear_corotated"),
    )
    # Only render every frame_interval-th step so the video stays at or below max_fps.
    video_fps = 1 / dt
    max_fps = 100
    frame_interval = max(1, int(video_fps / max_fps)) if max_fps > 0 else 1
    print(f"video_fps: {video_fps}, frame_interval: {frame_interval}")
    cam = scene.add_camera(
        res=(640, 480),
        pos=(-2.0, 3.0, 2.0),
        lookat=SCENE_POS + np.array([0.0, 0.0, -0.8]),
        fov=30,
    )
    scene.build()
    cam.start_recording()
    pinned_idx = [0]
    circle_radius = 0.3
    circle_period = 10.0
    angle_step = 2 * np.pi * dt / circle_period
    current_angle = 0.0
    # Place the circle's center so that at angle 0 the path passes through the
    # pinned vertex's initial position.
    initial_vertex_pos = cube.init_positions[pinned_idx]
    circle_center = initial_vertex_pos - torch.tensor(
        [-circle_radius * np.cos(current_angle), -circle_radius * np.sin(current_angle), 0.0],
        device=cube.init_positions.device,
        dtype=cube.init_positions.dtype,
    )

    def get_next_circle_position():
        """Get next position on circular path with incremental step."""
        nonlocal current_angle
        offset = torch.tensor(
            [-circle_radius * np.cos(current_angle), -circle_radius * np.sin(current_angle), 0.0],
            device=cube.init_positions.device,
            dtype=cube.init_positions.dtype,
        )
        current_angle += angle_step
        return circle_center + offset

    debug_circle = None
    total_steps = int(args.seconds / dt)
    try:
        # Soft constraint: blob vertex pulled toward a fixed target by a spring.
        target_positions = blob.init_positions[pinned_idx]
        scene.draw_debug_spheres(poss=target_positions, radius=0.02, color=(1, 0, 1, 0.8))
        blob.set_vertex_constraints(pinned_idx, target_positions, is_soft_constraint=True, stiffness=1e4)
        # Hard constraint: cube vertex exactly follows the moving target.
        target_positions = get_next_circle_position()
        debug_circle = scene.draw_debug_spheres(poss=target_positions, radius=0.02, color=(0, 1, 0, 0.8))
        cube.set_vertex_constraints(pinned_idx, target_positions)
        for step in tqdm(range(total_steps), total=total_steps):
            # Move the hard constraint's target one increment along the circle,
            # redrawing its debug marker each step.
            if debug_circle is not None:
                scene.clear_debug_object(debug_circle)
            new_pos = get_next_circle_position()
            debug_circle = scene.draw_debug_spheres(poss=new_pos, radius=0.02, color=(0, 1, 0, 0.8))
            cube.update_constraint_targets(pinned_idx, new_pos)
            scene.step()
            if step % frame_interval == 0:
                cam.render()
    except KeyboardInterrupt:
        gs.logger.info("Simulation interrupted, exiting.")
    finally:
        gs.logger.info("Simulation finished.")
        actual_fps = video_fps / frame_interval
        video_filename = f"fem_hard_soft_{args.solver}_dt={dt}_substeps={substeps}.mp4"
        cam.stop_recording(save_to_filename=video_filename, fps=actual_fps)
        gs.logger.info(f"Saved video to {video_filename}")
# Script entry point (no-op on import).
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/fem_hard_and_soft_constraint.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/couplers/legacy_coupler.py | from typing import TYPE_CHECKING
import numpy as np
import quadrants as qd
import genesis as gs
import genesis.utils.sdf as sdf
from genesis.options.solvers import LegacyCouplerOptions
from genesis.repr_base import RBC
from genesis.utils import array_class
from genesis.utils.array_class import LinksState
from genesis.utils.geom import qd_inv_transform_by_trans_quat, qd_transform_by_trans_quat
if TYPE_CHECKING:
from genesis.engine.simulator import Simulator
CLAMPED_INV_DT = 50.0
@qd.data_oriented
class LegacyCoupler(RBC):
"""
This class handles all the coupling between different solvers. LegacyCoupler will be deprecated in the future.
"""
# ------------------------------------------------------------------------------------
# --------------------------------- Initialization -----------------------------------
# ------------------------------------------------------------------------------------
def __init__(
self,
simulator: "Simulator",
options: "LegacyCouplerOptions",
) -> None:
self.sim = simulator
self.options = options
self.tool_solver = self.sim.tool_solver
self.rigid_solver = self.sim.rigid_solver
self.mpm_solver = self.sim.mpm_solver
self.sph_solver = self.sim.sph_solver
self.pbd_solver = self.sim.pbd_solver
self.fem_solver = self.sim.fem_solver
self.sf_solver = self.sim.sf_solver
    def build(self) -> None:
        """Decide which solver pairs are coupled and allocate their coupling buffers."""
        # A pair is coupled only when BOTH solvers are active AND the pair is
        # enabled in the coupler options.
        self._rigid_mpm = self.rigid_solver.is_active and self.mpm_solver.is_active and self.options.rigid_mpm
        self._rigid_sph = self.rigid_solver.is_active and self.sph_solver.is_active and self.options.rigid_sph
        self._rigid_pbd = self.rigid_solver.is_active and self.pbd_solver.is_active and self.options.rigid_pbd
        self._rigid_fem = self.rigid_solver.is_active and self.fem_solver.is_active and self.options.rigid_fem
        self._mpm_sph = self.mpm_solver.is_active and self.sph_solver.is_active and self.options.mpm_sph
        self._mpm_pbd = self.mpm_solver.is_active and self.pbd_solver.is_active and self.options.mpm_pbd
        self._fem_mpm = self.fem_solver.is_active and self.mpm_solver.is_active and self.options.fem_mpm
        self._fem_sph = self.fem_solver.is_active and self.sph_solver.is_active and self.options.fem_sph
        # Rigid SDF queries are needed by any rigid<->soft pair with coupled geoms.
        if (self._rigid_mpm or self._rigid_sph or self._rigid_pbd or self._rigid_fem) and any(
            geom.needs_coup for geom in self.rigid_solver.geoms
        ):
            self.rigid_solver.collider._sdf.activate()
        if self._rigid_mpm and self.mpm_solver.enable_CPIC:
            # this field stores the geom index of the thin shell rigid object (if any) that separates particle and its surrounding grid cell
            self.cpic_flag = qd.field(gs.qd_int, shape=(self.mpm_solver.n_particles, 3, 3, 3, self.mpm_solver._B))
            # Cached MPM<->rigid contact normal per (particle, geom, env).
            self.mpm_rigid_normal = qd.Vector.field(
                3,
                dtype=gs.qd_float,
                shape=(self.mpm_solver.n_particles, self.rigid_solver.n_geoms_, self.mpm_solver._B),
            )
        if self._rigid_sph:
            # SPH<->rigid contact normals, in both original and spatially
            # reordered particle order.
            self.sph_rigid_normal = qd.Vector.field(
                3,
                dtype=gs.qd_float,
                shape=(self.sph_solver.n_particles, self.rigid_solver.n_geoms_, self.sph_solver._B),
            )
            self.sph_rigid_normal_reordered = qd.Vector.field(
                3,
                dtype=gs.qd_float,
                shape=(self.sph_solver.n_particles, self.rigid_solver.n_geoms_, self.sph_solver._B),
            )
        if self._rigid_pbd:
            self.pbd_rigid_normal_reordered = qd.Vector.field(
                3, dtype=gs.qd_float, shape=(self.pbd_solver.n_particles, self.pbd_solver._B, self.rigid_solver.n_geoms)
            )
            # Per-particle attachment to a rigid link: link index (-1 = detached)
            # and the attachment point expressed in the link's local frame.
            struct_particle_attach_info = qd.types.struct(
                link_idx=gs.qd_int,
                local_pos=gs.qd_vec3,
            )
            self.particle_attach_info = struct_particle_attach_info.field(
                shape=(self.pbd_solver._n_particles, self.pbd_solver._B), layout=qd.Layout.SOA
            )
            self.particle_attach_info.link_idx.fill(-1)
            self.particle_attach_info.local_pos.fill(0.0)
        if self._mpm_sph:
            # Stencil wide enough to cover one MPM cell with SPH hash-grid cells.
            self.mpm_sph_stencil_size = int(np.floor(self.mpm_solver.dx / self.sph_solver.hash_grid_cell_size) + 2)
        if self._mpm_pbd:
            self.mpm_pbd_stencil_size = int(np.floor(self.mpm_solver.dx / self.pbd_solver.hash_grid_cell_size) + 2)
            ## DEBUG
            # NOTE(review): these debug values read sph_solver inside the mpm_pbd
            # branch (sph may be inactive here) — confirm this is intentional.
            self._dx = 1 / 1024
            self._stencil_size = int(np.floor(self._dx / self.sph_solver.hash_grid_cell_size) + 2)
        self.reset(envs_idx=self.sim.scene._envs_idx)
def reset(self, envs_idx=None) -> None:
if self._rigid_mpm and self.mpm_solver.enable_CPIC:
if envs_idx is None:
self.mpm_rigid_normal.fill(0)
else:
self._kernel_reset_mpm(envs_idx)
if self._rigid_sph:
if envs_idx is None:
self.sph_rigid_normal.fill(0)
else:
self._kernel_reset_sph(envs_idx)
    @qd.kernel
    def _kernel_reset_mpm(self, envs_idx: qd.types.ndarray()):
        # Zero the cached MPM<->rigid contact normals for the selected envs only.
        for i_p, i_g, i_b_ in qd.ndrange(self.mpm_solver.n_particles, self.rigid_solver.n_geoms, envs_idx.shape[0]):
            self.mpm_rigid_normal[i_p, i_g, envs_idx[i_b_]] = 0.0
    @qd.kernel
    def _kernel_reset_sph(self, envs_idx: qd.types.ndarray()):
        # Zero the cached SPH<->rigid contact normals for the selected envs only.
        for i_p, i_g, i_b_ in qd.ndrange(self.sph_solver.n_particles, self.rigid_solver.n_geoms, envs_idx.shape[0]):
            self.sph_rigid_normal[i_p, i_g, envs_idx[i_b_]] = 0.0
    @qd.func
    def _func_collide_with_rigid(
        self,
        f,
        pos_world,
        vel,
        mass,
        i_b,
        geoms_state: array_class.GeomsState,
        geoms_info: array_class.GeomsInfo,
        links_state: array_class.LinksState,
        rigid_global_info: array_class.RigidGlobalInfo,
        sdf_info: array_class.SDFInfo,
        collider_static_config: qd.template(),
    ):
        # Resolve collisions of one particle (pos_world, vel, mass) in env i_b
        # against every rigid geom flagged for coupling, sequentially, and return
        # the updated velocity.
        # NOTE(review): the frame index `f` is currently unused in this body.
        for i_g in range(self.rigid_solver.n_geoms):
            if geoms_info.needs_coup[i_g]:
                vel = self._func_collide_with_rigid_geom(
                    pos_world,
                    vel,
                    mass,
                    i_g,
                    i_b,
                    geoms_state=geoms_state,
                    geoms_info=geoms_info,
                    links_state=links_state,
                    rigid_global_info=rigid_global_info,
                    sdf_info=sdf_info,
                    collider_static_config=collider_static_config,
                )
        return vel
    @qd.func
    def _func_collide_with_rigid_geom(
        self,
        pos_world,
        vel,
        mass,
        geom_idx,
        batch_idx,
        geoms_state: array_class.GeomsState,
        geoms_info: array_class.GeomsInfo,
        links_state: array_class.LinksState,
        rigid_global_info: array_class.RigidGlobalInfo,
        sdf_info: array_class.SDFInfo,
        collider_static_config: qd.template(),
    ):
        # Signed distance from the particle to the geom surface (world frame).
        signed_dist = sdf.sdf_func_world(
            geoms_state=geoms_state,
            geoms_info=geoms_info,
            sdf_info=sdf_info,
            pos_world=pos_world,
            geom_idx=geom_idx,
            batch_idx=batch_idx,
        )
        # bigger coup_softness implies that the coupling influence extends further away from the object.
        influence = qd.min(qd.exp(-signed_dist / max(1e-10, geoms_info.coup_softness[geom_idx])), 1)
        if influence > 0.1:
            # Only query the SDF normal when the particle is close enough for the
            # coupling to matter.
            normal_rigid = sdf.sdf_func_normal_world(
                geoms_state=geoms_state,
                geoms_info=geoms_info,
                rigid_global_info=rigid_global_info,
                collider_static_config=collider_static_config,
                sdf_info=sdf_info,
                pos_world=pos_world,
                geom_idx=geom_idx,
                batch_idx=batch_idx,
            )
            vel = self._func_collide_in_rigid_geom(
                pos_world,
                vel,
                mass,
                normal_rigid,
                influence,
                geom_idx,
                batch_idx,
                geoms_info,
                links_state,
                rigid_global_info,
            )
        return vel
    @qd.func
    def _func_collide_with_rigid_geom_robust(
        self,
        pos_world,
        vel,
        mass,
        normal_prev,
        geom_idx,
        batch_idx,
        geoms_state: array_class.GeomsState,
        geoms_info: array_class.GeomsInfo,
        links_state: array_class.LinksState,
        rigid_global_info: array_class.RigidGlobalInfo,
        sdf_info: array_class.SDFInfo,
        collider_static_config: qd.template(),
    ):
        """
        Similar to _func_collide_with_rigid_geom, but additionally handles potential side flip due to penetration.

        NOTE(review): the side-flip handling referred to above is currently
        commented out below, so `normal_prev` is effectively unused; the function
        still differs from the non-robust variant by always computing and
        returning the rigid normal.
        """
        signed_dist = sdf.sdf_func_world(
            geoms_state=geoms_state,
            geoms_info=geoms_info,
            sdf_info=sdf_info,
            pos_world=pos_world,
            geom_idx=geom_idx,
            batch_idx=batch_idx,
        )
        # The normal is computed unconditionally so it can be returned to the caller.
        normal_rigid = sdf.sdf_func_normal_world(
            geoms_state=geoms_state,
            geoms_info=geoms_info,
            rigid_global_info=rigid_global_info,
            collider_static_config=collider_static_config,
            sdf_info=sdf_info,
            pos_world=pos_world,
            geom_idx=geom_idx,
            batch_idx=batch_idx,
        )
        # bigger coup_softness implies that the coupling influence extends further away from the object.
        influence = qd.min(qd.exp(-signed_dist / max(1e-10, geoms_info.coup_softness[geom_idx])), 1)
        # if normal_rigid.dot(normal_prev) < 0:  # side flip due to penetration
        #     influence = 1.0
        #     normal_rigid = normal_prev
        if influence > 0.1:
            vel = self._func_collide_in_rigid_geom(
                pos_world,
                vel,
                mass,
                normal_rigid,
                influence,
                geom_idx,
                batch_idx,
                geoms_info,
                links_state,
                rigid_global_info,
            )
        # attraction force
        # if 0.001 < signed_dist < 0.01:
        #     vel = vel - normal_rigid * 0.1 * signed_dist
        return vel, normal_rigid
    @qd.func
    def _func_collide_in_rigid_geom(
        self,
        pos_world,
        vel,
        mass,
        normal_rigid,
        influence,
        geom_idx,
        i_b,
        geoms_info: array_class.GeomsInfo,
        links_state: array_class.LinksState,
        rigid_global_info: array_class.RigidGlobalInfo,
    ):
        """
        Resolves collision when a particle is already in collision with a rigid object.
        This function assumes known normal_rigid and influence.
        """
        # Velocity of the rigid body evaluated at the particle's position.
        vel_rigid = self.rigid_solver._func_vel_at_point(
            pos_world=pos_world,
            link_idx=geoms_info.link_idx[geom_idx],
            i_b=i_b,
            links_state=links_state,
        )
        # v w.r.t rigid
        rvel = vel - vel_rigid
        rvel_normal_magnitude = rvel.dot(normal_rigid)  # negative if inward
        if rvel_normal_magnitude < 0:  # colliding
            #################### rigid -> particle ####################
            # tangential component
            rvel_tan = rvel - rvel_normal_magnitude * normal_rigid
            rvel_tan_norm = rvel_tan.norm(gs.EPS)
            # tangential component after friction: reduced by coup_friction times
            # the (negative) normal speed, floored at zero
            rvel_tan = (
                rvel_tan
                / rvel_tan_norm
                * qd.max(0, rvel_tan_norm + rvel_normal_magnitude * geoms_info.coup_friction[geom_idx])
            )
            # normal component after collision, scaled by coup_restitution
            rvel_normal = -normal_rigid * rvel_normal_magnitude * geoms_info.coup_restitution[geom_idx]
            # normal + tangential component
            rvel_new = rvel_tan + rvel_normal
            # apply influence: blend between resolved and untouched relative velocity
            vel_old = vel
            vel = vel_rigid + rvel_new * influence + rvel * (1 - influence)
            #################### particle -> rigid ####################
            # Compute delta momentum and apply the reaction force to the rigid body.
            delta_mv = mass * (vel - vel_old)
            force = -delta_mv / rigid_global_info.substep_dt[None]
            self.rigid_solver._func_apply_coupling_force(
                pos_world,
                force,
                geoms_info.link_idx[geom_idx],
                i_b,
                links_state,
            )
        return vel
    @qd.func
    def _func_mpm_tool(self, f, pos_world, vel, i_b):
        # Let every collision-enabled tool entity adjust the grid velocity in turn.
        for entity in qd.static(self.tool_solver.entities):
            if qd.static(entity.material.collision):
                vel = entity.collide(f, pos_world, vel, i_b)
        return vel
    @qd.kernel
    def mpm_grid_op(
        self,
        f: qd.i32,
        t: qd.f32,
        geoms_state: array_class.GeomsState,
        geoms_info: array_class.GeomsInfo,
        links_state: array_class.LinksState,
        rigid_global_info: array_class.RigidGlobalInfo,
        sdf_info: array_class.SDFInfo,
        collider_static_config: qd.template(),
    ):
        # Per-grid-cell MPM update for substep f at time t: convert momentum to
        # velocity, apply gravity and force fields, then couple the cell velocity
        # with tools, rigid bodies, SPH, and PBD before imposing boundary conditions.
        for ii, jj, kk, i_b in qd.ndrange(*self.mpm_solver.grid_res, self.mpm_solver._B):
            I = (ii, jj, kk)
            # Skip (near-)empty cells.
            if self.mpm_solver.grid[f, I, i_b].mass > gs.EPS:
                #################### MPM grid op ####################
                # Momentum to velocity
                vel_mpm = (1 / self.mpm_solver.grid[f, I, i_b].mass) * self.mpm_solver.grid[f, I, i_b].vel_in
                # gravity
                vel_mpm += self.mpm_solver.substep_dt * self.mpm_solver._gravity[i_b]
                # World-space cell center and unscaled cell mass.
                pos = (I + self.mpm_solver.grid_offset) * self.mpm_solver.dx
                mass_mpm = self.mpm_solver.grid[f, I, i_b].mass / self.mpm_solver._particle_volume_scale
                # external force fields
                for i_ff in qd.static(range(len(self.mpm_solver._ffs))):
                    vel_mpm += self.mpm_solver._ffs[i_ff].get_acc(pos, vel_mpm, t, -1) * self.mpm_solver.substep_dt
                #################### MPM <-> Tool ####################
                if qd.static(self.tool_solver.is_active):
                    vel_mpm = self._func_mpm_tool(f, pos, vel_mpm, i_b)
                #################### MPM <-> Rigid ####################
                if qd.static(self._rigid_mpm):
                    vel_mpm = self._func_collide_with_rigid(
                        f,
                        pos,
                        vel_mpm,
                        mass_mpm,
                        i_b,
                        geoms_state=geoms_state,
                        geoms_info=geoms_info,
                        links_state=links_state,
                        rigid_global_info=rigid_global_info,
                        sdf_info=sdf_info,
                        collider_static_config=collider_static_config,
                    )
                #################### MPM <-> SPH ####################
                if qd.static(self._mpm_sph):
                    # using the lower corner of MPM cell to find the corresponding SPH base cell
                    base = self.sph_solver.sh.pos_to_grid(pos - 0.5 * self.mpm_solver.dx)
                    # ---------- SPH -> MPM ----------
                    # Average the velocity of all SPH particles overlapping this cell.
                    sph_vel = qd.Vector([0.0, 0.0, 0.0])
                    colliding_particles = 0
                    for offset in qd.grouped(
                        qd.ndrange(self.mpm_sph_stencil_size, self.mpm_sph_stencil_size, self.mpm_sph_stencil_size)
                    ):
                        slot_idx = self.sph_solver.sh.grid_to_slot(base + offset)
                        for i in range(
                            self.sph_solver.sh.slot_start[slot_idx, i_b],
                            self.sph_solver.sh.slot_start[slot_idx, i_b] + self.sph_solver.sh.slot_size[slot_idx, i_b],
                        ):
                            if (
                                qd.abs(pos - self.sph_solver.particles_reordered.pos[i, i_b]).max()
                                < self.mpm_solver.dx * 0.5
                            ):
                                sph_vel += self.sph_solver.particles_reordered.vel[i, i_b]
                                colliding_particles += 1
                    if colliding_particles > 0:
                        vel_old = vel_mpm
                        vel_mpm = sph_vel / colliding_particles
                        # ---------- MPM -> SPH ----------
                        # Distribute the momentum change back onto the same particles.
                        delta_mv = mass_mpm * (vel_mpm - vel_old)
                        for offset in qd.grouped(
                            qd.ndrange(self.mpm_sph_stencil_size, self.mpm_sph_stencil_size, self.mpm_sph_stencil_size)
                        ):
                            slot_idx = self.sph_solver.sh.grid_to_slot(base + offset)
                            for i in range(
                                self.sph_solver.sh.slot_start[slot_idx, i_b],
                                self.sph_solver.sh.slot_start[slot_idx, i_b]
                                + self.sph_solver.sh.slot_size[slot_idx, i_b],
                            ):
                                if (
                                    qd.abs(pos - self.sph_solver.particles_reordered.pos[i, i_b]).max()
                                    < self.mpm_solver.dx * 0.5
                                ):
                                    self.sph_solver.particles_reordered[i, i_b].vel = (
                                        self.sph_solver.particles_reordered[i, i_b].vel
                                        - delta_mv / self.sph_solver.particles_info_reordered[i, i_b].mass
                                    )
                #################### MPM <-> PBD ####################
                if qd.static(self._mpm_pbd):
                    # using the lower corner of MPM cell to find the corresponding PBD base cell
                    base = self.pbd_solver.sh.pos_to_grid(pos - 0.5 * self.mpm_solver.dx)
                    # ---------- PBD -> MPM ----------
                    # Average the velocity of all PBD particles overlapping this cell.
                    pbd_vel = qd.Vector([0.0, 0.0, 0.0])
                    colliding_particles = 0
                    for offset in qd.grouped(
                        qd.ndrange(self.mpm_pbd_stencil_size, self.mpm_pbd_stencil_size, self.mpm_pbd_stencil_size)
                    ):
                        slot_idx = self.pbd_solver.sh.grid_to_slot(base + offset)
                        for i in range(
                            self.pbd_solver.sh.slot_start[slot_idx, i_b],
                            self.pbd_solver.sh.slot_start[slot_idx, i_b] + self.pbd_solver.sh.slot_size[slot_idx, i_b],
                        ):
                            if (
                                qd.abs(pos - self.pbd_solver.particles_reordered.pos[i, i_b]).max()
                                < self.mpm_solver.dx * 0.5
                            ):
                                pbd_vel += self.pbd_solver.particles_reordered.vel[i, i_b]
                                colliding_particles += 1
                    if colliding_particles > 0:
                        vel_old = vel_mpm
                        vel_mpm = pbd_vel / colliding_particles
                        # ---------- MPM -> PBD ----------
                        # Distribute the momentum change back, but only onto free particles.
                        delta_mv = mass_mpm * (vel_mpm - vel_old)
                        for offset in qd.grouped(
                            qd.ndrange(self.mpm_pbd_stencil_size, self.mpm_pbd_stencil_size, self.mpm_pbd_stencil_size)
                        ):
                            slot_idx = self.pbd_solver.sh.grid_to_slot(base + offset)
                            for i in range(
                                self.pbd_solver.sh.slot_start[slot_idx, i_b],
                                self.pbd_solver.sh.slot_start[slot_idx, i_b]
                                + self.pbd_solver.sh.slot_size[slot_idx, i_b],
                            ):
                                if (
                                    qd.abs(pos - self.pbd_solver.particles_reordered.pos[i, i_b]).max()
                                    < self.mpm_solver.dx * 0.5
                                ):
                                    if self.pbd_solver.particles_reordered[i, i_b].free:
                                        self.pbd_solver.particles_reordered[i, i_b].vel = (
                                            self.pbd_solver.particles_reordered[i, i_b].vel
                                            - delta_mv / self.pbd_solver.particles_info_reordered[i, i_b].mass
                                        )
                #################### MPM boundary ####################
                _, self.mpm_solver.grid[f, I, i_b].vel_out = self.mpm_solver.boundary.impose_pos_vel(pos, vel_mpm)
    @qd.kernel
    def mpm_surface_to_particle(
        self,
        f: qd.i32,
        geoms_state: array_class.GeomsState,
        geoms_info: array_class.GeomsInfo,
        sdf_info: array_class.SDFInfo,
        rigid_global_info: array_class.RigidGlobalInfo,
        collider_static_config: qd.template(),
    ):
        # Refresh the cached rigid-SDF normal of every active MPM particle w.r.t.
        # every coupling-enabled rigid geom.
        for i_p, i_b in qd.ndrange(self.mpm_solver.n_particles, self.mpm_solver._B):
            if self.mpm_solver.particles_ng[f, i_p, i_b].active:
                for i_g in range(self.rigid_solver.n_geoms):
                    if geoms_info.needs_coup[i_g]:
                        sdf_normal = sdf.sdf_func_normal_world(
                            geoms_state=geoms_state,
                            geoms_info=geoms_info,
                            rigid_global_info=rigid_global_info,
                            collider_static_config=collider_static_config,
                            sdf_info=sdf_info,
                            pos_world=self.mpm_solver.particles[f, i_p, i_b].pos,
                            geom_idx=i_g,
                            batch_idx=i_b,
                        )
                        # Only update the cached normal while it agrees with the new
                        # one (dot >= 0), i.e. the particle stays on the same side of
                        # the geom; this keeps the normal stable across penetration.
                        if sdf_normal.dot(self.mpm_rigid_normal[i_p, i_g, i_b]) >= 0:
                            self.mpm_rigid_normal[i_p, i_g, i_b] = sdf_normal
def fem_rigid_link_constraints(self):
if self.fem_solver._constraints_initialized and self.rigid_solver.is_active:
self.fem_solver._kernel_update_linked_vertex_constraints(self.rigid_solver.links_state)
    @qd.kernel
    def fem_surface_force(
        self,
        f: qd.i32,
        geoms_state: array_class.GeomsState,
        geoms_info: array_class.GeomsInfo,
        links_state: array_class.LinksState,
        rigid_global_info: array_class.RigidGlobalInfo,
        sdf_info: array_class.SDFInfo,
        collider_static_config: qd.template(),
    ):
        """
        Apply FEM surface coupling for substep `f`: FEM<->Rigid, FEM<->MPM,
        FEM<->SPH (all on surface-triangle vertices), then FEM boundary conditions.

        Velocities at frame `f + 1` are updated in place in
        `self.fem_solver.elements_v`. The rigid/MPM/SPH branches are compiled in or
        out via `qd.static` flags (`self._rigid_fem`, `self._fem_mpm`, `self._fem_sph`).
        """
        # TODO: all collisions are on vertices instead of surface and edge
        for i_s, i_b in qd.ndrange(self.fem_solver.n_surfaces, self.fem_solver._B):
            if self.fem_solver.surface[i_s].active:
                dt = self.fem_solver.substep_dt  # NOTE(review): appears unused below
                iel = self.fem_solver.surface[i_s].tri2el
                mass = self.fem_solver.elements_i[iel].mass_scaled / self.fem_solver.vol_scale
                # Triangle geometry and (normalized) outward normal of this surface face.
                p1 = self.fem_solver.elements_v[f, self.fem_solver.surface[i_s].tri2v[0], i_b].pos
                p2 = self.fem_solver.elements_v[f, self.fem_solver.surface[i_s].tri2v[1], i_b].pos
                p3 = self.fem_solver.elements_v[f, self.fem_solver.surface[i_s].tri2v[2], i_b].pos
                u = p2 - p1
                v = p3 - p1
                surface_normal = qd.math.cross(u, v)
                surface_normal = surface_normal / surface_normal.norm(gs.EPS)
                # FEM <-> Rigid
                if qd.static(self._rigid_fem):
                    # NOTE: collision only on surface vertices
                    for j in qd.static(range(3)):
                        iv = self.fem_solver.surface[i_s].tri2v[j]
                        vel_fem_sv = self._func_collide_with_rigid(
                            f,
                            self.fem_solver.elements_v[f, iv, i_b].pos,
                            self.fem_solver.elements_v[f + 1, iv, i_b].vel,
                            mass / 3.0,  # assume element mass uniformly distributed to vertices
                            i_b,
                            geoms_state,
                            geoms_info,
                            links_state,
                            rigid_global_info,
                            sdf_info,
                            collider_static_config,
                        )
                        self.fem_solver.elements_v[f + 1, iv, i_b].vel = vel_fem_sv
                # FEM <-> MPM (interact with MPM grid instead of particles)
                # NOTE: not doing this in mpm_grid_op otherwise we need to search for fem surface for each particles
                # however, this function is called after mpm boundary conditions.
                if qd.static(self._fem_mpm):
                    for j in qd.static(range(3)):
                        iv = self.fem_solver.surface[i_s].tri2v[j]
                        pos = self.fem_solver.elements_v[f, iv, i_b].pos
                        vel_fem_sv = self.fem_solver.elements_v[f + 1, iv, i_b].vel
                        mass_fem_sv = mass / 4.0  # assume element mass uniformly distributed
                        # follow MPM p2g scheme: quadratic B-spline weights over the stencil
                        vel_mpm = qd.Vector([0.0, 0.0, 0.0])
                        mass_mpm = 0.0
                        mpm_base = qd.floor(pos * self.mpm_solver.inv_dx - 0.5).cast(gs.qd_int)
                        mpm_fx = pos * self.mpm_solver.inv_dx - mpm_base.cast(gs.qd_float)
                        mpm_w = [0.5 * (1.5 - mpm_fx) ** 2, 0.75 - (mpm_fx - 1.0) ** 2, 0.5 * (mpm_fx - 0.5) ** 2]
                        new_vel_fem_sv = vel_fem_sv
                        for mpm_offset in qd.static(qd.grouped(self.mpm_solver.stencil_range())):
                            mpm_grid_I = mpm_base - self.mpm_solver.grid_offset + mpm_offset
                            mpm_grid_mass = (
                                self.mpm_solver.grid[f, mpm_grid_I, i_b].mass / self.mpm_solver.particle_volume_scale
                            )
                            mpm_weight = gs.qd_float(1.0)
                            for d in qd.static(range(3)):
                                mpm_weight *= mpm_w[mpm_offset[d]][d]
                            # FEM -> MPM: only couple with grid cells on/behind the surface.
                            mpm_grid_pos = (mpm_grid_I + self.mpm_solver.grid_offset) * self.mpm_solver.dx
                            signed_dist = (mpm_grid_pos - pos).dot(surface_normal)
                            if signed_dist <= self.mpm_solver.dx:  # NOTE: use dx as minimal unit for collision
                                vel_mpm_at_cell = mpm_weight * self.mpm_solver.grid[f, mpm_grid_I, i_b].vel_out
                                mass_mpm_at_cell = mpm_weight * mpm_grid_mass
                                vel_mpm += vel_mpm_at_cell
                                mass_mpm += mass_mpm_at_cell
                                if mass_mpm_at_cell > gs.EPS:
                                    delta_mpm_vel_at_cell_unmul = (
                                        vel_fem_sv * mpm_weight - self.mpm_solver.grid[f, mpm_grid_I, i_b].vel_out
                                    )
                                    mass_mul_at_cell = (
                                        mpm_grid_mass / mass_fem_sv
                                    )  # NOTE: use un-reweighted mass instead of mass_mpm_at_cell
                                    delta_mpm_vel_at_cell = delta_mpm_vel_at_cell_unmul * mass_mul_at_cell
                                    self.mpm_solver.grid[f, mpm_grid_I, i_b].vel_out += delta_mpm_vel_at_cell
                                    # Momentum conservation: give the opposite change to the FEM vertex.
                                    new_vel_fem_sv -= delta_mpm_vel_at_cell * mass_mpm_at_cell / mass_fem_sv
                        # MPM -> FEM
                        if mass_mpm > gs.EPS:
                            # delta_mv = (vel_mpm - vel_fem_sv) * mass_mpm
                            # delta_vel_fem_sv = delta_mv / mass_fem_sv
                            # self.fem_solver.elements_v[f + 1, iv].vel += delta_vel_fem_sv
                            self.fem_solver.elements_v[f + 1, iv, i_b].vel = new_vel_fem_sv
                # FEM <-> SPH TODO: this doesn't work well
                if qd.static(self._fem_sph):
                    for j in qd.static(range(3)):
                        iv = self.fem_solver.surface[i_s].tri2v[j]
                        pos = self.fem_solver.elements_v[f, iv, i_b].pos
                        vel_fem_sv = self.fem_solver.elements_v[f + 1, iv, i_b].vel
                        mass_fem_sv = mass / 4.0
                        dx = self.sph_solver.hash_grid_cell_size  # self._dx
                        stencil_size = 2  # self._stencil_size
                        base = self.sph_solver.sh.pos_to_grid(pos - 0.5 * dx)
                        # ---------- SPH -> FEM ----------
                        # Average the velocity of all SPH particles within half a cell.
                        sph_vel = qd.Vector([0.0, 0.0, 0.0])
                        colliding_particles = 0
                        for offset in qd.grouped(qd.ndrange(stencil_size, stencil_size, stencil_size)):
                            slot_idx = self.sph_solver.sh.grid_to_slot(base + offset)
                            for k in range(
                                self.sph_solver.sh.slot_start[slot_idx, i_b],
                                self.sph_solver.sh.slot_start[slot_idx, i_b]
                                + self.sph_solver.sh.slot_size[slot_idx, i_b],
                            ):
                                if qd.abs(pos - self.sph_solver.particles_reordered.pos[k, i_b]).max() < dx * 0.5:
                                    sph_vel += self.sph_solver.particles_reordered.vel[k, i_b]
                                    colliding_particles += 1
                        if colliding_particles > 0:
                            vel_old = vel_fem_sv
                            vel_fem_sv_unprojected = sph_vel / colliding_particles
                            vel_fem_sv = (
                                vel_fem_sv_unprojected.dot(surface_normal) * surface_normal
                            )  # exclude tangential velocity
                            # ---------- FEM -> SPH ----------
                            # Distribute the opposite momentum change over the same particles.
                            delta_mv = mass_fem_sv * (vel_fem_sv - vel_old)
                            for offset in qd.grouped(qd.ndrange(stencil_size, stencil_size, stencil_size)):
                                slot_idx = self.sph_solver.sh.grid_to_slot(base + offset)
                                for k in range(
                                    self.sph_solver.sh.slot_start[slot_idx, i_b],
                                    self.sph_solver.sh.slot_start[slot_idx, i_b]
                                    + self.sph_solver.sh.slot_size[slot_idx, i_b],
                                ):
                                    if qd.abs(pos - self.sph_solver.particles_reordered.pos[k, i_b]).max() < dx * 0.5:
                                        self.sph_solver.particles_reordered[k, i_b].vel = (
                                            self.sph_solver.particles_reordered[k, i_b].vel
                                            - delta_mv / self.sph_solver.particles_info_reordered[k, i_b].mass
                                        )
                            self.fem_solver.elements_v[f + 1, iv, i_b].vel = vel_fem_sv
                # boundary condition
                for j in qd.static(range(3)):
                    iv = self.fem_solver.surface[i_s].tri2v[j]
                    _, self.fem_solver.elements_v[f + 1, iv, i_b].vel = self.fem_solver.boundary.impose_pos_vel(
                        self.fem_solver.elements_v[f, iv, i_b].pos, self.fem_solver.elements_v[f + 1, iv, i_b].vel
                    )
def fem_hydroelastic(self, f: qd.i32):
# Floor contact
# collision detection
self.fem_solver.floor_hydroelastic_detection(f)
    @qd.kernel
    def sph_rigid(
        self,
        f: qd.i32,
        geoms_state: array_class.GeomsState,
        geoms_info: array_class.GeomsInfo,
        links_state: array_class.LinksState,
        rigid_global_info: array_class.RigidGlobalInfo,
        sdf_info: array_class.SDFInfo,
        collider_static_config: qd.template(),
    ):
        """
        Resolve collisions between active SPH particles and all coupling-enabled
        rigid geoms.

        Updates each particle's velocity and the cached contact normal
        (`self.sph_rigid_normal_reordered`) in place via
        `_func_collide_with_rigid_geom_robust`.

        NOTE(review): the substep index `f` is accepted but not used in this body.
        """
        for i_p, i_b in qd.ndrange(self.sph_solver._n_particles, self.sph_solver._B):
            if self.sph_solver.particles_ng_reordered[i_p, i_b].active:
                for i_g in range(self.rigid_solver.n_geoms):
                    if geoms_info.needs_coup[i_g]:
                        (
                            self.sph_solver.particles_reordered[i_p, i_b].vel,
                            self.sph_rigid_normal_reordered[i_p, i_g, i_b],
                        ) = self._func_collide_with_rigid_geom_robust(
                            self.sph_solver.particles_reordered[i_p, i_b].pos,
                            self.sph_solver.particles_reordered[i_p, i_b].vel,
                            self.sph_solver.particles_info_reordered[i_p, i_b].mass,
                            self.sph_rigid_normal_reordered[i_p, i_g, i_b],
                            i_g,
                            i_b,
                            geoms_state,
                            geoms_info,
                            links_state,
                            rigid_global_info,
                            sdf_info,
                            collider_static_config,
                        )
@qd.kernel
def kernel_pbd_rigid_collide(
self,
geoms_state: array_class.GeomsState,
geoms_info: array_class.GeomsInfo,
links_state: array_class.LinksState,
sdf_info: array_class.SDFInfo,
rigid_global_info: array_class.RigidGlobalInfo,
collider_static_config: qd.template(),
):
for i_p, i_b in qd.ndrange(self.pbd_solver._n_particles, self.sph_solver._B):
if self.pbd_solver.particles_ng_reordered[i_p, i_b].active:
# NOTE: Couldn't figure out a good way to handle collision with non-free particle. Such collision is not phsically plausible anyway.
for i_g in range(self.rigid_solver.n_geoms):
if geoms_info.needs_coup[i_g]:
(
self.pbd_solver.particles_reordered[i_p, i_b].pos,
self.pbd_solver.particles_reordered[i_p, i_b].vel,
self.pbd_rigid_normal_reordered[i_p, i_b, i_g],
) = self._func_pbd_collide_with_rigid_geom(
i_p,
self.pbd_solver.particles_reordered[i_p, i_b].pos,
self.pbd_solver.particles_reordered[i_p, i_b].vel,
self.pbd_solver.particles_info_reordered[i_p, i_b].mass,
self.pbd_rigid_normal_reordered[i_p, i_b, i_g],
i_g,
i_b,
geoms_state,
geoms_info,
links_state,
sdf_info,
rigid_global_info,
collider_static_config,
)
    @qd.kernel
    def kernel_attach_pbd_to_rigid_link(
        self,
        particles_idx: qd.types.ndarray(),
        envs_idx: qd.types.ndarray(),
        link_idx: qd.i32,
        links_state: LinksState,
    ) -> None:
        """
        Sets listed particles in listed environments to be animated by the link.

        The particle's current position relative to the link is computed and stored,
        so the particle can later be driven rigidly by the link
        (see `kernel_pbd_rigid_solve_animate_particles_by_link`).

        Parameters
        ----------
        particles_idx : qd.types.ndarray
            Particle indices, indexed as [env, particle] (shape[1] is the particle count).
        envs_idx : qd.types.ndarray
            Environment indices to operate on.
        link_idx : qd.i32
            Index of the rigid link that will drive the particles.
        links_state : LinksState
            Rigid-solver link state providing the link pose.
        """
        pdb = self.pbd_solver  # NOTE(review): local name looks like a typo for `pbd`
        for i_p_, i_b_ in qd.ndrange(particles_idx.shape[1], envs_idx.shape[0]):
            i_p = particles_idx[i_b_, i_p_]
            i_b = envs_idx[i_b_]
            link_pos = links_state.pos[link_idx, i_b]
            link_quat = links_state.quat[link_idx, i_b]
            # compute local offset from link to the particle
            world_pos = pdb.particles[i_p, i_b].pos
            local_pos = qd_inv_transform_by_trans_quat(world_pos, link_pos, link_quat)
            # set particle to be animated (not free) and store animation info
            pdb.particles[i_p, i_b].free = False
            self.particle_attach_info[i_p, i_b].link_idx = link_idx
            self.particle_attach_info[i_p, i_b].local_pos = local_pos
    @qd.kernel
    def kernel_pbd_rigid_clear_animate_particles_by_link(
        self,
        particles_idx: qd.types.ndarray(),
        envs_idx: qd.types.ndarray(),
    ) -> None:
        """
        Detach listed particles from links, and simulate them freely again.

        Inverse of `kernel_attach_pbd_to_rigid_link`: marks the particles free and
        clears their attachment record (link index -1, zero local offset).
        """
        pdb = self.pbd_solver  # NOTE(review): local name looks like a typo for `pbd`
        for i_p_, i_b_ in qd.ndrange(particles_idx.shape[1], envs_idx.shape[0]):
            i_p = particles_idx[i_b_, i_p_]
            i_b = envs_idx[i_b_]
            pdb.particles[i_p, i_b].free = True
            self.particle_attach_info[i_p, i_b].link_idx = -1
            self.particle_attach_info[i_p, i_b].local_pos = qd.math.vec3([0.0, 0.0, 0.0])
    @qd.kernel
    def kernel_pbd_rigid_solve_animate_particles_by_link(self, clamped_inv_dt: qd.f32, links_state: LinksState):
        """
        Iterates over all particles and environments, and sets a corrective velocity
        for every animated (link-attached) particle.

        Computes the target position and velocity from the attachment/reference link
        and the stored local offset position.

        Note that this step should be done after the rigid solver update and before
        the PBD solver update. Currently, it is done after both rigid and PBD solver
        updates, hence the corrective velocity is off by a frame.

        Note: it is advised to clamp inv_dt to avoid large jerks and instability;
        1/0.02 might be a good max value.
        """
        pdb = self.pbd_solver
        for i_p, i_env in qd.ndrange(pdb._n_particles, pdb._B):
            if self.particle_attach_info[i_p, i_env].link_idx >= 0:
                # read link state
                link_idx = self.particle_attach_info[i_p, i_env].link_idx
                link_pos = links_state.pos[link_idx, i_env]
                link_quat = links_state.quat[link_idx, i_env]
                link_lin_vel = links_state.cd_vel[link_idx, i_env]
                link_ang_vel = links_state.cd_ang[link_idx, i_env]
                link_com_in_world = links_state.root_COM[link_idx, i_env] + links_state.i_pos[link_idx, i_env]
                # calculate target pos and vel of the particle
                local_pos = self.particle_attach_info[i_p, i_env].local_pos
                target_world_pos = qd_transform_by_trans_quat(local_pos, link_pos, link_quat)
                world_arm = target_world_pos - link_com_in_world
                target_world_vel = link_lin_vel + link_ang_vel.cross(world_arm)
                # compute and apply corrective velocity (position error times clamped 1/dt)
                i_rp = pdb.particles_ng[i_p, i_env].reordered_idx
                particle_pos = pdb.particles_reordered[i_rp, i_env].pos
                pos_correction = target_world_pos - particle_pos
                corrective_vel = pos_correction * clamped_inv_dt
                pdb.particles_reordered[i_rp, i_env].vel = corrective_vel + target_world_vel
    @qd.func
    def _func_pbd_collide_with_rigid_geom(
        self,
        i,
        pos_world,
        vel,
        mass,
        normal_prev,
        geom_idx,
        batch_idx,
        geoms_state: array_class.GeomsState,
        geoms_info: array_class.GeomsInfo,
        links_state: array_class.LinksState,
        sdf_info: array_class.SDFInfo,
        rigid_global_info: array_class.RigidGlobalInfo,
        collider_static_config: qd.template(),
    ):
        """
        Resolves collision when a particle is already in collision with a rigid object.
        This function assumes known normal_rigid and influence.

        Projects a penetrating particle out of the geom along the SDF normal, derives
        the new velocity from the positional change (PBD style), and applies the
        opposite impulse to the rigid link as a coupling force.

        Returns
        -------
        (new_pos, new_vel, contact_normal); pos/vel are returned unchanged for
        non-penetrating particles. NOTE(review): `normal_prev` is not used in this
        body -- confirm whether it is still needed.
        """
        signed_dist = sdf.sdf_func_world(
            geoms_state=geoms_state,
            geoms_info=geoms_info,
            sdf_info=sdf_info,
            pos_world=pos_world,
            geom_idx=geom_idx,
            batch_idx=batch_idx,
        )
        contact_normal = sdf.sdf_func_normal_world(
            geoms_state=geoms_state,
            geoms_info=geoms_info,
            rigid_global_info=rigid_global_info,
            collider_static_config=collider_static_config,
            sdf_info=sdf_info,
            pos_world=pos_world,
            geom_idx=geom_idx,
            batch_idx=batch_idx,
        )
        new_pos = pos_world
        new_vel = vel
        if signed_dist < self.pbd_solver.particle_size / 2:  # skip non-penetration particles
            stiffness = 1.0  # value in [0, 1]
            # we don't consider friction for now
            # friction = 0.15
            # vel_rigid = self.rigid_solver._func_vel_at_point(
            #     pos_world=pos_world,
            #     link_idx=geoms_info.link_idx[geom_idx],
            #     i_b=batch_idx,
            #     links_state=links_state,
            # )
            # rvel = vel - vel_rigid
            # rvel_normal_magnitude = rvel.dot(contact_normal)  # negative if inward
            # rvel_tan = rvel - rvel_normal_magnitude * contact_normal
            # rvel_tan_norm = rvel_tan.norm(gs.EPS)
            #################### rigid -> particle ####################
            energy_loss = 0.0  # value in [0, 1]
            # Push the particle out along the contact normal so its center sits one
            # particle radius away from the surface.
            new_pos = pos_world + stiffness * contact_normal * (self.pbd_solver.particle_size / 2 - signed_dist)
            prev_pos = self.pbd_solver.particles_reordered[i, batch_idx].ipos
            # PBD-style velocity update: displacement over the substep.
            new_vel = (new_pos - prev_pos) / self.pbd_solver._substep_dt
            #################### particle -> rigid ####################
            delta_mv = mass * (new_vel - vel)
            force = (-delta_mv / self.rigid_solver._substep_dt) * (1 - energy_loss)
            self.rigid_solver._func_apply_coupling_force(
                pos_world,
                force,
                geoms_info.link_idx[geom_idx],
                batch_idx,
                links_state,
            )
        return new_pos, new_vel, contact_normal
def preprocess(self, f):
# preprocess for MPM CPIC
if self._rigid_mpm and self.mpm_solver.enable_CPIC:
self.mpm_surface_to_particle(
f,
self.rigid_solver.geoms_state,
self.rigid_solver.geoms_info,
self.rigid_solver.collider._sdf._sdf_info,
self.rigid_solver._rigid_global_info,
self.rigid_solver.collider._collider_static_config,
)
def couple(self, f):
# MPM <-> all others
if self.mpm_solver.is_active:
self.mpm_grid_op(
f,
self.sim.cur_t,
geoms_state=self.rigid_solver.geoms_state,
geoms_info=self.rigid_solver.geoms_info,
links_state=self.rigid_solver.links_state,
rigid_global_info=self.rigid_solver._rigid_global_info,
sdf_info=self.rigid_solver.collider._sdf._sdf_info,
collider_static_config=self.rigid_solver.collider._collider_static_config,
)
# SPH <-> Rigid
if self._rigid_sph:
self.sph_rigid(
f,
self.rigid_solver.geoms_state,
self.rigid_solver.geoms_info,
self.rigid_solver.links_state,
self.rigid_solver._rigid_global_info,
self.rigid_solver.collider._sdf._sdf_info,
self.rigid_solver.collider._collider_static_config,
)
# PBD <-> Rigid
if self._rigid_pbd:
self.kernel_pbd_rigid_collide(
geoms_state=self.rigid_solver.geoms_state,
geoms_info=self.rigid_solver.geoms_info,
links_state=self.rigid_solver.links_state,
sdf_info=self.rigid_solver.collider._sdf._sdf_info,
rigid_global_info=self.rigid_solver._rigid_global_info,
collider_static_config=self.rigid_solver.collider._collider_static_config,
)
# 1-way: animate particles by links
full_step_inv_dt = 1.0 / self.pbd_solver._dt
clamped_inv_dt = min(full_step_inv_dt, CLAMPED_INV_DT)
self.kernel_pbd_rigid_solve_animate_particles_by_link(clamped_inv_dt, self.rigid_solver.links_state)
if self.fem_solver.is_active:
self.fem_surface_force(
f,
self.rigid_solver.geoms_state,
self.rigid_solver.geoms_info,
self.rigid_solver.links_state,
self.rigid_solver._rigid_global_info,
self.rigid_solver.collider._sdf._sdf_info,
self.rigid_solver.collider._collider_static_config,
)
self.fem_rigid_link_constraints()
def couple_grad(self, f):
if self.fem_solver.is_active:
self.fem_surface_force.grad(
f,
self.rigid_solver.geoms_state,
self.rigid_solver.geoms_info,
self.rigid_solver.links_state,
self.rigid_solver._rigid_global_info,
self.rigid_solver.collider._sdf._sdf_info,
self.rigid_solver.collider._collider_static_config,
)
if self.mpm_solver.is_active:
self.mpm_grid_op.grad(
f,
self.sim.cur_t,
geoms_state=self.rigid_solver.geoms_state,
geoms_info=self.rigid_solver.geoms_info,
links_state=self.rigid_solver.links_state,
rigid_global_info=self.rigid_solver._rigid_global_info,
sdf_info=self.rigid_solver.collider._sdf._sdf_info,
collider_static_config=self.rigid_solver.collider._collider_static_config,
)
@property
def active_solvers(self):
"""All the active solvers managed by the scene's simulator."""
return self.sim.active_solvers
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/couplers/legacy_coupler.py",
"license": "Apache License 2.0",
"lines": 909,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/couplers/sap_coupler.py | from typing import TYPE_CHECKING
import math
import igl
import numpy as np
import quadrants as qd
import genesis as gs
import genesis.utils.element as eu
import genesis.utils.array_class as array_class
import genesis.utils.geom as gu
from genesis.constants import IntEnum
from genesis.engine.bvh import AABB, LBVH, FEMSurfaceTetLBVH, RigidTetLBVH
from genesis.options.solvers import SAPCouplerOptions
from genesis.repr_base import RBC
if TYPE_CHECKING:
from genesis.engine.simulator import Simulator
# Marching-tetrahedra lookup table: one row per 4-bit vertex sign pattern (16 cases).
# Each row lists up to 4 tet-edge indices (into TET_EDGES below) crossed by the
# iso-surface; -1 entries are padding for cases with fewer crossed edges.
MARCHING_TETS_EDGE_TABLE = (
    (-1, -1, -1, -1),
    (0, 3, 2, -1),
    (0, 1, 4, -1),
    (4, 3, 2, 1),
    (1, 2, 5, -1),
    (0, 3, 5, 1),
    (0, 2, 5, 4),
    (3, 5, 4, -1),
    (3, 4, 5, -1),
    (4, 5, 2, 0),
    (1, 5, 3, 0),
    (1, 5, 2, -1),
    (1, 2, 3, 4),
    (0, 4, 1, -1),
    (0, 2, 3, -1),
    (-1, -1, -1, -1),
)
# The 6 edges of a tetrahedron as (vertex, vertex) index pairs.
TET_EDGES = (
    (0, 1),
    (1, 2),
    (2, 0),
    (0, 3),
    (1, 3),
    (2, 3),
)
# Cosine threshold for whether two vectors are considered to be in the same direction. Set to zero for strictly positive.
COS_ANGLE_THRESHOLD = math.cos(math.pi * 5.0 / 8.0)
# An estimate of the maximum number of contact pairs per AABB query.
MAX_N_QUERY_RESULT_PER_AABB = 32
class FEMFloorContactType(IntEnum):
    """
    Enum for FEM floor contact types.

    Selected from `SAPCouplerOptions.fem_floor_contact_type`
    ('none' / 'tet' / 'vert').
    """

    NONE = 0  # No contact
    TET = 1  # Tetrahedral contact
    VERT = 2  # Vertex contact
class RigidFloorContactType(IntEnum):
    """
    Enum for rigid floor contact types.

    Selected from `SAPCouplerOptions.rigid_floor_contact_type`
    ('none' / 'vert' / 'tet').
    """

    NONE = 0  # No contact
    VERT = 1  # Vertex contact
    TET = 2  # Tetrahedral contact
class RigidRigidContactType(IntEnum):
    """
    Enum for rigid-rigid contact types.

    Selected from `SAPCouplerOptions.rigid_rigid_contact_type` ('none' / 'tet').
    """

    NONE = 0  # No contact
    TET = 1  # Tetrahedral contact
@qd.func
def tri_barycentric(p, tri_vertices, normal):
    """
    Compute the barycentric coordinates of point p with respect to the triangle defined by tri_vertices.

    Parameters
    ----------
    p:
        The point in space for which to compute barycentric coordinates.
    tri_vertices:
        a matrix of shape (3, 3) where each column is a vertex of the triangle.
    normal:
        the normal vector of the triangle.

    Returns
    -------
    A vec3 (b0, b1, b2) with b0 + b1 + b2 == 1.

    Notes
    -----
    This function assumes that the triangle is not degenerated.
    """
    v0 = tri_vertices[:, 0]
    v1 = tri_vertices[:, 1]
    v2 = tri_vertices[:, 2]
    # Compute the areas of the triangles formed by the vertices
    # (signed areas, projected onto the triangle normal).
    area_tri_inv = 1.0 / (v1 - v0).cross((v2 - v0)).dot(normal)
    # Compute the barycentric coordinates
    b0 = (v2 - v1).cross(p - v1).dot(normal) * area_tri_inv
    b1 = (v0 - v2).cross(p - v2).dot(normal) * area_tri_inv
    b2 = 1.0 - b0 - b1
    return gs.qd_vec3(b0, b1, b2)
@qd.func
def tet_barycentric(p, tet_vertices):
    """
    Compute the barycentric coordinates of point p with respect to the tetrahedron defined by tet_vertices.

    tet_vertices is a matrix of shape (3, 4) where each column is a vertex of the
    tetrahedron. Returns a vec4 (b0, b1, b2, b3) with components summing to 1.
    Assumes the tetrahedron is not degenerate (non-zero volume).
    """
    v0 = tet_vertices[:, 0]
    v1 = tet_vertices[:, 1]
    v2 = tet_vertices[:, 2]
    v3 = tet_vertices[:, 3]
    # Compute the volumes of the tetrahedra formed by the point and the vertices
    vol_tet_inv = 1.0 / ((v1 - v0).dot((v2 - v0).cross(v3 - v0)))
    # Compute the barycentric coordinates
    b0 = (p - v1).dot((v3 - v1).cross(v2 - v1)) * vol_tet_inv
    b1 = (p - v2).dot((v3 - v2).cross(v0 - v2)) * vol_tet_inv
    b2 = (p - v3).dot((v1 - v3).cross(v0 - v3)) * vol_tet_inv
    b3 = 1.0 - b0 - b1 - b2
    return qd.Vector([b0, b1, b2, b3], dt=gs.qd_float)
@qd.data_oriented
class SAPCoupler(RBC):
"""
This class handles all the coupling between different solvers using the
Semi-Analytic Primal (SAP) contact solver used in Drake.
Note
----
For now all batches have the same constraints, such as joint equality constraints are consistent among all batches.
Paper reference: https://arxiv.org/abs/2110.10107
Drake reference: https://drake.mit.edu/release_notes/v1.5.0.html
Code reference: https://github.com/RobotLocomotion/drake/blob/d7a5096c6d0f131705c374390202ad95d0607fd4/multibody/plant/sap_driver.cc
"""
# ------------------------------------------------------------------------------------
# --------------------------------- Initialization -----------------------------------
# ------------------------------------------------------------------------------------
def __init__(
self,
simulator: "Simulator",
options: "SAPCouplerOptions",
) -> None:
self.sim = simulator
self.options = options
self.rigid_solver = self.sim.rigid_solver
self.fem_solver = self.sim.fem_solver
self._n_sap_iterations = options.n_sap_iterations
self._n_pcg_iterations = options.n_pcg_iterations
self._n_linesearch_iterations = options.n_linesearch_iterations
self._sap_convergence_atol = options.sap_convergence_atol
self._sap_convergence_rtol = options.sap_convergence_rtol
self._sap_taud = options.sap_taud
self._sap_beta = options.sap_beta
self._sap_sigma = options.sap_sigma
self._pcg_threshold = options.pcg_threshold
self._linesearch_ftol = options.linesearch_ftol
self._linesearch_max_step_size = options.linesearch_max_step_size
self._hydroelastic_stiffness = options.hydroelastic_stiffness
self._point_contact_stiffness = options.point_contact_stiffness
if gs.qd_float == qd.f32:
gs.raise_exception(
"SAPCoupler does not support 32bits precision. Please specify precision='64' when initializing Genesis."
)
if options.fem_floor_contact_type == "tet":
self._fem_floor_contact_type = FEMFloorContactType.TET
elif options.fem_floor_contact_type == "vert":
self._fem_floor_contact_type = FEMFloorContactType.VERT
elif options.fem_floor_contact_type == "none":
self._fem_floor_contact_type = FEMFloorContactType.NONE
else:
gs.raise_exception(
f"Invalid FEM floor contact type: {options.fem_floor_contact_type}. "
"Must be one of 'tet', 'vert', or 'none'."
)
self._enable_fem_self_tet_contact = options.enable_fem_self_tet_contact
if options.rigid_floor_contact_type == "vert":
self._rigid_floor_contact_type = RigidFloorContactType.VERT
elif options.rigid_floor_contact_type == "tet":
self._rigid_floor_contact_type = RigidFloorContactType.TET
elif options.rigid_floor_contact_type == "none":
self._rigid_floor_contact_type = RigidFloorContactType.NONE
else:
gs.raise_exception(
f"Invalid rigid floor contact type: {options.rigid_floor_contact_type}. "
"Must be one of 'vert' or 'none'."
)
self._enable_rigid_fem_contact = options.enable_rigid_fem_contact
if options.rigid_rigid_contact_type == "tet":
self._rigid_rigid_contact_type = RigidRigidContactType.TET
elif options.rigid_rigid_contact_type == "none":
self._rigid_rigid_contact_type = RigidRigidContactType.NONE
else:
gs.raise_exception(
f"Invalid rigid-rigid contact type: {options.rigid_rigid_contact_type}. Must be one of 'tet' or 'none'."
)
self._rigid_compliant = False
# ------------------------------------------------------------------------------------
# --------------------------------- Initialization -----------------------------------
# ------------------------------------------------------------------------------------
def build(self) -> None:
self._B = self.sim._B
self.contact_handlers = []
self._enable_rigid_fem_contact &= self.rigid_solver.is_active and self.fem_solver.is_active
self._enable_fem_self_tet_contact &= self.fem_solver.is_active
init_tet_tables = False
if self.fem_solver.is_active:
if self.fem_solver._use_implicit_solver is False:
gs.raise_exception(
"SAPCoupler requires FEM to use implicit solver. "
"Please set `use_implicit_solver=True` in FEM options."
)
if self._fem_floor_contact_type == FEMFloorContactType.TET or self._enable_fem_self_tet_contact:
init_tet_tables = True
self._init_hydroelastic_fem_fields_and_info()
if self._fem_floor_contact_type == FEMFloorContactType.TET:
self.fem_floor_tet_contact = FEMFloorTetContactHandler(self.sim)
self.contact_handlers.append(self.fem_floor_tet_contact)
if self._fem_floor_contact_type == FEMFloorContactType.VERT:
self.fem_floor_vert_contact = FEMFloorVertContactHandler(self.sim)
self.contact_handlers.append(self.fem_floor_vert_contact)
if self._enable_fem_self_tet_contact:
self.fem_self_tet_contact = FEMSelfTetContactHandler(self.sim)
self.contact_handlers.append(self.fem_self_tet_contact)
self._init_fem_fields()
if self.rigid_solver.is_active:
if (
self._rigid_floor_contact_type == RigidFloorContactType.TET
or self._rigid_rigid_contact_type == RigidRigidContactType.TET
):
init_tet_tables = True
self._init_hydroelastic_rigid_fields_and_info()
self._init_rigid_fields()
if self._rigid_floor_contact_type == RigidFloorContactType.VERT:
self.rigid_floor_vert_contact = RigidFloorVertContactHandler(self.sim)
self.contact_handlers.append(self.rigid_floor_vert_contact)
elif self._rigid_floor_contact_type == RigidFloorContactType.TET:
self.rigid_floor_tet_contact = RigidFloorTetContactHandler(self.sim)
self.contact_handlers.append(self.rigid_floor_tet_contact)
if self._rigid_rigid_contact_type == RigidRigidContactType.TET:
self.rigid_rigid_tet_contact = RigidRigidTetContactHandler(self.sim)
self.contact_handlers.append(self.rigid_rigid_tet_contact)
# TODO: Dynamically added constraints are not supported for now
if self.rigid_solver.n_equalities > 0:
self._init_equality_constraint()
if self._enable_rigid_fem_contact:
self.rigid_fem_contact = RigidFemTriTetContactHandler(self.sim)
self.contact_handlers.append(self.rigid_fem_contact)
self._init_bvh()
if init_tet_tables:
self._init_tet_tables()
self._init_sap_fields()
self._init_pcg_fields()
self._init_linesearch_fields()
def reset(self, envs_idx=None):
pass
def _init_tet_tables(self):
# Lookup table for marching tetrahedra edges
self.MarchingTetsEdgeTable = qd.field(gs.qd_ivec4, shape=len(MARCHING_TETS_EDGE_TABLE))
self.MarchingTetsEdgeTable.from_numpy(np.array(MARCHING_TETS_EDGE_TABLE, dtype=gs.np_int))
self.TetEdges = qd.field(gs.qd_ivec2, shape=(len(TET_EDGES),))
self.TetEdges.from_numpy(np.array(TET_EDGES, dtype=gs.np_int))
def _init_hydroelastic_fem_fields_and_info(self):
self.fem_pressure = qd.field(gs.qd_float, shape=(self.fem_solver.n_vertices))
fem_pressure_np = np.concatenate([fem_entity.pressure_field_np for fem_entity in self.fem_solver.entities])
self.fem_pressure.from_numpy(fem_pressure_np)
self.fem_pressure_gradient = qd.field(gs.qd_vec3, shape=(self.fem_solver._B, self.fem_solver.n_elements))
    def _init_hydroelastic_rigid_fields_and_info(self):
        """
        Tetrahedralize every rigid collision geom and build its hydroelastic
        pressure field.

        For each collision-enabled geom (contype/conaffinity set), the surface mesh
        is tetrahedralized, the interior distance-to-surface is normalized and
        scaled by `self._hydroelastic_stiffness` into a per-vertex pressure, and
        everything is uploaded into device fields (vertices, elements, per-geom
        indices, pressure, pressure gradients). Sets `self._rigid_compliant = True`
        on success.

        Raises (via gs.raise_exception) for primitive planes, when no collision
        geometries exist, or when a tetrahedralized mesh has no interior vertices.
        """
        rigid_volume_verts = []
        rigid_volume_elems = []
        rigid_volume_verts_geom_idx = []
        rigid_volume_elems_geom_idx = []
        rigid_pressure_field = []
        # Running vertex offset so element indices stay global across geoms.
        offset = 0
        for geom in self.rigid_solver.geoms:
            if geom.contype or geom.conaffinity:
                if geom.type == gs.GEOM_TYPE.PLANE:
                    gs.raise_exception("Primitive plane not supported as user-specified collision geometries.")
                volume = geom.get_trimesh().volume
                # Cap tet volume at 1% of the mesh volume for a reasonably fine mesh.
                tet_cfg = {"nobisect": False, "maxvolume": volume / 100}
                mesh_verts, mesh_elems, _uvs = eu.mesh_to_elements(file=geom.get_trimesh(), tet_cfg=tet_cfg)
                verts, elems = eu.split_all_surface_tets(mesh_verts, mesh_elems)
                rigid_volume_verts.append(verts)
                rigid_volume_elems.append(elems + offset)
                rigid_volume_verts_geom_idx.append(np.full(len(verts), geom.idx, dtype=gs.np_int))
                rigid_volume_elems_geom_idx.append(np.full(len(elems), geom.idx, dtype=gs.np_int))
                # Pressure = normalized |distance to surface| times hydroelastic stiffness.
                signed_distance, *_ = igl.signed_distance(verts, geom.init_verts, geom.init_faces)
                signed_distance = signed_distance.astype(gs.np_float, copy=False)
                distance_unsigned = np.abs(signed_distance)
                distance_max = np.max(distance_unsigned)
                if distance_max < gs.EPS:
                    gs.raise_exception(
                        f"Pressure field max distance is too small: {distance_max}. "
                        "This might be due to a mesh having no internal vertices."
                    )
                pressure_field_np = distance_unsigned / distance_max * self._hydroelastic_stiffness
                rigid_pressure_field.append(pressure_field_np)
                offset += len(verts)
        if not rigid_volume_verts:
            gs.raise_exception("No rigid collision geometries found.")
        rigid_volume_verts_np = np.concatenate(rigid_volume_verts, axis=0, dtype=gs.np_float)
        rigid_volume_elems_np = np.concatenate(rigid_volume_elems, axis=0, dtype=gs.np_int)
        rigid_volume_verts_geom_idx_np = np.concatenate(rigid_volume_verts_geom_idx, axis=0, dtype=gs.np_int)
        rigid_volume_elems_geom_idx_np = np.concatenate(rigid_volume_elems_geom_idx, axis=0, dtype=gs.np_int)
        rigid_pressure_field_np = np.concatenate(rigid_pressure_field, axis=0, dtype=gs.np_int) if False else np.concatenate(rigid_pressure_field, axis=0, dtype=gs.np_float)
        self.n_rigid_volume_verts = len(rigid_volume_verts_np)
        self.n_rigid_volume_elems = len(rigid_volume_elems_np)
        # Upload everything to device fields.
        self.rigid_volume_verts_rest = qd.field(gs.qd_vec3, shape=(self.n_rigid_volume_verts,))
        self.rigid_volume_verts_rest.from_numpy(rigid_volume_verts_np)
        self.rigid_volume_verts = qd.field(gs.qd_vec3, shape=(self._B, self.n_rigid_volume_verts))
        self.rigid_volume_elems = qd.field(gs.qd_ivec4, shape=(self.n_rigid_volume_elems,))
        self.rigid_volume_elems.from_numpy(rigid_volume_elems_np)
        self.rigid_volume_verts_geom_idx = qd.field(gs.qd_int, shape=(self.n_rigid_volume_verts,))
        self.rigid_volume_verts_geom_idx.from_numpy(rigid_volume_verts_geom_idx_np)
        self.rigid_volume_elems_geom_idx = qd.field(gs.qd_int, shape=(self.n_rigid_volume_elems,))
        self.rigid_volume_elems_geom_idx.from_numpy(rigid_volume_elems_geom_idx_np)
        # FIXME: Convert collision_pair_idx to field here because SAPCoupler cannot support ndarray/field switch yet
        np_collision_pair_idx = self.rigid_solver.collider._collider_info.collision_pair_idx.to_numpy()
        self.rigid_collision_pair_idx = qd.field(gs.qd_int, shape=np_collision_pair_idx.shape)
        self.rigid_collision_pair_idx.from_numpy(np_collision_pair_idx)
        self.rigid_pressure_field = qd.field(gs.qd_float, shape=(self.n_rigid_volume_verts,))
        self.rigid_pressure_field.from_numpy(rigid_pressure_field_np)
        self.rigid_pressure_gradient_rest = qd.field(gs.qd_vec3, shape=(self.n_rigid_volume_elems,))
        self.rigid_pressure_gradient = qd.field(gs.qd_vec3, shape=(self._B, self.n_rigid_volume_elems))
        self.rigid_compute_pressure_gradient_rest()
        self._rigid_compliant = True
    @qd.kernel
    def rigid_update_volume_verts_pressure_gradient(
        self,
        geoms_state: array_class.GeomsState,
    ):
        """
        Transform the rest-pose volume vertices and pressure gradients into world
        frame using each geom's current pose (position + quaternion).
        """
        for i_b, i_v in qd.ndrange(self._B, self.n_rigid_volume_verts):
            i_g = self.rigid_volume_verts_geom_idx[i_v]
            pos = geoms_state.pos[i_g, i_b]
            quat = geoms_state.quat[i_g, i_b]
            R = gu.qd_quat_to_R(quat, gs.EPS)
            self.rigid_volume_verts[i_b, i_v] = R @ self.rigid_volume_verts_rest[i_v] + pos
        for i_b, i_e in qd.ndrange(self._B, self.n_rigid_volume_elems):
            i_g = self.rigid_volume_elems_geom_idx[i_e]
            pos = geoms_state.pos[i_g, i_b]
            quat = geoms_state.quat[i_g, i_b]
            R = gu.qd_quat_to_R(quat, gs.EPS)
            # Gradients are directions: rotate only, no translation.
            self.rigid_pressure_gradient[i_b, i_e] = R @ self.rigid_pressure_gradient_rest[i_e]
@qd.kernel
def rigid_compute_pressure_gradient_rest(self):
    """Compute per-tet rest-frame pressure gradients from vertex pressure values.

    For each tetrahedron, accumulates the linear-shape-function gradient of every
    vertex weighted by its pressure value. Degenerate tets (|signed volume| <= EPS)
    contribute nothing.
    """
    grad = qd.static(self.rigid_pressure_gradient_rest)
    for i_e in range(self.n_rigid_volume_elems):
        grad[i_e].fill(0.0)
        for i in qd.static(range(4)):
            # cyclic vertex indexing so each vertex takes the role of v0 once
            i_v0 = self.rigid_volume_elems[i_e][i]
            i_v1 = self.rigid_volume_elems[i_e][(i + 1) % 4]
            i_v2 = self.rigid_volume_elems[i_e][(i + 2) % 4]
            i_v3 = self.rigid_volume_elems[i_e][(i + 3) % 4]
            pos_v0 = self.rigid_volume_verts_rest[i_v0]
            pos_v1 = self.rigid_volume_verts_rest[i_v1]
            pos_v2 = self.rigid_volume_verts_rest[i_v2]
            pos_v3 = self.rigid_volume_verts_rest[i_v3]
            e10 = pos_v0 - pos_v1
            e12 = pos_v2 - pos_v1
            e13 = pos_v3 - pos_v1
            area_vector = e12.cross(e13)
            signed_volume = area_vector.dot(e10)
            if qd.abs(signed_volume) > gs.EPS:
                # gradient of v0's linear shape function within this tet
                grad_i = area_vector / signed_volume
                grad[i_e] += grad_i * self.rigid_pressure_field[i_v0]
def _init_bvh(self):
    """Create AABB buffers and LBVH trees for every enabled broad-phase pass."""
    fem = self.fem_solver
    rigid = self.rigid_solver
    if self._enable_fem_self_tet_contact:
        # FEM surface-tet self contact
        tet_aabb = AABB(fem._B, fem.n_surface_elements)
        self.fem_surface_tet_aabb = tet_aabb
        self.fem_surface_tet_bvh = FEMSurfaceTetLBVH(
            fem, tet_aabb, max_n_query_result_per_aabb=MAX_N_QUERY_RESULT_PER_AABB
        )
    if self._enable_rigid_fem_contact:
        # rigid triangles queried against FEM surface tets; scale the per-AABB
        # query budget by the larger of the two primitive counts
        tri_aabb = AABB(self.sim._B, rigid.n_faces)
        self.rigid_tri_aabb = tri_aabb
        budget = max(rigid.n_faces, fem.n_surface_elements) * MAX_N_QUERY_RESULT_PER_AABB // rigid.n_faces
        self.rigid_tri_bvh = LBVH(tri_aabb, budget)
    if rigid.is_active and self._rigid_rigid_contact_type == RigidRigidContactType.TET:
        # rigid-rigid tet-tet contact
        rt_aabb = AABB(self.sim._B, self.n_rigid_volume_elems)
        self.rigid_tet_aabb = rt_aabb
        self.rigid_tet_bvh = RigidTetLBVH(self, rt_aabb, max_n_query_result_per_aabb=MAX_N_QUERY_RESULT_PER_AABB)
def _init_equality_constraint(self):
    """Build the handler for rigid-solver equality (joint) constraints."""
    # TODO: Handling dynamically registered weld constraints would require passing 'constraint_state' as input.
    # This is not a big deal for now since only joint equality constraints are supported by this coupler.
    self.equality_constraint_handler = RigidConstraintHandler(self.sim)
    self.equality_constraint_handler.build_constraints(
        self.rigid_solver.equalities_info,
        self.rigid_solver.joints_info,
        self.rigid_solver._static_rigid_sim_config,
    )
def _init_sap_fields(self):
    """Allocate per-batch SAP solver state (activity flag + convergence norms)."""
    self.batch_active = qd.field(dtype=gs.qd_bool, shape=(self.sim._B,), needs_grad=False)
    sap_state = qd.types.struct(
        gradient_norm=gs.qd_float,  # norm of the gradient
        momentum_norm=gs.qd_float,  # norm of the momentum
        impulse_norm=gs.qd_float,  # norm of the impulse
    )
    self.sap_state = sap_state.field(shape=(self.sim._B,), needs_grad=False, layout=qd.Layout.SOA)
def _init_fem_fields(self):
    """Allocate per-FEM-vertex state for SAP, PCG, and line-search stages."""
    fem_state_v = qd.types.struct(
        v=gs.qd_vec3,  # vertex velocity
        v_diff=gs.qd_vec3,  # difference between current and previous velocity
        gradient=gs.qd_vec3,  # gradient vector
        impulse=gs.qd_vec3,  # impulse vector
    )
    self.fem_state_v = fem_state_v.field(
        shape=(self.sim._B, self.fem_solver.n_vertices), needs_grad=False, layout=qd.Layout.SOA
    )
    pcg_fem_state_v = qd.types.struct(
        diag3x3=gs.qd_mat3,  # diagonal 3-by-3 block of the hessian
        prec=gs.qd_mat3,  # preconditioner
        x=gs.qd_vec3,  # solution vector
        r=gs.qd_vec3,  # residual vector
        z=gs.qd_vec3,  # preconditioned residual vector
        p=gs.qd_vec3,  # search direction vector
        Ap=gs.qd_vec3,  # matrix-vector product
    )
    self.pcg_fem_state_v = pcg_fem_state_v.field(
        shape=(self.sim._B, self.fem_solver.n_vertices), needs_grad=False, layout=qd.Layout.SOA
    )
    linesearch_fem_state_v = qd.types.struct(
        x_prev=gs.qd_vec3,  # solution vector
        dp=gs.qd_vec3,  # A @ dv
    )
    self.linesearch_fem_state_v = linesearch_fem_state_v.field(
        shape=(self.sim._B, self.fem_solver.n_vertices), needs_grad=False, layout=qd.Layout.SOA
    )
def _init_rigid_fields(self):
    """Allocate per-rigid-DOF state for SAP, PCG, and line-search stages."""
    rigid_state_dof = qd.types.struct(
        v=gs.qd_float,  # vertex velocity
        v_diff=gs.qd_float,  # difference between current and previous velocity
        mass_v_diff=gs.qd_float,  # mass weighted difference between current and previous velocity
        gradient=gs.qd_float,  # gradient vector
        impulse=gs.qd_float,  # impulse vector
    )
    self.rigid_state_dof = rigid_state_dof.field(
        shape=(self.sim._B, self.rigid_solver.n_dofs), needs_grad=False, layout=qd.Layout.SOA
    )
    pcg_rigid_state_dof = qd.types.struct(
        x=gs.qd_float,  # solution vector
        r=gs.qd_float,  # residual vector
        z=gs.qd_float,  # preconditioned residual vector
        p=gs.qd_float,  # search direction vector
        Ap=gs.qd_float,  # matrix-vector product
    )
    self.pcg_rigid_state_dof = pcg_rigid_state_dof.field(
        shape=(self.sim._B, self.rigid_solver.n_dofs), needs_grad=False, layout=qd.Layout.SOA
    )
    linesearch_rigid_state_dof = qd.types.struct(
        x_prev=gs.qd_float,  # solution vector
        dp=gs.qd_float,  # A @ dv
    )
    self.linesearch_rigid_state_dof = linesearch_rigid_state_dof.field(
        shape=(self.sim._B, self.rigid_solver.n_dofs), needs_grad=False, layout=qd.Layout.SOA
    )
def _init_pcg_fields(self):
    """Allocate per-batch PCG scalars (dot products, step sizes, activity flag)."""
    self.batch_pcg_active = qd.field(dtype=gs.qd_bool, shape=(self.sim._B,), needs_grad=False)
    pcg_state = qd.types.struct(
        rTr=gs.qd_float,
        rTz=gs.qd_float,
        rTr_new=gs.qd_float,
        rTz_new=gs.qd_float,
        pTAp=gs.qd_float,
        alpha=gs.qd_float,
        beta=gs.qd_float,
    )
    self.pcg_state = pcg_state.field(shape=(self.sim._B,), needs_grad=False, layout=qd.Layout.SOA)
def _init_linesearch_fields(self):
    """Allocate per-batch exact line-search state (energies, step-size bracketing)."""
    self.batch_linesearch_active = qd.field(dtype=gs.qd_bool, shape=(self.sim._B,), needs_grad=False)
    linesearch_state = qd.types.struct(
        prev_energy=gs.qd_float,
        energy=gs.qd_float,
        step_size=gs.qd_float,
        m=gs.qd_float,
        dell_dalpha=gs.qd_float,  # first derivative of the total energy w.r.t. alpha
        d2ellA_dalpha2=gs.qd_float,  # second derivative of the dynamic energy w.r.t. alpha
        d2ell_dalpha2=gs.qd_float,  # second derivative of the total energy w.r.t. alpha
        dell_scale=gs.qd_float,  # scale factor for the first derivative
        alpha_min=gs.qd_float,  # minimum stepsize value
        alpha_max=gs.qd_float,  # maximum stepsize value
        alpha_tol=gs.qd_float,  # stepsize tolerance for convergence
        f_lower=gs.qd_float,  # minimum f value
        f_upper=gs.qd_float,  # maximum f value
        f=gs.qd_float,  # f value
        df=gs.qd_float,  # f gradient
        minus_dalpha=gs.qd_float,  # negative stepsize
        minus_dalpha_prev=gs.qd_float,  # previous negative stepsize
    )
    self.linesearch_state = linesearch_state.field(shape=(self.sim._B,), needs_grad=False, layout=qd.Layout.SOA)
# ------------------------------------------------------------------------------------
# -------------------------------------- Main ----------------------------------------
# ------------------------------------------------------------------------------------
def preprocess(self, i_step):
    """Per-substep contact pipeline: precompute, BVH refit, detection, regularization.

    Raises an exception when any contact handler overflows its pair buffer.
    """
    solver = self.rigid_solver
    self.precompute(i_step)
    self.update_bvh(i_step)
    self.has_contact, overflow = self.update_contact(
        i_step,
        links_info=solver.links_info,
        faces_info=solver.faces_info,
        verts_info=solver.verts_info,
        free_verts_state=solver.free_verts_state,
        fixed_verts_state=solver.fixed_verts_state,
        geoms_info=solver.geoms_info,
        dofs_state=solver.dofs_state,
        links_state=solver.links_state,
    )
    if overflow:
        # report every handler whose buffer is too small, then abort
        parts = ["Overflowed In Contact Query: \n"]
        for handler in self.contact_handlers:
            n_pairs = handler.n_contact_pairs[None]
            if n_pairs > handler.max_contact_pairs:
                parts.append(f"{handler.name} max contact pairs: {handler.max_contact_pairs}, using {n_pairs}\n")
        gs.raise_exception("".join(parts))
    self.compute_regularization(
        dofs_state=solver.dofs_state,
        entities_info=solver.entities_info,
        rigid_global_info=solver._rigid_global_info,
    )
def precompute(self, i_step):
    """Refresh world-space geometry needed by contact detection.

    Updates FEM pressure gradients (for tet-based contact), all rigid vertices,
    and — for compliant rigid bodies — the world-frame volume verts/gradients.
    """
    from genesis.engine.solvers.rigid.rigid_solver import kernel_update_all_verts
    if self.fem_solver.is_active:
        if qd.static(self._fem_floor_contact_type == FEMFloorContactType.TET or self._enable_fem_self_tet_contact):
            self.fem_compute_pressure_gradient(i_step)
    if self.rigid_solver.is_active:
        kernel_update_all_verts(
            geoms_info=self.rigid_solver.geoms_info,
            geoms_state=self.rigid_solver.geoms_state,
            verts_info=self.rigid_solver.verts_info,
            free_verts_state=self.rigid_solver.free_verts_state,
            fixed_verts_state=self.rigid_solver.fixed_verts_state,
            static_rigid_sim_config=self.rigid_solver._static_rigid_sim_config,
        )
        if self._rigid_compliant:
            self.rigid_update_volume_verts_pressure_gradient(
                self.rigid_solver.geoms_state,
            )
@qd.kernel
def update_contact(
    self,
    i_step: qd.i32,
    links_info: array_class.LinksInfo,
    faces_info: array_class.FacesInfo,
    verts_info: array_class.VertsInfo,
    free_verts_state: array_class.VertsState,
    fixed_verts_state: array_class.VertsState,
    geoms_info: array_class.GeomsInfo,
    dofs_state: array_class.DofsState,
    links_state: array_class.LinksState,
) -> tuple[bool, bool]:
    """Run detection + Jacobian assembly for every contact handler.

    Returns
    -------
    (has_contact, overflow) : tuple[bool, bool]
        has_contact is True if any handler found at least one pair;
        overflow is True if any handler exceeded its pair buffer.
    """
    has_contact = False
    overflow = False
    for contact in qd.static(self.contact_handlers):
        overflow |= contact.detection(
            i_step,
            links_info=links_info,
            verts_info=verts_info,
            faces_info=faces_info,
            free_verts_state=free_verts_state,
            fixed_verts_state=fixed_verts_state,
            geoms_info=geoms_info,
        )
        has_contact |= contact.n_contact_pairs[None] > 0
        contact.compute_jacobian(
            links_info=links_info,
            dofs_state=dofs_state,
            links_state=links_state,
        )
    return has_contact, overflow
def couple(self, i_step):
    """Run the SAP solve when contacts exist, then write velocities back to the solvers."""
    if self.has_contact:
        self.sap_solve(i_step)
    self.update_vel(i_step, dofs_state=self.rigid_solver.dofs_state)
def couple_grad(self, i_step):
    """Gradient coupling is unsupported by SAPCoupler; always raises."""
    gs.raise_exception("couple_grad is not available for SAPCoupler. Please use LegacyCoupler instead.")
@qd.kernel
def update_vel(self, i_step: qd.i32, dofs_state: array_class.DofsState):
    """Copy SAP-solved velocities back into the FEM and rigid solver states."""
    if qd.static(self.fem_solver.is_active):
        self.update_fem_vel(i_step)
    if qd.static(self.rigid_solver.is_active):
        self.update_rigid_vel(dofs_state=dofs_state)
@qd.func
def update_fem_vel(self, i_step: qd.i32):
    """Write solved FEM vertex velocities into the next substep's element state."""
    for i_b, i_v in qd.ndrange(self.fem_solver._B, self.fem_solver.n_vertices):
        self.fem_solver.elements_v[i_step + 1, i_v, i_b].vel = self.fem_state_v.v[i_b, i_v]
@qd.func
def update_rigid_vel(self, dofs_state: array_class.DofsState):
    """Write solved rigid DOF velocities into the rigid solver's dof state."""
    for i_b, i_d in qd.ndrange(self.rigid_solver._B, self.rigid_solver.n_dofs):
        dofs_state.vel[i_d, i_b] = self.rigid_state_dof.v[i_b, i_d]
@qd.kernel
def fem_compute_pressure_gradient(self, i_step: qd.i32):
    """Compute per-element FEM pressure gradients from current vertex positions.

    Same shape-function construction as the rigid rest-frame variant, but
    evaluated on the deformed configuration at substep ``i_step``.
    """
    for i_b, i_e in qd.ndrange(self.fem_solver._B, self.fem_solver.n_elements):
        self.fem_pressure_gradient[i_b, i_e].fill(0.0)
        for i in qd.static(range(4)):
            # cyclic vertex indexing so each vertex takes the role of v0 once
            i_v0 = self.fem_solver.elements_i[i_e].el2v[i]
            i_v1 = self.fem_solver.elements_i[i_e].el2v[(i + 1) % 4]
            i_v2 = self.fem_solver.elements_i[i_e].el2v[(i + 2) % 4]
            i_v3 = self.fem_solver.elements_i[i_e].el2v[(i + 3) % 4]
            pos_v0 = self.fem_solver.elements_v[i_step, i_v0, i_b].pos
            pos_v1 = self.fem_solver.elements_v[i_step, i_v1, i_b].pos
            pos_v2 = self.fem_solver.elements_v[i_step, i_v2, i_b].pos
            pos_v3 = self.fem_solver.elements_v[i_step, i_v3, i_b].pos
            e10 = pos_v0 - pos_v1
            e12 = pos_v2 - pos_v1
            e13 = pos_v3 - pos_v1
            area_vector = e12.cross(e13)
            signed_volume = area_vector.dot(e10)
            if qd.abs(signed_volume) > gs.EPS:
                # gradient of v0's linear shape function within this tet
                grad_i = area_vector / signed_volume
                self.fem_pressure_gradient[i_b, i_e] += grad_i * self.fem_pressure[i_v0]
# ------------------------------------------------------------------------------------
# -------------------------------------- BVH -----------------------------------------
# ------------------------------------------------------------------------------------
def update_bvh(self, i_step: qd.i32):
    """Refit and rebuild each broad-phase BVH that is enabled for this scene."""
    rigid_tet_enabled = (
        self.rigid_solver.is_active and self._rigid_rigid_contact_type == RigidRigidContactType.TET
    )
    if self._enable_fem_self_tet_contact:
        self.update_fem_surface_tet_bvh(i_step)
    if self._enable_rigid_fem_contact:
        self.update_rigid_tri_bvh()
    if rigid_tet_enabled:
        self.update_rigid_tet_bvh()
def update_fem_surface_tet_bvh(self, i_step: qd.i32):
    """Recompute FEM surface-tet AABBs at substep ``i_step`` and rebuild the BVH."""
    self.compute_fem_surface_tet_aabb(i_step)
    self.fem_surface_tet_bvh.build()
def update_rigid_tri_bvh(self):
    """Recompute rigid triangle AABBs from current vertex states, then rebuild the BVH."""
    solver = self.rigid_solver
    self.compute_rigid_tri_aabb(
        faces_info=solver.faces_info,
        verts_info=solver.verts_info,
        free_verts_state=solver.free_verts_state,
        fixed_verts_state=solver.fixed_verts_state,
    )
    self.rigid_tri_bvh.build()
def update_rigid_tet_bvh(self):
    """Recompute rigid volume-tet AABBs and rebuild the BVH."""
    self.compute_rigid_tet_aabb()
    self.rigid_tet_bvh.build()
@qd.kernel
def compute_fem_surface_tet_aabb(self, i_step: qd.i32):
    """Fill the surface-tet AABB buffer from FEM vertex positions at ``i_step``."""
    aabbs = qd.static(self.fem_surface_tet_aabb.aabbs)
    for i_b, i_se in qd.ndrange(self.fem_solver._B, self.fem_solver.n_surface_elements):
        i_e = self.fem_solver.surface_elements[i_se]
        i_vs = self.fem_solver.elements_i[i_e].el2v
        # start from an inverted box, then grow over the 4 tet vertices
        aabbs[i_b, i_se].min.fill(np.inf)
        aabbs[i_b, i_se].max.fill(-np.inf)
        for i in qd.static(range(4)):
            pos_v = self.fem_solver.elements_v[i_step, i_vs[i], i_b].pos
            aabbs[i_b, i_se].min = qd.min(aabbs[i_b, i_se].min, pos_v)
            aabbs[i_b, i_se].max = qd.max(aabbs[i_b, i_se].max, pos_v)
@qd.kernel
def compute_rigid_tri_aabb(
    self,
    faces_info: array_class.FacesInfo,
    free_verts_state: array_class.VertsState,
    fixed_verts_state: array_class.VertsState,
    verts_info: array_class.VertsInfo,
):
    """Fill the rigid triangle AABB buffer from current vertex positions.

    Fixed vertices are read from the shared (batch-independent) state; free
    vertices are read per batch.
    """
    aabbs = qd.static(self.rigid_tri_aabb.aabbs)
    for i_b, i_f in qd.ndrange(self.rigid_solver._B, self.rigid_solver.n_faces):
        tri_vertices = qd.Matrix.zero(gs.qd_float, 3, 3)
        for i in qd.static(range(3)):
            i_v = faces_info.verts_idx[i_f][i]
            i_fv = verts_info.verts_state_idx[i_v]
            if verts_info.is_fixed[i_v]:
                tri_vertices[:, i] = fixed_verts_state.pos[i_fv]
            else:
                tri_vertices[:, i] = free_verts_state.pos[i_fv, i_b]
        pos_v0, pos_v1, pos_v2 = tri_vertices[:, 0], tri_vertices[:, 1], tri_vertices[:, 2]
        aabbs[i_b, i_f].min = qd.min(pos_v0, pos_v1, pos_v2)
        aabbs[i_b, i_f].max = qd.max(pos_v0, pos_v1, pos_v2)
@qd.kernel
def compute_rigid_tet_aabb(self):
    """Fill the rigid volume-tet AABB buffer from world-space volume vertices."""
    aabbs = qd.static(self.rigid_tet_aabb.aabbs)
    for i_b, i_e in qd.ndrange(self._B, self.n_rigid_volume_elems):
        i_v0 = self.rigid_volume_elems[i_e][0]
        i_v1 = self.rigid_volume_elems[i_e][1]
        i_v2 = self.rigid_volume_elems[i_e][2]
        i_v3 = self.rigid_volume_elems[i_e][3]
        pos_v0 = self.rigid_volume_verts[i_b, i_v0]
        pos_v1 = self.rigid_volume_verts[i_b, i_v1]
        pos_v2 = self.rigid_volume_verts[i_b, i_v2]
        pos_v3 = self.rigid_volume_verts[i_b, i_v3]
        aabbs[i_b, i_e].min = qd.min(pos_v0, pos_v1, pos_v2, pos_v3)
        aabbs[i_b, i_e].max = qd.max(pos_v0, pos_v1, pos_v2, pos_v3)
# ------------------------------------------------------------------------------------
# ------------------------------------- Solve ----------------------------------------
# ------------------------------------------------------------------------------------
def sap_solve(self, i_step):
    """Run the SAP Newton loop for one substep.

    Each iteration: assemble the unconstrained gradient/diagonal, add
    constraint and contact contributions, check per-batch convergence,
    solve the linearized system with PCG, and take an exact line-search
    step.

    Parameters
    ----------
    i_step : int
        Current substep index.
    """
    self._init_sap_solve(i_step, dofs_state=self.rigid_solver.dofs_state)
    # NOTE: loop variable renamed from `iter`, which shadowed the builtin.
    for i_iter in range(self._n_sap_iterations):
        # init gradient and preconditioner
        self.compute_unconstrained_gradient_diag(i_step, i_iter)
        # compute contact hessian and gradient
        self.compute_constraint_contact_gradient_hessian_diag_prec()
        self.check_sap_convergence(rigid_global_info=self.rigid_solver._rigid_global_info)
        # solve for the vertex velocity
        self.pcg_solve()
        # line search
        self.exact_linesearch(i_step)
@qd.kernel
def check_sap_convergence(self, rigid_global_info: array_class.RigidGlobalInfo):
    """Accumulate gradient/momentum/impulse norms and deactivate converged batches."""
    self.clear_sap_norms()
    if qd.static(self.fem_solver.is_active):
        self.add_fem_norms()
    if qd.static(self.rigid_solver.is_active):
        self.add_rigid_norms(rigid_global_info=rigid_global_info)
    self.update_batch_active()
@qd.func
def clear_sap_norms(self):
    """Zero the convergence norms for every still-active batch."""
    for i_b in range(self._B):
        if not self.batch_active[i_b]:
            continue
        self.sap_state[i_b].gradient_norm = 0.0
        self.sap_state[i_b].momentum_norm = 0.0
        self.sap_state[i_b].impulse_norm = 0.0
@qd.func
def add_fem_norms(self):
    """Add FEM vertex contributions to the mass-weighted convergence norms.

    Gradient and impulse norms are scaled by 1/mass, momentum by mass, so
    all three are commensurable.
    """
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if not self.batch_active[i_b]:
            continue
        self.sap_state[i_b].gradient_norm += (
            self.fem_state_v.gradient[i_b, i_v].norm_sqr() / self.fem_solver.elements_v_info[i_v].mass
        )
        self.sap_state[i_b].momentum_norm += (
            self.fem_state_v.v[i_b, i_v].norm_sqr() * self.fem_solver.elements_v_info[i_v].mass
        )
        self.sap_state[i_b].impulse_norm += (
            self.fem_state_v.impulse[i_b, i_v].norm_sqr() / self.fem_solver.elements_v_info[i_v].mass
        )
@qd.func
def add_rigid_norms(self, rigid_global_info: array_class.RigidGlobalInfo):
    """Add rigid DOF contributions to the convergence norms.

    Uses only the diagonal of the mass matrix as the weighting, mirroring
    the per-vertex mass weighting used on the FEM side.
    """
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_active[i_b]:
            continue
        self.sap_state[i_b].gradient_norm += (
            self.rigid_state_dof.gradient[i_b, i_d] ** 2 / rigid_global_info.mass_mat[i_d, i_d, i_b]
        )
        self.sap_state[i_b].momentum_norm += (
            self.rigid_state_dof.v[i_b, i_d] ** 2 * rigid_global_info.mass_mat[i_d, i_d, i_b]
        )
        self.sap_state[i_b].impulse_norm += (
            self.rigid_state_dof.impulse[i_b, i_d] ** 2 / rigid_global_info.mass_mat[i_d, i_d, i_b]
        )
@qd.func
def update_batch_active(self):
    """Deactivate batches whose gradient norm fell below the abs/rel threshold."""
    for i_b in range(self._B):
        if not self.batch_active[i_b]:
            continue
        # relative part is scaled by the larger of momentum and impulse norms
        norm_thr = self._sap_convergence_atol + self._sap_convergence_rtol * qd.max(
            self.sap_state[i_b].momentum_norm, self.sap_state[i_b].impulse_norm
        )
        self.batch_active[i_b] = self.sap_state[i_b].gradient_norm >= norm_thr
@qd.kernel
def compute_regularization(
    self,
    dofs_state: array_class.DofsState,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
):
    """Compute SAP regularization terms for all contact handlers and equality constraints."""
    for contact in qd.static(self.contact_handlers):
        contact.compute_regularization(entities_info=entities_info, rigid_global_info=rigid_global_info)
    if qd.static(self.rigid_solver.is_active and self.rigid_solver.n_equalities > 0):
        self.equality_constraint_handler.compute_regularization(dofs_state=dofs_state)
@qd.kernel
def _init_sap_solve(self, i_step: qd.i32, dofs_state: array_class.DofsState):
    """Seed SAP velocities from the solvers and mark every batch active."""
    self._init_v(i_step, dofs_state=dofs_state)
    self.batch_active.fill(True)
@qd.func
def _init_v(self, i_step: qd.i32, dofs_state: array_class.DofsState):
    """Initialize SAP velocity unknowns from the active solvers' current velocities."""
    if qd.static(self.fem_solver.is_active):
        self._init_v_fem(i_step)
    if qd.static(self.rigid_solver.is_active):
        self._init_v_rigid(i_step, dofs_state=dofs_state)
@qd.func
def _init_v_fem(self, i_step: qd.i32):
    """Copy FEM free-motion velocities (substep i_step + 1) into the SAP state."""
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        self.fem_state_v.v[i_b, i_v] = self.fem_solver.elements_v[i_step + 1, i_v, i_b].vel
@qd.func
def _init_v_rigid(self, i_step: qd.i32, dofs_state: array_class.DofsState):
    """Copy rigid DOF velocities into the SAP state."""
    for i_b, i_d in qd.ndrange(self.rigid_solver._B, self.rigid_solver.n_dofs):
        self.rigid_state_dof.v[i_b, i_d] = dofs_state.vel[i_d, i_b]
def compute_unconstrained_gradient_diag(self, i_step: qd.i32, iter: int):
    """Assemble the unconstrained (dynamics-only) gradient and hessian diagonal."""
    self.init_unconstrained_gradient_diag(i_step)
    # No need to do this for iter=0 because v=v* and A(v-v*) = 0
    if iter > 0:
        self.compute_unconstrained_gradient()
def init_unconstrained_gradient_diag(self, i_step: qd.i32):
    """Reset gradients (and FEM diagonal blocks) for all active solvers."""
    if self.fem_solver.is_active:
        self.init_fem_unconstrained_gradient_diag(i_step)
    if self.rigid_solver.is_active:
        self.init_rigid_unconstrained_gradient(dofs_state=self.rigid_solver.dofs_state)
@qd.kernel
def init_fem_unconstrained_gradient_diag(self, i_step: qd.i32):
    """Zero the FEM gradient, scale the cached hessian diagonal, and compute v - v*."""
    dt2 = self.fem_solver._substep_dt**2
    for i_b, i_v in qd.ndrange(self.fem_solver._B, self.fem_solver.n_vertices):
        self.fem_state_v.gradient[i_b, i_v].fill(0.0)
        # was using position now using velocity, need to multiply dt^2
        self.pcg_fem_state_v[i_b, i_v].diag3x3 = self.fem_solver.pcg_state_v[i_b, i_v].diag3x3 * dt2
        self.fem_state_v.v_diff[i_b, i_v] = (
            self.fem_state_v.v[i_b, i_v] - self.fem_solver.elements_v[i_step + 1, i_v, i_b].vel
        )
@qd.kernel
def init_rigid_unconstrained_gradient(self, dofs_state: array_class.DofsState):
    """Zero the rigid gradient and compute v - v* per DOF."""
    for i_b, i_d in qd.ndrange(self.rigid_solver._B, self.rigid_solver.n_dofs):
        self.rigid_state_dof.gradient[i_b, i_d] = 0.0
        self.rigid_state_dof.v_diff[i_b, i_d] = self.rigid_state_dof.v[i_b, i_d] - dofs_state.vel[i_d, i_b]
def compute_unconstrained_gradient(self):
    """Add the dynamics term A @ (v - v*) to the gradient for all active solvers."""
    if self.fem_solver.is_active:
        self.compute_fem_unconstrained_gradient()
    if self.rigid_solver.is_active:
        self.compute_rigid_unconstrained_gradient(rigid_global_info=self.rigid_solver._rigid_global_info)
@qd.kernel
def compute_fem_unconstrained_gradient(self):
    """Accumulate A @ v_diff into the FEM gradient for active batches."""
    self.compute_fem_matrix_vector_product(self.fem_state_v.v_diff, self.fem_state_v.gradient, self.batch_active)
@qd.kernel
def compute_rigid_unconstrained_gradient(self, rigid_global_info: array_class.RigidGlobalInfo):
    """Accumulate M @ v_diff into the rigid gradient for active batches."""
    # NOTE(review): this fill targets pcg Ap, not the gradient accumulated
    # below — looks like a leftover; confirm it is intentional.
    self.pcg_rigid_state_dof.Ap.fill(0.0)
    for i_b, i_d0, i_d1 in qd.ndrange(self.rigid_solver._B, self.rigid_solver.n_dofs, self.rigid_solver.n_dofs):
        if not self.batch_active[i_b]:
            continue
        self.rigid_state_dof.gradient[i_b, i_d1] += (
            rigid_global_info.mass_mat[i_d1, i_d0, i_b] * self.rigid_state_dof.v_diff[i_b, i_d0]
        )
@qd.kernel
def compute_constraint_contact_gradient_hessian_diag_prec(self):
    """Add constraint/contact gradient and hessian-diagonal terms, then build preconditioners."""
    self.clear_impulses()
    if qd.static(self.rigid_solver.is_active and self.rigid_solver.n_equalities > 0):
        self.equality_constraint_handler.compute_gradient_hessian_diag()
    for contact in qd.static(self.contact_handlers):
        contact.compute_gradient_hessian_diag()
    self.compute_preconditioner()
@qd.func
def clear_impulses(self):
    """Zero accumulated impulses for every active solver."""
    if qd.static(self.fem_solver.is_active):
        self.clear_fem_impulses()
    if qd.static(self.rigid_solver.is_active):
        self.clear_rigid_impulses()
@qd.func
def clear_fem_impulses(self):
    """Zero FEM vertex impulses in still-active batches."""
    for i_b, i_v in qd.ndrange(self.fem_solver._B, self.fem_solver.n_vertices):
        if not self.batch_active[i_b]:
            continue
        self.fem_state_v[i_b, i_v].impulse.fill(0.0)
@qd.func
def clear_rigid_impulses(self):
    """Zero rigid DOF impulses in still-active batches."""
    for i_b, i_d in qd.ndrange(self.rigid_solver._B, self.rigid_solver.n_dofs):
        if not self.batch_active[i_b]:
            continue
        self.rigid_state_dof[i_b, i_d].impulse = 0.0
@qd.func
def compute_preconditioner(self):
    """Build the PCG preconditioner (FEM block-diagonal inverse only)."""
    if qd.static(self.fem_solver.is_active):
        self.compute_fem_preconditioner()
@qd.func
def compute_fem_preconditioner(self):
    """Invert each vertex's 3x3 hessian diagonal block to form the preconditioner."""
    for i_b, i_v in qd.ndrange(self.fem_solver._B, self.fem_solver.n_vertices):
        if not self.batch_active[i_b]:
            continue
        self.pcg_fem_state_v[i_b, i_v].prec = self.pcg_fem_state_v[i_b, i_v].diag3x3.inverse()
@qd.func
def compute_fem_pcg_matrix_vector_product(self):
    """Compute Ap = A @ p on the FEM side for PCG-active batches."""
    self.compute_fem_matrix_vector_product(self.pcg_fem_state_v.p, self.pcg_fem_state_v.Ap, self.batch_pcg_active)
@qd.func
def compute_rigid_pcg_matrix_vector_product(self, rigid_global_info: array_class.RigidGlobalInfo):
    """Compute Ap = M @ p on the rigid side for PCG-active batches."""
    self.compute_rigid_mass_mat_vec_product(
        self.pcg_rigid_state_dof.p,
        self.pcg_rigid_state_dof.Ap,
        self.batch_pcg_active,
        rigid_global_info=rigid_global_info,
    )
@qd.func
def compute_elastic_products(self, i_b, i_e, S, i_vs, src):
    """Compute p9 = S^T @ src (flattened 9-vector) and H9 @ p9 for one element.

    ``S`` is the 4x3 shape-function gradient matrix and H9 the element's
    9x9 elastic hessian stored as 3x3 blocks.
    """
    p9 = qd.Vector.zero(gs.qd_float, 9)
    for i, j in qd.static(qd.ndrange(3, 4)):
        p9[i * 3 : i * 3 + 3] = p9[i * 3 : i * 3 + 3] + (S[j, i] * src[i_b, i_vs[j]])
    H9_p9 = qd.Vector.zero(gs.qd_float, 9)
    for i, j in qd.static(qd.ndrange(3, 3)):
        H9_p9[i * 3 : i * 3 + 3] = H9_p9[i * 3 : i * 3 + 3] + (
            self.fem_solver.elements_el_hessian[i_b, i, j, i_e] @ p9[j * 3 : j * 3 + 3]
        )
    return p9, H9_p9
@qd.func
def compute_fem_matrix_vector_product(self, src, dst, active):
    """
    Compute the FEM matrix-vector product, including mass matrix and elasticity stiffness matrix.

    Writes dst = A @ src for every batch flagged in ``active``; ``dst`` is
    overwritten by the inertia pass and accumulated into by the elasticity pass.
    """
    dt2 = self.fem_solver._substep_dt**2
    damping_alpha_factor = self.fem_solver._damping_alpha * self.fem_solver._substep_dt + 1.0
    damping_beta_factor = self.fem_solver._damping_beta / self.fem_solver._substep_dt + 1.0
    # Inertia
    for i_b, i_v in qd.ndrange(self.fem_solver._B, self.fem_solver.n_vertices):
        if not active[i_b]:
            continue
        dst[i_b, i_v] = (
            self.fem_solver.elements_v_info[i_v].mass_over_dt2 * src[i_b, i_v] * dt2 * damping_alpha_factor
        )
    # Elasticity
    for i_b, i_e in qd.ndrange(self.fem_solver._B, self.fem_solver.n_elements):
        if not active[i_b]:
            continue
        V_dt2 = self.fem_solver.elements_i[i_e].V * dt2
        B = self.fem_solver.elements_i[i_e].B
        S = qd.Matrix.zero(gs.qd_float, 4, 3)
        S[:3, :] = B
        # 4th row makes the shape-function gradients sum to zero
        S[3, :] = -B[0, :] - B[1, :] - B[2, :]
        i_vs = self.fem_solver.elements_i[i_e].el2v
        if qd.static(self.fem_solver._enable_vertex_constraints):
            # constrained vertices contribute nothing to / receive nothing from elasticity
            for i in qd.static(range(4)):
                if self.fem_solver.vertex_constraints.is_constrained[i_vs[i], i_b]:
                    S[i, :] = qd.Vector.zero(gs.qd_float, 3)
        _, new_p9 = self.compute_elastic_products(i_b, i_e, S, i_vs, src)
        # atomic
        scale = V_dt2 * damping_beta_factor
        for i in qd.static(range(4)):
            dst[i_b, i_vs[i]] += (S[i, 0] * new_p9[0:3] + S[i, 1] * new_p9[3:6] + S[i, 2] * new_p9[6:9]) * scale
@qd.kernel
def init_pcg_solve(self, entities_info: array_class.EntitiesInfo, rigid_global_info: array_class.RigidGlobalInfo):
    """Initialize PCG state (x, r, z, p, dot products) for all active solvers."""
    self.init_pcg_state()
    if qd.static(self.fem_solver.is_active):
        self.init_fem_pcg_solve()
    if qd.static(self.rigid_solver.is_active):
        self.init_rigid_pcg_solve(entities_info=entities_info, rigid_global_info=rigid_global_info)
    self.init_pcg_active()
@qd.func
def init_pcg_state(self):
    """Carry SAP activity into PCG activity and zero the dot-product accumulators."""
    for i_b in qd.ndrange(self._B):
        self.batch_pcg_active[i_b] = self.batch_active[i_b]
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_state[i_b].rTr = 0.0
        self.pcg_state[i_b].rTz = 0.0
@qd.func
def init_fem_pcg_solve(self):
    """Seed FEM PCG vectors: x=0, r=-gradient, z=prec@r, p=z; accumulate rTr, rTz."""
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_fem_state_v[i_b, i_v].x = 0.0
        self.pcg_fem_state_v[i_b, i_v].r = -self.fem_state_v.gradient[i_b, i_v]
        self.pcg_fem_state_v[i_b, i_v].z = self.pcg_fem_state_v[i_b, i_v].prec @ self.pcg_fem_state_v[i_b, i_v].r
        self.pcg_fem_state_v[i_b, i_v].p = self.pcg_fem_state_v[i_b, i_v].z
        self.pcg_state[i_b].rTr += self.pcg_fem_state_v[i_b, i_v].r.dot(self.pcg_fem_state_v[i_b, i_v].r)
        self.pcg_state[i_b].rTz += self.pcg_fem_state_v[i_b, i_v].r.dot(self.pcg_fem_state_v[i_b, i_v].z)
@qd.func
def compute_rigid_mass_mat_vec_product(self, vec, out, active, rigid_global_info: array_class.RigidGlobalInfo):
    """
    Compute the rigid mass matrix-vector product.

    out = M @ vec for each batch flagged in ``active``; ``out`` is zeroed first.
    """
    out.fill(0.0)
    for i_b, i_d0, i_d1 in qd.ndrange(self._B, self.rigid_solver.n_dofs, self.rigid_solver.n_dofs):
        if not active[i_b]:
            continue
        out[i_b, i_d1] += rigid_global_info.mass_mat[i_d1, i_d0, i_b] * vec[i_b, i_d0]
# FIXME: This following two rigid solves are duplicated with the one in rigid_solver.py:func_solve_mass_batched
# Consider refactoring.
@qd.func
def rigid_solve_pcg(
    self,
    vec,
    out,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
):
    """Solve M @ out = vec per batch using the cached L @ D @ L^T factorization.

    Operates entity-by-entity (the factorization is block-wise per entity)
    and only on PCG-active batches.
    """
    # Step 1: Solve w st. L^T @ w = y
    for i_b, i_e in qd.ndrange(self._B, self.rigid_solver.n_entities):
        if not self.batch_pcg_active[i_b]:
            continue
        entity_dof_start = entities_info.dof_start[i_e]
        entity_dof_end = entities_info.dof_end[i_e]
        n_dofs = entities_info.n_dofs[i_e]
        for i_d_ in range(n_dofs):
            # backward substitution: iterate DOFs from last to first
            i_d = entity_dof_end - i_d_ - 1
            out[i_b, i_d] = vec[i_b, i_d]
            for j_d in range(i_d + 1, entity_dof_end):
                out[i_b, i_d] -= rigid_global_info.mass_mat_L[j_d, i_d, i_b] * out[i_b, j_d]
    # Step 2: z = D^{-1} w
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_pcg_active[i_b]:
            continue
        out[i_b, i_d] *= rigid_global_info.mass_mat_D_inv[i_d, i_b]
    # Step 3: Solve x st. L @ x = z
    for i_b, i_e in qd.ndrange(self._B, self.rigid_solver.n_entities):
        if not self.batch_pcg_active[i_b]:
            continue
        entity_dof_start = entities_info.dof_start[i_e]
        entity_dof_end = entities_info.dof_end[i_e]
        n_dofs = entities_info.n_dofs[i_e]
        for i_d in range(entity_dof_start, entity_dof_end):
            # forward substitution
            for j_d in range(entity_dof_start, i_d):
                out[i_b, i_d] -= rigid_global_info.mass_mat_L[i_d, j_d, i_b] * out[i_b, j_d]
@qd.func
def rigid_solve_jacobian(
    self,
    vec,
    out,
    n_contact_pairs,
    i_bs,
    dim,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
):
    """Apply the L @ D @ L^T mass solve to each row of per-contact-pair Jacobians.

    Same factorized solve as ``rigid_solve_pcg`` but indexed by contact pair
    (batch taken from ``i_bs``) and vectorized over ``dim`` components.
    """
    # Step 1: Solve w st. L^T @ w = y
    for i_p, i_e, k in qd.ndrange(n_contact_pairs, self.rigid_solver.n_entities, dim):
        i_b = i_bs[i_p]
        entity_dof_start = entities_info.dof_start[i_e]
        entity_dof_end = entities_info.dof_end[i_e]
        n_dofs = entities_info.n_dofs[i_e]
        for i_d_ in range(n_dofs):
            # backward substitution: iterate DOFs from last to first
            i_d = entity_dof_end - i_d_ - 1
            out[i_p, i_d][k] = vec[i_p, i_d][k]
            for j_d in range(i_d + 1, entity_dof_end):
                out[i_p, i_d][k] -= rigid_global_info.mass_mat_L[j_d, i_d, i_b] * out[i_p, j_d][k]
    # Step 2: z = D^{-1} w
    for i_p, i_d, k in qd.ndrange(n_contact_pairs, self.rigid_solver.n_dofs, dim):
        i_b = i_bs[i_p]
        out[i_p, i_d][k] *= rigid_global_info.mass_mat_D_inv[i_d, i_b]
    # Step 3: Solve x st. L @ x = z
    for i_p, i_e, k in qd.ndrange(n_contact_pairs, self.rigid_solver.n_entities, dim):
        i_b = i_bs[i_p]
        entity_dof_start = entities_info.dof_start[i_e]
        entity_dof_end = entities_info.dof_end[i_e]
        n_dofs = entities_info.n_dofs[i_e]
        for i_d in range(entity_dof_start, entity_dof_end):
            # forward substitution
            for j_d in range(entity_dof_start, i_d):
                out[i_p, i_d][k] -= rigid_global_info.mass_mat_L[i_d, j_d, i_b] * out[i_p, j_d][k]
@qd.func
def init_rigid_pcg_solve(
    self, entities_info: array_class.EntitiesInfo, rigid_global_info: array_class.RigidGlobalInfo
):
    """Seed rigid PCG vectors: x=0, r=-gradient, z=M^{-1}r, p=z; accumulate rTr, rTz."""
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_rigid_state_dof[i_b, i_d].x = 0.0
        self.pcg_rigid_state_dof[i_b, i_d].r = -self.rigid_state_dof.gradient[i_b, i_d]
        self.pcg_state[i_b].rTr += self.pcg_rigid_state_dof[i_b, i_d].r ** 2
    # preconditioning: z = M^{-1} @ r via the cached mass factorization
    self.rigid_solve_pcg(
        self.pcg_rigid_state_dof.r,
        self.pcg_rigid_state_dof.z,
        entities_info=entities_info,
        rigid_global_info=rigid_global_info,
    )
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_rigid_state_dof[i_b, i_d].p = self.pcg_rigid_state_dof[i_b, i_d].z
        self.pcg_state[i_b].rTz += self.pcg_rigid_state_dof[i_b, i_d].r * self.pcg_rigid_state_dof[i_b, i_d].z
@qd.func
def init_pcg_active(self):
    """Deactivate batches whose initial residual already meets the threshold."""
    for i_b in qd.ndrange(self._B):
        if not self.batch_pcg_active[i_b]:
            continue
        self.batch_pcg_active[i_b] = self.pcg_state[i_b].rTr > self._pcg_threshold
def one_pcg_iter(self):
    """Run a single PCG iteration (Python-side wrapper over the kernel)."""
    self._kernel_one_pcg_iter(
        entities_info=self.rigid_solver.entities_info, rigid_global_info=self.rigid_solver._rigid_global_info
    )
@qd.kernel
def _kernel_one_pcg_iter(
    self, entities_info: array_class.EntitiesInfo, rigid_global_info: array_class.RigidGlobalInfo
):
    """One full PCG iteration: Ap, pTAp, alpha, state update, convergence, new p."""
    self.compute_pcg_matrix_vector_product(rigid_global_info=rigid_global_info)
    self.clear_pcg_state()
    self.compute_pcg_pTAp()
    self.compute_alpha()
    self.compute_pcg_state(entities_info=entities_info, rigid_global_info=rigid_global_info)
    self.check_pcg_convergence()
    self.compute_p()
@qd.func
def compute_pcg_matrix_vector_product(self, rigid_global_info: array_class.RigidGlobalInfo):
    """
    Compute the matrix-vector product Ap used in the Preconditioned Conjugate Gradient method.

    Combines solver dynamics terms with constraint and contact hessian terms.
    """
    if qd.static(self.fem_solver.is_active):
        self.compute_fem_pcg_matrix_vector_product()
    if qd.static(self.rigid_solver.is_active):
        self.compute_rigid_pcg_matrix_vector_product(rigid_global_info=rigid_global_info)
    # Constraint
    if qd.static(self.rigid_solver.is_active and self.rigid_solver.n_equalities > 0):
        self.equality_constraint_handler.compute_Ap()
    # Contact
    for contact in qd.static(self.contact_handlers):
        contact.compute_pcg_matrix_vector_product()
@qd.func
def clear_pcg_state(self):
    """Zero the per-iteration PCG accumulators for active batches."""
    for i_b in qd.ndrange(self._B):
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_state[i_b].pTAp = 0.0
        self.pcg_state[i_b].rTr_new = 0.0
        self.pcg_state[i_b].rTz_new = 0.0
@qd.func
def compute_pcg_pTAp(self):
    """
    Compute the product p^T @ A @ p used in the Preconditioned Conjugate Gradient method.

    Notes
    -----
    Reference: https://en.wikipedia.org/wiki/Conjugate_gradient_method#The_preconditioned_conjugate_gradient_method
    """
    if qd.static(self.fem_solver.is_active):
        self.compute_fem_pcg_pTAp()
    if qd.static(self.rigid_solver.is_active):
        self.compute_rigid_pcg_pTAp()
@qd.func
def compute_fem_pcg_pTAp(self):
    """Accumulate the FEM contribution p . Ap into pTAp."""
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_state[i_b].pTAp += self.pcg_fem_state_v[i_b, i_v].p.dot(self.pcg_fem_state_v[i_b, i_v].Ap)
@qd.func
def compute_rigid_pcg_pTAp(self):
    """Accumulate the rigid contribution p * Ap into pTAp."""
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_state[i_b].pTAp += self.pcg_rigid_state_dof[i_b, i_d].p * self.pcg_rigid_state_dof[i_b, i_d].Ap
@qd.func
def compute_alpha(self):
    """Compute the PCG step size alpha = rTz / pTAp per active batch."""
    for i_b in qd.ndrange(self._B):
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_state[i_b].alpha = self.pcg_state[i_b].rTz / self.pcg_state[i_b].pTAp
@qd.func
def compute_pcg_state(
    self, entities_info: array_class.EntitiesInfo, rigid_global_info: array_class.RigidGlobalInfo
):
    """Apply the PCG update (x, r, z, dot products) on all active solvers."""
    if qd.static(self.fem_solver.is_active):
        self.compute_fem_pcg_state()
    if qd.static(self.rigid_solver.is_active):
        self.compute_rigid_pcg_state(entities_info=entities_info, rigid_global_info=rigid_global_info)
@qd.func
def compute_fem_pcg_state(self):
    """FEM PCG update: x += alpha p, r -= alpha Ap, z = prec r; accumulate new dot products."""
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_fem_state_v[i_b, i_v].x = (
            self.pcg_fem_state_v[i_b, i_v].x + self.pcg_state[i_b].alpha * self.pcg_fem_state_v[i_b, i_v].p
        )
        self.pcg_fem_state_v[i_b, i_v].r = (
            self.pcg_fem_state_v[i_b, i_v].r - self.pcg_state[i_b].alpha * self.pcg_fem_state_v[i_b, i_v].Ap
        )
        self.pcg_fem_state_v[i_b, i_v].z = self.pcg_fem_state_v[i_b, i_v].prec @ self.pcg_fem_state_v[i_b, i_v].r
        self.pcg_state[i_b].rTr_new += self.pcg_fem_state_v[i_b, i_v].r.norm_sqr()
        self.pcg_state[i_b].rTz_new += self.pcg_fem_state_v[i_b, i_v].r.dot(self.pcg_fem_state_v[i_b, i_v].z)
@qd.func
def compute_rigid_pcg_state(
    self, entities_info: array_class.EntitiesInfo, rigid_global_info: array_class.RigidGlobalInfo
):
    """
    PCG state update for the rigid DOFs of every active batch:
    x += alpha * p, r -= alpha * Ap, then z is obtained via rigid_solve_pcg and
    rTz_new accumulated in a second pass.
    """
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_rigid_state_dof[i_b, i_d].x = (
            self.pcg_rigid_state_dof[i_b, i_d].x + self.pcg_state[i_b].alpha * self.pcg_rigid_state_dof[i_b, i_d].p
        )
        self.pcg_rigid_state_dof[i_b, i_d].r = (
            self.pcg_rigid_state_dof[i_b, i_d].r - self.pcg_state[i_b].alpha * self.pcg_rigid_state_dof[i_b, i_d].Ap
        )
        self.pcg_state[i_b].rTr_new += self.pcg_rigid_state_dof[i_b, i_d].r * self.pcg_rigid_state_dof[i_b, i_d].r
    # Preconditioner application: solves into z from r across all DOFs at once
    # (presumably z = M^-1 r using the rigid mass matrix — see rigid_solve_pcg).
    self.rigid_solve_pcg(
        self.pcg_rigid_state_dof.r,
        self.pcg_rigid_state_dof.z,
        entities_info=entities_info,
        rigid_global_info=rigid_global_info,
    )
    # Second pass: rTz_new needs the freshly computed z, so it cannot be fused with the first loop.
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_state[i_b].rTz_new += self.pcg_rigid_state_dof[i_b, i_d].r * self.pcg_rigid_state_dof[i_b, i_d].z
@qd.func
def check_pcg_convergence(self):
    """
    Deactivate converged batches (residual below threshold) and, for the rest,
    roll the iteration state forward: beta = rTz_new / rTz, rTr = rTr_new, rTz = rTz_new.
    """
    # check convergence
    for i_b in qd.ndrange(self._B):
        if not self.batch_pcg_active[i_b]:
            continue
        self.batch_pcg_active[i_b] = self.pcg_state[i_b].rTr_new > self._pcg_threshold
    # update beta, rTr, rTz
    for i_b in qd.ndrange(self._B):
        if not self.batch_pcg_active[i_b]:
            continue
        self.pcg_state[i_b].beta = self.pcg_state[i_b].rTz_new / self.pcg_state[i_b].rTz
        self.pcg_state[i_b].rTr = self.pcg_state[i_b].rTr_new
        self.pcg_state[i_b].rTz = self.pcg_state[i_b].rTz_new
@qd.func
def compute_p(self):
    """Update the PCG search direction p <- z + beta * p for every active solver."""
    if qd.static(self.fem_solver.is_active):
        self.compute_fem_p()
    if qd.static(self.rigid_solver.is_active):
        self.compute_rigid_p()
@qd.func
def compute_fem_p(self):
    """Update the FEM search direction p <- z + beta * p for each active batch/vertex."""
    for b, v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if self.batch_pcg_active[b]:
            self.pcg_fem_state_v[b, v].p = (
                self.pcg_fem_state_v[b, v].z + self.pcg_state[b].beta * self.pcg_fem_state_v[b, v].p
            )
@qd.func
def compute_rigid_p(self):
    """Update the rigid search direction p <- z + beta * p for each active batch/DOF."""
    for b, d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if self.batch_pcg_active[b]:
            self.pcg_rigid_state_dof[b, d].p = (
                self.pcg_rigid_state_dof[b, d].z + self.pcg_state[b].beta * self.pcg_rigid_state_dof[b, d].p
            )
def pcg_solve(self):
    """Run the full PCG solve: initialize, then a fixed number of iterations."""
    rigid = self.rigid_solver
    self.init_pcg_solve(entities_info=rigid.entities_info, rigid_global_info=rigid._rigid_global_info)
    for _ in range(self._n_pcg_iterations):
        self.one_pcg_iter()
@qd.func
def compute_total_energy(
    self,
    i_step: qd.i32,
    energy: qd.template(),
    dofs_state: array_class.DofsState,
    rigid_global_info: array_class.RigidGlobalInfo,
):
    """
    Accumulate the total SAP energy per batch into `energy`: FEM + rigid terms,
    plus equality-constraint and contact contributions.
    """
    energy.fill(0.0)
    if qd.static(self.fem_solver.is_active):
        self.compute_fem_energy(i_step, energy)
    if qd.static(self.rigid_solver.is_active):
        self.compute_rigid_energy(energy, dofs_state=dofs_state, rigid_global_info=rigid_global_info)
    # Constraint
    if qd.static(self.rigid_solver.is_active and self.rigid_solver.n_equalities > 0):
        self.equality_constraint_handler.compute_energy(energy)
    # Contact
    for contact in qd.static(self.contact_handlers):
        contact.compute_energy(energy)
@qd.func
def compute_fem_energy(self, i_step: qd.i32, energy: qd.template()):
    """
    Accumulate the FEM energy (inertia + damped elastic terms) per batch into `energy`
    for batches still active in the line search.
    """
    dt2 = self.fem_solver._substep_dt**2
    # Rayleigh-style damping factors built from the solver's alpha/beta damping coefficients.
    damping_alpha_factor = self.fem_solver._damping_alpha * self.fem_solver._substep_dt + 1.0
    damping_beta_factor = self.fem_solver._damping_beta / self.fem_solver._substep_dt + 1.0
    # Inertia
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if not self.batch_linesearch_active[i_b]:
            continue
        # v_diff = current iterate velocity minus the solver's velocity at the next substep
        # (assumes elements_v[i_step + 1] holds the free-motion velocity — TODO confirm).
        self.fem_state_v.v_diff[i_b, i_v] = (
            self.fem_state_v.v[i_b, i_v] - self.fem_solver.elements_v[i_step + 1, i_v, i_b].vel
        )
        # 0.5 * m * |v_diff|^2 with damping; mass_over_dt2 * dt2 recovers the plain mass.
        energy[i_b] += (
            0.5
            * self.fem_solver.elements_v_info[i_v].mass_over_dt2
            * self.fem_state_v.v_diff[i_b, i_v].norm_sqr()
            * dt2
            * damping_alpha_factor
        )
    # Elastic
    for i_b, i_e in qd.ndrange(self._B, self.fem_solver.n_elements):
        if not self.batch_linesearch_active[i_b]:
            continue
        V_dt2 = self.fem_solver.elements_i[i_e].V * dt2
        B = self.fem_solver.elements_i[i_e].B
        # S maps the 4 vertex velocities of a tet to the deformation-gradient rate:
        # rows 0-2 are B, row 3 is the negated column sums (barycentric closure).
        S = qd.Matrix.zero(gs.qd_float, 4, 3)
        S[:3, :] = B
        S[3, :] = -B[0, :] - B[1, :] - B[2, :]
        i_vs = self.fem_solver.elements_i[i_e].el2v
        if qd.static(self.fem_solver._enable_vertex_constraints):
            # Constrained vertices contribute no elastic motion: zero their row of S.
            for i in qd.static(range(4)):
                if self.fem_solver.vertex_constraints.is_constrained[i_vs[i], i_b]:
                    S[i, :] = qd.Vector.zero(gs.qd_float, 3)
        p9, H9_p9 = self.compute_elastic_products(i_b, i_e, S, i_vs, self.fem_state_v.v_diff)
        energy[i_b] += 0.5 * p9.dot(H9_p9) * damping_beta_factor * V_dt2
@qd.func
def compute_rigid_energy(
    self, energy: qd.template(), dofs_state: array_class.DofsState, rigid_global_info: array_class.RigidGlobalInfo
):
    """
    Accumulate the rigid kinetic-energy term 0.5 * v_diff^T M v_diff per batch into
    `energy`, where v_diff is the deviation from the solver's DOF velocities.
    """
    # Kinetic energy
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_linesearch_active[i_b]:
            continue
        self.rigid_state_dof.v_diff[i_b, i_d] = self.rigid_state_dof.v[i_b, i_d] - dofs_state.vel[i_d, i_b]
    # mass_v_diff = M @ v_diff, computed once for all DOFs.
    self.compute_rigid_mass_mat_vec_product(
        self.rigid_state_dof.v_diff,
        self.rigid_state_dof.mass_v_diff,
        self.batch_linesearch_active,
        rigid_global_info=rigid_global_info,
    )
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_linesearch_active[i_b]:
            continue
        energy[i_b] += 0.5 * self.rigid_state_dof.v_diff[i_b, i_d] * self.rigid_state_dof.mass_v_diff[i_b, i_d]
@qd.kernel
def init_exact_linesearch(
    self, i_step: qd.i32, dofs_state: array_class.DofsState, rigid_global_info: array_class.RigidGlobalInfo
):
    """
    Kernel that sets up the exact (rtsafe) line search: records the pre-step energy,
    precomputes search-direction data, evaluates the line derivative at the initial
    step, checks for early convergence, and initializes the Newton/bisection bracket.
    """
    self._func_init_linesearch(self._linesearch_max_step_size)
    self.compute_total_energy(
        i_step, self.linesearch_state.prev_energy, dofs_state=dofs_state, rigid_global_info=rigid_global_info
    )
    self.prepare_search_direction_data(rigid_global_info=rigid_global_info)
    self.update_velocity_linesearch()
    self.compute_line_energy_gradient_hessian(i_step, dofs_state=dofs_state)
    self.check_initial_exact_linesearch_convergence()
    self.init_newton_linesearch()
@qd.func
def init_newton_linesearch(self):
    """
    Initialize the safeguarded-Newton (rtsafe) bracket [alpha_min, alpha_max] per batch,
    the first Newton step size, and the normalized derivative bounds f_lower / f_upper.
    Batches whose bracket endpoints already satisfy the tolerance are deactivated.
    """
    for i_b in qd.ndrange(self._B):
        if not self.batch_linesearch_active[i_b]:
            continue
        # m < 0 along a descent direction, so dell_scale > 0 normalizes derivatives.
        self.linesearch_state[i_b].dell_scale = -self.linesearch_state[i_b].m
        self.linesearch_state[i_b].step_size = qd.min(
            -self.linesearch_state[i_b].m / self.linesearch_state[i_b].d2ell_dalpha2, self._linesearch_max_step_size
        )
        self.linesearch_state[i_b].alpha_min = 0.0
        self.linesearch_state[i_b].alpha_max = self._linesearch_max_step_size
        # f(0) = m / dell_scale = -1 by construction.
        self.linesearch_state[i_b].f_lower = -1.0
        self.linesearch_state[i_b].f_upper = (
            self.linesearch_state[i_b].dell_dalpha / self.linesearch_state[i_b].dell_scale
        )
        self.linesearch_state[i_b].alpha_tol = self._linesearch_ftol * self.linesearch_state[i_b].step_size
        # minus_dalpha tracks -(bracket width); used by the "Newton is slow" test.
        self.linesearch_state[i_b].minus_dalpha = (
            self.linesearch_state[i_b].alpha_min - self.linesearch_state[i_b].alpha_max
        )
        self.linesearch_state[i_b].minus_dalpha_prev = self.linesearch_state[i_b].minus_dalpha
        if qd.abs(self.linesearch_state[i_b].f_lower) < self._linesearch_ftol:
            self.batch_linesearch_active[i_b] = False
            self.linesearch_state[i_b].step_size = self.linesearch_state[i_b].alpha_min
        if qd.abs(self.linesearch_state[i_b].f_upper) < self._linesearch_ftol:
            self.batch_linesearch_active[i_b] = False
            self.linesearch_state[i_b].step_size = self.linesearch_state[i_b].alpha_max
@qd.func
def compute_line_energy_gradient_hessian(self, i_step: qd.i32, dofs_state: array_class.DofsState):
    """
    Evaluate the 1-D line energy ell(alpha), its gradient dell/dalpha and Hessian
    d2ell/dalpha2 at the current step size, gathering contributions from the FEM
    and rigid solvers plus constraint and contact handlers.
    """
    self.init_linesearch_energy_gradient_hessian()
    if qd.static(self.fem_solver.is_active):
        self.compute_fem_energy_alpha(i_step, self.linesearch_state.energy)
        self.compute_fem_gradient_alpha(i_step)
    if qd.static(self.rigid_solver.is_active):
        self.compute_rigid_energy_alpha(self.linesearch_state.energy, dofs_state=dofs_state)
        self.compute_rigid_gradient_alpha(dofs_state=dofs_state)
    # Constraint
    if qd.static(self.rigid_solver.is_active and self.rigid_solver.n_equalities > 0):
        self.equality_constraint_handler.compute_energy_gamma_G()
        self.equality_constraint_handler.update_gradient_hessian_alpha()
    # Contact
    for contact in qd.static(self.contact_handlers):
        contact.compute_energy_gamma_G()
        contact.update_gradient_hessian_alpha()
@qd.func
def init_linesearch_energy_gradient_hessian(self):
    """
    Seed the line quantities with the quadratic momentum term: energy starts at
    prev_energy + 0.5 * alpha^2 * d2ellA_dalpha2, gradient at 0, Hessian at the
    constant curvature d2ellA_dalpha2.
    """
    energy = qd.static(self.linesearch_state.energy)
    alpha = qd.static(self.linesearch_state.step_size)
    for i_b in qd.ndrange(self._B):
        if not self.batch_linesearch_active[i_b]:
            continue
        # energy
        energy[i_b] = (
            self.linesearch_state.prev_energy[i_b]
            + 0.5 * alpha[i_b] ** 2 * self.linesearch_state[i_b].d2ellA_dalpha2
        )
        # gradient
        self.linesearch_state[i_b].dell_dalpha = 0.0
        # hessian
        self.linesearch_state.d2ell_dalpha2[i_b] = self.linesearch_state.d2ellA_dalpha2[i_b]
@qd.func
def compute_fem_gradient_alpha(self, i_step: qd.i32):
    """Add the FEM term dp . (v - v_star) to the line gradient dell/dalpha per batch."""
    dp = qd.static(self.linesearch_fem_state_v.dp)
    v = qd.static(self.fem_state_v.v)
    # v_star indexed at i_step + 1: the solver's target velocity for the next substep.
    v_star = qd.static(self.fem_solver.elements_v.vel)
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if not self.batch_linesearch_active[i_b]:
            continue
        self.linesearch_state.dell_dalpha[i_b] += dp[i_b, i_v].dot(v[i_b, i_v] - v_star[i_step + 1, i_v, i_b])
@qd.func
def compute_rigid_gradient_alpha(self, dofs_state: array_class.DofsState):
    """Add the rigid term dp * (v - v_star) to the line gradient dell/dalpha per batch."""
    dp = qd.static(self.linesearch_rigid_state_dof.dp)
    v = qd.static(self.rigid_state_dof.v)
    v_star = qd.static(dofs_state.vel)
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_linesearch_active[i_b]:
            continue
        self.linesearch_state.dell_dalpha[i_b] += dp[i_b, i_d] * (v[i_b, i_d] - v_star[i_d, i_b])
@qd.func
def compute_fem_energy_alpha(self, i_step: qd.i32, energy: qd.template()):
    """Add the FEM cross term alpha * dp . (v - v_star) to the line energy per batch."""
    alpha = qd.static(self.linesearch_state.step_size)
    dp = qd.static(self.linesearch_fem_state_v.dp)
    v = qd.static(self.fem_state_v.v)
    v_star = qd.static(self.fem_solver.elements_v.vel)
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if not self.batch_linesearch_active[i_b]:
            continue
        energy[i_b] += alpha[i_b] * dp[i_b, i_v].dot(v[i_b, i_v] - v_star[i_step + 1, i_v, i_b])
@qd.func
def compute_rigid_energy_alpha(self, energy: qd.template(), dofs_state: array_class.DofsState):
    """Add the rigid cross term alpha * dp * (v - v_star) to the line energy per batch."""
    alpha = qd.static(self.linesearch_state.step_size)
    dp = qd.static(self.linesearch_rigid_state_dof.dp)
    v = qd.static(self.rigid_state_dof.v)
    v_star = qd.static(dofs_state.vel)
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_linesearch_active[i_b]:
            continue
        energy[i_b] += alpha[i_b] * dp[i_b, i_d] * (v[i_b, i_d] - v_star[i_d, i_b])
@qd.func
def prepare_search_direction_data(self, rigid_global_info: array_class.RigidGlobalInfo):
    """
    Precompute data that depends only on the search direction (dp = A @ dx per
    solver, handler-specific direction data), then the constant curvature
    d2ellA_dalpha2 used throughout the line search.
    """
    if qd.static(self.fem_solver.is_active):
        self.prepare_fem_search_direction_data()
    if qd.static(self.rigid_solver.is_active):
        self.prepare_rigid_search_direction_data(rigid_global_info=rigid_global_info)
    # Constraint
    if qd.static(self.rigid_solver.is_active and self.rigid_solver.n_equalities > 0):
        self.equality_constraint_handler.prepare_search_direction_data()
    # Contact
    for contact in qd.static(self.contact_handlers):
        contact.prepare_search_direction_data()
    self.compute_d2ellA_dalpha2()
@qd.func
def compute_d2ellA_dalpha2(self):
    """Reset then accumulate the direction curvature d2ellA_dalpha2 = dx^T A dx per batch."""
    for i_b in qd.ndrange(self._B):
        self.linesearch_state[i_b].d2ellA_dalpha2 = 0.0
    if qd.static(self.fem_solver.is_active):
        self.compute_fem_d2ellA_dalpha2()
    if qd.static(self.rigid_solver.is_active):
        self.compute_rigid_d2ellA_dalpha2()
@qd.func
def compute_fem_d2ellA_dalpha2(self):
    """Accumulate the FEM part of d2ellA_dalpha2: sum of x . dp over vertices."""
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if not self.batch_linesearch_active[i_b]:
            continue
        self.linesearch_state[i_b].d2ellA_dalpha2 += self.pcg_fem_state_v[i_b, i_v].x.dot(
            self.linesearch_fem_state_v[i_b, i_v].dp
        )
@qd.func
def compute_rigid_d2ellA_dalpha2(self):
    """Accumulate the rigid part of d2ellA_dalpha2: sum of x * dp over DOFs."""
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_linesearch_active[i_b]:
            continue
        self.linesearch_state[i_b].d2ellA_dalpha2 += (
            self.pcg_rigid_state_dof[i_b, i_d].x * self.linesearch_rigid_state_dof[i_b, i_d].dp
        )
@qd.func
def prepare_fem_search_direction_data(self):
    """Compute dp = A @ x for the FEM search direction (x is the PCG solution)."""
    self.compute_fem_matrix_vector_product(
        self.pcg_fem_state_v.x, self.linesearch_fem_state_v.dp, self.batch_linesearch_active
    )
@qd.func
def prepare_rigid_search_direction_data(self, rigid_global_info: array_class.RigidGlobalInfo):
    """Compute dp = M @ x for the rigid search direction (x is the PCG solution)."""
    self.compute_rigid_mass_mat_vec_product(
        self.pcg_rigid_state_dof.x,
        self.linesearch_rigid_state_dof.dp,
        self.batch_linesearch_active,
        rigid_global_info=rigid_global_info,
    )
@qd.func
def _func_init_linesearch(self, step_size: float):
    """
    Activate line search for every batch that is still active in the outer solve,
    seed its step size and directional derivative m, then let each active solver
    record its per-element starting state.
    """
    for i_b in qd.ndrange(self._B):
        self.batch_linesearch_active[i_b] = self.batch_active[i_b]
        if not self.batch_linesearch_active[i_b]:
            continue
        self.linesearch_state[i_b].step_size = step_size
        self.linesearch_state[i_b].m = 0.0
    if qd.static(self.fem_solver.is_active):
        self._func_init_fem_linesearch()
    if qd.static(self.rigid_solver.is_active):
        self._func_init_rigid_linesearch()
@qd.func
def _func_init_fem_linesearch(self):
    """Accumulate m += x . gradient over FEM vertices and snapshot v as x_prev."""
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if not self.batch_linesearch_active[i_b]:
            continue
        self.linesearch_state[i_b].m += self.pcg_fem_state_v[i_b, i_v].x.dot(self.fem_state_v.gradient[i_b, i_v])
        self.linesearch_fem_state_v[i_b, i_v].x_prev = self.fem_state_v.v[i_b, i_v]
@qd.func
def _func_init_rigid_linesearch(self):
    """Accumulate m += x * gradient over rigid DOFs and snapshot v as x_prev."""
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_linesearch_active[i_b]:
            continue
        self.linesearch_state[i_b].m += (
            self.pcg_rigid_state_dof[i_b, i_d].x * self.rigid_state_dof.gradient[i_b, i_d]
        )
        self.linesearch_rigid_state_dof[i_b, i_d].x_prev = self.rigid_state_dof.v[i_b, i_d]
@qd.func
def check_initial_exact_linesearch_convergence(self):
    """
    Early-out checks before the rtsafe iterations: a batch stays active only if the
    line derivative at the max step is positive (a root exists in the bracket), and
    batches whose directional derivative magnitude is already below the SAP
    convergence tolerance take a unit step and stop (rare case, copied from Drake).
    """
    for i_b in qd.ndrange(self._B):
        if not self.batch_linesearch_active[i_b]:
            continue
        self.batch_linesearch_active[i_b] = self.linesearch_state[i_b].dell_dalpha > 0.0
    if qd.static(self.fem_solver.is_active):
        self.update_initial_fem_state()
    if qd.static(self.rigid_solver.is_active):
        self.update_initial_rigid_state()
    # When tolerance is small but gradient norm is small, take step 1.0 and end, this is a rare case, directly
    # copied from drake
    # Link: https://github.com/RobotLocomotion/drake/blob/3bb00e611983fb894151c547776d5aa85abe9139/multibody/contact_solvers/sap/sap_solver.cc#L625
    # NOTE: switched `range(self._B)` to `qd.ndrange(self._B)` for consistency with
    # every other batch loop in this file (equivalent iteration space).
    for i_b in qd.ndrange(self._B):
        if not self.batch_linesearch_active[i_b]:
            continue
        err_threshold = (
            self._sap_convergence_atol + self._sap_convergence_rtol * self.linesearch_state[i_b].prev_energy
        )
        if -self.linesearch_state[i_b].m < err_threshold:
            self.batch_linesearch_active[i_b] = False
            self.linesearch_state[i_b].step_size = 1.0
@qd.func
def update_initial_fem_state(self):
    """
    For batches that will take the early unit step (|m| below the SAP tolerance),
    set the FEM velocities directly to x_prev + x, i.e. a full step along the
    search direction.
    """
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if not self.batch_linesearch_active[i_b]:
            continue
        err_threshold = (
            self._sap_convergence_atol + self._sap_convergence_rtol * self.linesearch_state[i_b].prev_energy
        )
        if -self.linesearch_state[i_b].m < err_threshold:
            self.fem_state_v.v[i_b, i_v] = (
                self.linesearch_fem_state_v[i_b, i_v].x_prev + self.pcg_fem_state_v[i_b, i_v].x
            )
@qd.func
def update_initial_rigid_state(self):
    """
    For batches that will take the early unit step (|m| below the SAP tolerance),
    set the rigid DOF velocities directly to x_prev + x (full step along the
    search direction).
    """
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_linesearch_active[i_b]:
            continue
        err_threshold = (
            self._sap_convergence_atol + self._sap_convergence_rtol * self.linesearch_state[i_b].prev_energy
        )
        if -self.linesearch_state[i_b].m < err_threshold:
            self.rigid_state_dof.v[i_b, i_d] = (
                self.linesearch_rigid_state_dof[i_b, i_d].x_prev + self.pcg_rigid_state_dof[i_b, i_d].x
            )
def one_linesearch_iter(self, i_step: qd.i32):
    """
    One plain line-search iteration: apply the trial step to the velocities,
    re-evaluate the total energy, and test convergence.

    Fix: `compute_total_energy` (declared with mandatory `dofs_state` and
    `rigid_global_info` parameters) was previously called with only two
    arguments, which raises TypeError as soon as this path executes. Pass the
    rigid solver's state the same way `init_exact_linesearch` / `exact_linesearch` do.
    """
    self.update_velocity_linesearch()
    self.compute_total_energy(
        i_step,
        self.linesearch_state.energy,
        dofs_state=self.rigid_solver.dofs_state,
        rigid_global_info=self.rigid_solver._rigid_global_info,
    )
    self.check_linesearch_convergence()
@qd.func
def update_velocity_linesearch(self):
    """Apply the current trial step size to the velocities of every active solver."""
    if qd.static(self.fem_solver.is_active):
        self.update_fem_velocity_linesearch()
    if qd.static(self.rigid_solver.is_active):
        self.update_rigid_velocity_linesearch()
@qd.func
def update_fem_velocity_linesearch(self):
    """Set FEM velocities to x_prev + step_size * x for each active batch/vertex."""
    for i_b, i_v in qd.ndrange(self._B, self.fem_solver.n_vertices):
        if not self.batch_linesearch_active[i_b]:
            continue
        self.fem_state_v.v[i_b, i_v] = (
            self.linesearch_fem_state_v[i_b, i_v].x_prev
            + self.linesearch_state[i_b].step_size * self.pcg_fem_state_v[i_b, i_v].x
        )
@qd.func
def update_rigid_velocity_linesearch(self):
    """Set rigid DOF velocities to x_prev + step_size * x for each active batch/DOF."""
    for i_b, i_d in qd.ndrange(self._B, self.rigid_solver.n_dofs):
        if not self.batch_linesearch_active[i_b]:
            continue
        self.rigid_state_dof.v[i_b, i_d] = (
            self.linesearch_rigid_state_dof[i_b, i_d].x_prev
            + self.linesearch_state[i_b].step_size * self.pcg_rigid_state_dof[i_b, i_d].x
        )
def exact_linesearch(self, i_step: qd.i32):
    """
    Exact line search via the rtsafe algorithm (Newton iteration safeguarded by
    bisection): initialize the bracket, then run a fixed number of iterations.

    Note
    ------
    Exact line search using rtsafe
    https://github.com/RobotLocomotion/drake/blob/master/multibody/contact_solvers/sap/sap_solver.h#L393
    """
    dofs_state = self.rigid_solver.dofs_state
    self.init_exact_linesearch(
        i_step, dofs_state=dofs_state, rigid_global_info=self.rigid_solver._rigid_global_info
    )
    for _ in range(self._n_linesearch_iterations):
        self.one_exact_linesearch_iter(i_step, dofs_state=dofs_state)
@qd.kernel
def one_exact_linesearch_iter(self, i_step: qd.i32, dofs_state: array_class.DofsState):
    """
    One rtsafe iteration: apply the trial step, evaluate the line energy,
    gradient and Hessian, update the root bracket, and choose the next step size.
    """
    self.update_velocity_linesearch()
    self.compute_line_energy_gradient_hessian(i_step, dofs_state=dofs_state)
    self.compute_f_df_bracket()
    self.find_next_step_size()
@qd.func
def compute_f_df_bracket(self):
    """
    Compute the function (derivative of total energy) value and its derivative to alpha.
    Update the bracket for the next step size.
    The bracket is defined by [alpha_min, alpha_max] which is the range that contains the root of df/dalpha = 0.
    """
    for i_b in qd.ndrange(self._B):
        if not self.batch_linesearch_active[i_b]:
            continue
        # Normalize by dell_scale so the tolerance test is scale-invariant.
        self.linesearch_state[i_b].f = (
            self.linesearch_state[i_b].dell_dalpha / self.linesearch_state[i_b].dell_scale
        )
        self.linesearch_state[i_b].df = (
            self.linesearch_state[i_b].d2ell_dalpha2 / self.linesearch_state[i_b].dell_scale
        )
        # Shrink the bracket toward whichever side keeps the sign change enclosed.
        if qd.math.sign(self.linesearch_state[i_b].f) != qd.math.sign(self.linesearch_state[i_b].f_upper):
            self.linesearch_state[i_b].alpha_min = self.linesearch_state[i_b].step_size
            self.linesearch_state[i_b].f_lower = self.linesearch_state[i_b].f
        else:
            self.linesearch_state[i_b].alpha_max = self.linesearch_state[i_b].step_size
            self.linesearch_state[i_b].f_upper = self.linesearch_state[i_b].f
        if qd.abs(self.linesearch_state[i_b].f) < self._linesearch_ftol:
            self.batch_linesearch_active[i_b] = False
@qd.func
def find_next_step_size(self):
    """
    Pick the next trial step (rtsafe): take a Newton step f/df unless Newton
    converges too slowly or would leave the bracket, in which case bisect.
    Deactivate a batch once the step change is below alpha_tol.
    """
    for i_b in qd.ndrange(self._B):
        if not self.batch_linesearch_active[i_b]:
            continue
        # Newton is "slow" if its step would not at least halve the previous interval.
        newton_is_slow = 2.0 * qd.abs(self.linesearch_state[i_b].f) > qd.abs(
            self.linesearch_state[i_b].minus_dalpha_prev * self.linesearch_state[i_b].df
        )
        self.linesearch_state[i_b].minus_dalpha_prev = self.linesearch_state[i_b].minus_dalpha
        if newton_is_slow:
            # bisect
            self.linesearch_state[i_b].minus_dalpha = 0.5 * (
                self.linesearch_state[i_b].alpha_min - self.linesearch_state[i_b].alpha_max
            )
            self.linesearch_state[i_b].step_size = (
                self.linesearch_state[i_b].alpha_min - self.linesearch_state[i_b].minus_dalpha
            )
        else:
            # newton
            self.linesearch_state[i_b].minus_dalpha = self.linesearch_state[i_b].f / self.linesearch_state[i_b].df
            self.linesearch_state[i_b].step_size = (
                self.linesearch_state[i_b].step_size - self.linesearch_state[i_b].minus_dalpha
            )
            # Fall back to bisection if the Newton step escaped the bracket.
            if (
                self.linesearch_state[i_b].step_size <= self.linesearch_state[i_b].alpha_min
                or self.linesearch_state[i_b].step_size >= self.linesearch_state[i_b].alpha_max
            ):
                # bisect
                self.linesearch_state[i_b].minus_dalpha = 0.5 * (
                    self.linesearch_state[i_b].alpha_min - self.linesearch_state[i_b].alpha_max
                )
                self.linesearch_state[i_b].step_size = (
                    self.linesearch_state[i_b].alpha_min - self.linesearch_state[i_b].minus_dalpha
                )
        if qd.abs(self.linesearch_state[i_b].minus_dalpha) < self.linesearch_state[i_b].alpha_tol:
            self.batch_linesearch_active[i_b] = False
# ------------------------------------------------------------------------------------
# ----------------------------------- Properties -------------------------------------
# ------------------------------------------------------------------------------------
@property
def active_solvers(self):
    """Solvers currently active in the scene, as reported by the scene's simulator."""
    simulator = self.sim
    return simulator.active_solvers
@qd.data_oriented
class BaseConstraintHandler(RBC):
    """
    Base class for constraint handling in SAPCoupler.

    Holds the shared regularization / impulse / energy machinery; subclasses
    supply the constraint Jacobian (compute_vc) and storage.
    """

    def __init__(
        self,
        simulator: "Simulator",
        stiffness: float = 1e8,
        beta: float = 0.1,
    ) -> None:
        self.sim = simulator
        self.stiffness = stiffness  # constraint stiffness k used for new constraints
        self.beta = beta  # near-rigid regularization parameter (see compute_constraint_regularization)
        self._B = simulator._B
        self.coupler = simulator.coupler
        # Per-constraint SAP bookkeeping, one scalar lane per field.
        self.sap_constraint_info_type = qd.types.struct(
            k=gs.qd_float,  # constraint stiffness
            R=gs.qd_float,  # Regularization
            R_inv=gs.qd_float,  # Inverse of R
            v_hat=gs.qd_float,  # Stablization velocity
            energy=gs.qd_float,  # energy
            gamma=gs.qd_float,  # contact impulse
            G=gs.qd_float,  # Hessian matrix
            dvc=gs.qd_float,  # change in constraint velocity
        )

    @qd.func
    def compute_constraint_regularization(self, sap_info, i_c, w_rms, time_step):
        """Set R (and R_inv) from the near-rigid bound beta^2/(4*pi^2)*w_rms and 1/(dt^2*k)."""
        beta_factor = self.beta**2 / (4.0 * qd.math.pi**2)
        dt2 = time_step**2
        k = sap_info[i_c].k
        R = max(beta_factor * w_rms, 1.0 / (dt2 * k))
        sap_info[i_c].R = R
        sap_info[i_c].R_inv = 1.0 / R

    @qd.func
    def compute_constraint_gamma_G(self, sap_info, i_c, vc):
        """Impulse gamma = (v_hat - vc) / R and constant Hessian G = 1/R (linear constraint)."""
        y = (sap_info[i_c].v_hat - vc) * sap_info[i_c].R_inv
        sap_info[i_c].gamma = y
        sap_info[i_c].G = sap_info[i_c].R_inv

    @qd.func
    def compute_energy(self, energy: qd.template()):
        """Accumulate each constraint's regularized energy into its batch's entry of `energy`."""
        constraints = qd.static(self.constraints)
        sap_info = qd.static(constraints.sap_info)
        for i_c in range(self.n_constraints[None]):
            i_b = constraints[i_c].batch_idx
            if self.coupler.batch_linesearch_active[i_b]:
                vc = self.compute_vc(i_c)
                self.compute_constraint_energy(sap_info, i_c, vc)
                energy[i_b] += sap_info[i_c].energy

    @qd.func
    def compute_constraint_energy(self, sap_info, i_c, vc):
        """Energy 0.5 * gamma^2 * R with gamma = (v_hat - vc) / R."""
        y = (sap_info[i_c].v_hat - vc) * sap_info[i_c].R_inv
        sap_info[i_c].energy = 0.5 * y**2 * sap_info[i_c].R
@qd.data_oriented
class RigidConstraintHandler(BaseConstraintHandler):
    """
    Rigid body constraints in SAPCoupler. Currently only support joint equality constraints.

    Each constraint couples two DOFs (one per joint) with Jacobian row
    J = e_{dof1} - e_{dof2}; storage is flat over batches * equalities.
    """

    def __init__(
        self,
        simulator: "Simulator",
        stiffness: float = 1e8,
        beta: float = 0.1,
    ) -> None:
        super().__init__(simulator, stiffness, beta)
        self.rigid_solver = simulator.rigid_solver
        self.constraint_solver = simulator.rigid_solver.constraint_solver
        # Worst case: every equality active in every batch.
        self.max_constraints = simulator.rigid_solver.n_equalities * self._B
        self.n_constraints = qd.field(gs.qd_int, shape=())
        self.constraint_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            i_dof1=gs.qd_int,  # index of the first DOF in the constraint
            i_dof2=gs.qd_int,  # index of the second DOF in the constraint
            sap_info=self.sap_constraint_info_type,  # SAP info for the constraint
        )
        self.constraints = self.constraint_type.field(shape=(self.max_constraints,))
        # Dense per-constraint Jacobian transpose and M^-1 J^T, plus the Delassus diagonal W.
        self.Jt = qd.field(gs.qd_float, shape=(self.max_constraints, self.rigid_solver.n_dofs))
        self.M_inv_Jt = qd.field(gs.qd_float, shape=(self.max_constraints, self.rigid_solver.n_dofs))
        self.W = qd.field(gs.qd_float, shape=(self.max_constraints,))

    @qd.kernel
    def build_constraints(
        self,
        equalities_info: array_class.EqualitiesInfo,
        joints_info: array_class.JointsInfo,
        static_rigid_sim_config: qd.template(),
    ):
        """Scan all (batch, equality) pairs and emit one SAP constraint per JOINT equality."""
        self.n_constraints[None] = 0
        self.Jt.fill(0.0)
        # TODO: Maybe support different constraints for each batch in the future.
        # For now all batches have the same constraints.
        dt2 = self.sim._substep_dt**2
        for i_b, i_e in qd.ndrange(self._B, self.rigid_solver.n_equalities):
            if equalities_info.eq_type[i_e, i_b] == gs.EQUALITY_TYPE.JOINT:
                i_c = qd.atomic_add(self.n_constraints[None], 1)
                self.constraints[i_c].batch_idx = i_b
                # Joint indexing differs depending on whether joints_info is batched.
                I_joint1 = (
                    [equalities_info.eq_obj1id[i_e, i_b], i_b]
                    if qd.static(static_rigid_sim_config.batch_joints_info)
                    else equalities_info.eq_obj1id[i_e, i_b]
                )
                I_joint2 = (
                    [equalities_info.eq_obj2id[i_e, i_b], i_b]
                    if qd.static(static_rigid_sim_config.batch_joints_info)
                    else equalities_info.eq_obj2id[i_e, i_b]
                )
                i_dof1 = joints_info.dof_start[I_joint1]
                i_dof2 = joints_info.dof_start[I_joint2]
                self.constraints[i_c].i_dof1 = i_dof1
                self.constraints[i_c].i_dof2 = i_dof2
                self.constraints[i_c].sap_info.k = self.stiffness
                # Provisional R = 1/(dt^2 * k); refined later by compute_regularization.
                self.constraints[i_c].sap_info.R_inv = dt2 * self.stiffness
                self.constraints[i_c].sap_info.R = 1.0 / self.constraints[i_c].sap_info.R_inv
                self.constraints[i_c].sap_info.v_hat = 0.0
                # Jacobian row: +1 on dof1, -1 on dof2 (relative joint coordinate).
                self.Jt[i_c, i_dof1] = 1.0
                self.Jt[i_c, i_dof2] = -1.0

    @qd.func
    def compute_regularization(self, dofs_state: array_class.DofsState):
        """Set each constraint's stabilization velocity v_hat = -g0/dt and regularization R."""
        dt_inv = 1.0 / self.sim._substep_dt
        q = qd.static(dofs_state.pos)
        sap_info = qd.static(self.constraints.sap_info)
        for i_c in range(self.n_constraints[None]):
            i_b = self.constraints[i_c].batch_idx
            # g0: current positional violation of the joint equality.
            g0 = q[self.constraints[i_c].i_dof1, i_b] - q[self.constraints[i_c].i_dof2, i_b]
            self.constraints[i_c].sap_info.v_hat = -g0 * dt_inv
            W = self.compute_delassus(i_c)
            self.compute_constraint_regularization(sap_info, i_c, W, self.sim._substep_dt)

    @qd.func
    def compute_delassus_world_frame(
        self,
        entities_info: array_class.EntitiesInfo,
        rigid_global_info: array_class.RigidGlobalInfo,
    ):
        """Compute W[i_c] = J M^-1 J^T for each constraint via the coupler's Jacobian solve."""
        self.coupler.rigid_solve_jacobian(
            self.Jt,
            self.M_inv_Jt,
            self.n_constraints[None],
            self.constraints.batch_idx,
            1,
            entities_info=entities_info,
            rigid_global_info=rigid_global_info,
        )
        self.W.fill(0.0)
        for i_c, i_d in qd.ndrange(self.n_constraints[None], self.rigid_solver.n_dofs):
            self.W[i_c] += self.M_inv_Jt[i_c, i_d] * self.Jt[i_c, i_d]

    @qd.func
    def compute_delassus(self, i_c):
        """Return the precomputed Delassus scalar W for constraint i_c."""
        return self.W[i_c]

    @qd.func
    def compute_Jx(self, i_c, x):
        """Apply the constraint Jacobian: J x = x[dof1] - x[dof2]."""
        i_b = self.constraints[i_c].batch_idx
        i_dof1 = self.constraints[i_c].i_dof1
        i_dof2 = self.constraints[i_c].i_dof2
        return x[i_b, i_dof1] - x[i_b, i_dof2]

    @qd.func
    def add_Jt_x(self, y, i_c, x):
        """Scatter J^T x into y: add x at dof1, subtract at dof2."""
        i_b = self.constraints[i_c].batch_idx
        i_dof1 = self.constraints[i_c].i_dof1
        i_dof2 = self.constraints[i_c].i_dof2
        y[i_b, i_dof1] += x
        y[i_b, i_dof2] -= x

    @qd.func
    def compute_vc(self, i_c):
        """Constraint velocity: J applied to the coupler's current DOF velocities."""
        return self.compute_Jx(i_c, self.coupler.rigid_state_dof.v)

    @qd.func
    def compute_gradient_hessian_diag(self):
        """Add -J^T gamma to the coupler gradient and +J^T gamma to the impulse accumulator."""
        constraints = qd.static(self.constraints)
        sap_info = qd.static(constraints.sap_info)
        for i_c in range(self.n_constraints[None]):
            vc = self.compute_vc(i_c)
            self.compute_constraint_gamma_G(sap_info, i_c, vc)
            self.add_Jt_x(self.coupler.rigid_state_dof.gradient, i_c, -sap_info[i_c].gamma)
            self.add_Jt_x(self.coupler.rigid_state_dof.impulse, i_c, sap_info[i_c].gamma)

    @qd.func
    def compute_Ap(self):
        """Accumulate the constraint Hessian's action J^T G J p into the PCG Ap vector."""
        constraints = qd.static(self.constraints)
        sap_info = qd.static(constraints.sap_info)
        for i_c in range(self.n_constraints[None]):
            # Jt @ G @ J @ p
            x = self.compute_Jx(i_c, self.coupler.pcg_rigid_state_dof.p)
            x = sap_info[i_c].G * x
            self.add_Jt_x(self.coupler.pcg_rigid_state_dof.Ap, i_c, x)

    @qd.func
    def prepare_search_direction_data(self):
        """Cache dvc = J dx (constraint-velocity change along the search direction)."""
        constraints = qd.static(self.constraints)
        sap_info = qd.static(constraints.sap_info)
        for i_c in range(self.n_constraints[None]):
            i_b = constraints[i_c].batch_idx
            if self.coupler.batch_linesearch_active[i_b]:
                sap_info[i_c].dvc = self.compute_Jx(i_c, self.coupler.pcg_rigid_state_dof.x)

    @qd.func
    def compute_energy_gamma_G(self):
        """Recompute energy, gamma and G for every constraint at the current velocities."""
        constraints = qd.static(self.constraints)
        sap_info = qd.static(constraints.sap_info)
        for i_c in range(self.n_constraints[None]):
            vc = self.compute_vc(i_c)
            self.compute_constraint_energy_gamma_G(sap_info, i_c, vc)

    @qd.func
    def compute_constraint_energy_gamma_G(self, sap_info, i_c, vc):
        """Gamma/G via the base helper, then energy = 0.5 * gamma^2 * R."""
        self.compute_constraint_gamma_G(sap_info, i_c, vc)
        sap_info[i_c].energy = 0.5 * sap_info[i_c].gamma ** 2 * sap_info[i_c].R

    @qd.func
    def update_gradient_hessian_alpha(self):
        """Fold constraint terms into the line search: dell -= dvc*gamma, d2ell += dvc^2*G."""
        dvc = qd.static(self.constraints.sap_info.dvc)
        gamma = qd.static(self.constraints.sap_info.gamma)
        G = qd.static(self.constraints.sap_info.G)
        for i_c in qd.ndrange(self.n_constraints[None]):
            i_b = self.constraints[i_c].batch_idx
            if self.coupler.batch_linesearch_active[i_b]:
                self.coupler.linesearch_state.dell_dalpha[i_b] -= dvc[i_c] * gamma[i_c]
                self.coupler.linesearch_state.d2ell_dalpha2[i_b] += dvc[i_c] ** 2 * G[i_c]
class ContactMode(IntEnum):
    """Regularized friction-cone state of a contact point in the SAP solver."""

    STICK = 0
    SLIDE = 1
    NO_CONTACT = 2
@qd.data_oriented
class BaseContactHandler(RBC):
"""
Base class for contact handling in SAPCoupler.
This class provides a framework for managing contact pairs, computing gradients,
and handling contact-related computations.
"""
def __init__(
    self,
    simulator: "Simulator",
) -> None:
    """Store simulator/coupler handles and declare the per-pair SAP contact struct."""
    self.sim = simulator
    self.coupler = simulator.coupler
    self.n_contact_pairs = qd.field(gs.qd_int, shape=())
    # Per-contact-pair SAP data; tangential (Rt) and normal (Rn) lanes are regularized separately.
    self.sap_contact_info_type = qd.types.struct(
        k=gs.qd_float,  # contact stiffness
        phi0=gs.qd_float,  # initial signed distance
        Rn=gs.qd_float,  # Regularization for normal
        Rt=gs.qd_float,  # Regularization for tangential
        Rn_inv=gs.qd_float,  # Inverse of Rn
        Rt_inv=gs.qd_float,  # Inverse of Rt
        vn_hat=gs.qd_float,  # Stablization for normal velocity
        mu=gs.qd_float,  # friction coefficient
        mu_hat=gs.qd_float,  # friction coefficient regularized
        mu_factor=gs.qd_float,  # friction coefficient factor, 1/(1+mu_tilde**2)
        energy=gs.qd_float,  # energy
        gamma=gs.qd_vec3,  # contact impulse
        G=gs.qd_mat3,  # Hessian matrix
        dvc=gs.qd_vec3,  # velocity change at contact point, for exact line search
    )
@qd.func
def compute_jacobian(
    self, links_info: array_class.LinksInfo, dofs_state: array_class.DofsState, links_state: array_class.LinksState
):
    """Hook for subclasses: build the contact Jacobian. No-op in the base handler."""
    pass
@qd.func
def update_gradient_hessian_alpha(self):
    """Fold contact terms into the line search: dell -= dvc.gamma, d2ell += dvc.(G dvc)."""
    dvc = qd.static(self.contact_pairs.sap_info.dvc)
    gamma = qd.static(self.contact_pairs.sap_info.gamma)
    G = qd.static(self.contact_pairs.sap_info.G)
    for i_p in qd.ndrange(self.n_contact_pairs[None]):
        i_b = self.contact_pairs[i_p].batch_idx
        if self.coupler.batch_linesearch_active[i_b]:
            self.coupler.linesearch_state.dell_dalpha[i_b] -= dvc[i_p].dot(gamma[i_p])
            self.coupler.linesearch_state.d2ell_dalpha2[i_b] += dvc[i_p].dot(G[i_p] @ dvc[i_p])
@qd.func
def compute_delassus_world_frame(
    self,
    entities_info: array_class.EntitiesInfo,
    rigid_global_info: array_class.RigidGlobalInfo,
):
    """Hook for subclasses: compute the world-frame Delassus operator. No-op here."""
    pass
@qd.func
def compute_regularization(
    self, entities_info: array_class.EntitiesInfo, rigid_global_info: array_class.RigidGlobalInfo
):
    """Compute each pair's Delassus matrix, reduce it to an RMS scalar, and regularize."""
    self.compute_delassus_world_frame(entities_info=entities_info, rigid_global_info=rigid_global_info)
    for i_p in range(self.n_contact_pairs[None]):
        W = self.compute_delassus(i_p)
        # Scalar proxy for the 3x3 Delassus block: Frobenius norm over 3.
        w_rms = W.norm() / 3.0
        self.compute_contact_regularization(self.contact_pairs.sap_info, i_p, w_rms, self.sim._substep_dt)
@qd.func
def compute_energy_gamma_G(self):
    """Recompute energy, impulse gamma and Hessian G for every contact pair."""
    for i_p in range(self.n_contact_pairs[None]):
        vc = self.compute_contact_velocity(i_p)
        self.compute_contact_energy_gamma_G(self.contact_pairs.sap_info, i_p, vc)
@qd.func
def compute_energy(self, energy: qd.template()):
    """Accumulate each contact pair's regularized energy into its batch's `energy` entry."""
    sap_info = qd.static(self.contact_pairs.sap_info)
    for i_p in range(self.n_contact_pairs[None]):
        i_b = self.contact_pairs[i_p].batch_idx
        if self.coupler.batch_linesearch_active[i_b]:
            vc = self.compute_contact_velocity(i_p)
            self.compute_contact_energy(sap_info, i_p, vc)
            energy[i_b] += sap_info[i_p].energy
@qd.func
def compute_contact_gamma_G(self, sap_info, i_p, vc):
    """
    Project the unconstrained impulse y = R^-1 (v_hat - vc) onto the regularized
    friction cone, producing the contact impulse gamma and its Hessian G.
    Layout: components 0,1 are tangential, component 2 is normal.
    """
    y = qd.Vector([0.0, 0.0, sap_info[i_p].vn_hat]) - vc
    y[0] *= sap_info[i_p].Rt_inv
    y[1] *= sap_info[i_p].Rt_inv
    y[2] *= sap_info[i_p].Rn_inv
    # Tangential magnitude (eps-guarded), normal component, and slip direction.
    yr = y[:2].norm(gs.EPS)
    yn = y[2]
    t_hat = y[:2] / yr
    contact_mode = self.compute_contact_mode(sap_info[i_p].mu, sap_info[i_p].mu_hat, yr, yn)
    sap_info[i_p].gamma.fill(0.0)
    sap_info[i_p].G.fill(0.0)
    if contact_mode == ContactMode.STICK:
        # Inside the cone: impulse is the unprojected y, Hessian is diagonal R^-1.
        sap_info[i_p].gamma = y
        sap_info[i_p].G[0, 0] = sap_info[i_p].Rt_inv
        sap_info[i_p].G[1, 1] = sap_info[i_p].Rt_inv
        sap_info[i_p].G[2, 2] = sap_info[i_p].Rn_inv
    elif contact_mode == ContactMode.SLIDE:
        # On the cone boundary: project, and assemble the projection's Jacobian blocks.
        gn = (yn + sap_info[i_p].mu_hat * yr) * sap_info[i_p].mu_factor
        gt = sap_info[i_p].mu * gn * t_hat
        sap_info[i_p].gamma = qd.Vector([gt[0], gt[1], gn])
        P = t_hat.outer_product(t_hat)
        Pperp = qd.Matrix.identity(gs.qd_float, 2) - P
        dgt_dyt = sap_info[i_p].mu * (gn / yr * Pperp + sap_info[i_p].mu_hat * sap_info[i_p].mu_factor * P)
        dgt_dyn = sap_info[i_p].mu * sap_info[i_p].mu_factor * t_hat
        dgn_dyt = sap_info[i_p].mu_hat * sap_info[i_p].mu_factor * t_hat
        dgn_dyn = sap_info[i_p].mu_factor
        sap_info[i_p].G[:2, :2] = dgt_dyt * sap_info[i_p].Rt_inv
        sap_info[i_p].G[:2, 2] = dgt_dyn * sap_info[i_p].Rn_inv
        sap_info[i_p].G[2, :2] = dgn_dyt * sap_info[i_p].Rt_inv
        sap_info[i_p].G[2, 2] = dgn_dyn * sap_info[i_p].Rn_inv
    else:  # No contact
        pass
    @qd.func
    def compute_contact_energy_gamma_G(self, sap_info, i_p, vc):
        """
        Compute gamma and G for pair ``i_p`` and additionally the regularizer
        energy 0.5 * gamma^T R gamma, stored in ``sap_info[i_p].energy``.
        """
        self.compute_contact_gamma_G(sap_info, i_p, vc)
        # R_gamma = R @ gamma, with diagonal R = [Rt, Rt, Rn].
        R_gamma = sap_info[i_p].gamma
        R_gamma[0] *= sap_info[i_p].Rt
        R_gamma[1] *= sap_info[i_p].Rt
        R_gamma[2] *= sap_info[i_p].Rn
        sap_info[i_p].energy = 0.5 * sap_info[i_p].gamma.dot(R_gamma)
    @qd.func
    def compute_contact_energy(self, sap_info, i_p, vc):
        """
        Compute gamma and the regularizer energy 0.5 * gamma^T R gamma for
        pair ``i_p`` without forming G (cheaper variant used by the line
        search; see compute_contact_gamma_G for the full version).
        """
        # Same projection as compute_contact_gamma_G, minus the G assembly.
        y = qd.Vector([0.0, 0.0, sap_info[i_p].vn_hat]) - vc
        y[0] *= sap_info[i_p].Rt_inv
        y[1] *= sap_info[i_p].Rt_inv
        y[2] *= sap_info[i_p].Rn_inv
        yr = y[:2].norm(gs.EPS)  # tangential magnitude (eps-guarded norm)
        yn = y[2]  # normal component
        t_hat = y[:2] / yr
        contact_mode = self.compute_contact_mode(sap_info[i_p].mu, sap_info[i_p].mu_hat, yr, yn)
        sap_info[i_p].gamma.fill(0.0)
        if contact_mode == ContactMode.STICK:
            sap_info[i_p].gamma = y
        elif contact_mode == ContactMode.SLIDE:
            # Project onto the friction cone boundary.
            gn = (yn + sap_info[i_p].mu_hat * yr) * sap_info[i_p].mu_factor
            gt = sap_info[i_p].mu * gn * t_hat
            sap_info[i_p].gamma = qd.Vector([gt[0], gt[1], gn])
        else:  # No contact: gamma stays zero.
            pass
        # energy = 0.5 * gamma^T R gamma, with diagonal R = [Rt, Rt, Rn].
        R_gamma = sap_info[i_p].gamma
        R_gamma[0] *= sap_info[i_p].Rt
        R_gamma[1] *= sap_info[i_p].Rt
        R_gamma[2] *= sap_info[i_p].Rn
        sap_info[i_p].energy = 0.5 * sap_info[i_p].gamma.dot(R_gamma)
@qd.func
def compute_contact_mode(self, mu, mu_hat, yr, yn):
"""
Compute the contact mode based on the friction coefficients and the relative velocities.
"""
result = ContactMode.NO_CONTACT
if yr <= mu * yn:
result = ContactMode.STICK
elif -mu_hat * yr < yn and yn < yr / mu:
result = ContactMode.SLIDE
return result
    @qd.func
    def compute_contact_regularization(self, sap_info, i_p, w_rms, time_step):
        """
        Compute the SAP-style regularization parameters for pair ``i_p``.

        Writes ``Rn``/``Rt`` (normal/tangential regularization), their
        inverses, the stabilization velocity ``vn_hat`` and the regularized
        friction terms ``mu_hat``/``mu_factor`` into ``sap_info[i_p]``.

        Parameters
        ----------
        w_rms : scale of the Delassus operator used to size the dimensionless
            ``_sap_sigma`` / ``_sap_beta`` parameters.
        time_step : substep duration used in the stiffness-based bound.
        """
        beta_factor = self.coupler._sap_beta**2 / (4.0 * qd.math.pi**2)
        k = sap_info[i_p].k
        # Normal regularization: stiffness-based value, floored by the
        # near-rigid (beta) limit.
        Rn = max(beta_factor * w_rms, 1.0 / (time_step * k * (time_step + self.coupler._sap_taud)))
        Rt = self.coupler._sap_sigma * w_rms
        # Stabilization velocity driving the penetration phi0 to zero over
        # (time_step + taud).
        vn_hat = -sap_info[i_p].phi0 / (time_step + self.coupler._sap_taud)
        sap_info[i_p].Rn = Rn
        sap_info[i_p].Rt = Rt
        sap_info[i_p].Rn_inv = 1.0 / Rn
        sap_info[i_p].Rt_inv = 1.0 / Rt
        sap_info[i_p].vn_hat = vn_hat
        # Regularized friction coefficient and the associated cone-projection
        # factor used by compute_contact_gamma_G.
        sap_info[i_p].mu_hat = sap_info[i_p].mu * Rt * sap_info[i_p].Rn_inv
        sap_info[i_p].mu_factor = 1.0 / (1.0 + sap_info[i_p].mu * sap_info[i_p].mu_hat)
@qd.data_oriented
class RigidContactHandler(BaseContactHandler):
    """
    Contact handler whose pairs act on a single rigid-body link. Contact
    velocities are mapped to/from the rigid solver's DoF space through the
    per-pair dense Jacobian transpose ``self.Jt``.
    """

    def __init__(
        self,
        simulator: "Simulator",
    ) -> None:
        super().__init__(simulator)
        # Cached handle to the rigid-body solver owning the DoFs.
        self.rigid_solver = self.sim.rigid_solver

    # FIXME This function is similar to the one in constraint_solver.py:add_collision_constraints.
    # Consider refactoring, using better naming, and removing while.
    @qd.func
    def compute_jacobian(
        self, links_info: array_class.LinksInfo, dofs_state: array_class.DofsState, links_state: array_class.LinksState
    ):
        """
        Build ``Jt`` (one 3-vector per (pair, DoF)) by walking each
        contacting link's kinematic chain up to the root.
        """
        self.Jt.fill(0.0)
        for i_p in range(self.n_contact_pairs[None]):
            link = self.contact_pairs[i_p].link_idx
            i_b = self.contact_pairs[i_p].batch_idx
            # Accumulate the contribution of every ancestor link's DoFs.
            while link > -1:
                link_maybe_batch = [link, i_b] if qd.static(self.rigid_solver._options.batch_links_info) else link
                # reverse order to make sure dofs in each row of self.jac_relevant_dofs is strictly descending
                for i_d_ in range(links_info.n_dofs[link_maybe_batch]):
                    i_d = links_info.dof_end[link_maybe_batch] - 1 - i_d_
                    cdof_ang = dofs_state.cdof_ang[i_d, i_b]
                    cdof_vel = dofs_state.cdof_vel[i_d, i_b]
                    t_quat = gu.qd_identity_quat()
                    # Shift the motion basis from the root COM to the contact point.
                    t_pos = self.contact_pairs[i_p].contact_pos - links_state.root_COM[link, i_b]
                    _, vel = gu.qd_transform_motion_by_trans_quat(cdof_ang, cdof_vel, t_pos, t_quat)
                    diff = vel
                    jac = diff
                    self.Jt[i_p, i_d] = self.Jt[i_p, i_d] + jac
                link = links_info.parent_idx[link_maybe_batch]

    @qd.func
    def compute_gradient_hessian_diag(self):
        """Add -J^T gamma to the gradient and +J^T gamma to the applied impulse."""
        sap_info = qd.static(self.contact_pairs.sap_info)
        for i_p in range(self.n_contact_pairs[None]):
            vc = self.compute_contact_velocity(i_p)
            self.compute_contact_gamma_G(sap_info, i_p, vc)
            self.add_Jt_x(self.coupler.rigid_state_dof.gradient, i_p, -sap_info[i_p].gamma)
            self.add_Jt_x(self.coupler.rigid_state_dof.impulse, i_p, sap_info[i_p].gamma)

    @qd.func
    def compute_pcg_matrix_vector_product(self):
        """Accumulate the contact term J^T G J p into the PCG product Ap."""
        sap_info = qd.static(self.contact_pairs.sap_info)
        for i_p in range(self.n_contact_pairs[None]):
            # Jt @ G @ J @ p
            Jp = self.compute_Jx(i_p, self.coupler.pcg_rigid_state_dof.p)
            GJp = sap_info[i_p].G @ Jp
            self.add_Jt_x(self.coupler.pcg_rigid_state_dof.Ap, i_p, GJp)

    @qd.func
    def compute_contact_velocity(self, i_p):
        """
        Compute the contact velocity in the contact frame.
        """
        return self.compute_Jx(i_p, self.coupler.rigid_state_dof.v)

    @qd.func
    def prepare_search_direction_data(self):
        """Cache dvc = J @ x (search direction in contact frame) per active pair."""
        sap_info = qd.static(self.contact_pairs.sap_info)
        for i_p in qd.ndrange(self.n_contact_pairs[None]):
            i_b = self.contact_pairs[i_p].batch_idx
            if self.coupler.batch_linesearch_active[i_b]:
                sap_info[i_p].dvc = self.compute_Jx(i_p, self.coupler.pcg_rigid_state_dof.x)

    @qd.func
    def compute_delassus_world_frame(
        self,
        entities_info: array_class.EntitiesInfo,
        rigid_global_info: array_class.RigidGlobalInfo,
    ):
        """
        Compute the 3x3 Delassus blocks W = J M^-1 J^T per pair, expressed in
        the world frame, via the rigid solver's mass-matrix solve.
        """
        # M_inv_Jt = M^-1 @ Jt, solved for all pairs at once (3 columns each).
        self.coupler.rigid_solve_jacobian(
            self.Jt,
            self.M_inv_Jt,
            self.n_contact_pairs[None],
            self.contact_pairs.batch_idx,
            3,
            entities_info=entities_info,
            rigid_global_info=rigid_global_info,
        )
        self.W.fill(0.0)
        # W[i_p] = sum_d M_inv_Jt[:, d] Jt[:, d]^T
        for i_p, i_d, i, j in qd.ndrange(self.n_contact_pairs[None], self.rigid_solver.n_dofs, 3, 3):
            self.W[i_p][i, j] += self.M_inv_Jt[i_p, i_d][i] * self.Jt[i_p, i_d][j]

    @qd.func
    def compute_delassus(self, i_p):
        """Return the (world-frame) Delassus block for pair ``i_p``."""
        return self.W[i_p]

    @qd.func
    def compute_Jx(self, i_p, x):
        """Return J @ x for pair ``i_p``, with x indexed as (batch, dof)."""
        pairs = qd.static(self.contact_pairs)
        i_b = pairs[i_p].batch_idx
        Jx = qd.Vector.zero(gs.qd_float, 3)
        for i in range(self.rigid_solver.n_dofs):
            Jx = Jx + self.Jt[i_p, i] * x[i_b, i]
        return Jx

    @qd.func
    def add_Jt_x(self, y, i_p, x):
        """Accumulate J^T @ x into the DoF-space vector ``y`` (batch, dof)."""
        pairs = qd.static(self.contact_pairs)
        i_b = pairs[i_p].batch_idx
        for i in range(self.rigid_solver.n_dofs):
            y[i_b, i] += self.Jt[i_p, i].dot(x)
@qd.data_oriented
class RigidRigidContactHandler(RigidContactHandler):
    """
    Contact handler for pairs between two rigid-body links. The Jacobian is
    the difference of the two links' contributions (link0 positive, link1
    negative), and contact quantities are expressed in the contact frame
    spanned by (tangent0, tangent1, normal).
    """

    def __init__(
        self,
        simulator: "Simulator",
    ) -> None:
        super().__init__(simulator)

    @qd.func
    def compute_jacobian(
        self, links_info: array_class.LinksInfo, dofs_state: array_class.DofsState, links_state: array_class.LinksState
    ):
        """
        Build ``Jt`` as the relative-velocity Jacobian: +contribution along
        link0's kinematic chain, -contribution along link1's chain.
        """
        self.Jt.fill(0.0)
        pairs = qd.static(self.contact_pairs)
        for i_p in range(self.n_contact_pairs[None]):
            i_b = pairs[i_p].batch_idx
            # First link of the pair: positive sign.
            link = pairs[i_p].link_idx0
            while link > -1:
                link_maybe_batch = [link, i_b] if qd.static(self.rigid_solver._options.batch_links_info) else link
                # reverse order to make sure dofs in each row of self.jac_relevant_dofs is strictly descending
                for i_d_ in range(links_info.n_dofs[link_maybe_batch]):
                    i_d = links_info.dof_end[link_maybe_batch] - 1 - i_d_
                    cdof_ang = dofs_state.cdof_ang[i_d, i_b]
                    cdof_vel = dofs_state.cdof_vel[i_d, i_b]
                    t_quat = gu.qd_identity_quat()
                    t_pos = pairs[i_p].contact_pos - links_state.root_COM[link, i_b]
                    _, vel = gu.qd_transform_motion_by_trans_quat(cdof_ang, cdof_vel, t_pos, t_quat)
                    self.Jt[i_p, i_d] = self.Jt[i_p, i_d] + vel
                link = links_info.parent_idx[link_maybe_batch]
            # Second link of the pair: negative sign (relative velocity).
            link = pairs[i_p].link_idx1
            while link > -1:
                link_maybe_batch = [link, i_b] if qd.static(self.rigid_solver._options.batch_links_info) else link
                # reverse order to make sure dofs in each row of self.jac_relevant_dofs is strictly descending
                for i_d_ in range(links_info.n_dofs[link_maybe_batch]):
                    i_d = links_info.dof_end[link_maybe_batch] - 1 - i_d_
                    cdof_ang = dofs_state.cdof_ang[i_d, i_b]
                    cdof_vel = dofs_state.cdof_vel[i_d, i_b]
                    t_quat = gu.qd_identity_quat()
                    t_pos = pairs[i_p].contact_pos - links_state.root_COM[link, i_b]
                    _, vel = gu.qd_transform_motion_by_trans_quat(cdof_ang, cdof_vel, t_pos, t_quat)
                    self.Jt[i_p, i_d] = self.Jt[i_p, i_d] - vel
                link = links_info.parent_idx[link_maybe_batch]

    @qd.func
    def compute_delassus(self, i_p):
        """Return the Delassus block rotated into the contact frame."""
        pairs = qd.static(self.contact_pairs)
        world = qd.Matrix.cols([pairs[i_p].tangent0, pairs[i_p].tangent1, pairs[i_p].normal])
        return world.transpose() @ self.W[i_p] @ world

    @qd.func
    def compute_Jx(self, i_p, x):
        """Return J @ x projected onto the contact frame (t0, t1, n)."""
        pairs = qd.static(self.contact_pairs)
        i_b = pairs[i_p].batch_idx
        Jx = qd.Vector.zero(gs.qd_float, 3)
        for i in range(self.rigid_solver.n_dofs):
            Jx = Jx + self.Jt[i_p, i] * x[i_b, i]
        # Express the world-frame result in the contact frame.
        Jx = qd.Vector([Jx.dot(pairs[i_p].tangent0), Jx.dot(pairs[i_p].tangent1), Jx.dot(pairs[i_p].normal)])
        return Jx

    @qd.func
    def add_Jt_x(self, y, i_p, x):
        """Accumulate J^T @ x into ``y``; x is given in the contact frame."""
        pairs = qd.static(self.contact_pairs)
        i_b = pairs[i_p].batch_idx
        # Rotate x from the contact frame back to the world frame first.
        world = qd.Matrix.cols([pairs[i_p].tangent0, pairs[i_p].tangent1, pairs[i_p].normal])
        x_ = world @ x
        for i in range(self.rigid_solver.n_dofs):
            y[i_b, i] += self.Jt[i_p, i].dot(x_)
@qd.data_oriented
class FEMContactHandler(BaseContactHandler):
    """
    Base handler for contacts that act purely on FEM vertex velocities.
    Concrete subclasses provide ``compute_Jx``, ``add_Jt_x`` and
    ``add_Jt_A_J_diag3x3`` for their specific contact geometry.
    """

    def __init__(
        self,
        simulator: "Simulator",
    ) -> None:
        super().__init__(simulator)
        self.fem_solver = simulator.fem_solver

    @qd.func
    def compute_gradient_hessian_diag(self):
        """
        Per pair: compute gamma/G, add -J^T gamma to the gradient, +J^T gamma
        to the impulse, and J^T G J to the PCG 3x3 diagonal preconditioner.
        """
        sap_info = qd.static(self.contact_pairs.sap_info)
        for i_p in range(self.n_contact_pairs[None]):
            vc = self.compute_Jx(i_p, self.coupler.fem_state_v.v)
            self.compute_contact_gamma_G(sap_info, i_p, vc)
            self.add_Jt_x(self.coupler.fem_state_v.gradient, i_p, -sap_info[i_p].gamma)
            self.add_Jt_x(self.coupler.fem_state_v.impulse, i_p, sap_info[i_p].gamma)
            self.add_Jt_A_J_diag3x3(self.coupler.pcg_fem_state_v.diag3x3, i_p, sap_info[i_p].G)

    @qd.func
    def prepare_search_direction_data(self):
        """Cache dvc = J @ x (search direction in contact frame) per active pair."""
        sap_info = qd.static(self.contact_pairs.sap_info)
        for i_p in qd.ndrange(self.n_contact_pairs[None]):
            i_b = self.contact_pairs[i_p].batch_idx
            if self.coupler.batch_linesearch_active[i_b]:
                sap_info[i_p].dvc = self.compute_Jx(i_p, self.coupler.pcg_fem_state_v.x)

    @qd.func
    def compute_pcg_matrix_vector_product(self):
        """Accumulate the contact term J^T G J p into the PCG product Ap."""
        sap_info = qd.static(self.contact_pairs.sap_info)
        for i_p in range(self.n_contact_pairs[None]):
            # Jt @ G @ J @ p
            x = self.compute_Jx(i_p, self.coupler.pcg_fem_state_v.p)
            x = sap_info[i_p].G @ x
            self.add_Jt_x(self.coupler.pcg_fem_state_v.Ap, i_p, x)

    @qd.func
    def compute_contact_velocity(self, i_p):
        """
        Compute the contact velocity in the contact frame.
        """
        return self.compute_Jx(i_p, self.coupler.fem_state_v.v)
@qd.data_oriented
class RigidFEMContactHandler(RigidContactHandler):
    """
    Handler for mixed rigid-FEM contacts: the contact Jacobian spans both the
    rigid DoF space and the FEM vertex space.

    NOTE(review): this class calls ``compute_Jx`` with two state arguments and
    ``add_Jt_x`` with two output arguments, which does not match the 2-/3-arg
    signatures inherited from RigidContactHandler — concrete subclasses are
    presumably expected to provide matching overrides; confirm.
    """

    def __init__(
        self,
        simulator: "Simulator",
    ) -> None:
        super().__init__(simulator)
        self.fem_solver = simulator.fem_solver

    @qd.func
    def compute_gradient_hessian_diag(self):
        """
        Per pair: compute gamma/G, scatter -J^T gamma / +J^T gamma into both
        the FEM and rigid gradients/impulses, and add J^T G J to the FEM PCG
        3x3 diagonal preconditioner.
        """
        sap_info = qd.static(self.contact_pairs.sap_info)
        for i_p in range(self.n_contact_pairs[None]):
            vc = self.compute_Jx(i_p, self.coupler.fem_state_v.v, self.coupler.rigid_state_dof.v)
            self.compute_contact_gamma_G(sap_info, i_p, vc)
            self.add_Jt_x(
                self.coupler.fem_state_v.gradient, self.coupler.rigid_state_dof.gradient, i_p, -sap_info[i_p].gamma
            )
            self.add_Jt_x(
                self.coupler.fem_state_v.impulse, self.coupler.rigid_state_dof.impulse, i_p, sap_info[i_p].gamma
            )
            self.add_Jt_A_J_diag3x3(self.coupler.pcg_fem_state_v.diag3x3, i_p, sap_info[i_p].G)

    @qd.func
    def prepare_search_direction_data(self):
        """Cache dvc = J @ x (search direction in contact frame) per active pair."""
        sap_info = qd.static(self.contact_pairs.sap_info)
        for i_p in qd.ndrange(self.n_contact_pairs[None]):
            i_b = self.contact_pairs[i_p].batch_idx
            if self.coupler.batch_linesearch_active[i_b]:
                sap_info[i_p].dvc = self.compute_Jx(
                    i_p, self.coupler.pcg_fem_state_v.x, self.coupler.pcg_rigid_state_dof.x
                )

    @qd.func
    def compute_pcg_matrix_vector_product(self):
        """Accumulate the contact term J^T G J p into both PCG products Ap."""
        sap_info = qd.static(self.contact_pairs.sap_info)
        for i_p in range(self.n_contact_pairs[None]):
            # Jt @ G @ J @ p
            x = self.compute_Jx(i_p, self.coupler.pcg_fem_state_v.p, self.coupler.pcg_rigid_state_dof.p)
            x = sap_info[i_p].G @ x
            self.add_Jt_x(self.coupler.pcg_fem_state_v.Ap, self.coupler.pcg_rigid_state_dof.Ap, i_p, x)

    @qd.func
    def compute_contact_velocity(self, i_p):
        """
        Compute the contact velocity in the contact frame.
        """
        return self.compute_Jx(i_p, self.coupler.fem_state_v.v, self.coupler.rigid_state_dof.v)
@qd.func
def accumulate_area_centroid(
    polygon_vertices, i, total_area: qd.template(), total_area_weighted_centroid: qd.template()
):
    """
    Accumulate the area and area-weighted centroid of one fan triangle
    (v0, v[i-1], v[i]) of a polygon stored column-wise in ``polygon_vertices``.

    Summing over i = 2..n-1 yields the polygon's total area and its
    area-weighted centroid (divide by the total area to get the centroid).
    """
    e1 = polygon_vertices[:, i - 1] - polygon_vertices[:, 0]
    e2 = polygon_vertices[:, i] - polygon_vertices[:, 0]
    # Triangle area = half the magnitude of the edge cross product.
    area = 0.5 * e1.cross(e2).norm()
    total_area += area
    # Triangle centroid is the vertex average; weight it by the area.
    total_area_weighted_centroid += (
        area * (polygon_vertices[:, 0] + polygon_vertices[:, i - 1] + polygon_vertices[:, i]) / 3.0
    )
@qd.data_oriented
class FEMFloorTetContactHandler(FEMContactHandler):
    """
    Class for handling contact between a tetrahedral mesh and a floor in a simulation using hydroelastic model.
    This class extends the BaseContact class and provides methods for detecting contact
    between the tetrahedral elements and the floor, computing contact pairs, and managing
    contact-related computations.
    """

    def __init__(
        self,
        simulator: "Simulator",
        eps: float = 1e-10,
    ) -> None:
        """
        Parameters
        ----------
        simulator : Simulator
            The owning simulator.
        eps : float
            Numerical tolerance used to reject degenerate contact patches.
        """
        super().__init__(simulator)
        self.name = "FEMFloorTetContactHandler"
        self.fem_solver = self.sim.fem_solver
        # Fixed: `self.eps = eps` was duplicated (copy-paste error).
        self.eps = eps
        # Candidate = element crossing the floor plane, found in the first
        # detection pass; resolved into an actual pair in the second pass.
        self.contact_candidate_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            geom_idx=gs.qd_int,  # index of the FEM element
            intersection_code=gs.qd_int,  # intersection code for the element
            distance=gs.qd_vec4,  # distance vector for the element
        )
        self.n_contact_candidates = qd.field(gs.qd_int, shape=())
        self.max_contact_candidates = self.fem_solver.n_surface_elements * self.fem_solver._B
        self.contact_candidates = self.contact_candidate_type.field(shape=(self.max_contact_candidates,))
        self.contact_pair_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            geom_idx=gs.qd_int,  # index of the FEM element
            barycentric=gs.qd_vec4,  # barycentric coordinates of the contact point
            contact_pos=gs.qd_vec3,  # contact position
            sap_info=self.sap_contact_info_type,  # contact info
        )
        self.max_contact_pairs = self.fem_solver.n_surface_elements * self.fem_solver._B
        self.contact_pairs = self.contact_pair_type.field(shape=(self.max_contact_pairs,))

    @qd.func
    def detection(
        self,
        f: qd.i32,
        links_info: array_class.LinksInfo,
        verts_info: array_class.VertsInfo,
        faces_info: array_class.FacesInfo,
        free_verts_state: array_class.VertsState,
        fixed_verts_state: array_class.VertsState,
        geoms_info: array_class.GeomsInfo,
    ):
        """
        Detect element-vs-floor contacts at substep ``f`` and build the
        contact pairs. The rigid-side arguments are unused here; they are part
        of the common handler interface. Returns True on buffer overflow.
        """
        overflow = False
        # Compute contact pairs
        self.n_contact_candidates[None] = 0
        # TODO Check surface element only instead of all elements
        for i_b, i_e in qd.ndrange(self.fem_solver._B, self.fem_solver.n_elements):
            # Bit i of intersection_code = vertex i is above the floor.
            intersection_code = qd.int32(0)
            distance = qd.Vector.zero(gs.qd_float, 4)
            for i in qd.static(range(4)):
                i_v = self.fem_solver.elements_i[i_e].el2v[i]
                pos_v = self.fem_solver.elements_v[f, i_v, i_b].pos
                distance[i] = pos_v.z - self.fem_solver.floor_height
                if distance[i] > 0.0:
                    intersection_code |= 1 << i
            # check if the element intersect with the floor
            if intersection_code != 0 and intersection_code != 15:
                i_c = qd.atomic_add(self.n_contact_candidates[None], 1)
                if i_c < self.max_contact_candidates:
                    self.contact_candidates[i_c].batch_idx = i_b
                    self.contact_candidates[i_c].geom_idx = i_e
                    self.contact_candidates[i_c].intersection_code = intersection_code
                    self.contact_candidates[i_c].distance = distance
                else:
                    overflow = True
        sap_info = qd.static(self.contact_pairs.sap_info)
        self.n_contact_pairs[None] = 0
        # Compute pair from candidates
        result_count = qd.min(self.n_contact_candidates[None], self.max_contact_candidates)
        for i_c in range(result_count):
            candidate = self.contact_candidates[i_c]
            i_b = candidate.batch_idx
            i_e = candidate.geom_idx
            intersection_code = candidate.intersection_code
            # Marching-tets table: which tet edges cross the floor plane.
            intersected_edges = self.coupler.MarchingTetsEdgeTable[intersection_code]
            tet_vertices = qd.Matrix.zero(gs.qd_float, 3, 4)  # 4 vertices
            tet_pressures = qd.Vector.zero(gs.qd_float, 4)  # pressures at the vertices
            for i in qd.static(range(4)):
                i_v = self.fem_solver.elements_i[i_e].el2v[i]
                tet_vertices[:, i] = self.fem_solver.elements_v[f, i_v, i_b].pos
                tet_pressures[i] = self.coupler.fem_pressure[i_v]
            polygon_vertices = qd.Matrix.zero(gs.qd_float, 3, 4)  # 3 or 4 vertices
            total_area = gs.EPS  # avoid division by zero
            total_area_weighted_centroid = qd.Vector.zero(gs.qd_float, 3)
            for i in qd.static(range(4)):
                if intersected_edges[i] >= 0:
                    # Interpolate the edge/plane crossing point.
                    edge = self.coupler.TetEdges[intersected_edges[i]]
                    pos_v0 = tet_vertices[:, edge[0]]
                    pos_v1 = tet_vertices[:, edge[1]]
                    d_v0 = candidate.distance[edge[0]]
                    d_v1 = candidate.distance[edge[1]]
                    t = d_v0 / (d_v0 - d_v1)
                    polygon_vertices[:, i] = pos_v0 + t * (pos_v1 - pos_v0)
                    # Compute triangle area and centroid
                    if qd.static(i >= 2):
                        accumulate_area_centroid(polygon_vertices, i, total_area, total_area_weighted_centroid)
            centroid = total_area_weighted_centroid / total_area
            # Compute barycentric coordinates
            barycentric = tet_barycentric(centroid, tet_vertices)
            pressure = barycentric.dot(tet_pressures)
            deformable_g = self.coupler._hydroelastic_stiffness
            rigid_g = self.coupler.fem_pressure_gradient[i_b, i_e].z
            # TODO A better way to handle corner cases where pressure and pressure gradient are ill defined
            if total_area < self.eps or rigid_g < self.eps:
                continue
            g = 1.0 / (1.0 / deformable_g + 1.0 / rigid_g)  # harmonic average
            rigid_k = total_area * g
            rigid_phi0 = -pressure / g
            # Reject degenerate or separating contacts.
            if rigid_k < self.eps or rigid_phi0 > self.eps:
                continue
            i_p = qd.atomic_add(self.n_contact_pairs[None], 1)
            if i_p < self.max_contact_pairs:
                self.contact_pairs[i_p].batch_idx = i_b
                self.contact_pairs[i_p].geom_idx = i_e
                self.contact_pairs[i_p].barycentric = barycentric
                sap_info[i_p].k = rigid_k
                sap_info[i_p].phi0 = rigid_phi0
                sap_info[i_p].mu = self.fem_solver.elements_i[i_e].friction_mu
            else:
                overflow = True
        return overflow

    @qd.func
    def compute_Jx(self, i_p, x):
        """
        Compute the contact Jacobian J times a vector x.
        """
        i_b = self.contact_pairs[i_p].batch_idx
        i_g = self.contact_pairs[i_p].geom_idx
        Jx = qd.Vector.zero(gs.qd_float, 3)
        # J interpolates the 4 tet vertex velocities with the barycentric weights.
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g].el2v[i]
            Jx += self.contact_pairs[i_p].barycentric[i] * x[i_b, i_v]
        return Jx

    @qd.func
    def add_Jt_x(self, y, i_p, x):
        """Scatter J^T @ x onto the tet's vertices, skipping constrained ones."""
        i_b = self.contact_pairs[i_p].batch_idx
        i_g = self.contact_pairs[i_p].geom_idx
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g].el2v[i]
            if qd.static(self.fem_solver._enable_vertex_constraints):
                # Constrained vertices must not receive contact impulses.
                if not self.fem_solver.vertex_constraints.is_constrained[i_v, i_b]:
                    y[i_b, i_v] += self.contact_pairs[i_p].barycentric[i] * x
            else:
                y[i_b, i_v] += self.contact_pairs[i_p].barycentric[i] * x

    @qd.func
    def add_Jt_A_J_diag3x3(self, y, i_p, A):
        """Scatter the 3x3 blocks of J^T A J onto the tet's vertices."""
        i_b = self.contact_pairs[i_p].batch_idx
        i_g = self.contact_pairs[i_p].geom_idx
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g].el2v[i]
            if qd.static(self.fem_solver._enable_vertex_constraints):
                if not self.fem_solver.vertex_constraints.is_constrained[i_v, i_b]:
                    y[i_b, i_v] += self.contact_pairs[i_p].barycentric[i] ** 2 * A
            else:
                y[i_b, i_v] += self.contact_pairs[i_p].barycentric[i] ** 2 * A

    @qd.func
    def compute_delassus(self, i_p):
        """Approximate the Delassus block from the PCG preconditioner blocks."""
        dt2_inv = 1.0 / self.sim._substep_dt**2
        i_b = self.contact_pairs[i_p].batch_idx
        i_g = self.contact_pairs[i_p].geom_idx
        W = qd.Matrix.zero(gs.qd_float, 3, 3)
        # W = sum (JA^-1J^T)
        # With floor, J is Identity times the barycentric coordinates
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g].el2v[i]
            W += self.contact_pairs[i_p].barycentric[i] ** 2 * dt2_inv * self.fem_solver.pcg_state_v[i_b, i_v].prec
        return W
@qd.data_oriented
class FEMSelfTetContactHandler(FEMContactHandler):
    """
    Class for handling self-contact between tetrahedral elements in a simulation using hydroelastic model.
    This class extends the FEMContact class and provides methods for detecting self-contact
    between tetrahedral elements, computing contact pairs, and managing contact-related computations.
    """

    def __init__(
        self,
        simulator: "Simulator",
        eps: float = 1e-10,
    ) -> None:
        super().__init__(simulator)
        self.name = "FEMSelfTetContactHandler"
        # Tolerance used to reject degenerate contact patches.
        self.eps = eps
        # Candidate = pair of surface tets whose equal-pressure plane crosses
        # both; resolved into an actual contact pair in compute_pairs.
        self.contact_candidate_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            geom_idx0=gs.qd_int,  # index of the FEM element0
            intersection_code0=gs.qd_int,  # intersection code for element0
            geom_idx1=gs.qd_int,  # index of the FEM element1
            normal=gs.qd_vec3,  # contact plane normal
            x=gs.qd_vec3,  # a point on the contact plane
            distance0=gs.qd_vec4,  # distance vector for element0
        )
        self.n_contact_candidates = qd.field(gs.qd_int, shape=())
        self.max_contact_candidates = self.fem_solver.n_surface_elements * self.fem_solver._B * 8
        self.contact_candidates = self.contact_candidate_type.field(shape=(self.max_contact_candidates,))
        self.contact_pair_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            normal=gs.qd_vec3,  # contact plane normal
            tangent0=gs.qd_vec3,  # contact plane tangent0
            tangent1=gs.qd_vec3,  # contact plane tangent1
            geom_idx0=gs.qd_int,  # index of the FEM element0
            geom_idx1=gs.qd_int,  # index of the FEM element1
            barycentric0=gs.qd_vec4,  # barycentric coordinates of the contact point in tet 0
            barycentric1=gs.qd_vec4,  # barycentric coordinates of the contact point in tet 1
            contact_pos=gs.qd_vec3,  # contact position
            sap_info=self.sap_contact_info_type,  # contact info
        )
        self.max_contact_pairs = self.fem_solver.n_surface_elements * self.fem_solver._B
        self.contact_pairs = self.contact_pair_type.field(shape=(self.max_contact_pairs,))

    @qd.func
    def compute_candidates(self, f: qd.i32):
        """
        Turn BVH broad-phase query results into contact candidates by
        computing the equal-pressure plane of each tet pair and checking that
        both tets actually straddle it. Returns True on buffer overflow.
        """
        overflow = False
        self.n_contact_candidates[None] = 0
        result_count = qd.min(
            self.coupler.fem_surface_tet_bvh.query_result_count[None],
            self.coupler.fem_surface_tet_bvh.max_query_results,
        )
        for i_r in range(result_count):
            i_b, i_sa, i_sq = self.coupler.fem_surface_tet_bvh.query_result[i_r]
            i_a = self.fem_solver.surface_elements[i_sa]
            i_q = self.fem_solver.surface_elements[i_sq]
            i_v0 = self.fem_solver.elements_i[i_a].el2v[0]
            i_v1 = self.fem_solver.elements_i[i_q].el2v[0]
            x0 = self.fem_solver.elements_v[f, i_v0, i_b].pos
            x1 = self.fem_solver.elements_v[f, i_v1, i_b].pos
            p0 = self.coupler.fem_pressure[i_v0]
            p1 = self.coupler.fem_pressure[i_v1]
            g0 = self.coupler.fem_pressure_gradient[i_b, i_a]
            g1 = self.coupler.fem_pressure_gradient[i_b, i_q]
            g0_norm = g0.norm()
            g1_norm = g1.norm()
            # Ill-defined pressure fields cannot produce a contact plane.
            if g0_norm < gs.EPS or g1_norm < gs.EPS:
                continue
            # Calculate the isosurface, i.e. equal pressure plane defined by x and normal
            # Solve for p0 + g0.dot(x - x0) = p1 + g1.dot(x - x1)
            normal = g0 - g1
            magnitude = normal.norm()
            if magnitude < gs.EPS:
                continue
            normal /= magnitude
            b = p1 - p0 - g1.dot(x1) + g0.dot(x0)
            x = b / magnitude * normal
            # Check that the normal is pointing along g0 and against g1, some allowance as used in Drake
            threshold = qd.static(np.cos(np.pi * 5.0 / 8.0))
            if normal.dot(g0) < threshold * g0_norm or normal.dot(g1) > -threshold * g1_norm:
                continue
            # Bit i of each intersection code = vertex i lies above the plane.
            intersection_code0 = qd.int32(0)
            distance0 = qd.Vector.zero(gs.qd_float, 4)
            intersection_code1 = qd.int32(0)
            distance1 = qd.Vector.zero(gs.qd_float, 4)
            for i in qd.static(range(4)):
                i_v = self.fem_solver.elements_i[i_a].el2v[i]
                pos_v = self.fem_solver.elements_v[f, i_v, i_b].pos
                distance0[i] = (pos_v - x).dot(normal)  # signed distance
                if distance0[i] > 0.0:
                    intersection_code0 |= 1 << i
            for i in qd.static(range(4)):
                i_v = self.fem_solver.elements_i[i_q].el2v[i]
                pos_v = self.fem_solver.elements_v[f, i_v, i_b].pos
                distance1[i] = (pos_v - x).dot(normal)
                if distance1[i] > 0.0:
                    intersection_code1 |= 1 << i
            # Fast check for whether both tets intersect with the plane
            if (
                intersection_code0 == 0
                or intersection_code1 == 0
                or intersection_code0 == 15
                or intersection_code1 == 15
            ):
                continue
            i_c = qd.atomic_add(self.n_contact_candidates[None], 1)
            if i_c < self.max_contact_candidates:
                self.contact_candidates[i_c].batch_idx = i_b
                self.contact_candidates[i_c].normal = normal
                self.contact_candidates[i_c].x = x
                self.contact_candidates[i_c].geom_idx0 = i_a
                self.contact_candidates[i_c].intersection_code0 = intersection_code0
                self.contact_candidates[i_c].distance0 = distance0
                self.contact_candidates[i_c].geom_idx1 = i_q
            else:
                overflow = True
        return overflow

    @qd.func
    def compute_pairs(self, i_step: qd.i32):
        """
        Computes the FEM self contact pairs and their properties.

        For each candidate: build the polygon where tet0 crosses the
        equal-pressure plane (marching tets), clip it against the four faces
        of tet1, then derive the contact frame, stiffness and signed distance
        from the clipped patch. Returns True on buffer overflow.

        Intersection code reference:
        https://github.com/RobotLocomotion/drake/blob/8c3a249184ed09f0faab3c678536d66d732809ce/geometry/proximity/field_intersection.cc#L87
        """
        overflow = False
        sap_info = qd.static(self.contact_pairs.sap_info)
        normal_signs = qd.Vector([1.0, -1.0, 1.0, -1.0], dt=gs.qd_float)  # make normal point outward
        self.n_contact_pairs[None] = 0
        result_count = qd.min(self.n_contact_candidates[None], self.max_contact_candidates)
        for i_c in range(result_count):
            i_b = self.contact_candidates[i_c].batch_idx
            i_e0 = self.contact_candidates[i_c].geom_idx0
            i_e1 = self.contact_candidates[i_c].geom_idx1
            intersection_code0 = self.contact_candidates[i_c].intersection_code0
            distance0 = self.contact_candidates[i_c].distance0
            intersected_edges0 = self.coupler.MarchingTetsEdgeTable[intersection_code0]
            tet_vertices0 = qd.Matrix.zero(gs.qd_float, 3, 4)  # 4 vertices of tet 0
            tet_pressures0 = qd.Vector.zero(gs.qd_float, 4)  # pressures at the vertices of tet 0
            tet_vertices1 = qd.Matrix.zero(gs.qd_float, 3, 4)  # 4 vertices of tet 1
            for i in qd.static(range(4)):
                i_v = self.fem_solver.elements_i[i_e0].el2v[i]
                tet_vertices0[:, i] = self.fem_solver.elements_v[i_step, i_v, i_b].pos
                tet_pressures0[i] = self.coupler.fem_pressure[i_v]
            for i in qd.static(range(4)):
                i_v = self.fem_solver.elements_i[i_e1].el2v[i]
                tet_vertices1[:, i] = self.fem_solver.elements_v[i_step, i_v, i_b].pos
            polygon_vertices = qd.Matrix.zero(gs.qd_float, 3, 8)  # maximum 8 vertices
            polygon_n_vertices = gs.qd_int(0)
            clipped_vertices = qd.Matrix.zero(gs.qd_float, 3, 8)  # maximum 8 vertices
            clipped_n_vertices = gs.qd_int(0)
            # Marching-tets polygon of tet0 against the equal-pressure plane.
            for i in range(4):
                if intersected_edges0[i] >= 0:
                    edge = self.coupler.TetEdges[intersected_edges0[i]]
                    pos_v0 = tet_vertices0[:, edge[0]]
                    pos_v1 = tet_vertices0[:, edge[1]]
                    d_v0 = distance0[edge[0]]
                    d_v1 = distance0[edge[1]]
                    t = d_v0 / (d_v0 - d_v1)
                    polygon_vertices[:, polygon_n_vertices] = pos_v0 + t * (pos_v1 - pos_v0)
                    polygon_n_vertices += 1
            # Intersects the polygon with the four halfspaces of the four triangles
            # of the tetrahedral element1.
            for face in range(4):
                clipped_n_vertices = 0
                x = tet_vertices1[:, (face + 1) % 4]
                normal = (tet_vertices1[:, (face + 2) % 4] - x).cross(
                    tet_vertices1[:, (face + 3) % 4] - x
                ) * normal_signs[face]
                normal /= normal.norm()
                distances = qd.Vector.zero(gs.qd_float, 8)
                for i in range(polygon_n_vertices):
                    distances[i] = (polygon_vertices[:, i] - x).dot(normal)
                # Sutherland-Hodgman style clip of each polygon edge (i, j).
                for i in range(polygon_n_vertices):
                    j = (i + 1) % polygon_n_vertices
                    if distances[i] <= 0.0:
                        # Vertex i is inside the halfspace: keep it.
                        clipped_vertices[:, clipped_n_vertices] = polygon_vertices[:, i]
                        clipped_n_vertices += 1
                        if distances[j] > 0.0:
                            # Edge exits the halfspace: add the crossing point.
                            wa = distances[j] / (distances[j] - distances[i])
                            wb = 1.0 - wa
                            clipped_vertices[:, clipped_n_vertices] = (
                                wa * polygon_vertices[:, i] + wb * polygon_vertices[:, j]
                            )
                            clipped_n_vertices += 1
                    elif distances[j] <= 0.0:
                        # Edge enters the halfspace: add the crossing point.
                        wa = distances[j] / (distances[j] - distances[i])
                        wb = 1.0 - wa
                        clipped_vertices[:, clipped_n_vertices] = (
                            wa * polygon_vertices[:, i] + wb * polygon_vertices[:, j]
                        )
                        clipped_n_vertices += 1
                polygon_n_vertices = clipped_n_vertices
                polygon_vertices = clipped_vertices
                if polygon_n_vertices < 3:
                    # If the polygon has less than 3 vertices, it is not a valid contact
                    break
            if polygon_n_vertices < 3:
                continue
            # compute centroid and area of the polygon
            total_area = 0.0
            total_area_weighted_centroid = qd.Vector.zero(gs.qd_float, 3)
            for i in range(2, polygon_n_vertices):
                accumulate_area_centroid(polygon_vertices, i, total_area, total_area_weighted_centroid)
            if total_area < self.eps:
                continue
            centroid = total_area_weighted_centroid / total_area
            barycentric0 = tet_barycentric(centroid, tet_vertices0)
            barycentric1 = tet_barycentric(centroid, tet_vertices1)
            # Build an orthonormal contact frame around the candidate normal.
            tangent0 = polygon_vertices[:, 0] - centroid
            tangent0 /= tangent0.norm()
            tangent1 = self.contact_candidates[i_c].normal.cross(tangent0)
            pressure = barycentric0.dot(tet_pressures0)
            g0 = self.coupler.fem_pressure_gradient[i_b, i_e0].dot(self.contact_candidates[i_c].normal)
            g1 = -self.coupler.fem_pressure_gradient[i_b, i_e1].dot(self.contact_candidates[i_c].normal)
            # FIXME This is an approximated value, different from Drake, which actually calculates the distance
            deformable_phi0 = -pressure / g0 - pressure / g1
            if deformable_phi0 > gs.EPS:
                continue
            i_p = qd.atomic_add(self.n_contact_pairs[None], 1)
            if i_p < self.max_contact_pairs:
                self.contact_pairs[i_p].batch_idx = i_b
                self.contact_pairs[i_p].normal = self.contact_candidates[i_c].normal
                self.contact_pairs[i_p].tangent0 = tangent0
                self.contact_pairs[i_p].tangent1 = tangent1
                self.contact_pairs[i_p].geom_idx0 = i_e0
                self.contact_pairs[i_p].geom_idx1 = i_e1
                self.contact_pairs[i_p].barycentric0 = barycentric0
                self.contact_pairs[i_p].barycentric1 = barycentric1
                deformable_g = self.coupler._hydroelastic_stiffness
                deformable_k = total_area * deformable_g
                sap_info[i_p].k = deformable_k
                sap_info[i_p].phi0 = deformable_phi0
                # Geometric mean of the two elements' friction coefficients.
                sap_info[i_p].mu = qd.sqrt(
                    self.fem_solver.elements_i[i_e0].friction_mu * self.fem_solver.elements_i[i_e1].friction_mu
                )
            else:
                overflow = True
        return overflow

    @qd.func
    def detection(
        self,
        f: qd.i32,
        links_info: array_class.LinksInfo,
        verts_info: array_class.VertsInfo,
        faces_info: array_class.FacesInfo,
        free_verts_state: array_class.VertsState,
        fixed_verts_state: array_class.VertsState,
        geoms_info: array_class.GeomsInfo,
    ):
        """
        Full self-contact detection at substep ``f``: BVH broad phase, then
        candidate and pair construction. The rigid-side arguments are unused
        here; they are part of the common handler interface. Returns True if
        any stage overflowed its buffers.
        """
        overflow = False
        overflow |= self.coupler.fem_surface_tet_bvh.query(self.coupler.fem_surface_tet_aabb.aabbs)
        overflow |= self.compute_candidates(f)
        overflow |= self.compute_pairs(f)
        return overflow

    @qd.func
    def compute_Jx(self, i_p, x):
        """
        Compute the contact Jacobian J times a vector x.
        """
        i_b = self.contact_pairs[i_p].batch_idx
        i_g0 = self.contact_pairs[i_p].geom_idx0
        i_g1 = self.contact_pairs[i_p].geom_idx1
        Jx = qd.Vector.zero(gs.qd_float, 3)
        # Relative velocity: tet0's interpolated velocity minus tet1's.
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g0].el2v[i]
            Jx += self.contact_pairs[i_p].barycentric0[i] * x[i_b, i_v]
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g1].el2v[i]
            Jx -= self.contact_pairs[i_p].barycentric1[i] * x[i_b, i_v]
        # Express the result in the contact frame (t0, t1, n).
        return qd.Vector(
            [
                Jx.dot(self.contact_pairs[i_p].tangent0),
                Jx.dot(self.contact_pairs[i_p].tangent1),
                Jx.dot(self.contact_pairs[i_p].normal),
            ]
        )

    @qd.func
    def add_Jt_x(self, y, i_p, x):
        """Scatter J^T @ x (x in the contact frame) onto both tets' vertices."""
        i_b = self.contact_pairs[i_p].batch_idx
        i_g0 = self.contact_pairs[i_p].geom_idx0
        i_g1 = self.contact_pairs[i_p].geom_idx1
        # Rotate x from the contact frame to the world frame.
        world = qd.Matrix.cols(
            [self.contact_pairs[i_p].tangent0, self.contact_pairs[i_p].tangent1, self.contact_pairs[i_p].normal]
        )
        x_ = world @ x
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g0].el2v[i]
            if qd.static(self.fem_solver._enable_vertex_constraints):
                # Constrained vertices must not receive contact impulses.
                if not self.fem_solver.vertex_constraints.is_constrained[i_v, i_b]:
                    y[i_b, i_v] += self.contact_pairs[i_p].barycentric0[i] * x_
            else:
                y[i_b, i_v] += self.contact_pairs[i_p].barycentric0[i] * x_
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g1].el2v[i]
            if qd.static(self.fem_solver._enable_vertex_constraints):
                if not self.fem_solver.vertex_constraints.is_constrained[i_v, i_b]:
                    y[i_b, i_v] -= self.contact_pairs[i_p].barycentric1[i] * x_
            else:
                y[i_b, i_v] -= self.contact_pairs[i_p].barycentric1[i] * x_

    @qd.func
    def add_Jt_A_J_diag3x3(self, y, i_p, A):
        """Scatter the 3x3 blocks of J^T A J onto both tets' vertices."""
        i_b = self.contact_pairs[i_p].batch_idx
        i_g0 = self.contact_pairs[i_p].geom_idx0
        i_g1 = self.contact_pairs[i_p].geom_idx1
        world = qd.Matrix.cols(
            [self.contact_pairs[i_p].tangent0, self.contact_pairs[i_p].tangent1, self.contact_pairs[i_p].normal]
        )
        # Rotate A from the contact frame into the world frame once.
        B_ = world @ A @ world.transpose()
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g0].el2v[i]
            if qd.static(self.fem_solver._enable_vertex_constraints):
                if not self.fem_solver.vertex_constraints.is_constrained[i_v, i_b]:
                    y[i_b, i_v] += self.contact_pairs[i_p].barycentric0[i] ** 2 * B_
            else:
                y[i_b, i_v] += self.contact_pairs[i_p].barycentric0[i] ** 2 * B_
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g1].el2v[i]
            if qd.static(self.fem_solver._enable_vertex_constraints):
                if not self.fem_solver.vertex_constraints.is_constrained[i_v, i_b]:
                    y[i_b, i_v] += self.contact_pairs[i_p].barycentric1[i] ** 2 * B_
            else:
                y[i_b, i_v] += self.contact_pairs[i_p].barycentric1[i] ** 2 * B_

    @qd.func
    def compute_delassus(self, i_p):
        """Approximate the contact-frame Delassus block from PCG preconditioner blocks."""
        dt2_inv = 1.0 / self.sim._substep_dt**2
        i_b = self.contact_pairs[i_p].batch_idx
        i_g0 = self.contact_pairs[i_p].geom_idx0
        i_g1 = self.contact_pairs[i_p].geom_idx1
        world = qd.Matrix.cols(
            [self.contact_pairs[i_p].tangent0, self.contact_pairs[i_p].tangent1, self.contact_pairs[i_p].normal]
        )
        W = qd.Matrix.zero(gs.qd_float, 3, 3)
        # W = sum (JA^-1J^T)
        # With floor, J is Identity times the barycentric coordinates
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g0].el2v[i]
            W += self.contact_pairs[i_p].barycentric0[i] ** 2 * dt2_inv * self.fem_solver.pcg_state_v[i_b, i_v].prec
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g1].el2v[i]
            W += self.contact_pairs[i_p].barycentric1[i] ** 2 * dt2_inv * self.fem_solver.pcg_state_v[i_b, i_v].prec
        W = world.transpose() @ W @ world
        return W
@qd.data_oriented
class FEMFloorVertContactHandler(FEMContactHandler):
    """
    Class for handling contact between tetrahedral elements and a floor in a simulation using point contact model.
    This class extends the FEMContact class and provides methods for detecting contact
    between the tetrahedral elements and the floor, computing contact pairs, and managing
    contact-related computations.

    Each contact pair is a single penetrating FEM surface vertex, so the contact
    Jacobian is simply the identity on that vertex.
    """
    def __init__(
        self,
        simulator: "Simulator",
    ) -> None:
        super().__init__(simulator)
        self.name = "FEMFloorVertContactHandler"
        self.fem_solver = self.sim.fem_solver
        # One record per penetrating surface vertex.
        self.contact_pair_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            geom_idx=gs.qd_int,  # index of the vertex
            contact_pos=gs.qd_vec3,  # contact position
            sap_info=self.sap_contact_info_type,  # contact info
        )
        # NOTE(review): capacity is sized by n_surface_elements, but detection() enumerates
        # n_surface_vertices — confirm n_surface_vertices <= n_surface_elements, otherwise
        # the overflow flag may trip on dense meshes.
        self.max_contact_pairs = self.fem_solver.n_surface_elements * self.fem_solver._B
        self.contact_pairs = self.contact_pair_type.field(shape=(self.max_contact_pairs,))
    @qd.func
    def detection(
        self,
        f: qd.i32,
        links_info: array_class.LinksInfo,
        verts_info: array_class.VertsInfo,
        faces_info: array_class.FacesInfo,
        free_verts_state: array_class.VertsState,
        fixed_verts_state: array_class.VertsState,
        geoms_info: array_class.GeomsInfo,
    ):
        """
        Build contact pairs for all FEM surface vertices below the floor plane.

        Returns True if more pairs were found than `max_contact_pairs` (overflow).
        Only `fem_solver` state is read; the rigid-side arguments are unused here
        but keep the shared handler signature.
        """
        overflow = False
        sap_info = qd.static(self.contact_pairs.sap_info)
        # Compute contact pairs
        self.n_contact_pairs[None] = 0
        for i_b, i_sv in qd.ndrange(self.fem_solver._B, self.fem_solver.n_surface_vertices):
            i_v = self.fem_solver.surface_vertices[i_sv]
            pos_v = self.fem_solver.elements_v[f, i_v, i_b].pos
            # Signed distance to the floor plane z = floor_height; negative means penetrating.
            distance = pos_v.z - self.fem_solver.floor_height
            if distance > 0.0:
                continue
            i_p = qd.atomic_add(self.n_contact_pairs[None], 1)
            if i_p < self.max_contact_pairs:
                self.contact_pairs[i_p].batch_idx = i_b
                self.contact_pairs[i_p].geom_idx = i_v
                # NOTE(review): contact_pos is declared in the pair struct but never
                # written by this handler — confirm downstream consumers don't read it.
                # Stiffness scaled by the lumped vertex mass.
                sap_info[i_p].k = self.coupler._point_contact_stiffness * self.fem_solver.surface_vert_mass[i_v]
                sap_info[i_p].phi0 = distance
                sap_info[i_p].mu = self.fem_solver.elements_v_info[i_v].friction_mu
            else:
                overflow = True
        return overflow
    @qd.func
    def compute_Jx(self, i_p, x):
        """
        Compute the contact Jacobian J times a vector x.

        For vertex-floor contact J is the identity on the contacting vertex.
        """
        i_b = self.contact_pairs[i_p].batch_idx
        i_g = self.contact_pairs[i_p].geom_idx
        Jx = x[i_b, i_g]
        return Jx
    @qd.func
    def add_Jt_x(self, y, i_p, x):
        """Accumulate J^T x for pair `i_p` into `y` (identity Jacobian on one vertex)."""
        i_b = self.contact_pairs[i_p].batch_idx
        i_g = self.contact_pairs[i_p].geom_idx
        if qd.static(self.fem_solver._enable_vertex_constraints):
            # Constrained (pinned) vertices receive no contact contribution.
            if not self.fem_solver.vertex_constraints.is_constrained[i_g, i_b]:
                y[i_b, i_g] += x
        else:
            y[i_b, i_g] += x
    @qd.func
    def add_Jt_A_J_diag3x3(self, y, i_p, A):
        """Accumulate the 3x3 diagonal block J^T A J for pair `i_p` into `y` (J = identity)."""
        i_b = self.contact_pairs[i_p].batch_idx
        i_g = self.contact_pairs[i_p].geom_idx
        if qd.static(self.fem_solver._enable_vertex_constraints):
            if not self.fem_solver.vertex_constraints.is_constrained[i_g, i_b]:
                y[i_b, i_g] += A
        else:
            y[i_b, i_g] += A
    @qd.func
    def compute_delassus(self, i_p):
        """Delassus operator for pair `i_p`: the vertex PCG preconditioner scaled by 1/dt^2."""
        dt2_inv = 1.0 / self.sim._substep_dt**2
        i_b = self.contact_pairs[i_p].batch_idx
        i_g = self.contact_pairs[i_p].geom_idx
        # W = sum (JA^-1J^T)
        # With floor, J is Identity
        W = self.fem_solver.pcg_state_v[i_b, i_g].prec * dt2_inv
        return W
@qd.data_oriented
class RigidFloorVertContactHandler(RigidContactHandler):
    """
    Point-contact handler between free rigid-body mesh vertices and the floor plane.

    Detection emits one contact pair per penetrating free vertex; the Jt / M_inv_Jt / W
    fields are scratch buffers presumably consumed by the rigid-side Delassus machinery
    inherited from RigidContactHandler (not visible here — verify).
    """
    def __init__(
        self,
        simulator: "Simulator",
    ) -> None:
        super().__init__(simulator)
        self.name = "RigidFloorVertContactHandler"
        self.rigid_solver = self.sim.rigid_solver
        # The floor definition is owned by the FEM solver and shared here.
        self.floor_height = self.sim.fem_solver.floor_height
        self.contact_pair_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            link_idx=gs.qd_int,  # index of the link
            contact_pos=gs.qd_vec3,  # contact position
            sap_info=self.sap_contact_info_type,  # contact info
        )
        self.max_contact_pairs = self.rigid_solver.n_free_verts * self.sim._B
        self.contact_pairs = self.contact_pair_type.field(shape=(self.max_contact_pairs,))
        # Per-pair transposed Jacobian columns and their M^-1-mapped counterparts, one per DOF.
        self.Jt = qd.field(gs.qd_vec3, shape=(self.max_contact_pairs, self.rigid_solver.n_dofs))
        self.M_inv_Jt = qd.field(gs.qd_vec3, shape=(self.max_contact_pairs, self.rigid_solver.n_dofs))
        self.W = qd.field(gs.qd_mat3, shape=(self.max_contact_pairs,))
    @qd.func
    def detection(
        self,
        f: qd.i32,
        links_info: array_class.LinksInfo,
        verts_info: array_class.VertsInfo,
        faces_info: array_class.FacesInfo,
        free_verts_state: array_class.VertsState,
        fixed_verts_state: array_class.VertsState,
        geoms_info: array_class.GeomsInfo,
    ):
        """
        Build contact pairs for all free rigid vertices below the floor plane.

        Returns True on pair-buffer overflow. Fixed vertices are skipped since they
        cannot respond to contact forces.
        """
        overflow = False
        sap_info = qd.static(self.contact_pairs.sap_info)
        # Hard-coded contact stiffness for rigid-vs-floor point contacts
        # (units presumably N/m — TODO confirm / consider making configurable).
        C = qd.static(1.0e6)
        # Compute contact pairs
        self.n_contact_pairs[None] = 0
        for i_b, i_v in qd.ndrange(self.rigid_solver._B, self.rigid_solver.n_verts):
            if verts_info.is_fixed[i_v]:
                continue
            i_fv = verts_info.verts_state_idx[i_v]
            pos_v = free_verts_state.pos[i_fv, i_b]
            # Signed distance to the floor plane; negative means penetrating.
            distance = pos_v.z - self.floor_height
            if distance > 0.0:
                continue
            i_g = verts_info.geom_idx[i_v]
            i_l = geoms_info.link_idx[i_g]
            i_p = qd.atomic_add(self.n_contact_pairs[None], 1)
            if i_p < self.max_contact_pairs:
                self.contact_pairs[i_p].batch_idx = i_b
                self.contact_pairs[i_p].link_idx = i_l
                self.contact_pairs[i_p].contact_pos = pos_v
                sap_info[i_p].k = C
                sap_info[i_p].phi0 = distance
                sap_info[i_p].mu = geoms_info.coup_friction[i_g]
            else:
                overflow = True
        return overflow
@qd.data_oriented
class RigidFloorTetContactHandler(RigidContactHandler):
    """
    Hydroelastic contact handler between rigid-body tetrahedral volume elements and the floor.

    Detection runs in two phases: (1) candidates — tets whose vertices straddle the
    floor plane, encoded with a 4-bit intersection code; (2) pairs — the contact
    polygon of each candidate is reconstructed via a marching-tetrahedra edge table,
    and the pressure field interpolated at its centroid yields the SAP stiffness/phi0.
    """
    def __init__(
        self,
        simulator: "Simulator",
        eps: float = 1e-10,
    ) -> None:
        super().__init__(simulator)
        self.name = "RigidFloorTetContactHandler"
        self.rigid_solver = self.sim.rigid_solver
        self.floor_height = self.sim.fem_solver.floor_height
        # Numerical tolerance for degenerate polygons / separating contacts.
        self.eps = eps
        self.contact_candidate_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            geom_idx=gs.qd_int,  # index of the element
            intersection_code=gs.qd_int,  # intersection code for the element
            distance=gs.qd_vec4,  # distance vector for the element
        )
        self.n_contact_candidates = qd.field(gs.qd_int, shape=())
        # Factor 8 gives headroom over one candidate per element per batch.
        self.max_contact_candidates = self.coupler.rigid_volume_elems.shape[0] * self.sim._B * 8
        self.contact_candidates = self.contact_candidate_type.field(shape=(self.max_contact_candidates,))
        self.contact_pair_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            link_idx=gs.qd_int,  # index of the link
            contact_pos=gs.qd_vec3,  # contact position
            sap_info=self.sap_contact_info_type,  # contact info
        )
        self.max_contact_pairs = self.coupler.rigid_volume_elems.shape[0] * self.sim._B
        self.contact_pairs = self.contact_pair_type.field(shape=(self.max_contact_pairs,))
        # Per-pair transposed Jacobian columns and their M^-1-mapped counterparts, one per DOF.
        self.Jt = qd.field(gs.qd_vec3, shape=(self.max_contact_pairs, self.rigid_solver.n_dofs))
        self.M_inv_Jt = qd.field(gs.qd_vec3, shape=(self.max_contact_pairs, self.rigid_solver.n_dofs))
        self.W = qd.field(gs.qd_mat3, shape=(self.max_contact_pairs,))
    @qd.func
    def detection(
        self,
        f: qd.i32,
        links_info: array_class.LinksInfo,
        verts_info: array_class.VertsInfo,
        faces_info: array_class.FacesInfo,
        free_verts_state: array_class.VertsState,
        fixed_verts_state: array_class.VertsState,
        geoms_info: array_class.GeomsInfo,
    ):
        """
        Detect rigid-tet-vs-floor contacts and fill `contact_pairs`.

        Returns True if either the candidate or the pair buffer overflowed.
        """
        overflow = False
        candidates = qd.static(self.contact_candidates)
        # Compute contact pairs
        self.n_contact_candidates[None] = 0
        # TODO Check surface element only instead of all elements
        for i_b, i_e in qd.ndrange(self.sim._B, self.coupler.n_rigid_volume_elems):
            i_g = self.coupler.rigid_volume_elems_geom_idx[i_e]
            i_l = geoms_info.link_idx[i_g]
            # Fixed links cannot respond to contact forces.
            if links_info.is_fixed[i_l]:
                continue
            # Bit i of intersection_code is set iff vertex i lies above the floor.
            intersection_code = qd.int32(0)
            distance = qd.Vector.zero(gs.qd_float, 4)
            for i in qd.static(range(4)):
                i_v = self.coupler.rigid_volume_elems[i_e][i]
                pos_v = self.coupler.rigid_volume_verts[i_b, i_v]
                distance[i] = pos_v.z - self.floor_height
                if distance[i] > 0.0:
                    intersection_code |= 1 << i
            # check if the element intersect with the floor
            # (0 = fully below, 15 = fully above: no plane crossing either way)
            if intersection_code != 0 and intersection_code != 15:
                i_c = qd.atomic_add(self.n_contact_candidates[None], 1)
                if i_c < self.max_contact_candidates:
                    candidates[i_c].batch_idx = i_b
                    candidates[i_c].geom_idx = i_e
                    candidates[i_c].intersection_code = intersection_code
                    candidates[i_c].distance = distance
                else:
                    overflow = True
        pairs = qd.static(self.contact_pairs)
        sap_info = qd.static(pairs.sap_info)
        self.n_contact_pairs[None] = 0
        # Compute pair from candidates
        result_count = qd.min(self.n_contact_candidates[None], self.max_contact_candidates)
        for i_c in range(result_count):
            candidate = candidates[i_c]
            i_b = candidate.batch_idx
            i_e = candidate.geom_idx
            intersection_code = candidate.intersection_code
            distance = candidate.distance
            # The marching-tets table lists the (up to 4) tet edges crossed by the plane.
            intersected_edges = self.coupler.MarchingTetsEdgeTable[intersection_code]
            tet_vertices = qd.Matrix.zero(gs.qd_float, 3, 4)  # 4 vertices
            tet_pressures = qd.Vector.zero(gs.qd_float, 4)  # pressures at the vertices
            for i in qd.static(range(4)):
                i_v = self.coupler.rigid_volume_elems[i_e][i]
                tet_vertices[:, i] = self.coupler.rigid_volume_verts[i_b, i_v]
                tet_pressures[i] = self.coupler.rigid_pressure_field[i_v]
            polygon_vertices = qd.Matrix.zero(gs.qd_float, 3, 4)  # 3 or 4 vertices
            total_area = gs.EPS  # avoid division by zero
            total_area_weighted_centroid = qd.Vector([0.0, 0.0, 0.0])
            for i in range(4):
                if intersected_edges[i] >= 0:
                    # Linear interpolation along the crossed edge to the zero-distance point.
                    edge = self.coupler.TetEdges[intersected_edges[i]]
                    pos_v0 = tet_vertices[:, edge[0]]
                    pos_v1 = tet_vertices[:, edge[1]]
                    d_v0 = distance[edge[0]]
                    d_v1 = distance[edge[1]]
                    t = d_v0 / (d_v0 - d_v1)
                    polygon_vertices[:, i] = pos_v0 + t * (pos_v1 - pos_v0)
                    # Compute tirangle area and centroid (fan triangulation rooted at vertex 0)
                    if i >= 2:
                        e1 = polygon_vertices[:, i - 1] - polygon_vertices[:, 0]
                        e2 = polygon_vertices[:, i] - polygon_vertices[:, 0]
                        area = 0.5 * e1.cross(e2).norm()
                        total_area += area
                        total_area_weighted_centroid += (
                            area * (polygon_vertices[:, 0] + polygon_vertices[:, i - 1] + polygon_vertices[:, i]) / 3.0
                        )
            centroid = total_area_weighted_centroid / total_area
            # Compute barycentric coordinates
            barycentric = tet_barycentric(centroid, tet_vertices)
            # Pressure at the centroid, interpolated from the tet's vertex pressures.
            pressure = (
                barycentric[0] * tet_pressures[0]
                + barycentric[1] * tet_pressures[1]
                + barycentric[2] * tet_pressures[2]
                + barycentric[3] * tet_pressures[3]
            )
            rigid_g = self.coupler.rigid_pressure_gradient[i_b, i_e].z
            # Harmonic average with the floor: a rigid floor has infinite stiffness,
            # so the average degenerates to the tet's own gradient.
            g = rigid_g  # harmonic average
            rigid_k = total_area * g
            rigid_phi0 = -pressure / g
            # Skip degenerate (tiny k) or separating (positive phi0) contacts.
            if rigid_k < self.eps or rigid_phi0 > self.eps:
                continue
            i_p = qd.atomic_add(self.n_contact_pairs[None], 1)
            i_g = self.coupler.rigid_volume_elems_geom_idx[i_e]
            i_l = geoms_info.link_idx[i_g]
            if i_p < self.max_contact_pairs:
                pairs[i_p].batch_idx = i_b
                pairs[i_p].link_idx = i_l
                pairs[i_p].contact_pos = centroid
                sap_info[i_p].k = rigid_k
                sap_info[i_p].phi0 = rigid_phi0
                sap_info[i_p].mu = geoms_info.coup_friction[i_g]
            else:
                overflow = True
        return overflow
@qd.data_oriented
class RigidFemTriTetContactHandler(RigidFEMContactHandler):
    """
    Hydroelastic contact handler between rigid-body surface triangles and FEM
    tetrahedral elements.

    NOTE(review): the original docstring described this as FEM "self-contact", but the
    code pairs rigid triangles (rigid_tri_bvh, rigid faces) against FEM surface tets
    (fem_pressure / fem_pressure_gradient) — docstring corrected accordingly.

    Detection pipeline: BVH broad phase -> compute_candidates (triangle plane vs tet
    intersection codes) -> compute_pairs (clip triangle against the tet, derive the
    SAP contact data from the FEM pressure field at the polygon centroid).
    """
    def __init__(
        self,
        simulator: "Simulator",
        eps: float = 1e-10,
    ) -> None:
        super().__init__(simulator)
        self.name = "RigidFemTriTetContactHandler"
        # Numerical tolerance for degenerate polygons / vanishing gradients.
        self.eps = eps
        self.contact_candidate_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            geom_idx0=gs.qd_int,  # index of the FEM element
            geom_idx1=gs.qd_int,  # index of the Rigid Triangle
            vert_idx1=gs.qd_ivec3,  # vertex indices of the rigid triangle
            normal=gs.qd_vec3,  # contact plane normal
            x=gs.qd_vec3,  # a point on the contact plane
        )
        self.n_contact_candidates = qd.field(gs.qd_int, shape=())
        # Factor 8 gives headroom for several candidates per primitive per batch.
        self.max_contact_candidates = (
            max(self.fem_solver.n_surface_elements, self.rigid_solver.n_faces) * self.fem_solver._B * 8
        )
        self.contact_candidates = self.contact_candidate_type.field(shape=(self.max_contact_candidates,))
        self.contact_pair_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            normal=gs.qd_vec3,  # contact plane normal
            tangent0=gs.qd_vec3,  # contact plane tangent0
            tangent1=gs.qd_vec3,  # contact plane tangent1
            geom_idx0=gs.qd_int,  # index of the FEM element
            barycentric0=gs.qd_vec4,  # barycentric coordinates of the contact point in tet
            link_idx=gs.qd_int,  # index of the link
            contact_pos=gs.qd_vec3,  # contact position
            sap_info=self.sap_contact_info_type,  # contact info
        )
        self.max_contact_pairs = max(self.fem_solver.n_surface_elements, self.rigid_solver.n_faces) * self.fem_solver._B
        self.contact_pairs = self.contact_pair_type.field(shape=(self.max_contact_pairs,))
        # Per-pair transposed Jacobian columns and their M^-1-mapped counterparts, one per DOF.
        self.Jt = qd.field(gs.qd_vec3, shape=(self.max_contact_pairs, self.rigid_solver.n_dofs))
        self.M_inv_Jt = qd.field(gs.qd_vec3, shape=(self.max_contact_pairs, self.rigid_solver.n_dofs))
        self.W = qd.field(gs.qd_mat3, shape=(self.max_contact_pairs,))
    @qd.func
    def compute_candidates(
        self,
        f: qd.i32,
        faces_info: array_class.FacesInfo,
        verts_info: array_class.VertsInfo,
        free_verts_state: array_class.VertsState,
        fixed_verts_state: array_class.VertsState,
    ):
        """
        Narrow down BVH query results to (rigid triangle, FEM tet) candidates whose
        tet actually straddles the triangle's plane. Returns True on overflow.
        """
        self.n_contact_candidates[None] = 0
        overflow = False
        result_count = qd.min(
            self.coupler.rigid_tri_bvh.query_result_count[None], self.coupler.rigid_tri_bvh.max_query_results
        )
        for i_r in range(result_count):
            # (batch, rigid face, FEM surface-element slot) from the broad phase.
            i_b, i_a, i_sq = self.coupler.rigid_tri_bvh.query_result[i_r]
            i_q = self.fem_solver.surface_elements[i_sq]
            vert_idx1 = qd.Vector.zero(gs.qd_int, 3)
            tri_vertices = qd.Matrix.zero(gs.qd_float, 3, 3)
            for i in qd.static(range(3)):
                i_v = faces_info.verts_idx[i_a][i]
                i_fv = verts_info.verts_state_idx[i_v]
                # Fixed and free vertices live in separate state arrays.
                if verts_info.is_fixed[i_v]:
                    tri_vertices[:, i] = fixed_verts_state.pos[i_fv]
                else:
                    tri_vertices[:, i] = free_verts_state.pos[i_fv, i_b]
                vert_idx1[i] = i_v
            pos_v0, pos_v1, pos_v2 = tri_vertices[:, 0], tri_vertices[:, 1], tri_vertices[:, 2]
            normal = (pos_v1 - pos_v0).cross(pos_v2 - pos_v0)
            magnitude_sqr = normal.norm_sqr()
            # Skip degenerate (near-zero-area) triangles.
            if magnitude_sqr < gs.EPS:
                continue
            normal *= qd.rsqrt(magnitude_sqr)
            # Require the FEM pressure gradient to point through the triangle,
            # i.e. the tet approaches the rigid surface from the correct side.
            g0 = self.coupler.fem_pressure_gradient[i_b, i_q]
            if g0.dot(normal) < gs.EPS:
                continue
            # Bit i set iff tet vertex i lies on the positive side of the triangle plane.
            intersection_code = qd.int32(0)
            for i in qd.static(range(4)):
                i_v = self.fem_solver.elements_i[i_q].el2v[i]
                pos_v = self.fem_solver.elements_v[f, i_v, i_b].pos
                distance = (pos_v - pos_v0).dot(normal)  # signed distance
                if distance > 0.0:
                    intersection_code |= 1 << i
            # 0 / 15 = tet entirely on one side: no plane crossing.
            if intersection_code == 0 or intersection_code == 15:
                continue
            i_c = qd.atomic_add(self.n_contact_candidates[None], 1)
            if i_c < self.max_contact_candidates:
                self.contact_candidates[i_c].batch_idx = i_b
                self.contact_candidates[i_c].normal = normal
                self.contact_candidates[i_c].x = pos_v0
                self.contact_candidates[i_c].geom_idx0 = i_q
                self.contact_candidates[i_c].geom_idx1 = i_a
                self.contact_candidates[i_c].vert_idx1 = vert_idx1
            else:
                overflow = True
        return overflow
    @qd.func
    def compute_pairs(
        self,
        f: qd.i32,
        verts_info: array_class.VertsInfo,
        geoms_info: array_class.GeomsInfo,
        free_verts_state: array_class.VertsState,
        fixed_verts_state: array_class.VertsState,
    ):
        """
        Computes the tet triangle intersection pair and their properties.

        The rigid triangle is clipped against the four half-spaces of the FEM tet
        (Sutherland-Hodgman style); the surviving polygon's centroid provides the
        contact point, and the FEM pressure there provides the SAP k/phi0.

        Intersection code reference:
        https://github.com/RobotLocomotion/drake/blob/49ab120ec6f5981484918daa821fc7101e10ebc6/geometry/proximity/mesh_intersection.cc
        """
        sap_info = qd.static(self.contact_pairs.sap_info)
        overflow = False
        normal_signs = qd.Vector([1.0, -1.0, 1.0, -1.0])  # make normal point outward
        self.n_contact_pairs[None] = 0
        result_count = qd.min(self.n_contact_candidates[None], self.max_contact_candidates)
        for i_c in range(result_count):
            i_b = self.contact_candidates[i_c].batch_idx
            i_e = self.contact_candidates[i_c].geom_idx0
            tri_vertices = qd.Matrix.zero(gs.qd_float, 3, 3)  # 3 vertices of the triangle
            tet_vertices = qd.Matrix.zero(gs.qd_float, 3, 4)  # 4 vertices of tet 0
            tet_pressures = qd.Vector.zero(gs.qd_float, 4)  # pressures at the vertices of tet 0
            for i in qd.static(range(3)):
                i_v = self.contact_candidates[i_c].vert_idx1[i]
                i_fv = verts_info.verts_state_idx[i_v]
                if verts_info.is_fixed[i_v]:
                    tri_vertices[:, i] = fixed_verts_state.pos[i_fv]
                else:
                    tri_vertices[:, i] = free_verts_state.pos[i_fv, i_b]
            for i in qd.static(range(4)):
                i_v = self.fem_solver.elements_i[i_e].el2v[i]
                tet_vertices[:, i] = self.fem_solver.elements_v[f, i_v, i_b].pos
                tet_pressures[i] = self.coupler.fem_pressure[i_v]
            # Clipping a triangle by 4 planes yields at most 3 + 4 = 7 vertices.
            polygon_vertices = qd.Matrix.zero(gs.qd_float, 3, 7)  # maximum 7 vertices
            polygon_n_vertices = 3
            for i in qd.static(range(3)):
                polygon_vertices[:, i] = tri_vertices[:, i]
            clipped_vertices = qd.Matrix.zero(gs.qd_float, 3, 7)  # maximum 7 vertices
            clipped_n_vertices = 0
            distances = qd.Vector.zero(gs.qd_float, 7)
            for face in range(4):
                clipped_n_vertices = 0
                # Outward-oriented plane of tet face `face` (sign fixes winding).
                x = tet_vertices[:, (face + 1) % 4]
                normal = (tet_vertices[:, (face + 2) % 4] - x).cross(
                    tet_vertices[:, (face + 3) % 4] - x
                ) * normal_signs[face]
                normal /= normal.norm()
                for i in range(polygon_n_vertices):
                    distances[i] = (polygon_vertices[:, i] - x).dot(normal)
                for i in range(polygon_n_vertices):
                    j = (i + 1) % polygon_n_vertices
                    # Keep vertices on the inside (non-positive distance) ...
                    if distances[i] <= 0.0:
                        clipped_vertices[:, clipped_n_vertices] = polygon_vertices[:, i]
                        clipped_n_vertices += 1
                    # ... and insert the edge/plane crossing when signs differ.
                    if distances[i] * distances[j] < 0.0:
                        wa = distances[j] / (distances[j] - distances[i])
                        wb = 1.0 - wa
                        clipped_vertices[:, clipped_n_vertices] = (
                            wa * polygon_vertices[:, i] + wb * polygon_vertices[:, j]
                        )
                        clipped_n_vertices += 1
                polygon_n_vertices = clipped_n_vertices
                polygon_vertices = clipped_vertices
                if polygon_n_vertices < 3:
                    # If the polygon has less than 3 vertices, it is not a valid contact
                    break
            if polygon_n_vertices < 3:
                continue
            # Fan triangulation rooted at polygon vertex 0 for area-weighted centroid.
            total_area = 0.0
            total_area_weighted_centroid = qd.Vector.zero(gs.qd_float, 3)
            for i in range(2, polygon_n_vertices):
                e1 = polygon_vertices[:, i - 1] - polygon_vertices[:, 0]
                e2 = polygon_vertices[:, i] - polygon_vertices[:, 0]
                area = 0.5 * e1.cross(e2).norm()
                total_area += area
                total_area_weighted_centroid += (
                    area * (polygon_vertices[:, 0] + polygon_vertices[:, i - 1] + polygon_vertices[:, i]) / 3.0
                )
            centroid = total_area_weighted_centroid / total_area
            barycentric0 = tet_barycentric(centroid, tet_vertices)
            # Contact frame: normal from the candidate, tangent0 toward polygon vertex 0.
            tangent0 = (polygon_vertices[:, 0] - centroid).normalized()
            tangent1 = self.contact_candidates[i_c].normal.cross(tangent0)
            # NOTE(review): `rigid_g` is derived from the FEM pressure gradient while
            # `deformable_g` is the rigid hydroelastic stiffness — the names look swapped;
            # the math is symmetric, but confirm intent.
            deformable_g = self.coupler._hydroelastic_stiffness
            rigid_g = self.coupler.fem_pressure_gradient[i_b, i_e].dot(self.contact_candidates[i_c].normal)
            pressure = barycentric0.dot(tet_pressures)
            # Reject degenerate polygons and non-approaching gradients
            # (division by total_area above yields inf/nan there; such pairs are dropped here).
            if total_area < self.eps or rigid_g < self.eps:
                continue
            g = rigid_g * deformable_g / (deformable_g + rigid_g)  # harmonic average
            rigid_k = total_area * g
            rigid_phi0 = -pressure / g
            i_g = verts_info.geom_idx[self.contact_candidates[i_c].vert_idx1[0]]
            i_l = geoms_info.link_idx[i_g]
            i_p = qd.atomic_add(self.n_contact_pairs[None], 1)
            if i_p < self.max_contact_pairs:
                self.contact_pairs[i_p].batch_idx = i_b
                self.contact_pairs[i_p].normal = self.contact_candidates[i_c].normal
                self.contact_pairs[i_p].tangent0 = tangent0
                self.contact_pairs[i_p].tangent1 = tangent1
                self.contact_pairs[i_p].geom_idx0 = i_e
                self.contact_pairs[i_p].barycentric0 = barycentric0
                self.contact_pairs[i_p].link_idx = i_l
                self.contact_pairs[i_p].contact_pos = centroid
                sap_info[i_p].k = rigid_k
                sap_info[i_p].phi0 = rigid_phi0
                # Geometric mean of the two materials' friction coefficients.
                sap_info[i_p].mu = qd.sqrt(self.fem_solver.elements_i[i_e].friction_mu * geoms_info.coup_friction[i_g])
            else:
                overflow = True
        return overflow
    @qd.func
    def detection(
        self,
        f: qd.i32,
        links_info: array_class.LinksInfo,
        verts_info: array_class.VertsInfo,
        faces_info: array_class.FacesInfo,
        free_verts_state: array_class.VertsState,
        fixed_verts_state: array_class.VertsState,
        geoms_info: array_class.GeomsInfo,
    ):
        """Full detection pipeline: BVH query -> candidates -> pairs. True on any overflow."""
        overflow = False
        overflow |= self.coupler.rigid_tri_bvh.query(self.coupler.fem_surface_tet_aabb.aabbs)
        overflow |= self.compute_candidates(f, faces_info, verts_info, free_verts_state, fixed_verts_state)
        overflow |= self.compute_pairs(f, verts_info, geoms_info, free_verts_state, fixed_verts_state)
        return overflow
    @qd.func
    def compute_delassus_world_frame(
        self,
        entities_info: array_class.EntitiesInfo,
        rigid_global_info: array_class.RigidGlobalInfo,
    ):
        """
        Assemble the world-frame Delassus operator W for every pair: the rigid
        contribution J M^-1 J^T plus the FEM diagonal-preconditioner contribution.
        """
        dt2_inv = 1.0 / self.sim._substep_dt**2
        # rigid
        self.coupler.rigid_solve_jacobian(
            self.Jt,
            self.M_inv_Jt,
            self.n_contact_pairs[None],
            self.contact_pairs.batch_idx,
            3,
            entities_info=entities_info,
            rigid_global_info=rigid_global_info,
        )
        self.W.fill(0.0)
        # W += Jt^T M^-1 Jt accumulated component-wise over all DOFs.
        for i_p, i_d, i, j in qd.ndrange(self.n_contact_pairs[None], self.rigid_solver.n_dofs, 3, 3):
            self.W[i_p][i, j] += self.M_inv_Jt[i_p, i_d][i] * self.Jt[i_p, i_d][j]
        # fem
        barycentric0 = qd.static(self.contact_pairs.barycentric0)
        for i_p in range(self.n_contact_pairs[None]):
            i_g0 = self.contact_pairs[i_p].geom_idx0
            i_b = self.contact_pairs[i_p].batch_idx
            for i in qd.static(range(4)):
                i_v = self.fem_solver.elements_i[i_g0].el2v[i]
                self.W[i_p] += barycentric0[i_p][i] ** 2 * dt2_inv * self.fem_solver.pcg_state_v[i_b, i_v].prec
    @qd.func
    def compute_delassus(self, i_p):
        """Return the precomputed world-frame W of pair `i_p`, rotated into the contact frame."""
        world = qd.Matrix.cols(
            [self.contact_pairs[i_p].tangent0, self.contact_pairs[i_p].tangent1, self.contact_pairs[i_p].normal]
        )
        return world.transpose() @ self.W[i_p] @ world
    @qd.func
    def compute_Jx(self, i_p, x0, x1):
        """
        Compute the contact Jacobian J times a vector x.

        x0 is the FEM per-vertex vector, x1 the rigid per-DOF vector; the result is
        the relative velocity-like quantity expressed in the contact frame.
        """
        i_b = self.contact_pairs[i_p].batch_idx
        i_g0 = self.contact_pairs[i_p].geom_idx0
        Jx = qd.Vector.zero(gs.qd_float, 3)
        # fem
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g0].el2v[i]
            Jx = Jx + self.contact_pairs[i_p].barycentric0[i] * x0[i_b, i_v]
        # rigid (opposite sign: relative motion between the two bodies)
        for i in range(self.rigid_solver.n_dofs):
            Jx = Jx - self.Jt[i_p, i] * x1[i_b, i]
        return qd.Vector(
            [
                Jx.dot(self.contact_pairs[i_p].tangent0),
                Jx.dot(self.contact_pairs[i_p].tangent1),
                Jx.dot(self.contact_pairs[i_p].normal),
            ]
        )
    @qd.func
    def add_Jt_x(self, y0, y1, i_p, x):
        """Scatter J^T x into the FEM vertex vector `y0` and rigid DOF vector `y1`."""
        i_b = self.contact_pairs[i_p].batch_idx
        i_g0 = self.contact_pairs[i_p].geom_idx0
        world = qd.Matrix.cols(
            [self.contact_pairs[i_p].tangent0, self.contact_pairs[i_p].tangent1, self.contact_pairs[i_p].normal]
        )
        # Rotate x from the contact frame to world frame before scattering.
        x_ = world @ x
        # fem
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g0].el2v[i]
            y0[i_b, i_v] += self.contact_pairs[i_p].barycentric0[i] * x_
        # rigid
        for i in range(self.rigid_solver.n_dofs):
            y1[i_b, i] -= self.Jt[i_p, i].dot(x_)
    @qd.func
    def add_Jt_A_J_diag3x3(self, y, i_p, A):
        """Accumulate the FEM-side 3x3 diagonal blocks of J^T A J for pair `i_p` into `y`."""
        i_b = self.contact_pairs[i_p].batch_idx
        i_g0 = self.contact_pairs[i_p].geom_idx0
        world = qd.Matrix.cols(
            [self.contact_pairs[i_p].tangent0, self.contact_pairs[i_p].tangent1, self.contact_pairs[i_p].normal]
        )
        B_ = world @ A @ world.transpose()
        for i in qd.static(range(4)):
            i_v = self.fem_solver.elements_i[i_g0].el2v[i]
            # NOTE(review): bounds check differs from other handlers (which test vertex
            # constraints instead) — confirm what out-of-range el2v entries represent here.
            if i_v < self.fem_solver.n_vertices:
                y[i_b, i_v] += self.contact_pairs[i_p].barycentric0[i] ** 2 * B_
@qd.data_oriented
class RigidRigidTetContactHandler(RigidRigidContactHandler):
    """
    Class for handling contact between Rigid bodies using hydroelastic model.
    This class extends the RigidContact class and provides methods for detecting contact
    between tetrahedral elements, computing contact pairs, and managing contact-related computations.

    The contact surface between two overlapping rigid tets is the equal-pressure
    plane of their (linear) pressure fields; the intersection polygon on that plane
    is built with a marching-tets cut of tet0 and then clipped by tet1's faces.
    """
    def __init__(
        self,
        simulator: "Simulator",
        eps: float = 1e-10,
    ) -> None:
        super().__init__(simulator)
        self.coupler = simulator.coupler
        self.name = "RigidRigidTetContactHandler"
        # Numerical tolerance for degenerate polygons / gradient alignment checks.
        self.eps = eps
        # Only tet0's plane-intersection data is cached; tet1 is re-clipped in compute_pairs.
        self.contact_candidate_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            geom_idx0=gs.qd_int,  # index of the element
            geom_idx1=gs.qd_int,  # index of the other element
            intersection_code0=gs.qd_int,  # intersection code for element0
            normal=gs.qd_vec3,  # contact plane normal
            x=gs.qd_vec3,  # a point on the contact plane
            distance0=gs.qd_vec4,  # distance vector for element0
        )
        self.n_contact_candidates = qd.field(gs.qd_int, shape=())
        # Factor 8 gives headroom over one candidate per element per batch.
        self.max_contact_candidates = self.coupler.rigid_volume_elems.shape[0] * self.sim._B * 8
        self.contact_candidates = self.contact_candidate_type.field(shape=(self.max_contact_candidates,))
        self.contact_pair_type = qd.types.struct(
            batch_idx=gs.qd_int,  # batch index
            normal=gs.qd_vec3,  # contact plane normal
            tangent0=gs.qd_vec3,  # contact plane tangent0
            tangent1=gs.qd_vec3,  # contact plane tangent1
            link_idx0=gs.qd_int,  # index of the link
            link_idx1=gs.qd_int,  # index of the other link
            contact_pos=gs.qd_vec3,  # contact position
            sap_info=self.sap_contact_info_type,  # contact info
        )
        self.max_contact_pairs = self.coupler.rigid_volume_elems.shape[0] * self.sim._B
        self.contact_pairs = self.contact_pair_type.field(shape=(self.max_contact_pairs,))
        # Per-pair transposed Jacobian columns and their M^-1-mapped counterparts, one per DOF.
        self.Jt = qd.field(gs.qd_vec3, shape=(self.max_contact_pairs, self.rigid_solver.n_dofs))
        self.M_inv_Jt = qd.field(gs.qd_vec3, shape=(self.max_contact_pairs, self.rigid_solver.n_dofs))
        self.W = qd.field(gs.qd_mat3, shape=(self.max_contact_pairs,))
    @qd.func
    def compute_candidates(self, f: qd.i32):
        """
        Turn tet-pair BVH hits into candidates whose equal-pressure plane actually
        cuts both tets. Returns True on candidate-buffer overflow.
        """
        overflow = False
        candidates = qd.static(self.contact_candidates)
        self.n_contact_candidates[None] = 0
        result_count = qd.min(
            self.coupler.rigid_tet_bvh.query_result_count[None],
            self.coupler.rigid_tet_bvh.max_query_results,
        )
        for i_r in range(result_count):
            i_b, i_a, i_q = self.coupler.rigid_tet_bvh.query_result[i_r]
            # One reference vertex per tet suffices: the pressure field is affine
            # within an element, so p + g.(x - x_ref) is exact for any of its vertices
            # (the choice of index 0 vs 1 below is arbitrary).
            i_v0 = self.coupler.rigid_volume_elems[i_a][0]
            i_v1 = self.coupler.rigid_volume_elems[i_q][1]
            x0 = self.coupler.rigid_volume_verts[i_b, i_v0]
            x1 = self.coupler.rigid_volume_verts[i_b, i_v1]
            p0 = self.coupler.rigid_pressure_field[i_v0]
            p1 = self.coupler.rigid_pressure_field[i_v1]
            g0 = self.coupler.rigid_pressure_gradient[i_b, i_a]
            g1 = self.coupler.rigid_pressure_gradient[i_b, i_q]
            g0_norm = g0.norm()
            g1_norm = g1.norm()
            if g0_norm < gs.EPS or g1_norm < gs.EPS:
                continue
            # Calculate the isosurface, i.e. equal pressure plane defined by x and normal
            # Solve for p0 + g0.dot(x - x0) = p1 + g1.dot(x - x1)
            normal = g0 - g1
            magnitude = normal.norm()
            if magnitude < gs.EPS:
                continue
            normal /= magnitude
            b = p1 - p0 - g1.dot(x1) + g0.dot(x0)
            # Point on the plane closest to the origin: (b / |g0 - g1|) * n.
            x = b / magnitude * normal
            # Check that the normal is pointing along g0 and against g1, some allowance as used in Drake
            if normal.dot(g0) < self.eps or normal.dot(g1) > -self.eps:
                continue
            # Bit i set iff vertex i of the tet lies on the positive side of the plane.
            intersection_code0 = qd.int32(0)
            distance0 = qd.Vector([0.0, 0.0, 0.0, 0.0])
            intersection_code1 = qd.int32(0)
            distance1 = qd.Vector([0.0, 0.0, 0.0, 0.0])
            for i in qd.static(range(4)):
                i_v = self.coupler.rigid_volume_elems[i_a][i]
                pos_v = self.coupler.rigid_volume_verts[i_b, i_v]
                distance0[i] = (pos_v - x).dot(normal)  # signed distance
                if distance0[i] > 0:
                    intersection_code0 |= 1 << i
            for i in qd.static(range(4)):
                i_v = self.coupler.rigid_volume_elems[i_q][i]
                pos_v = self.coupler.rigid_volume_verts[i_b, i_v]
                distance1[i] = (pos_v - x).dot(normal)
                if distance1[i] > 0:
                    intersection_code1 |= 1 << i
            # Fast check for whether both tets intersect with the plane
            if (
                intersection_code0 == 0
                or intersection_code1 == 0
                or intersection_code0 == 15
                or intersection_code1 == 15
            ):
                continue
            i_c = qd.atomic_add(self.n_contact_candidates[None], 1)
            if i_c < self.max_contact_candidates:
                candidates[i_c].batch_idx = i_b
                candidates[i_c].normal = normal
                candidates[i_c].x = x
                candidates[i_c].geom_idx0 = i_a
                candidates[i_c].intersection_code0 = intersection_code0
                candidates[i_c].distance0 = distance0
                candidates[i_c].geom_idx1 = i_q
            else:
                overflow = True
        return overflow
    @qd.func
    def compute_pairs(self, i_step: qd.i32, geoms_info: array_class.GeomsInfo):
        """
        Build contact pairs from candidates: cut tet0 by the equal-pressure plane
        (marching tets), clip the resulting polygon by tet1's four faces, then derive
        SAP data from the pressure at the polygon centroid. Returns True on overflow.
        """
        overflow = False
        candidates = qd.static(self.contact_candidates)
        pairs = qd.static(self.contact_pairs)
        sap_info = qd.static(pairs.sap_info)
        normal_signs = qd.Vector([1.0, -1.0, 1.0, -1.0])  # make normal point outward
        self.n_contact_pairs[None] = 0
        result_count = qd.min(self.n_contact_candidates[None], self.max_contact_candidates)
        for i_c in range(result_count):
            i_b = candidates[i_c].batch_idx
            i_e0 = candidates[i_c].geom_idx0
            i_e1 = candidates[i_c].geom_idx1
            intersection_code0 = candidates[i_c].intersection_code0
            distance0 = candidates[i_c].distance0
            # Up to 4 tet0 edges are crossed by the equal-pressure plane.
            intersected_edges0 = self.coupler.MarchingTetsEdgeTable[intersection_code0]
            tet_vertices0 = qd.Matrix.zero(gs.qd_float, 3, 4)  # 4 vertices of tet 0
            tet_pressures0 = qd.Vector.zero(gs.qd_float, 4)  # pressures at the vertices of tet 0
            tet_vertices1 = qd.Matrix.zero(gs.qd_float, 3, 4)  # 4 vertices of tet 1
            for i in qd.static(range(4)):
                i_v = self.coupler.rigid_volume_elems[i_e0][i]
                tet_vertices0[:, i] = self.coupler.rigid_volume_verts[i_b, i_v]
                tet_pressures0[i] = self.coupler.rigid_pressure_field[i_v]
            for i in qd.static(range(4)):
                i_v = self.coupler.rigid_volume_elems[i_e1][i]
                tet_vertices1[:, i] = self.coupler.rigid_volume_verts[i_b, i_v]
            polygon_vertices = qd.Matrix.zero(gs.qd_float, 3, 8)  # maximum 8 vertices
            polygon_n_vertices = gs.qd_int(0)
            clipped_vertices = qd.Matrix.zero(gs.qd_float, 3, 8)  # maximum 8 vertices
            clipped_n_vertices = gs.qd_int(0)
            # Seed polygon: intersection of tet0 with the equal-pressure plane.
            for i in range(4):
                if intersected_edges0[i] >= 0:
                    edge = self.coupler.TetEdges[intersected_edges0[i]]
                    pos_v0 = tet_vertices0[:, edge[0]]
                    pos_v1 = tet_vertices0[:, edge[1]]
                    d_v0 = distance0[edge[0]]
                    d_v1 = distance0[edge[1]]
                    t = d_v0 / (d_v0 - d_v1)
                    polygon_vertices[:, polygon_n_vertices] = pos_v0 + t * (pos_v1 - pos_v0)
                    polygon_n_vertices += 1
            # Intersects the polygon with the four halfspaces of the four triangles
            # of the tetrahedral element1.
            for face in range(4):
                clipped_n_vertices = 0
                # Outward-oriented plane of tet1's face `face` (sign fixes winding).
                x = tet_vertices1[:, (face + 1) % 4]
                normal = (tet_vertices1[:, (face + 2) % 4] - x).cross(
                    tet_vertices1[:, (face + 3) % 4] - x
                ) * normal_signs[face]
                normal /= normal.norm()
                distances = qd.Vector.zero(gs.qd_float, 8)
                for i in range(polygon_n_vertices):
                    distances[i] = (polygon_vertices[:, i] - x).dot(normal)
                # Sutherland-Hodgman step: keep inside vertices, emit edge crossings.
                for i in range(polygon_n_vertices):
                    j = (i + 1) % polygon_n_vertices
                    if distances[i] <= 0.0:
                        clipped_vertices[:, clipped_n_vertices] = polygon_vertices[:, i]
                        clipped_n_vertices += 1
                        # i inside, j outside: emit the crossing toward j.
                        if distances[j] > 0.0:
                            wa = distances[j] / (distances[j] - distances[i])
                            wb = 1.0 - wa
                            clipped_vertices[:, clipped_n_vertices] = (
                                wa * polygon_vertices[:, i] + wb * polygon_vertices[:, j]
                            )
                            clipped_n_vertices += 1
                    # i outside, j inside: emit the crossing (same weights work both ways).
                    elif distances[j] <= 0.0:
                        wa = distances[j] / (distances[j] - distances[i])
                        wb = 1.0 - wa
                        clipped_vertices[:, clipped_n_vertices] = (
                            wa * polygon_vertices[:, i] + wb * polygon_vertices[:, j]
                        )
                        clipped_n_vertices += 1
                polygon_n_vertices = clipped_n_vertices
                polygon_vertices = clipped_vertices
                if polygon_n_vertices < 3:
                    # If the polygon has less than 3 vertices, it is not a valid contact
                    break
            if polygon_n_vertices < 3:
                continue
            # compute centroid and area of the polygon (fan triangulation at vertex 0);
            # the total_area < eps check below guards the division.
            total_area = 0.0
            total_area_weighted_centroid = qd.Vector.zero(gs.qd_float, 3)
            for i in range(2, polygon_n_vertices):
                e1 = polygon_vertices[:, i - 1] - polygon_vertices[:, 0]
                e2 = polygon_vertices[:, i] - polygon_vertices[:, 0]
                area = 0.5 * e1.cross(e2).norm()
                total_area += area
                total_area_weighted_centroid += (
                    area * (polygon_vertices[:, 0] + polygon_vertices[:, i - 1] + polygon_vertices[:, i]) / 3.0
                )
            if total_area < self.eps:
                continue
            centroid = total_area_weighted_centroid / total_area
            # Contact frame: normal from the candidate, tangent0 toward polygon vertex 0.
            tangent0 = polygon_vertices[:, 0] - centroid
            tangent0 /= tangent0.norm()
            tangent1 = candidates[i_c].normal.cross(tangent0)
            g0 = self.coupler.rigid_pressure_gradient[i_b, i_e0].dot(candidates[i_c].normal)
            g1 = -self.coupler.rigid_pressure_gradient[i_b, i_e1].dot(candidates[i_c].normal)
            g = 1.0 / (1.0 / g0 + 1.0 / g1)  # harmonic average, can handle infinity
            rigid_k = total_area * g
            barycentric0 = tet_barycentric(centroid, tet_vertices0)
            # Pressure at the centroid interpolated from tet0's vertex pressures.
            pressure = (
                barycentric0[0] * tet_pressures0[0]
                + barycentric0[1] * tet_pressures0[1]
                + barycentric0[2] * tet_pressures0[2]
                + barycentric0[3] * tet_pressures0[3]
            )
            rigid_phi0 = -pressure / g
            # Positive phi0 means the bodies are effectively separating at this pair.
            if rigid_phi0 > self.eps:
                continue
            i_p = qd.atomic_add(self.n_contact_pairs[None], 1)
            if i_p < self.max_contact_pairs:
                pairs[i_p].batch_idx = i_b
                pairs[i_p].normal = candidates[i_c].normal
                pairs[i_p].tangent0 = tangent0
                pairs[i_p].tangent1 = tangent1
                pairs[i_p].contact_pos = centroid
                i_g0 = self.coupler.rigid_volume_elems_geom_idx[i_e0]
                i_g1 = self.coupler.rigid_volume_elems_geom_idx[i_e1]
                i_l0 = geoms_info.link_idx[i_g0]
                i_l1 = geoms_info.link_idx[i_g1]
                pairs[i_p].link_idx0 = i_l0
                pairs[i_p].link_idx1 = i_l1
                sap_info[i_p].k = rigid_k
                sap_info[i_p].phi0 = rigid_phi0
                # NOTE(review): uses geoms_info.friction while the coupled handlers use
                # coup_friction — confirm this distinction is intentional for rigid-rigid.
                sap_info[i_p].mu = qd.sqrt(geoms_info.friction[i_g0] * geoms_info.friction[i_g1])
            else:
                overflow = True
        return overflow
    @qd.func
    def detection(
        self,
        f: qd.i32,
        links_info: array_class.LinksInfo,
        verts_info: array_class.VertsInfo,
        faces_info: array_class.FacesInfo,
        free_verts_state: array_class.VertsState,
        fixed_verts_state: array_class.VertsState,
        geoms_info: array_class.GeomsInfo,
    ):
        """Full detection pipeline: tet-tet BVH query -> candidates -> pairs. True on any overflow."""
        overflow = False
        overflow |= self.coupler.rigid_tet_bvh.query(self.coupler.rigid_tet_aabb.aabbs)
        overflow |= self.compute_candidates(f)
        overflow |= self.compute_pairs(f, geoms_info)
        return overflow
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/couplers/sap_coupler.py",
"license": "Apache License 2.0",
"lines": 3610,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:tests/test_render.py | import enum
import itertools
import os
import re
import sys
import time
import numpy as np
import pytest
import torch
import genesis as gs
import genesis.utils.geom as gu
from genesis.options.sensors import RasterizerCameraOptions
from genesis.utils import set_random_seed
from genesis.utils.image_exporter import FrameImageExporter, as_grayscale_image
from genesis.utils.misc import tensor_to_array
from genesis.vis.keybindings import Key
from .conftest import IS_INTERACTIVE_VIEWER_AVAILABLE
from .utils import assert_allclose, assert_equal, get_hf_dataset, rgb_array_to_png_bytes
# Per-pixel standard-error threshold for image comparisons.
# NOTE(review): not referenced in this chunk of the file — confirm it is used elsewhere.
IMG_STD_ERR_THR = 1.0
class RENDERER_TYPE(enum.IntEnum):
    """Renderer backends exercised by the tests in this module.

    Used as the `renderer_type` parametrization value; the `renderer` and
    `backend` fixtures below map each member to a concrete `gs.renderers.*`
    instance and a compute backend.
    """

    RASTERIZER = 0  # classic rasterizer
    RAYTRACER = 1  # LuisaRender-based ray tracer (requires LuisaRenderPy)
    BATCHRENDER_RASTERIZER = 2  # Madrona batch renderer, rasterizer mode (requires gs_madrona)
    BATCHRENDER_RAYTRACER = 3  # Madrona batch renderer, ray-tracer mode (requires gs_madrona)
@pytest.fixture(scope="function")
def renderer(renderer_type):
    """Build the concrete renderer instance matching the parametrized type."""
    if renderer_type == RENDERER_TYPE.RAYTRACER:
        # The ray tracer needs an environment map plus an explicit light setup.
        env_surface = gs.surfaces.Emission(
            emissive_texture=gs.textures.ImageTexture(
                image_path="textures/indoor_bright.png",
            ),
        )
        lights = [
            {"pos": (0.0, 0.0, 10.0), "radius": 3.0, "color": (15.0, 15.0, 15.0)},
        ]
        return gs.renderers.RayTracer(
            env_surface=env_surface,
            env_radius=15.0,
            env_euler=(0, 0, 180),
            lights=lights,
        )
    if renderer_type == RENDERER_TYPE.RASTERIZER:
        return gs.renderers.Rasterizer()
    # Both remaining members are Madrona batch-renderer variants.
    use_rasterizer = renderer_type == RENDERER_TYPE.BATCHRENDER_RASTERIZER
    return gs.renderers.BatchRenderer(use_rasterizer=use_rasterizer)
@pytest.fixture(scope="function")
def backend(pytestconfig, renderer_type):
    """Select the Genesis compute backend required by the renderer type."""
    # Madrona batch renderers require CUDA; the ray tracer needs some GPU backend.
    is_batchrender = renderer_type in (RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER)
    if is_batchrender:
        return gs.cuda
    if renderer_type == RENDERER_TYPE.RAYTRACER:
        return gs.gpu
    # Otherwise honor the '--backend' CLI option, defaulting to CPU.
    requested = pytestconfig.getoption("--backend") or gs.cpu
    if isinstance(requested, str):
        requested = getattr(gs.constants.backend, requested)
    return requested
@pytest.fixture(scope="function", autouse=True)
def skip_if_not_installed(renderer_type):
    """Skip the test when the optional module backing the renderer is missing."""
    needs_madrona = renderer_type in (RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER)
    if needs_madrona:
        pytest.importorskip("gs_madrona", reason="Python module 'gs-madrona' not installed.")
    elif renderer_type == RENDERER_TYPE.RAYTRACER:
        # Cannot rely on 'pytest.importorskip' because LuisaRenderPy is not cleanly installed
        try:
            import LuisaRenderPy
        except ImportError:
            pytest.skip("Python module 'LuisaRenderPy' not installed.")
@pytest.mark.required
@pytest.mark.parametrize(
    "renderer_type",
    [
        RENDERER_TYPE.RASTERIZER,
        RENDERER_TYPE.RAYTRACER,
        RENDERER_TYPE.BATCHRENDER_RASTERIZER,
        RENDERER_TYPE.BATCHRENDER_RAYTRACER,
    ],
)
def test_render_api(show_viewer, renderer_type, renderer):
    """Render a single-sphere scene under every combination of output flags
    (rgb/depth/segmentation/normal) and check that each output is identical
    regardless of which other outputs were requested alongside it."""
    IS_BATCHRENDER = renderer_type in (RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER)
    scene = gs.Scene(
        renderer=renderer,
        show_viewer=show_viewer,
        show_FPS=False,
    )
    # The batch renderer needs explicit lights (one directional, one point).
    if IS_BATCHRENDER:
        scene.add_light(
            pos=(0.0, 0.0, 1.5),
            dir=(1.0, 1.0, -2.0),
            directional=True,
            castshadow=True,
            cutoff=45.0,
            intensity=0.5,
        )
        scene.add_light(
            pos=(4.0, -4.0, 4.0),
            dir=(-1.0, 1.0, -1.0),
            directional=False,
            castshadow=True,
            cutoff=45.0,
            intensity=0.5,
        )
    scene.add_entity(
        morph=gs.morphs.Sphere(
            pos=(0.0, 0.0, 0.0),
            radius=1.0,
            fixed=True,
        ),
    )
    camera = scene.add_camera(
        pos=(0.0, 0.0, 10.0),
        lookat=(0.0, 0.0, 0.0),
        GUI=show_viewer,
    )
    scene.build()
    # Render all 16 flag combinations, collecting each produced output kind.
    rgb_arrs, depth_arrs, seg_arrs, normal_arrs = [], [], [], []
    for rgb, depth, seg, normal in itertools.product((True, False), repeat=4):
        rgb_arr, depth_arr, seg_arr, normal_arr = camera.render(rgb=rgb, depth=depth, segmentation=seg, normal=normal)
        if rgb:
            rgb_arrs.append(tensor_to_array(rgb_arr).astype(np.float32))
        if depth:
            # The Madrona ray tracer can produce non-finite depth for background pixels.
            if renderer_type == RENDERER_TYPE.BATCHRENDER_RAYTRACER:
                depth_arr[~torch.isfinite(depth_arr)] = 0
            depth_arrs.append(tensor_to_array(depth_arr).astype(np.float32))
        if seg:
            seg_arrs.append(tensor_to_array(seg_arr).astype(np.float32))
        if normal:
            normal_arrs.append(tensor_to_array(normal_arr).astype(np.float32))
    # All captures of a given output kind must be pairwise identical.
    try:
        assert_allclose(np.diff(rgb_arrs, axis=0), 0.0, tol=gs.EPS)
        assert_allclose(np.diff(seg_arrs, axis=0), 0.0, tol=gs.EPS)
        assert_allclose(np.diff(normal_arrs, axis=0), 0.0, tol=gs.EPS)
        # Depth is not matching at machine-precision because of MSAA being disabled for depth-only
        msaa_mask = [0, 1, 2, 4, 5, 6] if renderer_type == RENDERER_TYPE.RASTERIZER else slice(None)
        assert_allclose(np.diff(depth_arrs, axis=0)[msaa_mask], 0.0, tol=gs.EPS)
    except AssertionError:
        if sys.platform == "darwin" and scene.visualizer._rasterizer._renderer._is_software:
            pytest.xfail("Flaky on MacOS with Apple Software Renderer.")
        raise
@pytest.mark.required
@pytest.mark.parametrize(
    "renderer_type",
    [RENDERER_TYPE.RASTERIZER, RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER],
)
def test_deterministic(tmp_path, renderer_type, renderer, show_viewer, tol):
    """Check that rendering is deterministic: for a fixed robot pose, two
    consecutive simulation steps must produce (near-)identical captures for
    every environment, across a scene containing many different surface types."""
    IS_BATCHRENDER = renderer_type in (RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER)
    scene = gs.Scene(
        vis_options=gs.options.VisOptions(
            # rendered_envs_idx=(0, 1, 2),
            env_separate_rigid=False,
        ),
        renderer=renderer,
        show_viewer=show_viewer,
        show_FPS=False,
    )
    # The batch renderer needs explicit lights (one directional, one point).
    if IS_BATCHRENDER:
        scene.add_light(
            pos=(0.0, 0.0, 1.5),
            dir=(1.0, 1.0, -2.0),
            directional=True,
            castshadow=True,
            cutoff=45.0,
            intensity=0.5,
        )
        scene.add_light(
            pos=(4.0, -4.0, 4.0),
            dir=(-1.0, 1.0, -1.0),
            directional=False,
            castshadow=True,
            cutoff=45.0,
            intensity=0.5,
        )
    scene.add_entity(
        morph=gs.morphs.Plane(),
        surface=gs.surfaces.Aluminium(
            ior=10.0,
        ),
    )
    # A grid of spheres, each exercising a different surface/material model.
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/sphere.obj",
            scale=0.1,
            pos=(-0.2, -0.8, 0.2),
            fixed=True,
        ),
        surface=gs.surfaces.Rough(
            diffuse_texture=gs.textures.ColorTexture(
                color=(1.0, 0.5, 0.5),
            ),
        ),
    )
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/sphere.obj",
            scale=0.1,
            pos=(-0.2, -0.5, 0.2),
            fixed=True,
        ),
        surface=gs.surfaces.Rough(
            color=(1.0, 1.0, 1.0),
        ),
    )
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/sphere.obj",
            scale=0.1,
            pos=(-0.2, -0.2, 0.2),
            fixed=True,
        ),
        surface=gs.surfaces.Smooth(
            color=(0.6, 0.8, 1.0),
        ),
    )
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/sphere.obj",
            scale=0.1,
            pos=(-0.2, 0.2, 0.2),
            fixed=True,
        ),
        surface=gs.surfaces.Iron(
            color=(1.0, 1.0, 1.0),
        ),
    )
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/sphere.obj",
            scale=0.1,
            pos=(-0.2, 0.5, 0.2),
            fixed=True,
        ),
        surface=gs.surfaces.Gold(
            color=(1.0, 1.0, 1.0),
        ),
    )
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/sphere.obj",
            scale=0.1,
            pos=(-0.2, 0.8, 0.2),
            fixed=True,
        ),
        surface=gs.surfaces.Glass(
            color=(1.0, 1.0, 1.0),
        ),
    )
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/sphere.obj",
            scale=0.1,
            pos=(0.2, -0.8, 0.2),
            fixed=True,
        ),
        surface=gs.surfaces.Smooth(
            color=(1.0, 1.0, 1.0, 0.5),
        ),
    )
    # Textured meshes: one with its own material, one with an override texture.
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/wooden_sphere_OBJ/wooden_sphere.obj",
            scale=0.025,
            pos=(0.2, -0.5, 0.2),
            fixed=True,
        ),
    )
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/wooden_sphere_OBJ/wooden_sphere.obj",
            scale=0.025,
            pos=(0.2, -0.2, 0.2),
            fixed=True,
        ),
        surface=gs.surfaces.Rough(
            diffuse_texture=gs.textures.ImageTexture(
                image_path="textures/checker.png",
            )
        ),
    )
    robot = scene.add_entity(
        gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
    )
    cam = scene.add_camera(
        pos=(0.9, 0.0, 0.4),
        lookat=(0.0, 0.0, 0.4),
        res=(500, 500),
        fov=60,
        spp=512,
        GUI=False,
    )
    scene.build(n_envs=3, env_spacing=(2.0, 2.0))
    cam.start_recording()
    for _ in range(7):
        # Draw a random robot pose within the joint limits; it is re-applied
        # before each of the two steps below so both captures see the same state.
        dofs_lower_bound, dofs_upper_bound = robot.get_dofs_limit()
        qpos = dofs_lower_bound + (dofs_upper_bound - dofs_lower_bound) * torch.as_tensor(
            np.random.rand(robot.n_qs), dtype=gs.tc_float, device=gs.device
        )
        steps_rgb_arrays = []
        for _ in range(2):
            scene.step()
            robots_rgb_arrays = []
            robot.set_qpos(qpos)
            if show_viewer:
                scene.visualizer.update()
            # Capture every environment by moving the single camera per-env.
            for i in range(3):
                pos_i = scene.envs_offset[i] + np.array([0.9, 0.0, 0.4])
                lookat_i = scene.envs_offset[i] + np.array([0.0, 0.0, 0.4])
                cam.set_pose(pos=pos_i, lookat=lookat_i)
                rgb_array, *_ = cam.render(
                    rgb=True, depth=False, segmentation=False, colorize_seg=False, normal=False
                )
                # Sanity check: the frame must show more than a flat background.
                rgb_std = tensor_to_array(rgb_array).reshape((-1, 3)).astype(np.float32).std(axis=0).max()
                try:
                    assert rgb_std > 10.0
                except AssertionError:
                    if rgb_std < gs.EPS:
                        if sys.platform == "darwin" and scene.visualizer._rasterizer._renderer._is_software:
                            pytest.xfail(
                                "Flaky on MacOS with Apple Software Renderer. Nothing but the background was rendered."
                            )
                    raise
                robots_rgb_arrays.append(rgb_array)
            steps_rgb_arrays.append(robots_rgb_arrays)
        # Determinism: captures from the two consecutive steps must match per env.
        try:
            for i in range(3):
                assert_allclose(steps_rgb_arrays[0][i], steps_rgb_arrays[1][i], tol=tol)
        except AssertionError:
            if sys.platform == "darwin" and scene.visualizer._rasterizer._renderer._is_software:
                pytest.xfail("Flaky on MacOS with Apple Software Renderer. Successive captures do not match.")
            raise
    cam.stop_recording(save_to_filename=(tmp_path / "video.mp4"))
@pytest.mark.required
@pytest.mark.parametrize(
    "renderer_type",
    [RENDERER_TYPE.RASTERIZER, RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER],
)
@pytest.mark.parametrize("n_envs", [0, 4])
def test_render_api_advanced(tmp_path, n_envs, show_viewer, png_snapshot, renderer_type, renderer):
    """Exercise the advanced camera API (entity following, attachment, batched
    rendering, frame export) over a few simulation steps and validate outputs
    both structurally (shapes, consistency) and pixel-wise (PNG snapshots).

    Fix vs. previous revision: the per-environment "cameras see different parts
    of the scene" check reduced over `rgb_diff` (the whole batch) instead of
    `rgb_diff_i` (the per-env slice), so the inner loop re-asserted the same
    batch-wide statistic n_envs times; it now mirrors the per-env pattern used
    for the single-frame variance check above it.
    """
    CAM_RES = (256, 256)
    DIFF_TOL = 0.01
    NUM_STEPS = 5
    IS_BATCHRENDER = renderer_type in (RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER)
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=0.04,
        ),
        rigid_options=gs.options.RigidOptions(
            enable_collision=False,
        ),
        vis_options=gs.options.VisOptions(
            # Disable shadows systematically for Rasterizer because they are forcibly disabled on CPU backend anyway
            shadow=(renderer_type != RENDERER_TYPE.RASTERIZER),
        ),
        renderer=renderer,
        show_viewer=False,
        show_FPS=False,
    )
    scene.add_entity(
        morph=gs.morphs.Plane(),
        surface=gs.surfaces.Aluminium(
            ior=10.0,
        ),
    )
    robot = scene.add_entity(
        gs.morphs.URDF(
            file="urdf/go2/urdf/go2.urdf",
            merge_fixed_links=False,
        ),
    )
    cam_debug = scene.add_camera(
        res=(640, 480),
        pos=(1.5, 0.5, 1.5),
        lookat=(0.0, 0.0, 0.5),
        fov=45,
        debug=True,
        GUI=show_viewer,
    )
    # One camera triple per env (single triple for batch renderers, which
    # render all envs at once): fixed, entity-following, and link-attached.
    cameras = []
    for i in range(max(1 if IS_BATCHRENDER else n_envs, 1)):
        env_idx = None if i < 1 else i
        cam_0 = scene.add_camera(
            res=CAM_RES,
            pos=(1.5, 0.5, 1.5),
            lookat=(0.0, 0.0, 0.5),
            fov=45,
            near=0.05,
            far=100.0,
            env_idx=env_idx,
            GUI=show_viewer,
        )
        cam_1 = scene.add_camera(
            res=CAM_RES,
            pos=(0.8, -0.5, 0.8),
            lookat=(0.0, 0.0, 0.5),
            fov=45,
            near=0.05,
            far=100.0,
            env_idx=env_idx,
            GUI=show_viewer,
        )
        cam_2 = scene.add_camera(
            res=CAM_RES,
            fov=45,
            env_idx=env_idx,
            near=0.05,
            far=100.0,
            GUI=show_viewer,
        )
        cameras += (cam_0, cam_1, cam_2)
    if IS_BATCHRENDER:
        scene.add_light(
            pos=(0.0, 0.0, 1.5),
            dir=(1.0, 1.0, -2.0),
            directional=True,
            castshadow=True,
            cutoff=45.0,
            intensity=0.5,
        )
        scene.add_light(
            pos=(4.0, -4.0, 4.0),
            dir=(-1.0, 1.0, -1.0),
            directional=False,
            castshadow=True,
            cutoff=45.0,
            intensity=0.5,
        )
    scene.build(n_envs=n_envs, env_spacing=(4.0, 4.0))
    # Attach cameras
    for i in range(0, len(cameras), 3):
        cameras[i + 1].follow_entity(robot)
        pose_rel = gu.trans_R_to_T(np.array([0.1, 0.0, 0.2]), np.eye(3))
        cameras[i + 2].attach(robot.get_link("Head_upper"), pose_rel)
    # Create image exporter
    exporter = FrameImageExporter(tmp_path)
    # Initialize the simulation with a randomized (but seeded) state per env
    set_random_seed(0)
    for i in range(max(n_envs, 1)):
        qpos = torch.zeros(robot.n_dofs, device=gs.device)
        qpos[:2] = torch.as_tensor(np.random.rand(2), dtype=gs.tc_float, device=gs.device) - 0.5
        qpos[2] = 1.0
        qpos[3:6] = 0.5 * (torch.as_tensor(np.random.rand(3), dtype=gs.tc_float, device=gs.device) - 0.5)
        qpos[6:] = torch.as_tensor(np.random.rand(robot.n_dofs - 6), dtype=gs.tc_float, device=gs.device) - 0.5
        robot.set_dofs_position(qpos, envs_idx=([i] if n_envs else None))
        qvel = torch.zeros(robot.n_dofs, device=gs.device)
        qvel[:6] = torch.as_tensor(np.random.rand(6), dtype=gs.tc_float, device=gs.device) - 0.5
        robot.set_dofs_velocity(qvel, envs_idx=([i] if n_envs else None))
    # Run a few simulation steps while monitoring the result
    cam_debug.start_recording()
    frames_prev = None
    for i in range(NUM_STEPS):
        # Move one step forward in time
        scene.step()
        # Render cameras
        if IS_BATCHRENDER:
            # Note that the individual camera is rendered alone first on purpose to make sure it works
            rgb_1, depth_1, seg_1, normal_1 = cam_1.render(
                rgb=True, depth=True, segmentation=True, colorize_seg=True, normal=True
            )
            rgb_all, depth_all, seg_all, normal_all = scene.render_all_cameras(
                rgb=True, depth=True, segmentation=True, colorize_seg=True, normal=True
            )
            assert all(isinstance(img_data, torch.Tensor) for img_data in (rgb_1, depth_1, seg_1, normal_1))
            assert all(isinstance(img_data, torch.Tensor) for img_data in (*rgb_all, *depth_all, *seg_all, *normal_all))
        else:
            # Emulate batch rendering which is not supported natively
            rgb_all, depth_all, seg_all, normal_all = zip(
                *(
                    camera.render(rgb=True, depth=True, segmentation=True, colorize_seg=True, normal=True)
                    for camera in scene._visualizer._cameras
                    if not camera.debug
                )
            )
            if n_envs > 0:
                # Regroup per-camera captures into (3 cameras, n_envs, ...) batches
                rgb_all, depth_all, seg_all, normal_all = (
                    tuple(np.swapaxes(np.stack(img_data, axis=0).reshape((n_envs, 3, *img_data[0].shape)), 0, 1))
                    for img_data in (rgb_all, depth_all, seg_all, normal_all)
                )
            rgb_1, depth_1, seg_1, normal_1 = rgb_all[1], depth_all[1], seg_all[1], normal_all[1]
        # Check that the dimensions are valid
        batch_shape = (*((n_envs,) if n_envs else ()), *CAM_RES)
        assert len(rgb_all) == len(depth_all) == 3
        assert all(e.shape == (*batch_shape, 3) for e in (*rgb_all, *seg_all, *normal_all, rgb_1, seg_1, normal_1))
        assert all(e.shape == batch_shape for e in (*depth_all, depth_1))
        # Check that the camera whose output was rendered individually is matching batched output
        for img_data_1, img_data_2 in (
            (rgb_all[1], rgb_1),
            (depth_all[1], depth_1),
            (seg_all[1], seg_1),
            (normal_all[1], normal_1),
        ):
            assert_allclose(img_data_1, img_data_2, tol=gs.EPS)
        # Check that there is something to see here
        depth_normalized_all = tuple(as_grayscale_image(tensor_to_array(img_data)) for img_data in depth_all)
        frame_data = tuple(
            tensor_to_array(img_data).astype(np.float32)
            for img_data in (*rgb_all, *depth_normalized_all, *seg_all, *normal_all)
        )
        for img_data in frame_data:
            for img_data_i in img_data if n_envs else (img_data,):
                assert np.max(np.std(img_data_i.reshape((-1, img_data_i.shape[-1])), axis=0)) > 10.0
        # Export a few frames for later pixel-matching validation
        if i < 2:
            exporter.export_frame_all_cameras(i, rgb=rgb_all, depth=depth_all, segmentation=seg_all, normal=normal_all)
            exporter.export_frame_single_camera(
                i, cam_1.idx, rgb=rgb_1, depth=depth_1, segmentation=seg_1, normal=normal_1
            )
        # Check that cameras are recording different parts of the scene
        for rgb_diff in np.diff(frame_data[:3], axis=0):
            for rgb_diff_i in rgb_diff if n_envs else (rgb_diff,):
                # FIX: reduce over the per-env slice, not the whole batch
                assert np.max(np.std(rgb_diff_i.reshape((-1, rgb_diff_i.shape[-1])), axis=0)) > 10.0
        # Check that images are changing over time.
        # We expect sufficient difference between two consecutive frames.
        if frames_prev is not None:
            try:
                for img_data_prev, img_data in zip(frames_prev, frame_data):
                    img_diff = np.abs(img_data_prev - img_data)
                    assert np.sum(img_diff > np.finfo(np.float32).eps) > DIFF_TOL * img_data.size
            except AssertionError:
                if sys.platform == "darwin" and scene.visualizer._rasterizer._renderer._is_software:
                    pytest.xfail("Flaky on MacOS with Apple Software Renderer. Successive captures are too close.")
                raise
        frames_prev = frame_data
        # Add current frame to monitor video
        rgb_debug, *_ = cam_debug.render(rgb=True, depth=False, segmentation=False, colorize_seg=False, normal=False)
        assert isinstance(rgb_debug, np.ndarray)
        assert rgb_debug.shape == (480, 640, 3)
    assert len(cam_debug._recorded_imgs) == NUM_STEPS
    cam_debug.stop_recording(save_to_filename=(tmp_path / "video.mp4"))
    # Verify that the output is correct pixel-wise over multiple simulation steps
    try:
        for image_file in sorted(tmp_path.rglob("*.png")):
            with open(image_file, "rb") as f:
                assert f.read() == png_snapshot
    except AssertionError:
        if sys.platform == "darwin" and scene.visualizer._rasterizer._renderer._is_software:
            pytest.xfail("Flaky on MacOS with Apple Software Renderer. Pixel-matching failure.")
        raise
def _test_madrona_scene(
    show_viewer,
    renderer,
    png_snapshot,
    use_batch_texture=False,
    use_fisheye_camera=False,
    use_directional_light=False,
    n_envs=2,
):
    """Shared driver for the Madrona feature tests below.

    Builds a plane + Franka scene with optional per-env batch texture,
    fisheye camera model, and an extra light pair, renders one RGB frame per
    environment, and compares each frame against the stored PNG snapshot.
    """
    CAM_RES = (128, 128)
    scene = gs.Scene(
        renderer=renderer,
        show_viewer=show_viewer,
        show_FPS=False,
    )
    # entities
    surface = (
        gs.surfaces.Default(diffuse_texture=gs.textures.BatchTexture.from_images(image_folder="textures"))
        if use_batch_texture
        else None
    )
    scene.add_entity(gs.morphs.Plane(), surface=surface)
    scene.add_entity(gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"))
    # cameras
    cam = scene.add_camera(
        res=CAM_RES,
        pos=(1.5, -0.5, 1.5),
        lookat=(0.0, 0.0, 0.5),
        fov=45,
        model="fisheye" if use_fisheye_camera else "pinhole",
        GUI=show_viewer,
    )
    # lights (only when requested: one directional, one point)
    if use_directional_light:
        scene.add_light(
            pos=(0.0, 0.0, 1.5),
            dir=(1.0, 1.0, -2.0),
            color=(1.0, 1.0, 0.0),
            directional=True,
            castshadow=True,
            cutoff=45.0,
            intensity=0.5,
        )
        scene.add_light(
            pos=(4.0, -4.0, 4.0),
            dir=(-1.0, 1.0, -1.0),
            directional=False,
            castshadow=True,
            cutoff=45.0,
            intensity=0.5,
        )
    scene.build(n_envs=n_envs)
    rgb_arrs, _, _, _ = cam.render(rgb=True, depth=False, segmentation=False, colorize_seg=False, normal=False)
    assert rgb_arrs is not None
    # Every environment's frame must match the snapshot pixel-for-pixel.
    for i in range(scene.n_envs):
        rgb_arr = rgb_arrs[i]
        assert rgb_arr.shape == (*CAM_RES, 3)
        assert rgb_array_to_png_bytes(rgb_arr) == png_snapshot
@pytest.mark.required
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER])
def test_madrona_lights(show_viewer, renderer, png_snapshot):
    """Madrona rendering with the optional light pair enabled."""
    _test_madrona_scene(
        show_viewer,
        renderer,
        png_snapshot,
        use_directional_light=True,
    )
@pytest.mark.required
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER])
def test_madrona_batch_texture(show_viewer, renderer, png_snapshot):
    """Madrona rendering with a per-environment batch texture on the ground plane."""
    _test_madrona_scene(
        show_viewer,
        renderer,
        png_snapshot,
        use_batch_texture=True,
        n_envs=3,
    )
@pytest.mark.required
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER])
def test_madrona_fisheye_camera(show_viewer, renderer, png_snapshot):
    """Madrona rendering through a fisheye camera model."""
    _test_madrona_scene(
        show_viewer,
        renderer,
        png_snapshot,
        use_fisheye_camera=True,
    )
@pytest.mark.parametrize(
    "renderer_type",
    [RENDERER_TYPE.RASTERIZER, RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER],
)
@pytest.mark.parametrize("segmentation_level", ["entity", "link", "geom"])
@pytest.mark.parametrize("particle_mode", ["visual", "particle"])
def test_segmentation_map(segmentation_level, particle_mode, renderer_type, renderer, show_viewer):
    """Test segmentation rendering.

    Builds a scene mixing a URDF robot with ducks of various materials and
    visualization modes, then checks that the segmentation index dictionary
    and the rendered segmentation map contain exactly the expected IDs.
    """
    scene = gs.Scene(
        fem_options=gs.options.FEMOptions(
            use_implicit_solver=True,  # Implicit solver allows for larger timestep without failure on GPU backend
            n_pcg_iterations=40,  # Reduce number of iterations to speedup runtime
        ),
        rigid_options=gs.options.RigidOptions(
            enable_collision=False,  # Disable many physics features to speedup compilation
        ),
        coupler_options=gs.options.LegacyCouplerOptions(
            rigid_mpm=False,
            rigid_sph=False,
            rigid_pbd=False,
            rigid_fem=False,
            mpm_sph=False,
            mpm_pbd=False,
            fem_mpm=False,
            fem_sph=False,
        ),
        vis_options=gs.options.VisOptions(
            segmentation_level=segmentation_level,
        ),
        renderer=renderer,
        show_viewer=False,
        show_FPS=False,
    )
    robot = scene.add_entity(
        morph=gs.morphs.URDF(
            file="urdf/simple/two_link_arm.urdf",
            pos=(-1.0, -1.0, 0.5),
            euler=(0, 0, 90),
        ),
    )
    # We don't test "recon" for vis_mode because it is hard to install.
    materials = ((gs.materials.Rigid(), "visual"),)
    # Only the rasterizer supports rendering all the non-rigid solver types.
    if renderer_type == RENDERER_TYPE.RASTERIZER:
        materials = (
            *materials,
            (gs.materials.Tool(), "visual"),
            (gs.materials.FEM.Elastic(), "visual"),
            (gs.materials.MPM.Elastic(), particle_mode),
            (gs.materials.PBD.Cloth(), particle_mode),
            (gs.materials.SPH.Liquid(), "particle" if particle_mode == "visual" else particle_mode),
            (gs.materials.Kinematic(), "visual"),
        )
    # Lay the ducks out on a 3x3 grid, one per material/vis-mode combination.
    ducks = []
    for i, (material, vis_mode) in enumerate(materials):
        col_idx, row_idx = i // 3 - 1, i % 3 - 1
        ducks.append(
            scene.add_entity(
                material=material,
                morph=gs.morphs.Mesh(
                    file="meshes/duck.obj",
                    scale=0.1,
                    pos=(col_idx * 0.5, row_idx * 0.5, 0.5),
                ),
                surface=gs.surfaces.Default(
                    color=np.random.rand(3),
                    vis_mode=vis_mode,
                ),
            )
        )
    camera = scene.add_camera(
        # Using very low resolution to speed up rendering
        res=(128, 128),
        pos=(2.0, 0.0, 2.0),
        lookat=(0, 0, 0.5),
        fov=40,
        GUI=show_viewer,
    )
    scene.build()
    # Segmentation count: background(1) + URDF links/entity + duck materials.
    # Rigid and Kinematic ducks use add_rigid_node (tuple keys at link/geom level),
    # other ducks use add_static_node (int keys). The URDF has 2 visual links.
    # NOTE(review): despite the comment above mentioning Rigid ducks, only
    # Kinematic materials are counted here — confirm whether Rigid should be
    # included in this isinstance check.
    n_rigid_like = sum(isinstance(m, gs.materials.Kinematic) for m, _ in materials)
    seg_num = len(materials) + (2 if segmentation_level == "entity" else 3)
    idx_dict = scene.segmentation_idx_dict
    assert len(idx_dict) == seg_num
    # Count the composite (tuple) segmentation keys.
    comp_key = 0
    for seg_key in idx_dict.values():
        if isinstance(seg_key, tuple):
            comp_key += 1
    # At entity level no tuple keys; at link/geom level: 2 URDF links + rigid-like ducks
    assert comp_key == (0 if segmentation_level == "entity" else 2 + n_rigid_like)
    for i in range(2):
        scene.step()
    # The rendered segmentation map must use every index exactly once overall.
    _, _, seg, _ = camera.render(rgb=False, depth=False, segmentation=True, colorize_seg=False, normal=False)
    seg = tensor_to_array(seg)
    assert_equal(np.sort(np.unique(seg.flat)), np.arange(0, seg_num))
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER])
def test_camera_follow_entity(n_envs, renderer, show_viewer):
    """Check that cameras following entities keep them framed identically as
    the entities move, both per-camera and across successive renders."""
    CAM_RES = (100, 100)
    scene = gs.Scene(
        vis_options=gs.options.VisOptions(
            rendered_envs_idx=[max(n_envs - 1, 0)],
            segmentation_level="entity",
        ),
        renderer=renderer,
        show_viewer=False,
        show_FPS=False,
    )
    # One box + one camera per cardinal direction; follow/unfollow before build
    # to exercise the pre-build API path.
    for pos in ((1.0, 0.0, 0.0), (-1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, -1.0, 0.0)):
        obj = scene.add_entity(
            gs.morphs.Box(
                size=(0.1, 0.1, 0.1),
                pos=pos,
            ),
        )
        cam = scene.add_camera(
            res=CAM_RES,
            pos=(0.0, 0.0, 0.0),
            lookat=(1.0, 0, 0.0),
            env_idx=1 if n_envs else None,
            GUI=show_viewer,
        )
        cam.follow_entity(obj, smoothing=None)
        cam.unfollow_entity()
    scene.build(n_envs=n_envs)
    for cam, obj in zip(scene.visualizer.cameras, scene.entities):
        cam.follow_entity(obj, smoothing=None)
    # First render: each camera sees exactly its own entity, in the same spot.
    seg_mask = None
    for entity_idx, cam in enumerate(scene.visualizer.cameras, 1):
        _, _, seg, _ = cam.render(rgb=False, segmentation=True)
        assert (np.unique(seg) == (0, entity_idx)).all()
        if seg_mask is None:
            seg_mask = seg != 0
        else:
            assert ((seg != 0) == seg_mask).all()
    # Second render - same
    for i, obj in enumerate(scene.entities):
        obj.set_pos((10.0, 0.0, i), envs_idx=([1] if n_envs else None))
    force_render = True
    for entity_idx, cam in enumerate(scene.visualizer.cameras, 1):
        _, _, seg, _ = cam.render(rgb=False, segmentation=True, force_render=force_render)
        assert (np.unique(seg) == (0, entity_idx)).all()
        assert ((seg != 0) == seg_mask).all()
        force_render = False
    # Third render - All objects but all different
    for i, obj in enumerate(scene.entities):
        obj.set_pos((0.1 * ((i // 2) % 2 - 1), 0.1 * (i % 2), 0.1 * i), envs_idx=([1] if n_envs else None))
    force_render = True
    seg_masks = []
    for cam in scene.visualizer.cameras:
        _, _, seg, _ = cam.render(rgb=False, segmentation=True, force_render=force_render)
        assert (np.unique(seg) == np.arange(len(scene.entities) + 1)).all()
        seg_masks.append(seg != 0)
        force_render = False
    assert np.diff(seg_masks, axis=0).any(axis=(1, 2)).all()
    # Track a trajectory over time.
    # NOTE(review): `obj`, `cam` and `entity_idx` intentionally carry over from
    # the loops above, i.e. only the last entity/camera pair is exercised here.
    for i in range(3):
        pos = 2.0 * (np.random.rand(3) - 0.5)
        quat = gu.rotvec_to_quat(np.pi * (np.random.rand(3) - 0.5))
        obj.set_pos(pos + np.array([10.0, 0.0, 0.0]), envs_idx=([1] if n_envs else None))
        obj.set_quat(quat, envs_idx=([1] if n_envs else None))
        _, _, seg, _ = cam.render(segmentation=True, force_render=True)
        assert (np.unique(seg) == (0, entity_idx)).all()
        # The followed entity must stay within the central third of the frame.
        assert not seg[tuple([*range(0, res // 3), *range(2 * res // 3, res)] for res in CAM_RES)].any()
@pytest.mark.required
@pytest.mark.parametrize(
    "renderer_type",
    [RENDERER_TYPE.RASTERIZER, RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER],
)
def test_point_cloud(renderer_type, renderer, show_viewer):
    """Validate `render_pointcloud` geometrically: points back-projected from
    depth must lie on the known analytic surfaces (box faces, sphere shell),
    in both camera and world frames."""
    N_ENVS = 2
    CAM_RES = (256, 256)
    CAMERA_DIST = 8.0
    OBJ_OFFSET = 10.0
    BOX_HALFSIZE = 1.0
    SPHERE_RADIUS = 1.0
    IS_BATCHRENDER = renderer_type in (RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER)
    BATCH_SHAPE = (N_ENVS,) if N_ENVS > 0 and IS_BATCHRENDER else ()
    scene = gs.Scene(
        renderer=renderer,
        show_viewer=show_viewer,
        show_FPS=False,
    )
    if IS_BATCHRENDER:
        scene.add_light(
            pos=(0.0, 0.0, 1.5),
            dir=(1.0, 1.0, -2.0),
            directional=True,
            castshadow=True,
            cutoff=45.0,
            intensity=0.5,
        )
        scene.add_light(
            pos=(4.0, -4.0, 4.0),
            dir=(-1.0, 1.0, -1.0),
            directional=False,
            castshadow=True,
            cutoff=45.0,
            intensity=0.5,
        )
    # Sphere and box placed far apart so each camera sees only one object.
    scene.add_entity(
        morph=gs.morphs.Sphere(
            pos=(0.0, OBJ_OFFSET, 0.0),
            radius=SPHERE_RADIUS,
            fixed=True,
        ),
    )
    scene.add_entity(
        morph=gs.morphs.Box(
            pos=(0.0, -OBJ_OFFSET, 0.0),
            size=(2.0 * BOX_HALFSIZE, 2.0 * BOX_HALFSIZE, 2.0 * BOX_HALFSIZE),
            fixed=True,
        )
    )
    camera_sphere = scene.add_camera(
        res=CAM_RES,
        pos=(0.0, OBJ_OFFSET, CAMERA_DIST),
        lookat=(0.0, OBJ_OFFSET, 0.0),
        near=2.0,
        far=15.0,
        GUI=show_viewer,
    )
    camera_box_1 = scene.add_camera(
        res=CAM_RES,
        pos=(0.0, -OBJ_OFFSET, CAMERA_DIST),
        lookat=(0.0, -OBJ_OFFSET, 0.0),
        near=2.0,
        far=15.0,
        GUI=show_viewer,
    )
    # Oblique view of the box, to exercise a non-axis-aligned camera frame.
    camera_box_2 = scene.add_camera(
        res=CAM_RES,
        pos=np.array((CAMERA_DIST, CAMERA_DIST - OBJ_OFFSET, CAMERA_DIST)),
        lookat=(0.0, -OBJ_OFFSET, 0.0),
        near=2.0,
        far=15.0,
        GUI=show_viewer,
    )
    scene.build(n_envs=N_ENVS)
    if show_viewer:
        for camera in scene.visualizer.cameras:
            camera.render(rgb=True, depth=True)
    # Top-down box view: visible points lie on the top face at z = BOX_HALFSIZE.
    point_cloud, mask = camera_box_1.render_pointcloud(world_frame=False)
    assert point_cloud.shape == (*BATCH_SHAPE, *CAM_RES, 3)
    point_cloud = point_cloud[mask]
    assert_allclose(CAMERA_DIST - point_cloud[:, 2], BOX_HALFSIZE, atol=1e-4)
    assert np.all(-BOX_HALFSIZE <= point_cloud[:, :2].min(axis=0))
    assert np.all(point_cloud[:, :2].max(axis=0) <= BOX_HALFSIZE)
    # Oblique box view in camera frame: rotate back to world axes, then check
    # all points lie on the box surface (inf-norm equals the half size).
    point_cloud, mask = camera_box_2.render_pointcloud(world_frame=False)
    assert point_cloud.shape == (*BATCH_SHAPE, *CAM_RES, 3)
    point_cloud = point_cloud[mask]
    point_cloud = point_cloud @ gu.z_up_to_R(np.array((1.0, 1.0, 1.0)), np.array((0.0, 0.0, 1.0))).T
    point_cloud -= np.array((CAMERA_DIST, CAMERA_DIST, CAMERA_DIST))
    # FIXME: Tolerance must be increased when using Apple's Software Rendering, probably due to an OpenGL bug...
    tol = 2e-4 if sys.platform == "darwin" else 1e-4
    assert_allclose(np.linalg.norm(point_cloud, ord=float("inf"), axis=-1), BOX_HALFSIZE, atol=tol)
    # Same camera in world frame: shift to box center and re-check.
    point_cloud, mask = camera_box_2.render_pointcloud(world_frame=True)
    assert point_cloud.shape == (*BATCH_SHAPE, *CAM_RES, 3)
    point_cloud = point_cloud[mask]
    point_cloud += np.array((0.0, OBJ_OFFSET, 0.0))
    assert_allclose(np.linalg.norm(point_cloud, ord=float("inf"), axis=-1), BOX_HALFSIZE, atol=tol)
    # It is not possible to get higher accuracy because of tesselation
    point_cloud, mask = camera_sphere.render_pointcloud(world_frame=False)
    assert point_cloud.shape == (*BATCH_SHAPE, *CAM_RES, 3)
    point_cloud = point_cloud[mask]
    assert_allclose(np.linalg.norm((0.0, 0.0, CAMERA_DIST) - point_cloud, axis=-1), SPHERE_RADIUS, atol=1e-2)
@pytest.mark.required
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER])
def test_draw_debug(renderer, show_viewer):
    """Check that debug primitives (arrow, line, sphere, frame) appear in the
    offscreen render, can be repositioned, and disappear after clearing."""
    if "GS_DISABLE_OFFSCREEN_MARKERS" in os.environ:
        pytest.skip("Offscreen rendering of markers is forcibly disabled. Skipping...")
    scene = gs.Scene(
        vis_options=gs.options.VisOptions(
            rendered_envs_idx=[0, 2],
        ),
        renderer=renderer,
        show_viewer=show_viewer,
        show_FPS=False,
    )
    cam = scene.add_camera(
        pos=(3.5, 0.5, 2.5),
        lookat=(0.0, 0.0, 0.5),
        up=(0.0, 0.0, 1.0),
        res=(640, 640),
        env_idx=2,
        GUI=show_viewer,
    )
    scene.build(n_envs=3)
    # Empty scene: the frame must be perfectly uniform (zero pixel variance).
    rgb_array, *_ = cam.render(rgb=True, depth=False, segmentation=False, colorize_seg=False, normal=False)
    assert_allclose(np.std(rgb_array.reshape((-1, 3)), axis=0), 0.0, tol=gs.EPS)
    scene.draw_debug_arrow(
        pos=(0, 0.4, 0.1),
        vec=(0, 0.3, 0.8),
        color=(1, 0, 0),
    )
    scene.draw_debug_line(
        start=(0.7, -0.3, 0.7),
        end=(0.6, 0.2, 0.7),
        radius=0.01,
        color=(1, 0, 0, 1),
    )
    sphere_obj = scene.draw_debug_sphere(
        pos=(-0.3, 0.3, 0.0),
        radius=0.15,
        color=(0, 1, 0),
    )
    frame_obj = scene.draw_debug_frame(
        T=np.array(
            [
                [1.0, 0.0, 0.0, -0.3],
                [0.0, 0.0, 1.0, 0.0],
                [0.0, -1.0, 0.0, -0.2],
                [0.0, 0.0, 0.0, 1.0],
            ]
        ),
        axis_length=0.5,
        origin_size=0.03,
        axis_radius=0.02,
    )
    scene.visualizer.update()
    # With debug objects drawn, the frame is no longer uniform.
    rgb_array, *_ = cam.render(rgb=True, depth=False, segmentation=False, colorize_seg=False, normal=False)
    rgb_array_flat = rgb_array.reshape((-1, 3)).astype(np.int32)
    assert (np.std(rgb_array_flat, axis=0) > 10.0).any()
    rgb_array_prev = rgb_array_flat
    # Move the frame and sphere debug objects to random poses (per rendered env).
    poses = gu.trans_to_T(np.zeros((2, 2, 3)))
    for i in range(2):
        poses[:, i] = gu.trans_quat_to_T(2.0 * (np.random.rand(2, 3) - 0.5), np.random.rand(2, 4))
    scene.visualizer.context.update_debug_objects([frame_obj, sphere_obj], poses)
    scene.visualizer.update()
    # The image must change noticeably after repositioning.
    rgb_array, *_ = cam.render(rgb=True, depth=False, segmentation=False, colorize_seg=False, normal=False)
    rgb_array_flat = rgb_array.reshape((-1, 3)).astype(np.int32)
    assert (np.std(rgb_array_flat - rgb_array_prev, axis=0) > 10.0).any()
    rgb_array_prev = rgb_array_flat
    # After clearing, the frame must be uniform again.
    scene.clear_debug_objects()
    scene.visualizer.update()
    rgb_array, *_ = cam.render(rgb=True, depth=False, segmentation=False, colorize_seg=False, normal=False)
    assert_allclose(np.std(rgb_array.reshape((-1, 3)), axis=0), 0.0, tol=gs.EPS)
@pytest.mark.required
@pytest.mark.parametrize("n_envs", [0, 2])
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER])
@pytest.mark.skipif(not IS_INTERACTIVE_VIEWER_AVAILABLE, reason="Interactive viewer not supported on this platform.")
def test_sensors_draw_debug(n_envs, renderer_type, renderer, png_snapshot):
    """Test that sensor debug drawing works correctly and renders visible debug elements."""
    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(2.0, 2.0, 2.0),
            camera_lookat=(0.0, 0.0, 0.2),
            # Force screen-independent low-quality resolution when running unit tests for consistency
            res=(480, 320),
            # Enable running in background thread if supported by the platform
            run_in_thread=(sys.platform == "linux"),
        ),
        vis_options=gs.options.VisOptions(
            # Disable shadows systematically for Rasterizer because they are forcibly disabled on CPU backend anyway
            shadow=(renderer_type != RENDERER_TYPE.RASTERIZER),
        ),
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        renderer=renderer,
        show_viewer=True,
    )
    scene.add_entity(gs.morphs.Plane())
    # Fixed box in mid-air: carrier for the IMU and both raycasters.
    floating_box = scene.add_entity(
        gs.morphs.Box(
            size=(0.1, 0.1, 0.1),
            pos=(0.0, 0.0, 0.5),
            fixed=True,
        )
    )
    scene.add_sensor(
        gs.sensors.IMU(
            entity_idx=floating_box.idx,
            pos_offset=(0.0, 0.0, 0.1),
            draw_debug=True,
        )
    )
    # Box resting on the ground: produces contacts for the contact sensors.
    ground_box = scene.add_entity(
        gs.morphs.Box(
            size=(0.4, 0.2, 0.1),
            pos=(-0.25, 0.0, 0.05),
        )
    )
    scene.add_sensor(
        gs.sensors.Contact(
            entity_idx=ground_box.idx,
            draw_debug=True,
            debug_sphere_radius=0.08,
            debug_color=(1.0, 0.5, 1.0, 1.0),
        )
    )
    scene.add_sensor(
        gs.sensors.ContactForce(
            entity_idx=ground_box.idx,
            draw_debug=True,
            debug_scale=0.01,
        )
    )
    scene.add_sensor(
        gs.sensors.Raycaster(
            pattern=gs.sensors.raycaster.GridPattern(
                resolution=0.2,
                size=(0.4, 0.4),
                direction=(0.0, 0.0, -1.0),
            ),
            entity_idx=floating_box.idx,
            pos_offset=(0.2, 0.0, -0.1),
            return_world_frame=True,
            draw_debug=True,
        )
    )
    scene.add_sensor(
        gs.sensors.Raycaster(
            pattern=gs.sensors.raycaster.SphericalPattern(
                n_points=(6, 6),
                fov=(60.0, (-120.0, -60.0)),
            ),
            entity_idx=floating_box.idx,
            pos_offset=(0.0, 0.5, 0.0),
            return_world_frame=False,
            draw_debug=True,
            debug_sphere_radius=0.01,
            debug_ray_start_color=(1.0, 1.0, 0.0, 1.0),
            debug_ray_hit_color=(0.5, 1.0, 1.0, 1.0),
        )
    )
    scene.build(n_envs=n_envs)
    for _ in range(5):
        scene.step()
    # Capture directly through the interactive viewer's offscreen path.
    pyrender_viewer = scene.visualizer.viewer._pyrender_viewer
    assert pyrender_viewer.is_active
    rgb_arr, *_ = pyrender_viewer.render_offscreen(
        pyrender_viewer._camera_node,
        pyrender_viewer._renderer,
        rgb=True,
        depth=False,
        seg=False,
        normal=False,
    )
    if sys.platform == "darwin":
        glinfo = pyrender_viewer.context.get_info()
        # NOTE(review): this rebinding shadows the `renderer` fixture argument;
        # harmless here since the fixture value is no longer used below.
        renderer = glinfo.get_renderer()
        if renderer == "Apple Software Renderer":
            pytest.xfail("Tile ground colors are altered on Apple Software Renderer.")
    assert rgb_array_to_png_bytes(rgb_arr) == png_snapshot
@pytest.mark.required
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER])
@pytest.mark.skipif(not IS_INTERACTIVE_VIEWER_AVAILABLE, reason="Interactive viewer not supported on this platform.")
def test_interactive_viewer_key_press(renderer_type, tmp_path, monkeypatch, renderer, png_snapshot):
    """Dispatch an 'S' key press to the interactive viewer and compare the screenshot it saves
    against the PNG snapshot.

    The file-save dialog is mocked out, and 'on_key_press' is wrapped so the test can tell when
    the viewer (possibly running in a background thread) has actually processed the event.
    """
    IMAGE_FILENAME = tmp_path / "screenshot.png"

    # Mock 'get_save_filename' to avoid popping up an interactive dialog
    def get_save_filename(self, file_exts):
        return IMAGE_FILENAME

    monkeypatch.setattr("genesis.ext.pyrender.viewer.Viewer._get_save_filename", get_save_filename)

    # Mock 'on_key_press' to determine whether requests have been processed
    is_done = False
    on_key_press_orig = gs.ext.pyrender.viewer.Viewer.on_key_press

    def on_key_press(self, symbol: int, modifiers: int):
        nonlocal is_done
        assert not is_done
        ret = on_key_press_orig(self, symbol, modifiers)
        is_done = True
        return ret

    monkeypatch.setattr("genesis.ext.pyrender.viewer.Viewer.on_key_press", on_key_press)

    # Create a scene
    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            # Force screen-independent low-quality resolution when running unit tests for consistency.
            # Still, it must be large enough since rendering text involved alpha blending, which is platform-dependent.
            res=(640, 480),
            # Enable running in background thread if supported by the platform.
            # Note that windows is not supported because it would trigger the following exception if some previous tests
            # was only using rasterizer without interactive viewer:
            # 'EventLoop.run() must be called from the same thread that imports pyglet.app'.
            run_in_thread=(sys.platform == "linux"),
        ),
        vis_options=gs.options.VisOptions(
            # Disable shadows systematically for Rasterizer because they are forcibly disabled on CPU backend anyway
            shadow=(renderer_type != RENDERER_TYPE.RASTERIZER),
        ),
        renderer=renderer,
        show_viewer=True,
        show_FPS=False,
    )
    scene.add_entity(
        gs.morphs.Box(
            size=(0.5, 0.5, 0.5),
            pos=(0.0, 0.0, 0.0),
        ),
    )
    scene.build()

    pyrender_viewer = scene.visualizer.viewer._pyrender_viewer
    assert pyrender_viewer.is_active

    # Try saving the current frame
    pyrender_viewer.dispatch_event("on_key_press", Key.S, 0)

    # Waiting for request completion
    if pyrender_viewer.run_in_thread:
        for i in range(100):
            if is_done:
                is_done = False
                break
            time.sleep(0.1)
        else:
            raise AssertionError("Keyboard event not processed before timeout")
    else:
        pyrender_viewer.dispatch_pending_events()
        pyrender_viewer.dispatch_events()

    # Skip the rest of the test if necessary.
    # Similarly, 'glBlitFramebuffer(..., GL_DEPTH_BUFFER_BIT, GL_NEAREST)' involved in offscreen rendering of depth map
    # with interactive viewer enabled takes ages on old CPU-based Mesa rendering driver (~15000s).
    if sys.platform == "linux":
        glinfo = pyrender_viewer.context.get_info()
        renderer = glinfo.get_renderer()
        if "llvmpipe" in renderer:
            # Compare LLVM major versions numerically: the previous lexicographic string comparison
            # ('llvm_version < "20"') would misclassify single-digit majors such as "9".
            # Also guard against the version string being absent rather than crashing on '.group'.
            match = re.search(r"LLVM\s+([\d.]+)", renderer)
            if match is not None and int(match.group(1).partition(".")[0]) < 20:
                pytest.xfail("Text is blurry on Linux using old CPU-based Mesa rendering driver.")

    # Make sure that the result is valid
    with open(IMAGE_FILENAME, "rb") as f:
        assert f.read() == png_snapshot
@pytest.mark.required
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER])
def test_camera_gimbal_lock_singularity(renderer, show_viewer):
    """
    Test that camera maintains continuous orientation when moving through singularity conditions.
    """
    # Minimal scene: a single camera looking down at the origin.
    scene = gs.Scene(
        renderer=renderer,
        show_viewer=show_viewer,
        show_FPS=False,
    )
    cam = scene.add_camera(pos=(0.0, -1.5, 5.0), lookat=(0.0, 0.0, 0.0))
    scene.build()

    def sweep_and_check_continuity(positions):
        # The camera "right" vector (first column of the camera transform) must not flip between
        # consecutive poses: the dot product of successive right vectors stays positive.
        last_right = None
        for pose_pos in positions:
            cam.set_pose(pos=pose_pos, lookat=(0.0, 0.0, 0.0))
            right_vec = cam.get_transform()[:3, 0]
            if last_right is not None:
                assert torch.dot(last_right, right_vec) > 0.0
            last_right = right_vec

    # Move camera through singularity along y-axis: y=-1.5 to y=1.5 (singularity at y=0)
    sweep_and_check_continuity([(0.0, -1.5 + 0.5 * k, 5.0) for k in range(7)])
    # Move camera through singularity along x-axis: x=-1.5 to x=1.5 (singularity at x=0)
    sweep_and_check_continuity([(-1.5 + 0.5 * k, 0.0, 5.0) for k in range(7)])
@pytest.mark.parametrize(
    "renderer_type",
    [RENDERER_TYPE.RASTERIZER, RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER],
)
def test_render_planes(tmp_path, png_snapshot, renderer_type, renderer):
    """Render tiled ground planes with several (plane_size, tile_size) combinations and compare
    each exported image against the PNG snapshot."""
    IS_BATCHRENDER = renderer_type in (RENDERER_TYPE.BATCHRENDER_RASTERIZER, RENDERER_TYPE.BATCHRENDER_RAYTRACER)
    # Each case is (plane_size, tile_size); 'test_idx' doubles as the exported frame index.
    for test_idx, (plane_size, tile_size) in enumerate(
        (
            ((3, 4.5), (0.5, 0.75)),
            ((3.0, 5.0), (5.0, 3.0)),
            ((4.0, 4.0), (1.0, 1.0)),
        )
    ):
        scene = gs.Scene(
            renderer=renderer,
            show_viewer=False,
            show_FPS=False,
        )
        # Lights are added explicitly only for the batch renderers.
        # NOTE(review): presumably the classic rasterizer supplies default lighting — confirm.
        if IS_BATCHRENDER:
            scene.add_light(
                pos=(0.0, 0.0, 1.5),
                dir=(1.0, 1.0, -2.0),
                directional=True,
                castshadow=True,
                cutoff=45.0,
                intensity=0.5,
            )
            scene.add_light(
                pos=(4.0, -4.0, 4.0),
                dir=(-1.0, 1.0, -1.0),
                directional=False,
                castshadow=True,
                cutoff=45.0,
                intensity=0.5,
            )
        scene.add_entity(
            gs.morphs.Plane(plane_size=plane_size, tile_size=tile_size),
        )
        # Top-down camera looking at the origin.
        camera = scene.add_camera(
            res=(256, 256),
            pos=(0.0, 0.0, 8),
            lookat=(0.0, 0.0, 0.0),
            fov=45,
            GUI=False,
        )
        scene.build()
        exporter = FrameImageExporter(tmp_path)
        # Only RGB is requested; 'depth' is presumably empty/None here since depth=False — the
        # exporter is still handed it unchanged.
        rgba, depth, _, _ = camera.render(rgb=True, depth=False)
        exporter.export_frame_single_camera(test_idx, camera.idx, rgb=rgba, depth=depth)
    # Compare every exported image against its snapshot.
    for image_file in sorted(tmp_path.rglob("*.png")):
        with open(image_file, "rb") as f:
            assert f.read() == png_snapshot
@pytest.mark.slow  # ~500s
@pytest.mark.required
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER])
@pytest.mark.skipif(not IS_INTERACTIVE_VIEWER_AVAILABLE, reason="Interactive viewer not supported on this platform.")
def test_batch_deformable_render(monkeypatch, png_snapshot):
    """Render a batched scene mixing PBD cloth, MPM muscle, SPH liquid and rigid bodies through
    the interactive viewer, and compare the offscreen frame against the PNG snapshot."""
    # Having many particles in the scene creates artifacts that are not deterministic between different hardware
    png_snapshot.extension._std_err_threshold = 2.0
    png_snapshot.extension._blurred_kernel_size = 3
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=5e-4,
            substeps=10,
        ),
        pbd_options=gs.options.PBDOptions(
            particle_size=1e-2,
        ),
        mpm_options=gs.options.MPMOptions(
            lower_bound=(-1.0, -1.0, -0.2),
            upper_bound=(1.0, 1.0, 1.0),
        ),
        sph_options=gs.options.SPHOptions(
            lower_bound=(-0.5, -0.5, 0.0),
            upper_bound=(0.5, 0.5, 1),
            particle_size=0.01,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(6.0, 0.0, 4.0),
            camera_lookat=(0.0, 0.0, 0.0),
            camera_fov=40,
            res=(640, 480),
            run_in_thread=False,
            # Disable text rendering as it is messing up with pixel matching when using old CPU-based Mesa driver
            enable_help_text=False,
        ),
        vis_options=gs.options.VisOptions(
            visualize_mpm_boundary=True,
            visualize_sph_boundary=True,
            show_world_frame=True,
        ),
        show_viewer=True,
        show_FPS=False,
    )
    # Ground plane participating in rigid/soft coupling.
    scene.add_entity(
        morph=gs.morphs.Plane(),
        material=gs.materials.Rigid(
            needs_coup=True,
            coup_friction=0.0,
        ),
    )
    # Fixed tilted box for the cloth to drape over.
    scene.add_entity(
        morph=gs.morphs.Box(
            pos=(0.5, 0.5, 0.2),
            size=(0.2, 0.2, 0.2),
            euler=(30, 40, 0),
            fixed=True,
        ),
        material=gs.materials.Rigid(
            needs_coup=True,
            coup_friction=0.0,
        ),
    )
    # PBD cloth mesh above the fixed box.
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/cloth.obj",
            scale=1.0,
            pos=(0.5, 0.5, 0.5),
            euler=(180.0, 0.0, 0.0),
        ),
        material=gs.materials.PBD.Cloth(),
        surface=gs.surfaces.Default(
            color=(0.2, 0.4, 0.8, 1.0),
        ),
    )
    # MPM muscle "worm" lying on the ground.
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/worm/worm.obj",
            pos=(0.3, 0.3, 0.001),
            scale=0.1,
            euler=(90, 0, 0),
        ),
        material=gs.materials.MPM.Muscle(
            E=5e5,
            nu=0.45,
            rho=10000.0,
            # NOTE(review): "neohooken" looks like a misspelling of "neohookean" — presumably it
            # matches the model key expected by the engine; confirm before renaming.
            model="neohooken",
            sampler="random",
            n_groups=4,
        ),
    )
    # SPH liquid block rendered as particles.
    scene.add_entity(
        morph=gs.morphs.Box(
            pos=(0.0, 0.0, 0.65),
            size=(0.4, 0.4, 0.4),
        ),
        material=gs.materials.SPH.Liquid(
            sampler="random",
        ),
        surface=gs.surfaces.Default(
            color=(0.4, 0.8, 1.0),
            vis_mode="particle",
        ),
    )
    scene.build(n_envs=4, env_spacing=(2.0, 2.0))
    pyrender_viewer = scene.visualizer.viewer._pyrender_viewer
    assert pyrender_viewer.is_active
    # Force a viewer refresh before grabbing the offscreen frame.
    scene.visualizer.viewer.update(auto_refresh=True, force=True)
    rgb_arr, *_ = pyrender_viewer.render_offscreen(
        pyrender_viewer._camera_node, pyrender_viewer._renderer, rgb=True, depth=False, seg=False, normal=False
    )
    assert rgb_array_to_png_bytes(rgb_arr) == png_snapshot
@pytest.mark.skipif(not IS_INTERACTIVE_VIEWER_AVAILABLE, reason="Interactive viewer not available")
@pytest.mark.parametrize("add_box", [False, True])
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER])
def test_add_camera_vs_interactive_viewer_consistency(add_box, renderer_type, show_viewer):
    """Check that a camera added via 'add_camera' with the same pose/fov/resolution as the
    interactive viewer renders an image of the same overall brightness (i.e. both paths apply
    the same lighting setup)."""
    CAM_RES = (128, 128)
    CAM_POS = (0.0, -2.0, 1.5)
    CAM_LOOKAT = (0.0, 0.0, 0.0)
    CAM_FOV = 60.0
    scene = gs.Scene(
        vis_options=gs.options.VisOptions(
            ambient_light=(0.1, 0.1, 0.1),
            lights=[
                dict(
                    type="directional",
                    dir=(-1, -1, -1),
                    color=(1.0, 1.0, 1.0),
                    intensity=5.0,
                ),
            ],
        ),
        viewer_options=gs.options.ViewerOptions(
            res=CAM_RES,
            camera_pos=CAM_POS,
            camera_lookat=CAM_LOOKAT,
            camera_fov=CAM_FOV,
        ),
        renderer=renderer_type,
        show_viewer=True,
    )
    scene.add_entity(morph=gs.morphs.Plane())
    if add_box:
        scene.add_entity(
            morph=gs.morphs.Box(
                pos=(0.1, 0.1, 0.1),
                size=(0.1, 0.1, 0.1),
                fixed=True,
            ),
        )
    # Offscreen camera configured identically to the interactive viewer.
    camera = scene.add_camera(
        res=CAM_RES,
        pos=CAM_POS,
        lookat=CAM_LOOKAT,
        fov=CAM_FOV,
        GUI=show_viewer,
    )
    scene.build()
    # Render from interactive viewer
    pyrender_viewer = scene.visualizer.viewer._pyrender_viewer
    assert pyrender_viewer.is_active
    viewer_rgb, *_ = pyrender_viewer.render_offscreen(
        pyrender_viewer._camera_node, pyrender_viewer._renderer, rgb=True, depth=False, seg=False, normal=False
    )
    # Render from add_camera
    add_cam_rgb, *_ = camera.render(rgb=True)
    # Compare brightness (mean pixel value); 1% tolerance on the ratio.
    viewer_brightness = viewer_rgb.mean()
    add_cam_brightness = add_cam_rgb.mean()
    brightness_ratio = add_cam_brightness / viewer_brightness
    assert 0.99 <= brightness_ratio <= 1.01, (
        f"add_camera brightness ({add_cam_brightness:.2f}) should match "
        f"interactive viewer brightness ({viewer_brightness:.2f}), "
        f"but ratio is {brightness_ratio:.2f}"
    )
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER, RENDERER_TYPE.RAYTRACER])
def test_deformable_uv_textures(renderer, show_viewer, png_snapshot):
    """Check that UV textures stay attached to deforming PBD cloth and FEM meshes by stepping a
    few frames and comparing the rendered image against the PNG snapshot."""
    # Relax pixel matching because RayTracer is not deterministic between different hardware (eg RTX6000 vs H100), even
    # without denoiser.
    png_snapshot.extension._std_err_threshold = 3.0
    png_snapshot.extension._blurred_kernel_size = 3
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=0.04,
            substeps=6,
        ),
        pbd_options=gs.options.PBDOptions(
            particle_size=0.01,
        ),
        fem_options=gs.options.FEMOptions(
            # Implicit solver allows for larger timestep without failure on GPU backend
            use_implicit_solver=True,
            # Reduce number of iterations to speedup runtime
            n_pcg_iterations=40,
        ),
        renderer=renderer,
        show_viewer=show_viewer,
        show_FPS=False,
    )
    # Add ground plane
    scene.add_entity(
        morph=gs.morphs.Plane(),
        surface=gs.surfaces.Aluminium(
            ior=10.0,
        ),
    )
    # Add PBD cloth with checker texture
    asset_path = get_hf_dataset(pattern="uv_plane.obj")
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file=f"{asset_path}/uv_plane.obj",
            scale=0.4,
            pos=(-0.2, 0.0, 0.4),
        ),
        material=gs.materials.PBD.Cloth(),
        surface=gs.surfaces.Default(
            diffuse_texture=gs.textures.ImageTexture(
                image_path="textures/checker.png",
            ),
            vis_mode="visual",
        ),
    )
    # Add FEM elastic object with checker texture
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/duck.obj",
            scale=0.1,
            pos=(0.2, 0.0, 0.2),
        ),
        material=gs.materials.FEM.Elastic(
            E=1e5,
            nu=0.4,
        ),
        surface=gs.surfaces.Default(
            diffuse_texture=gs.textures.ImageTexture(
                image_path="textures/checker.png",
            ),
            vis_mode="visual",
        ),
    )
    camera = scene.add_camera(
        res=(256, 256),
        pos=(1.5, 1.5, 1),
        lookat=(0.0, 0.0, 0.3),
        fov=45,
        spp=64,
        denoise=False,
        GUI=show_viewer,
    )
    scene.build()
    # Step simulation to deform the objects
    for _ in range(4):
        scene.step()
    # Render and verify
    rgb_arr, *_ = camera.render(rgb=True)
    assert rgb_array_to_png_bytes(rgb_arr) == png_snapshot
@pytest.mark.required
@pytest.mark.parametrize("renderer_type", [RENDERER_TYPE.RASTERIZER])
@pytest.mark.skipif(not IS_INTERACTIVE_VIEWER_AVAILABLE, reason="Interactive viewer not supported on this platform.")
def test_rasterizer_camera_sensor_with_viewer(renderer):
    """Test that RasterizerCameraSensor works correctly when interactive viewer is enabled.

    This verifies that the sensor properly shares the viewer's OpenGL context instead of
    creating a conflicting separate context.
    """
    CAM_RES = (128, 64)
    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            res=CAM_RES,
            run_in_thread=False,
        ),
        renderer=renderer,
        show_viewer=True,
    )
    # At least one entity is needed to ensure the rendered image is not entirely blank,
    # otherwise it is not possible to verify that something was actually rendered.
    scene.add_entity(morph=gs.morphs.Plane())
    camera_sensor = scene.add_sensor(
        RasterizerCameraOptions(
            res=CAM_RES,
        )
    )
    scene.build()
    pyrender_viewer = scene.visualizer.viewer._pyrender_viewer
    assert pyrender_viewer.is_active
    scene.step()
    # A non-trivial pixel standard deviation proves the sensor actually rendered something.
    data = camera_sensor.read()
    assert data.rgb.float().std() > 1.0, "RGB std too low, image may be blank"
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_render.py",
"license": "Apache License 2.0",
"lines": 1498,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:examples/collision/pyramid.py | import numpy as np
import genesis as gs
import argparse
def main():
    """Build a pyramid of boxes on a ground plane and step the simulation.

    Command-line options select the pile type ("falling" boxes slightly overlap so the pile
    collapses, "static" boxes are spaced apart so it holds), the pyramid base size, the
    backend, and the number of simulation steps.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--pile_type", type=str, default="falling", choices=("static", "falling"))
    parser.add_argument("--num_cubes", type=int, default=5, choices=(5, 6, 7, 8, 9, 10))
    # '--cpu' defaulted to True, which made the flag a no-op. Keep that default for backward
    # compatibility and expose '--gpu' (same dest) to actually opt into the GPU backend.
    parser.add_argument("--cpu", action="store_true", help="Use CPU backend instead of GPU", default=True)
    parser.add_argument("--gpu", dest="cpu", action="store_false", help="Use GPU backend instead of CPU")
    parser.add_argument("--steps", type=int, default=150)
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    args = parser.parse_args()

    gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="32")

    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=0.01,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(0, -5.5, 2.5),
            camera_lookat=(0, 0.0, 1.5),
            max_FPS=60,
        ),
        show_viewer=args.vis,
    )
    scene.add_entity(gs.morphs.Plane())

    # Create a pyramid of boxes: row j holds (num_cubes - j) cubes, shifted by half a cube per
    # level. "static" spacing is 10% wider so the boxes do not start in contact.
    box_size = 0.25
    box_spacing = (1.0 - 1e-3 + 0.1 * (args.pile_type == "static")) * box_size
    box_pos_offset = (-0.5, 1, 0.0) + 0.5 * np.array([box_size, box_size, box_size])
    for i in range(args.num_cubes):
        for j in range(args.num_cubes - i):
            scene.add_entity(
                gs.morphs.Box(
                    size=[box_size, box_size, box_size],
                    pos=box_pos_offset + box_spacing * np.array([i + 0.5 * j, 0, j]),
                ),
                # visualize_contact=True,
            )
    scene.build()

    for _ in range(args.steps):
        scene.step()


if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/collision/pyramid.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/collision/tower.py | import argparse
import os
import genesis as gs
def main():
    """Stack a Jenga-style tower of boxes and drop a heavy object on top of it."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--object", type=str, default="cylinder", choices=("sphere", "cylinder", "duck"))
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    args = parser.parse_args()

    object_type = args.object
    # Keep runs short under pytest, long otherwise.
    horizon = 50 if "PYTEST_VERSION" in os.environ else 1000

    gs.init(backend=gs.cpu, precision="32", performance_mode=True)

    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=0.004,
        ),
        rigid_options=gs.options.RigidOptions(
            max_collision_pairs=200,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(20, -20, 20),
            camera_lookat=(0.0, 0.0, 5.0),
            max_FPS=60,
        ),
        show_viewer=args.vis,
    )
    scene.add_entity(gs.morphs.Plane())

    # Build the tower: each layer holds two parallel boxes, and consecutive layers are rotated
    # by 90 degrees (Jenga-style). Layers overlap by 1e-3 so they start in contact.
    box_width, box_length, box_height = 0.25, 2.0, 0.1
    num_stacks = 50
    for layer in range(num_stacks):
        z_center = layer * (box_height - 1e-3) + 0.5 * box_height
        if layer % 2 == 0:
            # Even layers run along the y-axis.
            layer_size = (box_width, box_length, box_height)
            layer_positions = ((-0.4 * box_length, 0, z_center), (+0.4 * box_length, 0, z_center))
        else:
            # Odd layers run along the x-axis.
            layer_size = (box_length, box_width, box_height)
            layer_positions = ((0, -0.4 * box_length, z_center), (0, +0.4 * box_length, z_center))
        for layer_pos in layer_positions:
            scene.add_entity(
                gs.morphs.Box(
                    size=layer_size,
                    pos=layer_pos,
                ),
            )

    # Drop a huge object well above the tower.
    if object_type == "duck":
        duck_scale = 0.8
        scene.add_entity(
            morph=gs.morphs.Mesh(
                file="meshes/duck.obj",
                scale=duck_scale,
                pos=(0, -0.1, num_stacks * box_height + 10 * duck_scale),
            ),
        )
    elif object_type == "sphere":
        sphere_radius = 2.0
        scene.add_entity(
            morph=gs.morphs.Sphere(
                radius=sphere_radius,
                pos=(0.0, 0.0, num_stacks * box_height + 5 * sphere_radius),
            ),
        )
    elif object_type == "cylinder":
        cylinder_radius, cylinder_height = 2.0, 1.0
        scene.add_entity(
            morph=gs.morphs.Cylinder(
                radius=cylinder_radius,
                height=cylinder_height,
                pos=(0.0, 0.0, num_stacks * box_height + 5 * cylinder_height),
            ),
        )
    scene.build()

    for _ in range(horizon):
        scene.step()


if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/collision/tower.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/keyboard_teleop.py | """
Keyboard Controls:
↑ - Move Forward (North)
↓ - Move Backward (South)
← - Move Left (West)
→ - Move Right (East)
n - Move Up
m - Move Down
j - Rotate Counterclockwise
k - Rotate Clockwise
u - Reset Scene
space - Press to close gripper, release to open gripper
esc - Quit
Plus all default viewer controls (press 'i' to see them)
"""
import os
import random
import numpy as np
import genesis as gs
import genesis.utils.geom as gu
from genesis.vis.keybindings import Key, KeyAction, Keybind
if __name__ == "__main__":
    ########################## init ##########################
    gs.init(precision="32", logging_level="info", backend=gs.cpu)
    np.set_printoptions(precision=7, suppress=True)

    ########################## create a scene ##########################
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            substeps=4,
        ),
        rigid_options=gs.options.RigidOptions(
            enable_joint_limit=True,
            enable_collision=True,
            gravity=(0, 0, -9.8),
            box_box_detection=True,
            constraint_timeconst=0.01,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(1.5, 0.0, 0.7),
            camera_lookat=(0.2, 0.0, 0.1),
            camera_fov=50,
            max_FPS=60,
        ),
        show_viewer=True,
        show_FPS=False,
    )

    ########################## entities ##########################
    plane = scene.add_entity(
        gs.morphs.Plane(),
    )
    # Franka Panda arm; gravity compensation so position control does not fight gravity.
    robot = scene.add_entity(
        material=gs.materials.Rigid(gravity_compensation=1),
        morph=gs.morphs.MJCF(
            file="xml/franka_emika_panda/panda.xml",
            euler=(0, 0, 0),
        ),
    )
    # Light green cube to be grasped.
    cube = scene.add_entity(
        material=gs.materials.Rigid(rho=300),
        morph=gs.morphs.Box(
            pos=(0.5, 0.0, 0.07),
            size=(0.04, 0.04, 0.04),
        ),
        surface=gs.surfaces.Default(color=(0.5, 1, 0.5)),
    )
    # Non-colliding axis marker visualizing the teleoperation target pose.
    target = scene.add_entity(
        gs.morphs.Mesh(
            file="meshes/axis.obj",
            scale=0.15,
            collision=False,
        ),
        surface=gs.surfaces.Default(color=(1, 0.5, 0.5, 1)),
    )

    ########################## build ##########################
    scene.build()

    # Initialize robot control state
    robot_init_pos = np.array([0.5, 0, 0.55])
    robot_init_quat = gu.xyz_to_quat(np.array([0, np.pi, 0]))  # Rotation around Y axis

    # Get DOF indices: last two DOFs are the gripper fingers, the rest are the arm motors.
    n_dofs = robot.n_dofs
    motors_dof = np.arange(n_dofs - 2)
    fingers_dof = np.arange(n_dofs - 2, n_dofs)
    ee_link = robot.get_link("hand")

    # Initialize target pose
    target_pos = robot_init_pos.copy()
    target_quat = [robot_init_quat.copy()]  # Use list to make it mutable in closures

    # Control parameters: translation step (m) and rotation step (rad) per held key event.
    dpos = 0.002
    drot = 0.01

    # Helper function to reset robot
    def reset_robot():
        """Reset robot and cube to initial positions."""
        target_pos[:] = robot_init_pos.copy()
        target_quat[0] = robot_init_quat.copy()
        target.set_qpos(np.concatenate([target_pos, target_quat[0]]))
        # Snap the arm to the IK solution for the initial target pose (fingers excluded).
        q = robot.inverse_kinematics(link=ee_link, pos=target_pos, quat=target_quat[0])
        robot.set_qpos(q[:-2], motors_dof)
        # Randomize cube position
        cube.set_pos((random.uniform(0.2, 0.4), random.uniform(-0.2, 0.2), 0.05))
        random_angle = random.uniform(0, np.pi * 2)
        cube.set_quat(gu.xyz_to_quat(np.array([0, 0, random_angle])))

    # Initialize robot pose
    reset_robot()

    # Robot teleoperation callback functions
    def move(dpos: tuple[float, float, float]):
        # NOTE: the parameter shadows the outer 'dpos' step size; the actual per-key
        # displacement is bound via the 'args' of each Keybind below.
        target_pos[:] += np.array(dpos, dtype=gs.np_float)

    def rotate(drot: float):
        # Apply an incremental yaw rotation to the target orientation.
        drot_quat = gu.xyz_to_quat(np.array([0, 0, drot]))
        target_quat[0] = gu.transform_quat_by_quat(target_quat[0], drot_quat)

    def toggle_gripper(close: bool = True):
        # Constant force on both fingers: negative closes, positive opens.
        pos = -1.0 if close else 1.0
        robot.control_dofs_force(np.array([pos, pos]), fingers_dof)

    is_running = True

    def stop():
        # 'is_running' lives at module scope (we are inside the __main__ guard), hence 'global'.
        global is_running
        is_running = False

    # Register robot teleoperation keybindings
    scene.viewer.register_keybinds(
        Keybind("move_forward", Key.UP, KeyAction.HOLD, callback=move, args=((-dpos, 0, 0),)),
        Keybind("move_back", Key.DOWN, KeyAction.HOLD, callback=move, args=((dpos, 0, 0),)),
        Keybind("move_left", Key.LEFT, KeyAction.HOLD, callback=move, args=((0, -dpos, 0),)),
        Keybind("move_right", Key.RIGHT, KeyAction.HOLD, callback=move, args=((0, dpos, 0),)),
        Keybind("move_up", Key.N, KeyAction.HOLD, callback=move, args=((0, 0, dpos),)),
        Keybind("move_down", Key.M, KeyAction.HOLD, callback=move, args=((0, 0, -dpos),)),
        Keybind("rotate_ccw", Key.J, KeyAction.HOLD, callback=rotate, args=(drot,)),
        Keybind("rotate_cw", Key.K, KeyAction.HOLD, callback=rotate, args=(-drot,)),
        Keybind("reset_scene", Key.U, KeyAction.HOLD, callback=reset_robot),
        Keybind("close_gripper", Key.SPACE, KeyAction.PRESS, callback=toggle_gripper, args=(True,)),
        Keybind("open_gripper", Key.SPACE, KeyAction.RELEASE, callback=toggle_gripper, args=(False,)),
        Keybind("quit", Key.ESCAPE, KeyAction.PRESS, callback=stop),
    )

    ########################## run simulation ##########################
    try:
        while is_running:
            # Update target entity visualization
            target.set_qpos(np.concatenate([target_pos, target_quat[0]]))
            # Control arm with inverse kinematics
            q, err = robot.inverse_kinematics(link=ee_link, pos=target_pos, quat=target_quat[0], return_error=True)
            robot.control_dofs_position(q[:-2], motors_dof)
            scene.step()
            # Run a single iteration when executed under pytest.
            if "PYTEST_VERSION" in os.environ:
                break
    except KeyboardInterrupt:
        gs.logger.info("Simulation interrupted, exiting.")
    finally:
        gs.logger.info("Simulation finished.")
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/keyboard_teleop.py",
"license": "Apache License 2.0",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:tests/test_hybrid.py | import numpy as np
import pytest
import torch
import genesis as gs
from genesis.utils.misc import tensor_to_array
from .utils import assert_allclose, get_hf_dataset
@pytest.mark.required
def test_rigid_mpm_muscle(show_viewer):
    """Couple a hybrid rigid/MPM-muscle arm with a rigid ball: the swinging arm should push the
    ball along +Y only, without drift along X or Z."""
    BALL_POS_INIT = (0.8, 0.6, 0.12)
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=3e-3,
            substeps=10,
        ),
        rigid_options=gs.options.RigidOptions(
            gravity=(0, 0, -9.8),
            constraint_timeconst=0.02,
        ),
        mpm_options=gs.options.MPMOptions(
            lower_bound=(0.0, 0.0, -0.2),
            upper_bound=(1.0, 1.0, 1.0),
            gravity=(0.0, 0.0, 0.0),  # mimic gravity compensation
            enable_CPIC=True,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(1.5, 1.3, 0.5),
            camera_lookat=(0.0, 0.0, 0.0),
            camera_fov=40,
        ),
        show_viewer=show_viewer,
        show_FPS=False,
    )
    scene.add_entity(morph=gs.morphs.Plane())
    # Two-link arm whose rigid skeleton is wrapped in an MPM muscle layer.
    robot = scene.add_entity(
        morph=gs.morphs.URDF(
            file="urdf/simple/two_link_arm.urdf",
            pos=(0.45, 0.45, 0.2),
            euler=(0.0, 0.0, 0.0),
            scale=0.2,
            fixed=True,
        ),
        material=gs.materials.Hybrid(
            material_rigid=gs.materials.Rigid(
                gravity_compensation=1.0,
            ),
            material_soft=gs.materials.MPM.Muscle(
                E=1e4,
                nu=0.45,
                rho=1000.0,
                # NOTE(review): "neohooken" looks like a misspelling of "neohookean" — presumably
                # it matches the model key expected by the engine; confirm before renaming.
                model="neohooken",
            ),
            thickness=0.05,
            damping=1000.0,
        ),
    )
    ball = scene.add_entity(
        morph=gs.morphs.Sphere(
            pos=BALL_POS_INIT,
            radius=0.12,
        ),
        material=gs.materials.Rigid(rho=1000, friction=0.5),
    )
    scene.build()
    scene.reset()
    # Drive every DOF with a sinusoidal velocity so the arm sweeps back and forth.
    for i in range(150):
        robot.control_dofs_velocity(np.array([2.0 * np.sin(2 * np.pi * i * 0.006)] * robot.n_dofs))
        scene.step()
    # The ball should have been nudged along +Y only.
    ball_pos_delta = ball.get_pos() - torch.tensor(BALL_POS_INIT, dtype=gs.tc_float, device=gs.device)
    assert_allclose(ball_pos_delta[..., 0], 0.0, tol=1e-2)
    assert ((0.02 < ball_pos_delta[1]) & (ball_pos_delta[1] < 0.05)).all()
    assert_allclose(ball_pos_delta[..., 2], 0.0, tol=1e-3)
@pytest.mark.slow  # ~700s
@pytest.mark.required
def test_mesh_mpm_build(show_viewer):
    """Smoke-test that a mesh entity with hybrid rigid/MPM-muscle material can be built."""
    # FIXME: This test is crashing on Linux (x86 & aarch64) Github-hosted runners
    scene = gs.Scene(
        mpm_options=gs.options.MPMOptions(
            lower_bound=(-0.5, -0.5, -0.5),
            upper_bound=(0.5, 0.5, 0.5),
        ),
        show_viewer=show_viewer,
        show_FPS=False,
    )
    scene.add_entity(
        morph=gs.morphs.Mesh(
            file="meshes/duck.obj",
            scale=0.15,
        ),
        material=gs.materials.Hybrid(
            material_rigid=gs.materials.Rigid(),
            material_soft=gs.materials.MPM.Muscle(),
        ),
    )
    # Building alone exercises the hybrid rigid/soft pipeline; no stepping needed.
    scene.build()
@pytest.mark.required
@pytest.mark.parametrize(
    "n_envs, material_type",
    [
        (0, gs.materials.SPH.Liquid),
        (1, gs.materials.SPH.Liquid),
        (2, gs.materials.SPH.Liquid),
        (2, gs.materials.PBD.Liquid),
        (2, gs.materials.MPM.Liquid),
        (2, gs.materials.MPM.Sand),
        (2, gs.materials.MPM.Snow),
        (2, gs.materials.MPM.Elastic),  # This makes little sense but nothing prevents doing this
    ],
)
def test_fluid_emitter(n_envs, material_type, show_viewer):
    """Smoke-test the particle emitter across fluid materials and batch sizes: emit droplets onto
    a fixed wheel and step the scene without error."""
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=4e-3,
            substeps=10,
        ),
        mpm_options=gs.options.MPMOptions(
            lower_bound=(0.0, -1.5, 0.0),
            upper_bound=(1.0, 1.5, 4.0),
        ),
        sph_options=gs.options.SPHOptions(
            particle_size=0.02,
        ),
        pbd_options=gs.options.PBDOptions(
            particle_size=0.02,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(5.5, 6.5, 3.2),
            camera_lookat=(0.5, 1.5, 1.5),
            camera_fov=35,
            max_FPS=120,
        ),
        vis_options=gs.options.VisOptions(
            rendered_envs_idx=[0],
        ),
        show_viewer=show_viewer,
    )
    scene.add_entity(gs.morphs.Plane())
    # Fixed, non-convexified obstacle for the emitted particles to hit.
    scene.add_entity(
        morph=gs.morphs.URDF(
            file="urdf/wheel/fancy_wheel.urdf",
            pos=(0.5, 0.25, 1.6),
            euler=(0, 0, 0),
            fixed=True,
            convexify=False,
        ),
    )
    emitter = scene.add_emitter(
        material=material_type(),
        max_particles=5000,
        surface=gs.surfaces.Glass(
            color=(0.7, 0.85, 1.0, 0.7),
        ),
    )
    scene.build(n_envs=n_envs)
    # One omni-directional burst, then a few circular droplets interleaved with steps.
    emitter.emit_omni()
    for i in range(5):
        emitter.emit(droplet_shape="circle", droplet_size=0.25)
        scene.step()
    scene.step()
@pytest.mark.required
@pytest.mark.parametrize("precision", ["64"])
def test_sap_rigid_rigid_hydroelastic_contact(show_viewer):
    """With the SAP coupler, stack two articulated robots on a rigid box and check that everything
    settles into a static pile at the expected heights."""
    # NOTE(review): BOX_POS is currently unused; the box pose is written inline below.
    BOX_POS = (0.0, 0.0, 0.1)
    BOX_HALFHEIGHT = 0.1
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=1 / 60,
            substeps=2,
        ),
        coupler_options=gs.options.SAPCouplerOptions(
            # Tight solver tolerances so the steady state is accurate enough for the assertions.
            pcg_threshold=1e-10,
            sap_convergence_atol=1e-10,
            sap_convergence_rtol=1e-10,
            linesearch_ftol=1e-10,
        ),
        show_viewer=show_viewer,
        show_FPS=False,
    )
    plane = scene.add_entity(
        gs.morphs.Plane(
            collision=False,
        ),
    )
    box = scene.add_entity(
        morph=gs.morphs.Box(
            size=(0.5, 0.5, 2 * BOX_HALFHEIGHT),
            pos=(0.0, 0.0, BOX_HALFHEIGHT),
        ),
        material=gs.materials.Rigid(),
    )
    # Two copies of the same heavy articulated link, the second rotated and dropped from higher.
    asset_path = get_hf_dataset(pattern="heavy_three_joint_link.xml")
    robot_1 = scene.add_entity(
        gs.morphs.MJCF(
            file=f"{asset_path}/heavy_three_joint_link.xml",
            pos=(-0.2, -0.26, 0.0),
            scale=0.3,
        ),
    )
    robot_2 = scene.add_entity(
        gs.morphs.MJCF(
            file=f"{asset_path}/heavy_three_joint_link.xml",
            pos=(0.17, -0.26, 0.1),
            euler=(0.0, 0.0, 90.0),
            scale=0.3,
        ),
    )
    scene.build()
    # Run simulation
    for _ in range(80):
        scene.step()
    # All the entities must be still
    for entity in scene.entities:
        assert_allclose(entity.get_links_vel(), 0.0, atol=2e-2)
    # The box should stay at its initial position
    assert_allclose(box.get_pos(), (0.0, 0.0, BOX_HALFHEIGHT), atol=2e-3)
    # The box, and both robots should be laying on top of each other
    robot_1_min_corner, robot_1_max_corner = robot_1.get_AABB()
    robot_2_min_corner, robot_2_max_corner = robot_2.get_AABB()
    assert (robot_1_min_corner[:2] > -0.4).all() and (robot_2_min_corner[:2] > -0.4).all()
    assert (robot_1_min_corner[:2] < 0.4).all() and (robot_2_min_corner[:2] < 0.4).all()
    assert robot_1_max_corner[2] > 2 * BOX_HALFHEIGHT
    assert robot_2_max_corner[2] > robot_1_max_corner[2] + 0.05
@pytest.mark.required
@pytest.mark.parametrize("precision", ["64"])
def test_sap_fem_vs_robot(show_viewer):
    """With the SAP coupler, rest a small robot on top of a soft FEM sphere and check that the
    sphere squishes in place while the robot stays balanced on it."""
    SPHERE_RADIUS = 0.2
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=1 / 60,
            substeps=2,
        ),
        fem_options=gs.options.FEMOptions(
            use_implicit_solver=True,
        ),
        coupler_options=gs.options.SAPCouplerOptions(),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(2.0, 1.5, 1.2),
        ),
        show_viewer=show_viewer,
        show_FPS=False,
    )
    plane = scene.add_entity(
        gs.morphs.Plane(
            collision=False,
        ),
    )
    # Soft FEM sphere resting on the ground.
    sphere = scene.add_entity(
        morph=gs.morphs.Sphere(
            pos=(0.0, 0.0, SPHERE_RADIUS),
            radius=SPHERE_RADIUS,
        ),
        material=gs.materials.FEM.Elastic(
            E=1e5,
            nu=0.4,
            model="linear_corotated",
        ),
    )
    # Cross-shaped robot dropped just above the sphere.
    asset_path = get_hf_dataset(pattern="cross.xml")
    robot = scene.add_entity(
        gs.morphs.MJCF(
            file=f"{asset_path}/cross.xml",
            pos=(0.0, 0.0, 2 * SPHERE_RADIUS + 0.04),
            scale=0.5,
        ),
    )
    scene.build()
    # Run the simulation
    for _ in range(50):
        scene.step()
    # Check that the sphere did not move, and the slightly squished
    state = sphere.get_state()
    center = state.pos.mean(axis=(0, 1))
    assert_allclose(center[:2], 0.0, tol=0.01)
    assert center[2] < SPHERE_RADIUS - 0.02
    # Check that the ant is laying on top of the sphere
    robot_pos = robot.get_pos()
    assert_allclose(robot_pos[:2], 0.0, tol=0.03)
    assert robot_pos[2] > (2 * SPHERE_RADIUS + 0.04) - 0.05
    # Check that the legs of the ants are resting on the sphere
    assert_allclose(robot.get_qpos()[-4:].abs(), 1.0, tol=0.1)
@pytest.mark.required
@pytest.mark.parametrize("substeps", [1, 10])
def test_rigid_mpm_legacy_coupling(substeps, show_viewer):
    """Check the legacy rigid<->MPM coupling.

    This test is aiming at two things:
    1) When substeps = 1, the rigid object should be affected by the MPM particles.
    2) Regardless of substeps, as far as the substep dt is the same, they should give
       consistent results.
    """
    substep_dt = 4e-4
    horizon_substeps = 100
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=substep_dt * substeps,
            substeps=substeps,
        ),
        mpm_options=gs.options.MPMOptions(
            lower_bound=(-0.5, -1.0, 0.0),
            upper_bound=(0.5, 1.0, 1),
        ),
        vis_options=gs.options.VisOptions(
            visualize_mpm_boundary=True,
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_fov=30,
        ),
        show_viewer=show_viewer,
    )
    plane = scene.add_entity(
        morph=gs.morphs.Plane(),
    )
    # Very light rigid box placed next to a block of MPM liquid.
    obj_rigid = scene.add_entity(
        material=gs.materials.Rigid(
            rho=1.0,
        ),
        morph=gs.morphs.Box(
            pos=(0.0, -0.25, 0.1),
            size=(0.2, 0.2, 0.2),
        ),
        surface=gs.surfaces.Default(
            color=(1.0, 0.4, 0.4),
            vis_mode="visual",
        ),
    )
    obj_sand = scene.add_entity(
        material=gs.materials.MPM.Liquid(),
        morph=gs.morphs.Box(
            pos=(0.0, 0.0, 0.2),
            size=(0.3, 0.3, 0.3),
        ),
        surface=gs.surfaces.Default(
            color=(0.3, 0.3, 1.0),
            vis_mode="particle",
        ),
    )
    scene.build()
    # Same total number of substeps regardless of how they are grouped into steps.
    horizon = horizon_substeps // substeps
    for i in range(horizon):
        scene.step()
    # Check that the sand moved the box along the negative Y direction
    pos = tensor_to_array(obj_rigid.get_pos())
    assert pos[1] + 0.25 < 0.0
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_hybrid.py",
"license": "Apache License 2.0",
"lines": 338,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:genesis/engine/bvh.py | import quadrants as qd
import genesis as gs
from genesis.repr_base import RBC
# A constant stack size should be sufficient for BVH traversal.
# https://madmann91.github.io/2021/01/06/bvhs-part-2.html
# https://forums.developer.nvidia.com/t/thinking-parallel-part-ii-tree-traversal-on-the-gpu/148342
STACK_SIZE = 64
@qd.data_oriented
class AABB(RBC):
    """
    AABB (Axis-Aligned Bounding Box) class for managing collections of bounding boxes in batches.

    This class defines an axis-aligned bounding box (AABB) structure and provides a Quadrants dataclass
    for efficient computation and intersection testing on the GPU. Each AABB is represented by its
    minimum and maximum 3D coordinates. The class supports batch processing of multiple AABBs.

    Attributes:
        n_batches (int): Number of batches of AABBs.
        n_aabbs (int): Number of AABBs per batch.
        qd_aabb (quadrants.dataclass): Quadrants dataclass representing an individual AABB with min and max vectors.
        aabbs (quadrants.field): Quadrants field storing all AABBs in the specified batches.

    Args:
        n_batches (int): Number of batches to allocate.
        n_aabbs (int): Number of AABBs per batch.

    Example:
        aabb_manager = AABB(n_batches=4, n_aabbs=128)
    """

    def __init__(self, n_batches: int, n_aabbs: int):
        self.n_batches = n_batches
        self.n_aabbs = n_aabbs

        # The dataclass is declared inside __init__ so that each AABB manager
        # owns its own type object (stored below as self.qd_aabb).
        @qd.dataclass
        class qd_aabb:
            min: gs.qd_vec3
            max: gs.qd_vec3

            @qd.func
            def intersects(self, other) -> bool:
                """
                Check if this AABB intersects with another AABB.
                """
                # Separating-axis test on all three axes; touching boundaries
                # (min == max) count as an intersection.
                return (
                    self.min[0] <= other.max[0]
                    and self.max[0] >= other.min[0]
                    and self.min[1] <= other.max[1]
                    and self.max[1] >= other.min[1]
                    and self.min[2] <= other.max[2]
                    and self.max[2] >= other.min[2]
                )

        self.qd_aabb = qd_aabb
        # One AABB record per (batch, index); SOA layout for coalesced GPU access.
        self.aabbs = qd_aabb.field(
            shape=(n_batches, n_aabbs),
            needs_grad=False,
            layout=qd.Layout.SOA,
        )
@qd.data_oriented
class LBVH(RBC):
    """
    Linear BVH is a simple BVH that is used to accelerate collision detection. It supports parallel building and
    querying of the BVH tree. Only supports axis-aligned bounding boxes (AABBs).

    Parameters
    -----
    aabb : AABB
        The input AABBs to be organized in the BVH, shape (n_batches, n_aabbs).
    max_n_query_result_per_aabb : int
        Maximum number of query results per AABB per batch (n_batches * n_aabbs * max_n_query_result_per_aabb).
        Defaults to 8.
    n_radix_sort_groups : int
        Number of groups to use for radix sort. More groups may improve performance but will use more memory.
    max_stack_depth : int
        Maximum stack depth for BVH traversal. Defaults to STACK_SIZE.

    Attributes
    -----
    aabbs : qd.field
        The input AABBs to be organized in the BVH, shape (n_batches, n_aabbs).
    n_aabbs : int
        Number of AABBs per batch.
    n_batches : int
        Number of batches.
    max_query_results : int
        Maximum number of query results allowed.
    max_stack_depth : int
        Maximum stack depth for BVH traversal.
    aabb_centers : qd.field
        Centers of the AABBs, shape (n_batches, n_aabbs).
    aabb_min : qd.field
        Minimum coordinates of AABB centers per batch, shape (n_batches).
    aabb_max : qd.field
        Maximum coordinates of AABB centers per batch, shape (n_batches).
    scale : qd.field
        Scaling factors for normalizing AABB centers, shape (n_batches).
    morton_codes : qd.field
        Morton codes for each AABB, shape (n_batches, n_aabbs).
    hist : qd.field
        Histogram for radix sort, shape (n_batches, 256).
    prefix_sum : qd.field
        Prefix sum for histogram, shape (n_batches, 256 + 1).
    offset : qd.field
        Offset for radix sort, shape (n_batches, n_aabbs).
    tmp_morton_codes : qd.field
        Temporary storage for radix sort, shape (n_batches, n_aabbs).
    Node : qd.dataclass
        Node structure for the BVH tree, containing left, right, parent indices and bounding box.
    nodes : qd.field
        BVH nodes, shape (n_batches, n_aabbs * 2 - 1).
    internal_node_active : qd.field
        Flags marking internal nodes to process in the current bottom-up pass, shape (n_batches, n_aabbs - 1).
    internal_node_ready : qd.field
        Flags marking internal nodes whose children were just completed, shape (n_batches, n_aabbs - 1).
    query_result : qd.field
        Query results as a vector of (batch id, self id, query id), shape (max_query_results).
    query_result_count : qd.field
        Counter for the number of query results.

    Notes
    ------
    For algorithmic details, see:
    https://research.nvidia.com/sites/default/files/pubs/2012-06_Maximizing-Parallelism-in/karras2012hpg_paper.pdf
    """

    def __init__(
        self,
        aabb: AABB,
        max_n_query_result_per_aabb: int = 8,
        n_radix_sort_groups: int = 256,
        max_stack_depth: int = STACK_SIZE,
    ):
        # A radix tree needs at least one internal node, hence at least 2 leaves.
        if aabb.n_aabbs < 2:
            gs.raise_exception("The number of AABBs must be larger than 2.")
        n_radix_sort_groups = min(aabb.n_aabbs, n_radix_sort_groups)
        self.aabbs = aabb.aabbs
        self.n_aabbs = aabb.n_aabbs
        self.n_batches = aabb.n_batches
        # Clamp to i32 range so the atomic result counter cannot overflow.
        self.max_query_results = max(1, min(self.n_aabbs * max_n_query_result_per_aabb * self.n_batches, 0x7FFFFFFF))
        self.max_stack_depth = max_stack_depth
        self.aabb_centers = qd.field(gs.qd_vec3, shape=(self.n_batches, self.n_aabbs))
        self.aabb_min = qd.field(gs.qd_vec3, shape=(self.n_batches,))
        self.aabb_max = qd.field(gs.qd_vec3, shape=(self.n_batches,))
        self.scale = qd.field(gs.qd_vec3, shape=(self.n_batches,))
        # Each entry is (30-bit interleaved Morton code, original AABB index).
        self.morton_codes = qd.field(qd.types.vector(2, qd.u32), shape=(self.n_batches, self.n_aabbs))
        # Histogram for radix sort
        self.hist = qd.field(qd.u32, shape=(self.n_batches, 256))
        # Prefix sum for histogram
        self.prefix_sum = qd.field(qd.u32, shape=(self.n_batches, 256 + 1))
        # Offset for radix sort
        self.offset = qd.field(qd.u32, shape=(self.n_batches, self.n_aabbs))
        # Temporary storage for radix sort
        self.tmp_morton_codes = qd.field(qd.types.vector(2, qd.u32), shape=(self.n_batches, self.n_aabbs))
        self.n_radix_sort_groups = n_radix_sort_groups
        self.hist_group = qd.field(qd.u32, shape=(self.n_batches, self.n_radix_sort_groups, 256 + 1))
        self.prefix_sum_group = qd.field(qd.u32, shape=(self.n_batches, self.n_radix_sort_groups + 1, 256))
        self.group_size = self.n_aabbs // self.n_radix_sort_groups
        self.visited = qd.field(qd.u8, shape=(self.n_aabbs,))

        @qd.dataclass
        class Node:
            """
            Node structure for the BVH tree.

            Attributes:
                left (int): Index of the left child node (-1 for leaf nodes).
                right (int): Index of the right child node (-1 for leaf nodes).
                parent (int): Index of the parent node (-1 for the root).
                bound (qd_aabb): Bounding box of the node, represented as an AABB.
            """

            left: qd.i32
            right: qd.i32
            parent: qd.i32
            bound: aabb.qd_aabb

        self.Node = Node
        # Nodes of the BVH, first n_aabbs - 1 are internal nodes, last n_aabbs are leaf nodes
        self.nodes = self.Node.field(shape=(self.n_batches, self.n_aabbs * 2 - 1))
        # Whether an internal node has been visited during traversal
        self.internal_node_active = qd.field(gs.qd_bool, shape=(self.n_batches, self.n_aabbs - 1))
        self.internal_node_ready = qd.field(gs.qd_bool, shape=(self.n_batches, self.n_aabbs - 1))
        # Query results, vec3 of batch id, self id, query id
        self.query_result = qd.field(gs.qd_ivec3, shape=(self.max_query_results,))
        # Count of query results
        self.query_result_count = qd.field(qd.i32, shape=())

    def build(self):
        """
        Build the BVH from the axis-aligned bounding boxes (AABBs).
        """
        self.compute_aabb_centers_and_scales()
        self.compute_morton_codes()
        self.radix_sort_morton_codes()
        self.build_radix_tree()
        self.compute_bounds()

    @qd.func
    def filter(self, i_a, i_q):
        """
        Filter function that always returns False.

        This function does not filter out any AABB by default.
        It can be overridden in subclasses to implement custom filtering logic.

        i_a: index of the found AABB
        i_q: index of the query AABB
        """
        return False

    @qd.kernel
    def compute_aabb_centers_and_scales(self):
        # Per-AABB centers, used for the Morton codes.
        for i_b, i_a in qd.ndrange(self.n_batches, self.n_aabbs):
            self.aabb_centers[i_b, i_a] = (self.aabbs[i_b, i_a].min + self.aabbs[i_b, i_a].max) / 2
        # Seed the per-batch extrema before the atomic reduction below.
        for i_b in qd.ndrange(self.n_batches):
            self.aabb_min[i_b] = self.aabb_centers[i_b, 0]
            self.aabb_max[i_b] = self.aabb_centers[i_b, 0]
        for i_b, i_a in qd.ndrange(self.n_batches, self.n_aabbs):
            qd.atomic_min(self.aabb_min[i_b], self.aabbs[i_b, i_a].min)
            qd.atomic_max(self.aabb_max[i_b], self.aabbs[i_b, i_a].max)
        for i_b in qd.ndrange(self.n_batches):
            scale = self.aabb_max[i_b] - self.aabb_min[i_b]
            for i in qd.static(range(3)):
                # Guard against degenerate (near zero-extent) axes.
                self.scale[i_b][i] = qd.select(scale[i] > gs.EPS, 1.0 / scale[i], 1.0)

    @qd.kernel
    def compute_morton_codes(self):
        """
        Compute the Morton codes for each AABB.

        The first 32 bits is the Morton code for the x, y, z coordinates, and the last 32 bits is the index of the AABB
        in the original array. The x, y, z coordinates are scaled to a 10-bit integer range [0, 1024) and interleaved to
        form the Morton code.
        """
        for i_b, i_a in qd.ndrange(self.n_batches, self.n_aabbs):
            center = self.aabb_centers[i_b, i_a] - self.aabb_min[i_b]
            scaled_center = center * self.scale[i_b]
            morton_code_x = qd.floor(scaled_center[0] * 1023.0, dtype=qd.u32)
            morton_code_y = qd.floor(scaled_center[1] * 1023.0, dtype=qd.u32)
            morton_code_z = qd.floor(scaled_center[2] * 1023.0, dtype=qd.u32)
            morton_code_x = self.expand_bits(morton_code_x)
            morton_code_y = self.expand_bits(morton_code_y)
            morton_code_z = self.expand_bits(morton_code_z)
            morton_code = (morton_code_x << 2) | (morton_code_y << 1) | (morton_code_z)
            # Carry the original index alongside the code so sorting keeps the mapping.
            self.morton_codes[i_b, i_a] = qd.Vector([morton_code, i_a], dt=qd.u32)

    @qd.func
    def expand_bits(self, v: qd.u32) -> qd.u32:
        """
        Expands a 10-bit integer into 30 bits by inserting 2 zeros before each bit.
        """
        v = (v * qd.u32(0x00010001)) & qd.u32(0xFF0000FF)
        # This is to silence Quadrants debug warning of overflow
        # Has the same result as v = (v * qd.u32(0x00000101)) & qd.u32(0x0F00F00F)
        # Performance difference is negligible
        # See https://github.com/Genesis-Embodied-AI/Genesis/pull/1560 for details
        v = (v | ((v & 0x00FFFFFF) << 8)) & 0x0F00F00F
        v = (v * qd.u32(0x00000011)) & qd.u32(0xC30C30C3)
        v = (v * qd.u32(0x00000005)) & qd.u32(0x49249249)
        return v

    def radix_sort_morton_codes(self):
        """
        Radix sort the morton codes, using 8 bits at a time.
        """
        # The last 32 bits are the index of the AABB which are already sorted, no need to sort
        for i in range(4, 8):
            if self.n_radix_sort_groups == 1:
                self._kernel_radix_sort_morton_codes_one_round(i)
            else:
                self._kernel_radix_sort_morton_codes_one_round_group(i)

    @qd.kernel
    def _kernel_radix_sort_morton_codes_one_round(self, i: int):
        # Clear histogram
        self.hist.fill(0)
        # Fill histogram
        for i_b in range(self.n_batches):
            # This is now sequential
            # TODO Parallelize, need to use groups to handle data to remain stable, could be not worth it
            for i_a in range(self.n_aabbs):
                code = (self.morton_codes[i_b, i_a][1 - (i // 4)] >> ((i % 4) * 8)) & 0xFF
                self.offset[i_b, i_a] = qd.atomic_add(self.hist[i_b, qd.i32(code)], 1)
        # Compute prefix sum
        for i_b in qd.ndrange(self.n_batches):
            self.prefix_sum[i_b, 0] = 0
            for j in range(1, 256):  # sequential prefix sum
                self.prefix_sum[i_b, j] = self.prefix_sum[i_b, j - 1] + self.hist[i_b, j - 1]
        # Reorder morton codes
        for i_b, i_a in qd.ndrange(self.n_batches, self.n_aabbs):
            code = qd.i32((self.morton_codes[i_b, i_a][1 - (i // 4)] >> ((i % 4) * 8)) & 0xFF)
            idx = qd.i32(self.offset[i_b, i_a] + self.prefix_sum[i_b, code])
            self.tmp_morton_codes[i_b, idx] = self.morton_codes[i_b, i_a]
        # Swap the temporary and original morton codes
        for i_b, i_a in qd.ndrange(self.n_batches, self.n_aabbs):
            self.morton_codes[i_b, i_a] = self.tmp_morton_codes[i_b, i_a]

    @qd.kernel
    def _kernel_radix_sort_morton_codes_one_round_group(self, i: int):
        # Clear histogram
        self.hist_group.fill(0)
        # Fill histogram (one group of consecutive AABBs per parallel worker, to keep the sort stable)
        for i_b, i_g in qd.ndrange(self.n_batches, self.n_radix_sort_groups):
            start = i_g * self.group_size
            end = qd.select(i_g == self.n_radix_sort_groups - 1, self.n_aabbs, (i_g + 1) * self.group_size)
            for i_a in range(start, end):
                code = qd.i32((self.morton_codes[i_b, i_a][1 - (i // 4)] >> ((i % 4) * 8)) & 0xFF)
                self.offset[i_b, i_a] = self.hist_group[i_b, i_g, code]
                self.hist_group[i_b, i_g, code] = self.hist_group[i_b, i_g, code] + 1
        # Compute prefix sum
        for i_b, i_c in qd.ndrange(self.n_batches, 256):
            self.prefix_sum_group[i_b, 0, i_c] = 0
            for i_g in range(1, self.n_radix_sort_groups + 1):  # sequential prefix sum
                self.prefix_sum_group[i_b, i_g, i_c] = (
                    self.prefix_sum_group[i_b, i_g - 1, i_c] + self.hist_group[i_b, i_g - 1, i_c]
                )
        for i_b in range(self.n_batches):
            self.prefix_sum[i_b, 0] = 0
            for i_c in range(1, 256 + 1):  # sequential prefix sum
                self.prefix_sum[i_b, i_c] = (
                    self.prefix_sum[i_b, i_c - 1] + self.prefix_sum_group[i_b, self.n_radix_sort_groups, i_c - 1]
                )
        # Reorder morton codes
        for i_b, i_a in qd.ndrange(self.n_batches, self.n_aabbs):
            code = qd.i32((self.morton_codes[i_b, i_a][1 - (i // 4)] >> ((i % 4) * 8)) & 0xFF)
            i_g = qd.min(i_a // self.group_size, self.n_radix_sort_groups - 1)
            idx = qd.i32(self.prefix_sum[i_b, code] + self.prefix_sum_group[i_b, i_g, code] + self.offset[i_b, i_a])
            # Use the group prefix sum to find the correct index
            self.tmp_morton_codes[i_b, idx] = self.morton_codes[i_b, i_a]
        # Swap the temporary and original morton codes
        for i_b, i_a in qd.ndrange(self.n_batches, self.n_aabbs):
            self.morton_codes[i_b, i_a] = self.tmp_morton_codes[i_b, i_a]

    @qd.kernel
    def build_radix_tree(self):
        """
        Build the radix tree from the sorted morton codes.

        The tree is built in parallel for every internal node.
        """
        # Initialize the first node
        for i_b in qd.ndrange(self.n_batches):
            self.nodes[i_b, 0].parent = -1
        # Initialize the leaf nodes
        for i_b, i in qd.ndrange(self.n_batches, self.n_aabbs):
            self.nodes[i_b, i + self.n_aabbs - 1].left = -1
            self.nodes[i_b, i + self.n_aabbs - 1].right = -1
        # Parallel build for every internal node (Karras 2012)
        for i_b, i in qd.ndrange(self.n_batches, self.n_aabbs - 1):
            # Direction of this node's range, chosen by the longer common prefix.
            d = qd.select(self.delta(i, i + 1, i_b) > self.delta(i, i - 1, i_b), 1, -1)
            delta_min = self.delta(i, i - d, i_b)
            # Exponential search for an upper bound of the range length.
            l_max = qd.u32(2)
            while self.delta(i, i + qd.i32(l_max) * d, i_b) > delta_min:
                l_max *= 2
            # Binary search for the exact other end of the range.
            l = qd.u32(0)
            t = l_max // 2
            while t > 0:
                if self.delta(i, i + qd.i32(l + t) * d, i_b) > delta_min:
                    l += t
                t //= 2
            j = i + qd.i32(l) * d
            delta_node = self.delta(i, j, i_b)
            # Binary search for the split position inside [i, j].
            s = qd.u32(0)
            t = (l + 1) // 2
            while t > 0:
                if self.delta(i, i + qd.i32(s + t) * d, i_b) > delta_node:
                    s += t
                t = qd.select(t > 1, (t + 1) // 2, 0)
            gamma = i + qd.i32(s) * d + qd.min(d, 0)
            # A child that covers a single element is a leaf (offset by n_aabbs - 1).
            left = qd.select(qd.min(i, j) == gamma, gamma + self.n_aabbs - 1, gamma)
            right = qd.select(qd.max(i, j) == gamma + 1, gamma + self.n_aabbs, gamma + 1)
            self.nodes[i_b, i].left = qd.i32(left)
            self.nodes[i_b, i].right = qd.i32(right)
            self.nodes[i_b, qd.i32(left)].parent = i
            self.nodes[i_b, qd.i32(right)].parent = i

    @qd.func
    def delta(self, i: qd.i32, j: qd.i32, i_b: qd.i32):
        """
        Compute the longest common prefix (LCP) of the morton codes of two AABBs.

        Returns -1 when j is out of range, which makes out-of-range comparisons
        lose against any in-range one.
        """
        result = -1
        if j >= 0 and j < self.n_aabbs:
            result = 64
            # Scan the (code, index) pair as a 64-bit key, most significant word first.
            for i_bit in range(2):
                x = self.morton_codes[i_b, i][i_bit] ^ self.morton_codes[i_b, j][i_bit]
                for b in range(32):
                    if x & (qd.u32(1) << (31 - b)):
                        result = b + 32 * i_bit
                        break
                if result != 64:
                    break
        return result

    def compute_bounds(self):
        """
        Compute the bounds of the BVH nodes.

        Starts from the leaf nodes and works upwards layer by layer.
        """
        self._kernel_compute_bounds_init()
        is_done = False
        while not is_done:
            is_done = self._kernel_compute_bounds_one_layer()

    @qd.kernel
    def _kernel_compute_bounds_init(self):
        self.internal_node_active.fill(False)
        self.internal_node_ready.fill(False)
        for i_b, i in qd.ndrange(self.n_batches, self.n_aabbs):
            # Leaf i holds the AABB whose sorted Morton code sits at position i.
            idx = qd.i32(self.morton_codes[i_b, i][1])
            self.nodes[i_b, i + self.n_aabbs - 1].bound.min = self.aabbs[i_b, idx].min
            self.nodes[i_b, i + self.n_aabbs - 1].bound.max = self.aabbs[i_b, idx].max
            parent_idx = self.nodes[i_b, i + self.n_aabbs - 1].parent
            if parent_idx != -1:
                self.internal_node_active[i_b, parent_idx] = True

    @qd.kernel
    def _kernel_compute_bounds_one_layer(self) -> qd.i32:
        # Process every internal node whose children are complete.
        for i_b, i in qd.ndrange(self.n_batches, self.n_aabbs - 1):
            if self.internal_node_active[i_b, i]:
                left_bound = self.nodes[i_b, self.nodes[i_b, i].left].bound
                right_bound = self.nodes[i_b, self.nodes[i_b, i].right].bound
                self.nodes[i_b, i].bound.min = qd.min(left_bound.min, right_bound.min)
                self.nodes[i_b, i].bound.max = qd.max(left_bound.max, right_bound.max)
                parent_idx = self.nodes[i_b, i].parent
                if parent_idx != -1:
                    self.internal_node_ready[i_b, parent_idx] = True
                self.internal_node_active[i_b, i] = False
        # Promote the next layer; done when nothing was scheduled.
        is_done = True
        for i_b, i in qd.ndrange(self.n_batches, self.n_aabbs - 1):
            if self.internal_node_ready[i_b, i]:
                self.internal_node_active[i_b, i] = True
                is_done = False
        self.internal_node_ready.fill(False)
        return is_done

    @qd.func
    def query(self, aabbs: qd.template()):
        """
        Query the BVH for intersections with the given AABBs.

        The results are stored in the query_result field. Returns True when more
        intersections were found than max_query_results can hold (overflow).
        """
        self.query_result_count[None] = 0
        overflow = False
        n_querys = aabbs.shape[1]
        for i_b, i_q in qd.ndrange(self.n_batches, n_querys):
            # Iterative, stack-based traversal starting from the root (node 0).
            query_stack = qd.Vector.zero(qd.i32, self.max_stack_depth)
            stack_depth = 1
            while stack_depth > 0:
                stack_depth -= 1
                node_idx = query_stack[stack_depth]
                node = self.nodes[i_b, node_idx]
                # Check if the AABB intersects with the node's bounding box
                if aabbs[i_b, i_q].intersects(node.bound):
                    # If it's a leaf node, add the AABB index to the query results
                    if node.left == -1 and node.right == -1:
                        i_a = qd.i32(self.morton_codes[i_b, node_idx - (self.n_aabbs - 1)][1])
                        # Check if the filter condition is met
                        if self.filter(i_a, i_q):
                            continue
                        idx = qd.atomic_add(self.query_result_count[None], 1)
                        if idx < self.max_query_results:
                            self.query_result[idx] = gs.qd_ivec3(i_b, i_a, i_q)  # Store the AABB index
                        else:
                            overflow = True
                    else:
                        # Push children onto the stack
                        if node.right != -1:
                            query_stack[stack_depth] = node.right
                            stack_depth += 1
                        if node.left != -1:
                            query_stack[stack_depth] = node.left
                            stack_depth += 1
        return overflow
@qd.data_oriented
class FEMSurfaceTetLBVH(LBVH):
    """
    FEMSurfaceTetLBVH is a specialized Linear BVH for FEM surface tetrahedra.

    It extends the LBVH class to support filtering based on FEM surface tetrahedral elements.
    """

    def __init__(self, fem_solver, aabb: AABB, max_n_query_result_per_aabb: int = 8, n_radix_sort_groups: int = 256):
        super().__init__(aabb, max_n_query_result_per_aabb, n_radix_sort_groups)
        # Solver providing element-to-vertex connectivity for the filter below.
        self.fem_solver = fem_solver

    @qd.func
    def filter(self, i_a, i_q):
        """
        Filter function for FEM surface tets. Filter out tets that share vertices.

        This is used to avoid self-collisions in FEM surface tets.

        Parameters
        ----------
        i_a:
            index of the found AABB
        i_q:
            index of the query AABB
        """
        # Keep each unordered pair only once (i_a < i_q); also drops i_a == i_q.
        result = i_a >= i_q
        i_av = self.fem_solver.elements_i[self.fem_solver.surface_elements[i_a]].el2v
        i_qv = self.fem_solver.elements_i[self.fem_solver.surface_elements[i_q]].el2v
        # Reject pairs of tets that share any of their 4x4 vertex combinations.
        for i, j in qd.static(qd.ndrange(4, 4)):
            if i_av[i] == i_qv[j]:
                result = True
        return result
@qd.data_oriented
class RigidTetLBVH(LBVH):
    """
    RigidTetLBVH is a specialized Linear BVH for rigid tetrahedra.

    It extends the LBVH class to support filtering based on rigid tetrahedral elements.
    """

    def __init__(self, coupler, aabb: AABB, max_n_query_result_per_aabb: int = 8, n_radix_sort_groups: int = 256):
        super().__init__(aabb, max_n_query_result_per_aabb, n_radix_sort_groups)
        self.coupler = coupler
        self.rigid_solver = coupler.rigid_solver

    @qd.func
    def filter(self, i_a, i_q):
        """
        Filter function for rigid tets. Filter out tet pairs that belong to the same link.

        i_a: index of the found AABB
        i_q: index of the query AABB
        """
        # Map each tet to its geometry, then reject pairs whose geometries have
        # no registered collision pair (index -1 in the coupler's pair table).
        i_ag = self.coupler.rigid_volume_elems_geom_idx[i_a]
        i_qg = self.coupler.rigid_volume_elems_geom_idx[i_q]
        return self.coupler.rigid_collision_pair_idx[i_ag, i_qg] == -1
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/bvh.py",
"license": "Apache License 2.0",
"lines": 487,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/options/profiling.py | from .options import Options
class ProfilingOptions(Options):
    """
    Profiling options.

    Parameters
    ----------
    show_FPS : bool
        Whether to show the frame rate each step. Default True.
    FPS_tracker_alpha : float
        Exponential decay momentum for the FPS moving average. Default 0.95.
    """

    # Whether to print the frame rate at every simulation step.
    show_FPS: bool = True
    # Exponential-moving-average momentum used by the FPS tracker.
    FPS_tracker_alpha: float = 0.95
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/options/profiling.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Genesis-Embodied-AI/Genesis:genesis/utils/warnings.py | import genesis as gs
# Messages already emitted, used to deduplicate repeated warnings.
_seen: set[str] = set()


def warn_once(message: str):
    """Log *message* as a warning, but only the first time it is seen."""
    global _seen
    if message not in _seen:
        _seen.add(message)
        gs.logger.warning(message)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/warnings.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/ddp_multi_gpu.py | #!/usr/bin/env python3
"""
Multi-node / multi-GPU Genesis ✕ PyTorch DDP demo
=================================================
Single machine, 2 GPUs:
torchrun --standalone --nnodes=1 --nproc_per_node=2 examples/ddp_multi_gpu.py
Expectation:
- In nvidia-smi, you will see multiple GPUs are being used.
- As you increase the number of GPUs, the gradient will be less noisy and the loss decreases faster.
"""
import os
import argparse
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
import genesis as gs
class TinyMLP(nn.Module):
    """Minimal two-layer perceptron used as a toy policy network for the demo."""

    def __init__(self, obs_dim: int, act_dim: int) -> None:
        super().__init__()
        hidden = 128
        layers = [nn.Linear(obs_dim, hidden), nn.ReLU(), nn.Linear(hidden, act_dim)]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        # Cast up front so integer/double observations are accepted.
        return self.net(x.float())
def run_worker(args: argparse.Namespace) -> None:
    """Per-rank worker: build a Genesis scene, wrap a model in DDP, and train.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI options (steps, vis, n_envs).
    """
    # setup
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    # Restrict this process to its own GPU before any CUDA / Genesis init.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(local_rank)
    os.environ["QD_VISIBLE_DEVICE"] = str(local_rank)
    # FIXME: Forcing rendering device is not working reliably on all machines
    # os.environ["EGL_DEVICE_ID"] = str(local_rank)
    gs.init(backend=gs.gpu, seed=local_rank)

    # sim
    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(3.5, 0.0, 2.5),
            camera_lookat=(0.0, 0.0, 0.5),
            camera_fov=40,
        ),
        show_viewer=False,
        show_FPS=False,
    )
    scene.add_entity(gs.morphs.Plane())
    scene.add_entity(
        gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
        visualize_contact=True,
    )
    scene.build(n_envs=args.n_envs)

    # model
    # Device 0 is this rank's only visible GPU (see CUDA_VISIBLE_DEVICES above).
    gpu_id = 0
    torch.cuda.set_device(gpu_id)
    dist.init_process_group(backend="nccl", init_method="env://")
    device = torch.device("cuda", gpu_id)
    rigid = scene.sim.rigid_solver
    qpos = rigid.get_qpos()
    obs_dim = qpos.shape[1]
    act_dim = 1
    model = TinyMLP(obs_dim, act_dim).to(device)
    model = DDP(model, device_ids=[gpu_id])
    optim = torch.optim.Adam(model.parameters(), lr=3e-4)

    # train loop
    for step in range(args.steps):
        scene.step()
        qpos = rigid.get_qpos()
        obs = qpos + torch.randn_like(qpos)  # noisy observation as a toy input
        logits = model(obs)
        target = qpos.sum(dim=1, keepdim=True)
        loss = torch.nn.functional.mse_loss(logits, target)
        optim.zero_grad(set_to_none=True)
        loss.backward()  # DDP handles all-reduce, gradients are averaged
        optim.step()
        if local_rank == 0 and step % 100 == 0:
            print(f"[{step:04d}/{args.steps}] loss = {loss.item():.6f}")

    # cleanup
    dist.barrier()  # sync all ranks before shutting down NCCL
    dist.destroy_process_group()
    gs.destroy()
def parse_args(argv=None):
    """Parse command-line options for the DDP demo.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse. Defaults to ``sys.argv[1:]`` when None, which
        keeps the previous zero-argument call sites working while making the
        parser testable.

    Returns
    -------
    argparse.Namespace
        Namespace with ``steps``, ``vis`` and ``n_envs`` attributes.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--steps", type=int, default=1000, help="simulation / training steps")
    p.add_argument("--vis", action="store_true", help="open viewer on rank-0")
    p.add_argument("--n_envs", type=int, default=2048, help="number of environments")
    return p.parse_args(argv)
# Entry point: torchrun spawns one copy of this script per local rank.
if __name__ == "__main__":
    run_worker(parse_args())
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/ddp_multi_gpu.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:tests/run_benchmarks.py | import os
import subprocess
import tempfile
# Git references delimiting the (inclusive) range of commits to benchmark.
START_REF = "v0.2.1"
END_REF = "upstream/main"

# Shell script executed once per commit: reinstalls Genesis from the checked-out
# tree, then runs the GPU rigid-body benchmarks (results go to WandB).
BENCHMARK_SCRIPT = r"""
#!/bin/bash
set -e
# Make sure that WanDB is configured, otherwise running the benchmarks is useless
if [[ -z "${WANDB_API_KEY}" ]] ; then
exit 1;
fi
# Make sure that Genesis is properly installed, with including all its requirements
pip uninstall --no-input -y genesis-world
pip install --no-input --no-user --no-cache --quiet -e ".[dev]" "libigl==2.5.1"
# Run the benchmarks
pytest --print -x -m benchmarks --backend gpu "./tests_2/test_rigid_benchmarks.py"
"""

# Get all commit hashes after a given date (oldest to newest)
commits = subprocess.check_output(
    ["git", "rev-list", "--reverse", f"{START_REF}^..{END_REF}"],
    cwd=os.path.dirname(__file__),
    stderr=subprocess.DEVNULL,
    encoding="utf-8",
).splitlines()
print(f"Found {len(commits)} commits since {START_REF}")

# Materialize the benchmark script once and reuse it for every commit.
with tempfile.NamedTemporaryFile("w", suffix=".sh") as fd:
    script_fullpath = fd.name
    fd.write(BENCHMARK_SCRIPT)
    fd.flush()
    os.chmod(fd.name, 0o755)  # Make the script executable
    for i, commit in enumerate(commits):
        print(f"\n[{i + 1}/{len(commits)}] Checking out {commit}")
        subprocess.run(["git", "checkout", "-f", commit], check=True)
        print("================= ...Running benchmarks... ==================")
        # Run via Popen + wait so a failing benchmark does not abort the sweep.
        process = subprocess.Popen(
            ["bash", script_fullpath],
            cwd=os.path.dirname(__file__),
        )
        process.wait()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/run_benchmarks.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:examples/rigid/multi_gpu.py | import multiprocessing
import os
import argparse
import torch
import genesis as gs
def main():
    """Build a simple Franka scene on the current GPU and run 1000 physics steps."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--vis", action="store_true", default=False)
    args = parser.parse_args()

    ########################## init ##########################
    # Report which CUDA device this worker process landed on.
    gpu_id = torch.cuda.current_device()
    print("gpu_id:", gpu_id)
    gs.init(backend=gs.gpu, logger_verbose_time=True)

    ########################## scene & entities ##########################
    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(3.5, 0.0, 2.5),
            camera_lookat=(0.0, 0.0, 0.5),
            camera_fov=40,
        ),
        show_viewer=args.vis,
        show_FPS=False,
    )
    scene.add_entity(gs.morphs.Plane())
    scene.add_entity(
        gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
        visualize_contact=True,
    )

    ########################## build & run ##########################
    scene.build()
    for _ in range(1000):
        scene.step()
def run(gpu_id, func):
    """Pin this process to *gpu_id* via environment variables, then invoke *func*."""
    # Every backend (CUDA, Quadrants, EGL rendering) gets the same device id.
    for var in ("CUDA_VISIBLE_DEVICES", "QD_VISIBLE_DEVICE", "EGL_DEVICE_ID"):
        os.environ[var] = str(gpu_id)
    # main script
    func()
# Spawn one worker process per GPU; each worker pins itself to its own device
# inside run() before initializing Genesis.
if __name__ == "__main__":
    num_gpus = 2
    processes = []
    for i in range(num_gpus):
        p = multiprocessing.Process(target=run, args=(i, main))
        processes.append(p)
        p.start()
    for p in processes:
        p.join()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/rigid/multi_gpu.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Guovin/iptv-api:utils/frozen.py | import gzip
import os
import pickle
import time
from typing import Dict, Optional, Set
# Ceiling for the exponential backoff window (24 hours, in seconds).
MAX_BACKOFF = 24 * 3600
# Base backoff unit in seconds; doubled per consecutive failure.
BASE_BACKOFF = 60

# In-memory per-URL health records:
# url -> {"bad_count", "last_bad", "last_good", "frozen_until"}.
_frozen: Dict[str, Dict] = {}
def _now_ts() -> int:
return int(time.time())
def mark_url_bad(url: str, initial: bool = False) -> None:
    """Record a failed check for *url* and freeze it with exponential backoff.

    When ``initial`` is True the record is seeded with a minimum failure count
    so previously-unknown bad URLs get a meaningful freeze right away.
    """
    if not url:
        return
    fresh = {"bad_count": 0, "last_bad": 0, "last_good": 0, "frozen_until": None}
    meta = _frozen.setdefault(url, fresh)
    if initial:
        meta["bad_count"] = max(meta["bad_count"], 3)
    meta["bad_count"] += 1
    meta["last_bad"] = _now_ts()
    # Backoff doubles with every consecutive failure, capped at MAX_BACKOFF.
    delay = min(MAX_BACKOFF, BASE_BACKOFF * (2 ** meta["bad_count"]))
    meta["frozen_until"] = _now_ts() + delay
def mark_url_good(url: str) -> None:
    """Record a successful check for *url*, decaying its failure count.

    Any active freeze is lifted immediately; once the failure count reaches
    zero the record is dropped entirely.
    """
    if not url:
        return
    meta = _frozen.get(url)
    if not meta:
        return
    meta["last_good"] = _now_ts()
    meta["bad_count"] = max(0, meta.get("bad_count", 0) - 1)
    meta["frozen_until"] = None
    if not meta["bad_count"]:
        _frozen.pop(url, None)
def is_url_frozen(url: str) -> bool:
    """Return True while *url* is still inside its backoff window.

    An expired freeze is cleared lazily on lookup: the failure count decays by
    one, and the record is dropped once the count reaches zero.
    """
    meta = _frozen.get(url)
    if not meta:
        return False
    frozen_until = meta.get("frozen_until")
    if not frozen_until:
        return False
    if frozen_until > _now_ts():
        return True
    # The freeze has expired: decay the failure count and possibly forget the URL.
    meta["frozen_until"] = None
    meta["bad_count"] = max(0, meta.get("bad_count", 0) - 1)
    if meta["bad_count"] == 0:
        _frozen.pop(url, None)
    return False
def get_current_frozen_set() -> Set[str]:
    """Return the set of URLs whose freeze window is still active."""
    now = _now_ts()
    frozen_urls: Set[str] = set()
    # Iterate over a snapshot since expired entries may be removed on the fly.
    for url, meta in list(_frozen.items()):
        until = meta.get("frozen_until")
        if until and until > now:
            frozen_urls.add(url)
        else:
            # Trigger the lazy cleanup/decay of this expired entry.
            is_url_frozen(url)
    return frozen_urls
def load(path: Optional[str]) -> None:
    """Merge previously persisted freeze records from *path* (best effort).

    Records already present in memory take precedence over persisted ones.
    Any read/parse failure is silently ignored.
    """
    if not path or not os.path.exists(path):
        return
    try:
        with gzip.open(path, "rb") as f:
            data = pickle.load(f)
        if isinstance(data, dict):
            for key, meta in data.items():
                _frozen.setdefault(key, meta)
    except Exception:
        # Best effort: a corrupt cache file must never break startup.
        pass
def save(path: Optional[str]) -> None:
    """Persist the freeze records to *path* as a gzipped pickle (best effort)."""
    if not path:
        return
    try:
        parent = os.path.dirname(path)
        if parent:
            os.makedirs(parent, exist_ok=True)
        with gzip.open(path, "wb") as f:
            pickle.dump(_frozen, f)
    except Exception:
        # Best-effort persistence; failures are intentionally ignored.
        pass
# Public API of this module.
__all__ = ["mark_url_bad", "mark_url_good", "is_url_frozen", "get_current_frozen_set", "load", "save"]
| {
"repo_id": "Guovin/iptv-api",
"file_path": "utils/frozen.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Guovin/iptv-api:utils/aggregator.py | import asyncio
import copy
from collections import defaultdict
from logging import INFO
from typing import Any, Dict, Optional, Set, Tuple, Callable, cast
import utils.constants as constants
from utils.channel import sort_channel_result, generate_channel_statistic, write_channel_to_file, retain_origin
from utils.config import config
from utils.tools import get_logger
class ResultAggregator:
"""
Aggregates test results and periodically writes sorted views to files.
"""
def __init__(
self,
base_data: Dict[str, Dict[str, Any]],
first_channel_name: Optional[str] = None,
ipv6_support: bool = True,
write_interval: float = 5.0,
min_items_before_flush: int = config.urls_limit,
flush_debounce: Optional[float] = None,
sort_logger=None,
stat_logger=None,
result: Optional[Dict[str, Dict[str, list]]] = None,
):
self.base_data = base_data
self.result = sort_channel_result(
base_data,
result=result,
ipv6_support=ipv6_support
)
self.test_results: Dict[str, Dict[str, list]] = defaultdict(lambda: defaultdict(list))
self._dirty = False
self._dirty_count = 0
self._stopped = True
self._task: Optional[asyncio.Task] = None
self.realtime_write = config.open_realtime_write
self.write_interval = write_interval
self.first_channel_name = first_channel_name
self.ipv6_support = ipv6_support
self.sort_logger = sort_logger or get_logger(constants.result_log_path, level=INFO, init=True)
self.stat_logger = stat_logger or get_logger(constants.statistic_log_path, level=INFO, init=True)
self.is_last = False
self._lock = asyncio.Lock()
self._min_items_before_flush = min_items_before_flush
self.flush_debounce = flush_debounce if flush_debounce is not None else max(0.2, write_interval / 2)
self._flush_event = asyncio.Event()
self._debounce_task: Optional[asyncio.Task] = None
self._pending_channels: Set[Tuple[str, str]] = set()
self._finished_channels: Set[Tuple[str, str]] = set()
def _ensure_debounce_task_in_loop(self, loop: asyncio.AbstractEventLoop) -> None:
"""
Ensure the debounce task is running in the specified event loop.
"""
if not self._debounce_task or self._debounce_task.done():
try:
self._debounce_task = loop.create_task(self._debounce_loop())
except Exception:
try:
cast(Any, loop).call_soon_threadsafe(
cast(Callable[[], None], self._create_debounce_task_threadsafe), *())
except Exception:
pass
def _create_debounce_task_threadsafe(self) -> None:
"""
Helper to create the debounce task from within the event loop thread.
This is intended to be invoked via loop.call_soon_threadsafe.
"""
self._debounce_task = asyncio.create_task(self._debounce_loop())
def add_item(self, cate: str, name: str, item: dict, is_channel_last: bool = False, is_last: bool = False,
             is_valid: bool = True):
    """
    Add a test result item for a specific category and name.

    Parameters
    ----------
    cate, name : str
        Category and channel name the item belongs to.
    item : dict
        One speed-test result record.
    is_channel_last : bool
        True when this is the final item for (cate, name); marks the channel
        finished and emits its statistics.
    is_last : bool
        True when this is the last item overall (forwarded to file writes).
    is_valid : bool
        Only valid items trigger the realtime-write machinery.
    """
    self.test_results[cate][name].append(item)
    self.is_last = is_last
    self._pending_channels.add((cate, name))
    if is_channel_last:
        # Fix: the original checked `is_channel_last` in two consecutive,
        # identical conditionals; both "channel completed" actions now live
        # under a single branch.
        self._finished_channels.add((cate, name))
        try:
            generate_channel_statistic(self.stat_logger, cate, name, self.test_results[cate][name])
        except Exception:
            # Statistics are best-effort; never let them break ingestion.
            pass
    if is_valid and self.realtime_write:
        try:
            self._dirty = True
            self._dirty_count += 1
            loop = asyncio.get_running_loop()
            self._ensure_debounce_task_in_loop(loop)
            if self._dirty_count >= self._min_items_before_flush:
                self._dirty_count = 0
                cast(Any, loop).call_soon(cast(Callable[[], None], self._flush_event.set), *())
        except RuntimeError:
            # No running loop in this thread: fall back to the default loop
            # and schedule the flush-event set across threads.
            try:
                loop = asyncio.get_event_loop()
                self._ensure_debounce_task_in_loop(loop)
                if self._dirty_count >= self._min_items_before_flush:
                    self._dirty_count = 0
                    cast(Any, loop).call_soon_threadsafe(cast(Callable[[], None], self._flush_event.set), *())
            except Exception:
                pass
async def _atomic_write_sorted_view(
        self,
        test_copy: Dict[str, Dict[str, list]],
        affected: Optional[Set[Tuple[str, str]]] = None,
        finished: Optional[Set[Tuple[str, str]]] = None,
) -> None:
    """
    Atomic write of sorted view to file, either partially or fully.

    When *affected* is given, only those (cate, name) channels are re-sorted;
    for channels not yet *finished*, previously sorted entries are carried
    over (deduplicated by url) so partial flushes do not lose results.
    The merged view is written via an executor and becomes the new
    ``self.result`` on success.
    """
    if finished is None:
        finished = set()
    speed_test_filter_host = config.speed_test_filter_host
    if affected:
        # Partial path: build minimal base/result maps for affected channels only.
        partial_base = defaultdict(lambda: defaultdict(list))
        partial_result = defaultdict(lambda: defaultdict(list))
        for cate, name in affected:
            base_entries = self.base_data.get(cate, {})
            if name in base_entries:
                partial_base[cate][name] = list(base_entries[name])
                partial_result[cate][name] = list(test_copy.get(cate, {}).get(name, []))
                if (cate, name) not in finished:
                    # Channel still in progress: keep prior sorted entries that
                    # are not superseded (dedup by url, skip retained origins).
                    prev_sorted = self.result.get(cate, {}).get(name, [])
                    seen = {it.get("url") for it in partial_result[cate][name] if
                            isinstance(it, dict) and it.get("url")}
                    for item in prev_sorted:
                        if not isinstance(item, dict):
                            continue
                        url = item.get("url")
                        if url and url not in seen and item.get("origin") not in retain_origin:
                            partial_result[cate][name].append(item)
                            seen.add(url)
        try:
            if len(affected) == 1:
                # Single-channel sort avoids re-sorting unrelated channels.
                cate_single, name_single = next(iter(affected))
                new_sorted = sort_channel_result(
                    partial_base,
                    result=partial_result,
                    filter_host=speed_test_filter_host,
                    ipv6_support=self.ipv6_support,
                    cate=cate_single,
                    name=name_single,
                )
            else:
                new_sorted = sort_channel_result(
                    partial_base, result=partial_result, filter_host=speed_test_filter_host,
                    ipv6_support=self.ipv6_support
                )
        except Exception:
            new_sorted = defaultdict(lambda: defaultdict(list))
    else:
        # Full path: re-sort everything from the complete snapshot.
        try:
            new_sorted = sort_channel_result(
                self.base_data, result=test_copy, filter_host=speed_test_filter_host,
                ipv6_support=self.ipv6_support
            )
        except Exception:
            new_sorted = defaultdict(lambda: defaultdict(list))
    # Merge: start from the last written view, overlay newly sorted non-empty
    # channels that still exist in base_data.
    merged = defaultdict(lambda: defaultdict(list))
    for cate, names in self.base_data.items():
        for name in names.keys():
            merged[cate][name] = list(self.result.get(cate, {}).get(name, []))
    for cate, names in new_sorted.items():
        if cate not in self.base_data:
            continue
        for name, vals in names.items():
            if name in self.base_data.get(cate, {}) and vals:
                merged[cate][name] = list(vals)
    # File I/O is blocking; run it off the event loop.
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(
        None,
        write_channel_to_file,
        merged,
        self.ipv6_support,
        self.first_channel_name,
        True,
        self.is_last,
    )
    self.result = merged
async def _debounce_loop(self):
    """
    Debounce loop to handle flush events.

    Waits for ``_flush_event``, sleeps ``flush_debounce`` seconds so rapid
    bursts of items coalesce into one flush, then flushes if still dirty.
    Runs until ``self._stopped`` is set; clears its own task handle on exit.
    """
    self._debounce_task = asyncio.current_task()
    try:
        while not self._stopped:
            await self._flush_event.wait()
            try:
                await asyncio.sleep(self.flush_debounce)
            except asyncio.CancelledError:
                # Propagate cancellation so stop() can tear this task down.
                raise
            self._flush_event.clear()
            if self._dirty:
                await self.flush_once()
    finally:
        self._debounce_task = None
        self._flush_event.clear()
async def flush_once(self, force: bool = False) -> None:
    """
    Flush the current test results to file once.

    With ``force=True`` a deep copy of all results is written; otherwise only
    channels touched since the last flush (``_pending_channels``) are
    shallow-copied and written as a partial update.  Serialized by
    ``self._lock``; write failures are swallowed (best-effort persistence).
    """
    async with self._lock:
        if not self._dirty and not force:
            return
        pending = set(self._pending_channels)
        self._pending_channels.clear()
        if force:
            test_copy = copy.deepcopy(self.test_results)
            finished_for_flush = set(self._finished_channels)
            self._finished_channels.clear()
        else:
            # Copy only pending channels; item dicts get a shallow copy so
            # later mutation of live results does not race the writer.
            test_copy = defaultdict(lambda: defaultdict(list))
            for cate, name in pending:
                items = self.test_results.get(cate, {}).get(name, [])
                copied_items = [it.copy() if isinstance(it, dict) else it for it in items]
                if copied_items:
                    test_copy[cate][name] = copied_items
            finished_for_flush = set(self._finished_channels & pending)
            self._finished_channels.difference_update(finished_for_flush)
        self._dirty = False
        self._dirty_count = 0
        affected = None if force else (pending if pending else None)
        try:
            await self._atomic_write_sorted_view(test_copy, affected=affected, finished=finished_for_flush)
        except Exception:
            pass
async def _run_loop(self):
    """
    Run the periodic flush loop.

    Wakes every ``write_interval`` seconds and flushes if dirty; exits when
    another task sets ``self._stopped`` (checked only after each sleep, so
    shutdown can lag by up to one interval).
    """
    self._stopped = False
    try:
        while not self._stopped:
            await asyncio.sleep(self.write_interval)
            if self._dirty:
                await self.flush_once()
    finally:
        self._stopped = True
async def start(self) -> None:
    """
    Start the aggregator's periodic flush loop.

    No-op beyond clearing the stopped flag when realtime writing is disabled,
    and idempotent while a previous run-loop task is still alive.  Also
    ensures the debounce task exists on the current event loop.
    """
    if not self.realtime_write:
        self._stopped = False
        return
    if self._task and not self._task.done():
        return
    self._task = asyncio.create_task(self._run_loop())
    loop = asyncio.get_running_loop()
    self._ensure_debounce_task_in_loop(loop)
async def stop(self) -> None:
    """
    Stop the aggregator and clean up resources.

    Performs a final forced flush (best-effort), signals both loops to stop,
    awaits the run-loop task, cancels and awaits the debounce task, and
    finally detaches logger handlers so log files are released.
    """
    try:
        await self.flush_once(force=True)
    except Exception:
        pass
    self._stopped = True
    if self._task:
        # _run_loop observes _stopped only after its sleep, so this may wait
        # up to one write_interval.
        await self._task
        self._task = None
    if self._debounce_task:
        self._debounce_task.cancel()
        try:
            await self._debounce_task
        except asyncio.CancelledError:
            pass
        self._debounce_task = None
    if self.sort_logger:
        self.sort_logger.handlers.clear()
    if self.stat_logger:
        self.stat_logger.handlers.clear()
| {
"repo_id": "Guovin/iptv-api",
"file_path": "utils/aggregator.py",
"license": "MIT License",
"lines": 274,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Guovin/iptv-api:utils/whitelist.py | import os
import re
from collections import defaultdict
from typing import List, Pattern
import utils.constants as constants
from utils.tools import get_real_path, resource_path
from utils.types import WhitelistMaps
def load_whitelist_maps(path: str = constants.whitelist_path) -> WhitelistMaps:
    """
    Load whitelist maps from the given path.

    Returns two dictionaries:
    - exact: channel_name -> list of exact whitelist entries
    - keywords: channel_name -> list of keyword whitelist entries
    The special key "" (empty string) is used for global entries.
    Lines before a ``[KEYWORDS]`` header are exact entries; a ``name,value``
    line scopes the entry to that channel, otherwise it is global.
    """
    exact = defaultdict(list)
    keywords = defaultdict(list)
    real_path = get_real_path(resource_path(path))
    if not os.path.exists(real_path):
        return exact, keywords
    keyword_mode = False
    with open(real_path, "r", encoding="utf-8") as f:
        for raw_line in f:
            entry = raw_line.rstrip("\n").strip()
            if not entry or entry.startswith("#"):
                continue
            if re.match(r"^\[.*\]$", entry):
                keyword_mode = entry.upper() == "[KEYWORDS]"
                continue
            if "," in entry:
                name, value = map(str.strip, entry.split(",", 1))
                key = name or ""
            else:
                key, value = "", entry
            if not value:
                continue
            bucket = keywords if keyword_mode else exact
            if value not in bucket[key]:
                bucket[key].append(value)
    return exact, keywords
def is_url_whitelisted(data_map: WhitelistMaps, url: str, channel_name: str | None = None) -> bool:
    """
    Check if the given URL is whitelisted for the specified channel.

    Order of checks:
    1. Exact match (channel-specific), 2. exact match (global),
    3. keyword match (channel-specific), 4. keyword match (global).
    If channel_name is None, only global entries apply.
    """
    if not url or not data_map:
        return False
    exact_map, keyword_map = data_map
    scoped_key = channel_name or ""
    # Exact entries: stripped candidate must equal the URL verbatim.
    for scope in (scoped_key, ""):
        for candidate in exact_map.get(scope, []):
            if candidate and candidate.strip() == url:
                return True
    # Keyword entries: substring match anywhere in the URL.
    for keyword in keyword_map.get(scoped_key, []) + keyword_map.get("", []):
        if keyword and keyword in url:
            return True
    return False
def get_whitelist_url(data_map: WhitelistMaps, channel_name: str | None = None) -> List[str]:
    """
    Get the list of whitelisted URLs for the specified channel.
    If channel_name is None, only global whitelist entries are considered.
    Entries are stripped and deduplicated; order is unspecified.
    """
    exact_map, _ = data_map
    scoped_key = channel_name or ""
    candidates = exact_map.get(scoped_key, []) + exact_map.get("", [])
    unique = {candidate.strip() for candidate in candidates if candidate.strip()}
    return list(unique)
def get_whitelist_total_count(data_map: WhitelistMaps) -> int:
    """
    Get the total count of unique whitelist entries across all channels.
    Exact and keyword entries are pooled into one stripped, deduplicated set.
    """
    exact_map, keyword_map = data_map
    unique_entries = {
        entry.strip()
        for mapping in (exact_map, keyword_map)
        for entries in mapping.values()
        for entry in entries
    }
    return len(unique_entries)
def get_section_entries(path: str = constants.whitelist_path, section: str = "WHITELIST",
                        pattern: Pattern[str] | None = None) -> tuple[List[str], List[str]]:
    """
    Get URLs from a specific section in the whitelist file.

    Lines are classified by whether they fall under the ``[SECTION]`` header;
    blank lines and ``#`` comments are skipped.  When *pattern* is given, only
    the first matched substring of each line is collected; otherwise the whole
    stripped line is kept.

    Fixes: the `pattern` annotation now admits its `None` default, and a
    redundant truthiness re-check of an already-non-empty line was removed.

    Returns a tuple: (inside_section_list, outside_section_list).
    """
    real_path = get_real_path(resource_path(path))
    if not os.path.exists(real_path):
        return [], []
    inside: List[str] = []
    outside: List[str] = []
    in_section = False
    header_re = re.compile(r"^\[.*\]$")
    with open(real_path, "r", encoding="utf-8") as f:
        for raw in f:
            s = raw.rstrip("\n").strip()
            if not s or s.startswith("#"):
                continue
            if header_re.match(s):
                in_section = s.upper() == f"[{section.upper()}]"
                continue
            target = inside if in_section else outside
            if pattern:
                match = pattern.search(s)
                if match:
                    target.append(match.group())
            else:
                target.append(s)
    return inside, outside
| {
"repo_id": "Guovin/iptv-api",
"file_path": "utils/whitelist.py",
"license": "MIT License",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Guovin/iptv-api:utils/i18n.py | import json
import os
from typing import Dict
from utils.config import config, resource_path
# lang code -> translation table, populated lazily by _load_locale().
_LOCALES_CACHE: Dict[str, Dict[str, str]] = {}
# Currently active language code; None until set_language()/get_language() runs.
_CURRENT_LANG = None
# Translation table for the currently active language.
_TRANSLATIONS: Dict[str, str] = {}
def _load_locale(lang: str) -> Dict[str, str]:
    """Load and cache the translation table for *lang* (zh_CN as fallback file)."""
    cached = _LOCALES_CACHE.get(lang)
    if cached is not None:
        return cached
    locales_dir = resource_path(os.path.join("locales"))
    file_path = os.path.join(locales_dir, f"{lang}.json")
    if not os.path.exists(file_path):
        # Unknown language: fall back to the bundled Simplified Chinese file.
        file_path = os.path.join(locales_dir, "zh_CN.json")
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            data = json.load(f)
    except Exception:
        # Missing or malformed locale file degrades to an empty table.
        data = {}
    _LOCALES_CACHE[lang] = data
    return data
def set_language(lang: str):
    """Switch the active language and (re)load its translation table."""
    global _CURRENT_LANG, _TRANSLATIONS
    _CURRENT_LANG = lang
    _TRANSLATIONS = _load_locale(lang)
def get_language() -> str:
    """Return the active language, lazily initializing it from config."""
    global _CURRENT_LANG
    if _CURRENT_LANG is None:
        set_language(config.language)
    return _CURRENT_LANG
def t(key: str, default: str | None = None) -> str:
    """Translate *key*; fall back to *default* if given, else to the key itself."""
    if not _TRANSLATIONS:
        # Lazy initialization on first lookup.
        set_language(config.language)
    try:
        return _TRANSLATIONS[key]
    except KeyError:
        return key if default is None else default
| {
"repo_id": "Guovin/iptv-api",
"file_path": "utils/i18n.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Guovin/iptv-api:service/rtmp.py | import json
import os
import subprocess
import sys
import threading
import time
from collections import OrderedDict
import utils.constants as constants
from utils.config import config
from utils.db import get_db_connection, return_db_connection
from utils.i18n import t
from utils.tools import join_url, resource_path, render_nginx_conf
# --- Bundled nginx-rtmp (Windows build) paths -------------------------------
nginx_dir = resource_path(os.path.join('utils', 'nginx-rtmp-win32'))
nginx_conf_template = resource_path(os.path.join(nginx_dir, 'conf', 'nginx.conf.template'))
nginx_conf = resource_path(os.path.join(nginx_dir, 'conf', 'nginx.conf'))
nginx_path = resource_path(os.path.join(nginx_dir, 'nginx.exe'))
stop_path = resource_path(os.path.join(nginx_dir, 'stop.bat'))
# Local RTMP ingest endpoint served by nginx-rtmp.
app_rtmp_url = f"rtmp://127.0.0.1:{config.nginx_rtmp_port}"
# channel_id -> ffmpeg Popen; OrderedDict keeps insertion order so the oldest
# stream can be evicted first (see cleanup_streams).
hls_running_streams = OrderedDict()
STREAMS_LOCK = threading.Lock()
# channel_id -> last access timestamp, used by the idle monitor.
hls_last_access = {}
HLS_IDLE_TIMEOUT = config.rtmp_idle_timeout
HLS_WAIT_TIMEOUT = 30
HLS_WAIT_INTERVAL = 0.5
MAX_STREAMS = config.rtmp_max_streams
# NOTE(review): duplicate of the nginx_dir assignment above — redundant but harmless.
nginx_dir = resource_path(os.path.join('utils', 'nginx-rtmp-win32'))
hls_temp_path = resource_path(os.path.join(nginx_dir, 'temp', 'hls')) if sys.platform == "win32" else '/tmp/hls'
# One-shot guard so the idle-monitor thread is started at most once.
_hls_monitor_started_evt = threading.Event()
_hls_monitor_lock = threading.Lock()
def ensure_hls_idle_monitor_started():
    """
    Start the HLS idle-monitor thread exactly once (thread-safe).

    Double-checked against the started event under ``_hls_monitor_lock``.
    Does nothing when RTMP support is disabled in the config.
    """
    if _hls_monitor_started_evt.is_set():
        return
    with _hls_monitor_lock:
        # Re-check under the lock: another thread may have won the race.
        if _hls_monitor_started_evt.is_set():
            return
        try:
            if not config.open_rtmp:
                return
            thread = threading.Thread(target=hls_idle_monitor, daemon=True, name="hls-idle-monitor")
            thread.start()
            _hls_monitor_started_evt.set()
            print(t("msg.rtmp_hls_idle_monitor_start_success"))
        except Exception as e:
            print(t("msg.rtmp_hls_idle_monitor_start_fail").format(info=e))
def start_hls_to_rtmp(host, channel_id):
    """
    Spawn an ffmpeg process that republishes the channel's source URL to the
    RTMP endpoint ``host/channel_id``.

    Returns None on every path; error/status conditions print a localized
    message instead of raising.  The spawned process is tracked in
    ``hls_running_streams`` and reaped by a monitor thread on exit.
    """
    ensure_hls_idle_monitor_started()
    if not host:
        return None
    if not channel_id:
        return print(t("msg.error_channel_id_not_found"))
    data = get_channel_data(channel_id)
    url = data.get("url", "")
    if not url:
        return print(t("msg.error_channel_url_not_found"))
    with STREAMS_LOCK:
        if channel_id in hls_running_streams:
            process = hls_running_streams[channel_id]
            if process.poll() is None:
                # Existing publisher still alive — nothing to do.
                return print(t("msg.rtmp_hls_stream_already_running"))
            else:
                del hls_running_streams[channel_id]
    # Must run outside STREAMS_LOCK: cleanup_streams() acquires the same
    # (non-reentrant) lock itself.
    cleanup_streams(hls_running_streams)
    headers = data.get("headers", None)
    headers_str = ''.join(f'{k}: {v}\r\n' for k, v in headers.items()) if headers else ''
    cmd = [
        'ffmpeg',
        '-loglevel', 'error',
        '-re',
    ]
    if headers_str:
        cmd += ['-headers', headers_str]
    cmd += [
        # '$' and anything after it is app-internal metadata, not URL.
        '-i', url.partition('$')[0],
        '-c:v', 'libx264',
        '-preset', 'veryfast',
        '-tune', 'zerolatency',
        '-vf', 'scale=trunc(iw/2)*2:trunc(ih/2)*2',
        '-c:a', 'aac',
        '-b:a', '128k',
        '-f', 'flv',
        '-flvflags', 'no_duration_filesize',
        join_url(host, channel_id)
    ]
    try:
        process = subprocess.Popen(
            cmd,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            stdin=subprocess.DEVNULL
        )
        print(t("msg.rtmp_publish").format(channel_id=channel_id, source=url))
    except Exception as e:
        return print(t("msg.error_start_ffmpeg_failed").format(info=e))
    # Reaper thread removes the registry entry once ffmpeg exits.
    threading.Thread(
        target=monitor_stream_process,
        args=(hls_running_streams, process, channel_id),
        daemon=True
    ).start()
    with STREAMS_LOCK:
        hls_running_streams[channel_id] = process
def _terminate_process_safe(process):
try:
process.terminate()
process.wait(timeout=5)
except Exception:
try:
process.kill()
process.wait(timeout=5)
except Exception:
pass
def cleanup_streams(streams):
    """Drop exited processes from *streams*, then evict oldest entries past MAX_STREAMS."""
    with STREAMS_LOCK:
        # Collect first, then delete: avoids mutating while iterating.
        dead = [cid for cid, proc in list(streams.items()) if proc.poll() is not None]
        for cid in dead:
            streams.pop(cid, None)
        while len(streams) > MAX_STREAMS:
            try:
                # OrderedDict popitem(last=False) yields the oldest stream.
                _, oldest_proc = streams.popitem(last=False)
            except KeyError:
                break
            _terminate_process_safe(oldest_proc)
def monitor_stream_process(streams, process, channel_id):
    """Block until *process* exits, then deregister it from *streams* if still current."""
    try:
        process.wait()
    except Exception:
        pass
    with STREAMS_LOCK:
        # Identity check: only remove the entry if it was not replaced meanwhile.
        if streams.get(channel_id) is process:
            del streams[channel_id]
def hls_idle_monitor():
    """
    Background loop: every 5s, stop streams idle longer than HLS_IDLE_TIMEOUT.

    Candidates are collected under STREAMS_LOCK; the actual stop happens
    outside that critical section because stop_stream() takes the lock itself.
    """
    while True:
        now = time.time()
        to_stop = []
        with STREAMS_LOCK:
            for channel_id, last_ts in list(hls_last_access.items()):
                proc = hls_running_streams.get(channel_id)
                if proc and proc.poll() is None:
                    if now - last_ts > HLS_IDLE_TIMEOUT:
                        # NOTE(review): key uses "msg_..." while every other
                        # message here uses "msg...." — verify against the
                        # locale files; t() falls back to returning the key.
                        print(t("msg_rtmp_hls_idle_will_stop").format(channel_id=channel_id,
                                                                     second=f"{now - last_ts:.1f}"))
                        to_stop.append(channel_id)
        for cid in to_stop:
            stop_stream(cid)
            with STREAMS_LOCK:
                hls_last_access.pop(cid, None)
        time.sleep(5)
def get_channel_data(channel_id):
    """Fetch {'url', 'headers'} for *channel_id* from the rtmp results DB ({} on miss/error)."""
    conn = get_db_connection(constants.rtmp_data_path)
    channel_data = {}
    try:
        row = conn.cursor().execute(
            "SELECT url, headers FROM result_data WHERE id=?", (channel_id,)
        ).fetchone()
        if row:
            channel_data = {
                'url': row[0],
                # headers are stored as a JSON blob; absent/empty -> None.
                'headers': json.loads(row[1]) if row[1] else None,
            }
    except Exception as e:
        print(t("msg.error_get_channel_data_from_database").format(info=e))
    finally:
        # Always hand the connection back to the pool.
        return_db_connection(constants.rtmp_data_path, conn)
    return channel_data
def stop_stream(channel_id):
    """Terminate and deregister the running publisher for *channel_id*, if any."""
    with STREAMS_LOCK:
        process = hls_running_streams.get(channel_id)
        if process is not None and process.poll() is None:
            try:
                _terminate_process_safe(process)
            except Exception as e:
                print(t("msg.error_stop_channel_stream").format(channel_id=channel_id, info=e))
        # Remove the registry entry regardless of termination outcome.
        hls_running_streams.pop(channel_id, None)
def start_rtmp_service():
    """
    Render the nginx config from its template and launch the nginx-rtmp binary.

    nginx is started from inside its own directory (it resolves conf/log paths
    relatively); the original working directory is restored afterwards.
    """
    render_nginx_conf(nginx_conf_template, nginx_conf)
    original_dir = os.getcwd()
    try:
        os.chdir(nginx_dir)
        # NOTE(review): a list argv combined with shell=True is unusual — on
        # Windows only the first element is run; confirm intended.
        subprocess.Popen([nginx_path], shell=True)
    except Exception as e:
        print(t("msg.error_rtmp_service_start_failed").format(info=e))
    finally:
        os.chdir(original_dir)
def stop_rtmp_service():
    """
    Stop the nginx-rtmp service by running its stop script.

    Fix: restore the original working directory in a ``finally`` block,
    mirroring start_rtmp_service(); previously the process stayed chdir'd
    into the nginx directory, breaking later relative-path operations.
    """
    original_dir = os.getcwd()
    try:
        os.chdir(nginx_dir)
        subprocess.Popen([stop_path], shell=True)
    except Exception as e:
        print(t("msg.error_rtmp_service_stop_failed").format(info=e))
    finally:
        os.chdir(original_dir)
| {
"repo_id": "Guovin/iptv-api",
"file_path": "service/rtmp.py",
"license": "MIT License",
"lines": 191,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Guovin/iptv-api:utils/ip_checker/ip_checker.py | import socket
from urllib.parse import urlparse
import ipdb
from utils.tools import resource_path
class IPChecker:
def __init__(self):
self.db = ipdb.City(resource_path("utils/ip_checker/data/qqwry.ipdb"))
self.url_host = {}
self.host_ip = {}
self.host_ipv_type = {}
def get_host(self, url: str) -> str:
"""
Get the host from a URL
"""
if url in self.url_host:
return self.url_host[url]
host = urlparse(url).hostname or url
self.url_host[url] = host
return host
def get_ip(self, url: str) -> str | None:
"""
Get the IP from a URL
"""
host = self.get_host(url)
if host in self.host_ip:
return self.host_ip[host]
self.get_ipv_type(url)
return self.host_ip.get(host)
def get_ipv_type(self, url: str) -> str:
"""
Get the IPv type of URL
"""
host = self.get_host(url)
if host in self.host_ipv_type:
return self.host_ipv_type[host]
try:
addr_info = socket.getaddrinfo(host, None, socket.AF_UNSPEC, socket.SOCK_STREAM)
ip = next((info[4][0] for info in addr_info if info[0] == socket.AF_INET6), None)
if not ip:
ip = next((info[4][0] for info in addr_info if info[0] == socket.AF_INET), None)
ipv_type = "ipv6" if any(info[0] == socket.AF_INET6 for info in addr_info) else "ipv4"
except Exception as e:
print(f"Error on getting IPv type for {host}: {e}")
ip = None
ipv_type = "ipv4"
self.host_ip[host] = ip
self.host_ipv_type[host] = ipv_type
return ipv_type
def find_map(self, ip: str) -> tuple[str | None, str | None]:
"""
Find the IP address and return the location and ISP
:param ip: The IP address to find
:return: A tuple of (location, ISP)
"""
try:
result = self.db.find_map(ip, "CN")
if not result:
return None, None
location_parts = [
result.get('country_name', ''),
result.get('region_name', ''),
result.get('city_name', '')
]
location = "-".join(filter(None, location_parts))
isp = result.get('isp_domain', None)
return location, isp
except Exception as e:
print(f"Error on finding ip location and ISP: {e}")
return None, None
| {
"repo_id": "Guovin/iptv-api",
"file_path": "utils/ip_checker/ip_checker.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
HKUDS/LightRAG:tests/test_qdrant_upsert_batching.py | from unittest.mock import MagicMock
import numpy as np
import pytest
from qdrant_client import models
from lightrag.kg.qdrant_impl import QdrantVectorDBStorage
def _make_point(point_id: str, content: str) -> models.PointStruct:
    """Build a minimal 3-dim PointStruct whose payload mirrors its id and content."""
    payload = {"id": point_id, "content": content}
    return models.PointStruct(id=point_id, vector=[0.1, 0.2, 0.3], payload=payload)
def test_build_upsert_batches_respects_point_limit():
    """A generous byte budget must still split batches at max_points_per_batch."""
    points = [_make_point(str(idx), "x" * 10) for idx in range(5)]
    batches = QdrantVectorDBStorage._build_upsert_batches(
        points, max_payload_bytes=1024 * 1024, max_points_per_batch=2
    )
    batch_sizes = [len(batch_points) for batch_points, _ in batches]
    assert batch_sizes == [2, 2, 1]
def test_build_upsert_batches_exact_payload_boundary_no_split():
    """Two points that exactly fill the JSON payload budget must stay in one batch."""
    first = _make_point("a", "x" * 32)
    second = _make_point("b", "y" * 32)
    first_size = QdrantVectorDBStorage._estimate_point_payload_bytes(first)
    second_size = QdrantVectorDBStorage._estimate_point_payload_bytes(second)
    # JSON array envelope: [] => 2 bytes, and comma between two elements => 1 byte
    exact_limit = 2 + first_size + 1 + second_size
    batches = QdrantVectorDBStorage._build_upsert_batches(
        [first, second],
        max_payload_bytes=exact_limit,
        max_points_per_batch=128,
    )
    assert len(batches) == 1
    assert len(batches[0][0]) == 2
    assert batches[0][1] == exact_limit
def test_build_upsert_batches_raises_for_single_oversized_point():
    """A single point whose payload exceeds the limit must raise ValueError."""
    point = _make_point("oversized", "x" * 64)
    # One byte over the bare point size — still too small once batched.
    too_small_limit = QdrantVectorDBStorage._estimate_point_payload_bytes(point) + 1
    with pytest.raises(ValueError, match="Single Qdrant point exceeds payload limit"):
        QdrantVectorDBStorage._build_upsert_batches(
            [point],
            max_payload_bytes=too_small_limit,
            max_points_per_batch=128,
        )
@pytest.mark.asyncio
async def test_upsert_fail_fast_stops_on_first_failed_batch():
    """Upsert must stop issuing batches as soon as one batch raises."""
    # __new__ bypasses __init__ so no real Qdrant client/config is needed;
    # the attributes below are exactly those upsert() reads.
    storage = QdrantVectorDBStorage.__new__(QdrantVectorDBStorage)
    storage.workspace = "test_ws"
    storage.namespace = "chunks"
    storage.effective_workspace = "test_ws"
    storage.meta_fields = {"content"}
    storage._max_batch_size = 16
    storage._max_upsert_payload_bytes = 1024 * 1024
    storage._max_upsert_points_per_batch = 2
    storage.final_namespace = "test_collection"
    storage._client = MagicMock()

    async def fake_embedding_func(texts, **kwargs):
        # Deterministic 2-dim embedding: [len(text), 0.0].
        return np.array([[float(len(text)), 0.0] for text in texts], dtype=np.float32)

    storage.embedding_func = fake_embedding_func
    # Second upsert call fails; the third configured response must never be used.
    storage._client.upsert.side_effect = [None, RuntimeError("batch failed"), None]
    data = {f"chunk-{i}": {"content": f"content-{i}"} for i in range(5)}
    with pytest.raises(RuntimeError, match="batch failed"):
        await storage.upsert(data)
    # 5 items with max 2 points per batch => expected 3 batches, but stop at batch #2 on error.
    assert storage._client.upsert.call_count == 2
    first_call = storage._client.upsert.call_args_list[0]
    second_call = storage._client.upsert.call_args_list[1]
    assert len(first_call.kwargs["points"]) == 2
    assert len(second_call.kwargs["points"]) == 2
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_qdrant_upsert_batching.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_batch_embeddings.py | """
Tests for batch embedding pre-computation in _perform_kg_search().
Verifies that kg_query batches all needed embeddings (query, ll_keywords,
hl_keywords) into a single embedding API call instead of 3 sequential calls.
"""
from unittest.mock import AsyncMock, MagicMock
import numpy as np
import pytest
from lightrag.base import QueryParam
def _make_mock_embedding_func(dim=1536):
"""Create a mock async embedding function that returns distinct vectors per input."""
async def _embed(texts, **kwargs):
return np.array(
[np.full(dim, i + 1, dtype=np.float32) for i in range(len(texts))]
)
mock = AsyncMock(side_effect=_embed)
return mock
def _make_mock_kv_storage(embedding_func, global_config=None):
mock = MagicMock()
mock.embedding_func = embedding_func
mock.global_config = global_config or {"kg_chunk_pick_method": "VECTOR"}
return mock
def _make_mock_vdb():
"""Create a mock VDB whose query() records the query_embedding it receives."""
mock = AsyncMock()
mock.query = AsyncMock(return_value=[])
mock.cosine_better_than_threshold = 0.2
return mock
def _make_mock_graph():
mock = AsyncMock()
return mock
@pytest.mark.offline
@pytest.mark.asyncio
async def test_hybrid_mode_batches_embeddings():
    """In hybrid mode with both keywords, embedding_func should be called exactly once."""
    from lightrag.operate import _perform_kg_search

    embed_func = _make_mock_embedding_func()
    text_chunks_db = _make_mock_kv_storage(embed_func)
    entities_vdb = _make_mock_vdb()
    relationships_vdb = _make_mock_vdb()
    knowledge_graph = _make_mock_graph()
    query_param = QueryParam(mode="hybrid", top_k=5)
    await _perform_kg_search(
        query="test query",
        ll_keywords="entity1, entity2",
        hl_keywords="theme1, theme2",
        knowledge_graph_inst=knowledge_graph,
        entities_vdb=entities_vdb,
        relationships_vdb=relationships_vdb,
        text_chunks_db=text_chunks_db,
        query_param=query_param,
    )
    # The embedding function should be called exactly once with all 3 texts batched
    assert (
        embed_func.call_count == 1
    ), f"Expected 1 batched embedding call, got {embed_func.call_count}"
    # First positional arg of the single call is the batched list of texts.
    call_args = embed_func.call_args[0][0]
    assert len(call_args) == 3, f"Expected 3 texts in batch, got {len(call_args)}"
    assert call_args == ["test query", "entity1, entity2", "theme1, theme2"]
@pytest.mark.offline
@pytest.mark.asyncio
async def test_hybrid_mode_passes_embeddings_to_vdbs():
    """Pre-computed embeddings should be forwarded to entities and relationships VDB queries."""
    from lightrag.operate import _perform_kg_search

    embed_func = _make_mock_embedding_func()
    text_chunks_db = _make_mock_kv_storage(embed_func)
    entities_vdb = _make_mock_vdb()
    relationships_vdb = _make_mock_vdb()
    knowledge_graph = _make_mock_graph()
    query_param = QueryParam(mode="hybrid", top_k=5)
    await _perform_kg_search(
        query="test query",
        ll_keywords="entity keywords",
        hl_keywords="theme keywords",
        knowledge_graph_inst=knowledge_graph,
        entities_vdb=entities_vdb,
        relationships_vdb=relationships_vdb,
        text_chunks_db=text_chunks_db,
        query_param=query_param,
    )
    # The mock embedder fills vector i with i+1, so batch index identifies
    # which embedding was forwarded where.
    # entities_vdb.query should receive ll_embedding (index 1 → all 2s)
    entities_call = entities_vdb.query.call_args
    assert entities_call is not None, "entities_vdb.query was not called"
    ll_embedding = entities_call.kwargs.get("query_embedding")
    assert ll_embedding is not None, "ll_embedding was not passed to entities_vdb.query"
    assert np.all(
        ll_embedding == 2.0
    ), f"Expected ll_embedding=[2,2,...], got {ll_embedding[:3]}"
    # relationships_vdb.query should receive hl_embedding (index 2 → all 3s)
    rel_call = relationships_vdb.query.call_args
    assert rel_call is not None, "relationships_vdb.query was not called"
    hl_embedding = rel_call.kwargs.get("query_embedding")
    assert (
        hl_embedding is not None
    ), "hl_embedding was not passed to relationships_vdb.query"
    assert np.all(
        hl_embedding == 3.0
    ), f"Expected hl_embedding=[3,3,...], got {hl_embedding[:3]}"
@pytest.mark.offline
@pytest.mark.asyncio
async def test_local_mode_skips_hl_keywords():
    """In local mode, should only embed query + ll_keywords (skip hl_keywords)."""
    from lightrag.operate import _perform_kg_search

    embed_func = _make_mock_embedding_func()
    text_chunks_db = _make_mock_kv_storage(embed_func)
    entities_vdb = _make_mock_vdb()
    relationships_vdb = _make_mock_vdb()
    knowledge_graph = _make_mock_graph()
    query_param = QueryParam(mode="local", top_k=5)
    await _perform_kg_search(
        query="test query",
        ll_keywords="entity keywords",
        hl_keywords="theme keywords",
        knowledge_graph_inst=knowledge_graph,
        entities_vdb=entities_vdb,
        relationships_vdb=relationships_vdb,
        text_chunks_db=text_chunks_db,
        query_param=query_param,
    )
    # Still a single batched call — just with the hl text omitted.
    assert embed_func.call_count == 1
    call_args = embed_func.call_args[0][0]
    assert len(call_args) == 2, f"Expected 2 texts (query + ll), got {len(call_args)}"
    assert "theme keywords" not in call_args
@pytest.mark.offline
@pytest.mark.asyncio
async def test_global_mode_skips_ll_keywords():
    """In global mode, should only embed query + hl_keywords (skip ll_keywords)."""
    from lightrag.operate import _perform_kg_search

    embed_func = _make_mock_embedding_func()
    text_chunks_db = _make_mock_kv_storage(embed_func)
    entities_vdb = _make_mock_vdb()
    relationships_vdb = _make_mock_vdb()
    knowledge_graph = _make_mock_graph()
    query_param = QueryParam(mode="global", top_k=5)
    await _perform_kg_search(
        query="test query",
        ll_keywords="entity keywords",
        hl_keywords="theme keywords",
        knowledge_graph_inst=knowledge_graph,
        entities_vdb=entities_vdb,
        relationships_vdb=relationships_vdb,
        text_chunks_db=text_chunks_db,
        query_param=query_param,
    )
    # Mirror image of the local-mode test: ll text omitted from the batch.
    assert embed_func.call_count == 1
    call_args = embed_func.call_args[0][0]
    assert len(call_args) == 2, f"Expected 2 texts (query + hl), got {len(call_args)}"
    assert "entity keywords" not in call_args
@pytest.mark.offline
@pytest.mark.asyncio
async def test_embedding_failure_falls_back_gracefully():
    """If batch embedding fails, VDB queries should still work (fallback to individual calls)."""
    from lightrag.operate import _perform_kg_search

    # Every embedding attempt raises, simulating an embedding-API outage.
    embed_func = AsyncMock(side_effect=RuntimeError("API error"))
    text_chunks_db = _make_mock_kv_storage(embed_func)
    entities_vdb = _make_mock_vdb()
    relationships_vdb = _make_mock_vdb()
    knowledge_graph = _make_mock_graph()
    query_param = QueryParam(mode="hybrid", top_k=5)
    # Should not raise — graceful degradation
    await _perform_kg_search(
        query="test query",
        ll_keywords="entity keywords",
        hl_keywords="theme keywords",
        knowledge_graph_inst=knowledge_graph,
        entities_vdb=entities_vdb,
        relationships_vdb=relationships_vdb,
        text_chunks_db=text_chunks_db,
        query_param=query_param,
    )
    # VDB queries should still be called (with query_embedding=None fallback)
    entities_call = entities_vdb.query.call_args
    assert entities_call is not None
    assert entities_call.kwargs.get("query_embedding") is None
    rel_call = relationships_vdb.query.call_args
    assert rel_call is not None
    assert rel_call.kwargs.get("query_embedding") is None
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_batch_embeddings.py",
"license": "MIT License",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_description_api_validation.py | import pytest
from lightrag.constants import SOURCE_IDS_LIMIT_METHOD_KEEP
from lightrag.operate import (
_merge_nodes_then_upsert,
_handle_single_relationship_extraction,
)
from lightrag import utils_graph
class DummyGraphStorage:
    """In-memory graph-storage double that tracks a single node and its upserts."""

    def __init__(self, node=None):
        self.node = node
        self.upserted_nodes = []  # history of (node_id, node_data) calls

    async def get_node(self, node_id):
        # The one stored node is returned regardless of node_id.
        return self.node

    async def upsert_node(self, node_id, node_data):
        self.upserted_nodes.append((node_id, node_data))
        self.node = dict(node_data)
class DummyVectorStorage:
    """No-op vector-storage double; every operation succeeds trivially."""

    def __init__(self):
        self.global_config = {"workspace": "test"}

    async def upsert(self, data):
        return None

    async def delete(self, ids):
        return None

    async def get_by_id(self, id_):
        # Nothing is ever stored, so lookups always miss.
        return None

    async def index_done_callback(self):
        return True
class DummyAsyncContext:
    """Async context manager that yields None and never suppresses exceptions."""

    async def __aenter__(self):
        return None

    async def __aexit__(self, exc_type, exc, tb):
        # False: any in-body exception propagates normally.
        return False
@pytest.mark.asyncio
async def test_merge_nodes_then_upsert_handles_missing_legacy_description():
    """A legacy node lacking a description gets the synthesized fallback text."""
    # Legacy node: has a source_id but no description field.
    graph = DummyGraphStorage(node={"source_id": "chunk-1"})
    global_config = {
        "source_ids_limit_method": SOURCE_IDS_LIMIT_METHOD_KEEP,
        "max_source_ids_per_entity": 20,
    }
    result = await _merge_nodes_then_upsert(
        entity_name="LegacyEntity",
        nodes_data=[],
        knowledge_graph_inst=graph,
        entity_vdb=None,
        global_config=global_config,
    )
    # Fallback description is derived from the entity name, both in the
    # returned record and in what was written back to the graph.
    assert result["description"] == "Entity LegacyEntity"
    assert graph.upserted_nodes[-1][1]["description"] == "Entity LegacyEntity"
@pytest.mark.asyncio
async def test_acreate_entity_rejects_empty_description():
    """Creating an entity with a whitespace-only description must fail."""
    call_kwargs = dict(
        chunk_entity_relation_graph=None,
        entities_vdb=None,
        relationships_vdb=None,
        entity_name="EntityA",
        entity_data={"description": " "},
    )
    with pytest.raises(ValueError, match="description cannot be empty"):
        await utils_graph.acreate_entity(**call_kwargs)
@pytest.mark.asyncio
async def test_acreate_relation_rejects_empty_description():
    """Creating a relation with an empty description must fail."""
    payload = {"description": ""}
    with pytest.raises(ValueError, match="description cannot be empty"):
        await utils_graph.acreate_relation(
            chunk_entity_relation_graph=None,
            entities_vdb=None,
            relationships_vdb=None,
            source_entity="A",
            target_entity="B",
            relation_data=payload,
        )
@pytest.mark.asyncio
async def test_aedit_entity_rejects_empty_description():
    """Editing an entity with description=None must fail validation."""
    updates = {"description": None}
    with pytest.raises(ValueError, match="description cannot be empty"):
        await utils_graph.aedit_entity(
            chunk_entity_relation_graph=None,
            entities_vdb=None,
            relationships_vdb=None,
            entity_name="EntityA",
            updated_data=updates,
        )
@pytest.mark.asyncio
async def test_aedit_relation_rejects_empty_description():
    """Editing a relation with a whitespace-only description must fail."""
    updates = {"description": " "}
    with pytest.raises(ValueError, match="description cannot be empty"):
        await utils_graph.aedit_relation(
            chunk_entity_relation_graph=None,
            entities_vdb=None,
            relationships_vdb=None,
            source_entity="A",
            target_entity="B",
            updated_data=updates,
        )
@pytest.mark.asyncio
async def test_aedit_entity_allows_updates_without_description(monkeypatch):
    """Edits that do not touch the description field should succeed."""

    async def fake_edit_impl(*args, **kwargs):
        # Pretend the underlying edit succeeded and kept the old description.
        return {"entity_name": "EntityA", "description": "kept", "source_id": "chunk-1"}

    monkeypatch.setattr(
        utils_graph, "get_storage_keyed_lock", lambda *a, **k: DummyAsyncContext()
    )
    monkeypatch.setattr(utils_graph, "_edit_entity_impl", fake_edit_impl)

    outcome = await utils_graph.aedit_entity(
        chunk_entity_relation_graph=None,
        entities_vdb=DummyVectorStorage(),
        relationships_vdb=DummyVectorStorage(),
        entity_name="EntityA",
        updated_data={"entity_type": "ORG"},
    )
    assert outcome["operation_summary"]["operation_status"] == "success"
@pytest.mark.asyncio
async def test_handle_single_relationship_extraction_ignores_empty_description():
    """A relationship record with a blank description is dropped entirely."""
    record = ["relation", "Alice", "Bob", "works_with", " "]
    result = await _handle_single_relationship_extraction(
        record,
        chunk_key="chunk-1",
        timestamp=1,
    )
    assert result is None
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_description_api_validation.py",
"license": "MIT License",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_extract_entities.py | """Tests for entity extraction gleaning token limit guard."""
from unittest.mock import AsyncMock, patch
import pytest
from lightrag.utils import Tokenizer, TokenizerInterface
class DummyTokenizer(TokenizerInterface):
    """Deterministic tokenizer: one token per character (its code point)."""

    def encode(self, content: str):
        # Each character maps to its Unicode code point.
        return list(map(ord, content))

    def decode(self, tokens):
        # Inverse of encode: code points back to characters.
        return "".join(map(chr, tokens))
def _make_global_config(
    max_extract_input_tokens: int = 20480,
    entity_extract_max_gleaning: int = 1,
) -> dict:
    """Assemble the minimal global_config dict that extract_entities needs."""
    return {
        # AsyncMock lets tests both stub the LLM reply and count awaits.
        "llm_model_func": AsyncMock(return_value=""),
        "entity_extract_max_gleaning": entity_extract_max_gleaning,
        "addon_params": {},
        "tokenizer": Tokenizer("dummy", DummyTokenizer()),
        "max_extract_input_tokens": max_extract_input_tokens,
        "llm_model_max_async": 1,
    }
# Minimal valid extraction result that _process_extraction_result can parse:
# a single entity record (name, type, description) followed by the
# completion delimiter expected by the extraction pipeline.
_EXTRACTION_RESULT = (
    "(entity<|#|>TEST_ENTITY<|#|>CONCEPT<|#|>A test entity)<|COMPLETE|>"
)
def _make_chunks(content: str = "Test content.") -> dict[str, dict]:
return {
"chunk-001": {
"tokens": len(content),
"content": content,
"full_doc_id": "doc-001",
"chunk_order_index": 0,
}
}
@pytest.mark.offline
@pytest.mark.asyncio
async def test_gleaning_skipped_when_tokens_exceed_limit():
    """Gleaning should be skipped when estimated tokens exceed max_extract_input_tokens."""
    from lightrag.operate import extract_entities

    # A tiny limit guarantees the gleaning context overflows it.
    config = _make_global_config(
        max_extract_input_tokens=10,
        entity_extract_max_gleaning=1,
    )
    llm = config["llm_model_func"]
    llm.return_value = _EXTRACTION_RESULT

    with patch("lightrag.operate.logger") as mock_logger:
        await extract_entities(
            chunks=_make_chunks(),
            global_config=config,
        )

    # Only the initial extraction call is made; the gleaning pass is skipped.
    assert llm.await_count == 1
    # The skip must be surfaced as a single warning.
    mock_logger.warning.assert_called_once()
    message = mock_logger.warning.call_args[0][0]
    assert "Gleaning stopped" in message
    assert "exceeded limit" in message
@pytest.mark.offline
@pytest.mark.asyncio
async def test_gleaning_proceeds_when_tokens_within_limit():
    """Gleaning should proceed when estimated tokens are within max_extract_input_tokens."""
    from lightrag.operate import extract_entities

    # A huge limit means the gleaning context always fits.
    config = _make_global_config(
        max_extract_input_tokens=999999,
        entity_extract_max_gleaning=1,
    )
    llm = config["llm_model_func"]
    llm.return_value = _EXTRACTION_RESULT

    with patch("lightrag.operate.logger"):
        await extract_entities(
            chunks=_make_chunks(),
            global_config=config,
        )

    # Initial extraction plus exactly one gleaning round.
    assert llm.await_count == 2
@pytest.mark.offline
@pytest.mark.asyncio
async def test_no_gleaning_when_max_gleaning_zero():
    """No gleaning when entity_extract_max_gleaning is 0, regardless of token limit."""
    from lightrag.operate import extract_entities

    # Token budget is irrelevant here: gleaning is disabled outright.
    config = _make_global_config(
        max_extract_input_tokens=999999,
        entity_extract_max_gleaning=0,
    )
    llm = config["llm_model_func"]
    llm.return_value = _EXTRACTION_RESULT

    with patch("lightrag.operate.logger"):
        await extract_entities(
            chunks=_make_chunks(),
            global_config=config,
        )

    # Only the single initial extraction call.
    assert llm.await_count == 1
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_extract_entities.py",
"license": "MIT License",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:examples/lightrag_gemini_workspace_demo.py | """
LightRAG Data Isolation Demo: Workspace Management
This example demonstrates how to maintain multiple isolated knowledge bases
within a single application using LightRAG's 'workspace' feature.
Key Concepts:
- Workspace Isolation: Each RAG instance is assigned a unique workspace name,
which ensures that Knowledge Graphs, Vector Databases, and Chunks are
stored in separate, non-conflicting directories.
- Independent Configuration: Different workspaces can utilize different
ENTITY_TYPES and document sets simultaneously.
Prerequisites:
1. Set the following environment variables:
- GEMINI_API_KEY: Your Google Gemini API key.
- ENTITY_TYPES: A JSON string of entity categories (e.g., '["Person", "Organization"]').
2. Ensure your data directory contains:
- Data/book-small.txt
- Data/HR_policies.txt
Usage:
python examples/lightrag_gemini_workspace_demo.py
"""
import os
import asyncio
import json
import numpy as np
from lightrag import LightRAG, QueryParam
from lightrag.llm.gemini import gemini_model_complete, gemini_embed
from lightrag.utils import wrap_embedding_func_with_attrs
from lightrag.constants import DEFAULT_ENTITY_TYPES
async def llm_model_func(
    prompt, system_prompt=None, history_messages=None, keyword_extraction=False, **kwargs
) -> str:
    """Wrapper for Gemini LLM completion.

    Args:
        prompt: The user prompt to complete.
        system_prompt: Optional system instruction forwarded to the model.
        history_messages: Optional prior conversation turns; defaults to an
            empty list per call. (Bug fix: the original used a mutable
            default ``[]``, which is shared across all calls and can
            accumulate state if the callee mutates it.)
        keyword_extraction: Accepted for LightRAG's llm_model_func contract.
        **kwargs: Extra options passed through to gemini_model_complete.
    """
    return await gemini_model_complete(
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages if history_messages is not None else [],
        api_key=os.getenv("GEMINI_API_KEY"),
        model_name="gemini-2.0-flash-exp",
        **kwargs,
    )
@wrap_embedding_func_with_attrs(
    embedding_dim=768, max_token_size=2048, model_name="models/text-embedding-004"
)
async def embedding_func(texts: list[str]) -> np.ndarray:
    """Embed a batch of texts with Gemini's text-embedding-004 model."""
    key = os.getenv("GEMINI_API_KEY")
    return await gemini_embed.func(
        texts, api_key=key, model="models/text-embedding-004"
    )
async def initialize_rag(
    workspace: str = "default_workspace",
    entities=None,
) -> LightRAG:
    """
    Initializes a LightRAG instance with data isolation.

    Entity-type resolution order:
    - an explicit `entities` argument wins,
    - otherwise the ENTITY_TYPES env var (JSON list) is used,
    - otherwise DEFAULT_ENTITY_TYPES.
    """
    env_entities = os.getenv("ENTITY_TYPES")
    if entities is not None:
        entity_types = entities
    elif env_entities:
        entity_types = json.loads(env_entities)
    else:
        entity_types = DEFAULT_ENTITY_TYPES

    rag = LightRAG(
        workspace=workspace,
        llm_model_name="gemini-2.0-flash",
        llm_model_func=llm_model_func,
        embedding_func=embedding_func,
        embedding_func_max_async=4,
        embedding_batch_num=8,
        llm_model_max_async=2,
        addon_params={"entity_types": entity_types},
    )
    # Storages must be initialized before the instance is used.
    await rag.initialize_storages()
    return rag
async def main():
    """Build two isolated workspaces, index a corpus into each, query both."""
    lit_rag = None
    hr_rag = None
    try:
        # One RAG instance per isolated workspace:
        # literature analysis vs. corporate HR documentation.
        print("Initializing isolated LightRAG workspaces...")
        lit_rag = await initialize_rag("rag_workspace_book")
        hr_rag = await initialize_rag("rag_workspace_hr")

        # Index the literature corpus, if present.
        lit_source = "Data/book-small.txt"
        if os.path.exists(lit_source):
            with open(lit_source, "r", encoding="utf-8") as f:
                print(f"Indexing {lit_source} into Literature Workspace...")
                await lit_rag.ainsert(f.read())

        # Index the HR corpus, if present.
        hr_source = "Data/HR_policies.txt"
        if os.path.exists(hr_source):
            with open(hr_source, "r", encoding="utf-8") as f:
                print(f"Indexing {hr_source} into HR Workspace...")
                await hr_rag.ainsert(f.read())

        # Each query only sees its own workspace's knowledge.
        print("\n--- Querying Literature Workspace ---")
        book_answer = await lit_rag.aquery(
            "What is the main theme?",
            param=QueryParam(mode="hybrid", stream=False),
        )
        print(f"Book Analysis: {book_answer[:200]}...")

        print("\n--- Querying HR Workspace ---")
        hr_answer = await hr_rag.aquery(
            "What is the leave policy?", param=QueryParam(mode="hybrid")
        )
        print(f"HR Response: {hr_answer[:200]}...")
    except Exception as e:
        print(f"An error occurred: {e}")
    finally:
        # Finalize storage to safely close DB connections and write buffers
        if lit_rag:
            await lit_rag.finalize_storages()
        if hr_rag:
            await hr_rag.finalize_storages()
if __name__ == "__main__":
    # Script entry point: run the async demo to completion.
    asyncio.run(main())
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "examples/lightrag_gemini_workspace_demo.py",
"license": "MIT License",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
HKUDS/LightRAG:examples/lightrag_vllm_demo.py | """
LightRAG Demo with vLLM (LLM, Embeddings, and Reranker)
This example demonstrates how to use LightRAG with:
- vLLM-served LLM (OpenAI-compatible API)
- vLLM-served embedding model
- Jina-compatible reranker (also vLLM-served)
Prerequisites:
1. Create a .env file or export environment variables:
- LLM_MODEL
- LLM_BINDING_HOST
- LLM_BINDING_API_KEY
- EMBEDDING_MODEL
- EMBEDDING_BINDING_HOST
- EMBEDDING_BINDING_API_KEY
- EMBEDDING_DIM
- EMBEDDING_TOKEN_LIMIT
- RERANK_MODEL
- RERANK_BINDING_HOST
- RERANK_BINDING_API_KEY
2. Prepare a text file to index (default: Data/book-small.txt)
3. Configure storage backends via environment variables or modify
the storage parameters in initialize_rag() below.
Usage:
python examples/lightrag_vllm_demo.py
"""
import os
import asyncio
from functools import partial
from dotenv import load_dotenv
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
from lightrag.rerank import jina_rerank
load_dotenv()
# --------------------------------------------------
# Constants
# --------------------------------------------------
WORKING_DIR = "./LightRAG_Data"  # LightRAG working directory for local artifacts
BOOK_FILE = "Data/book-small.txt"  # document to index
# --------------------------------------------------
# LLM function (vLLM, OpenAI-compatible)
# --------------------------------------------------
async def llm_model_func(
    prompt, system_prompt=None, history_messages=None, **kwargs
) -> str:
    """Complete `prompt` against the vLLM-served, OpenAI-compatible LLM.

    Args:
        prompt: The user prompt to complete.
        system_prompt: Optional system instruction.
        history_messages: Optional prior conversation turns; defaults to an
            empty list per call. (Bug fix: the original used a mutable
            default ``[]``, shared across calls.)
        **kwargs: Extra options forwarded to openai_complete_if_cache.
    """
    return await openai_complete_if_cache(
        model=os.getenv("LLM_MODEL", "Qwen/Qwen3-14B-AWQ"),
        prompt=prompt,
        system_prompt=system_prompt,
        history_messages=history_messages if history_messages is not None else [],
        base_url=os.getenv("LLM_BINDING_HOST", "http://0.0.0.0:4646/v1"),
        api_key=os.getenv("LLM_BINDING_API_KEY", "not_needed"),
        timeout=600,
        **kwargs,
    )
# --------------------------------------------------
# Embedding function (vLLM)
# --------------------------------------------------
# Embedding callable bound to the vLLM embedding endpoint.
vLLM_emb_func = EmbeddingFunc(
    model_name=os.getenv("EMBEDDING_MODEL", "Qwen/Qwen3-Embedding-0.6B"),
    # send_dimensions=False: do not pass an explicit `dimensions` parameter
    # to the endpoint — presumably unsupported by this server; verify if changed.
    send_dimensions=False,
    embedding_dim=int(os.getenv("EMBEDDING_DIM", 1024)),
    max_token_size=int(os.getenv("EMBEDDING_TOKEN_LIMIT", 4096)),
    # partial() pre-binds model/endpoint/credentials onto the OpenAI-style
    # embedding call so LightRAG only has to pass the texts.
    func=partial(
        openai_embed.func,
        model=os.getenv("EMBEDDING_MODEL", "Qwen/Qwen3-Embedding-0.6B"),
        base_url=os.getenv(
            "EMBEDDING_BINDING_HOST",
            "http://0.0.0.0:1234/v1",
        ),
        api_key=os.getenv("EMBEDDING_BINDING_API_KEY", "not_needed"),
    ),
)
# --------------------------------------------------
# Reranker (Jina-compatible, vLLM-served)
# --------------------------------------------------
# Rerank callable pre-bound to the vLLM-served, Jina-compatible endpoint.
jina_rerank_model_func = partial(
    jina_rerank,
    model=os.getenv("RERANK_MODEL", "Qwen/Qwen3-Reranker-0.6B"),
    # No default here: a missing RERANK_BINDING_API_KEY yields None.
    api_key=os.getenv("RERANK_BINDING_API_KEY"),
    base_url=os.getenv(
        "RERANK_BINDING_HOST",
        "http://0.0.0.0:3535/v1/rerank",
    ),
)
# --------------------------------------------------
# Initialize RAG
# --------------------------------------------------
async def initialize_rag():
    """Build a LightRAG instance wired to vLLM endpoints and init its storages."""
    # Storage backends (configurable via environment or modify here).
    storage_backends = {
        "kv_storage": os.getenv("KV_STORAGE", "PGKVStorage"),
        "doc_status_storage": os.getenv("DOC_STATUS_STORAGE", "PGDocStatusStorage"),
        "vector_storage": os.getenv("VECTOR_STORAGE", "PGVectorStorage"),
        "graph_storage": os.getenv("GRAPH_STORAGE", "Neo4JStorage"),
    }
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=llm_model_func,
        embedding_func=vLLM_emb_func,
        rerank_model_func=jina_rerank_model_func,
        **storage_backends,
    )
    await rag.initialize_storages()
    return rag
# --------------------------------------------------
# Main
# --------------------------------------------------
async def main():
    """Index the demo book with vLLM-backed models and run one reranked query."""
    rag = None
    try:
        # Fail fast when the input document is missing.
        if not os.path.exists(BOOK_FILE):
            raise FileNotFoundError(
                f"'{BOOK_FILE}' not found. Please provide a text file to index."
            )

        rag = await initialize_rag()

        # Ingest the whole document in one call.
        print(f"Indexing {BOOK_FILE}...")
        with open(BOOK_FILE, "r", encoding="utf-8") as f:
            await rag.ainsert(f.read())
        print("Indexing complete.")

        # Hybrid retrieval with the vLLM-served reranker enabled.
        query = (
            "What are the main themes of the book, and how do the key characters "
            "evolve throughout the story?"
        )
        print("\nHybrid Search with Reranking:")
        answer = await rag.aquery(
            query,
            param=QueryParam(
                mode="hybrid",
                stream=False,
                enable_rerank=True,
            ),
        )
        print("\nResult:\n", answer)
    except Exception as e:
        print(f"An error occurred: {e}")
    finally:
        # Always close storage connections, even on failure.
        if rag:
            await rag.finalize_storages()
if __name__ == "__main__":
    # Run the demo, then confirm completion on stdout.
    asyncio.run(main())
    print("\nDone!")
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "examples/lightrag_vllm_demo.py",
"license": "MIT License",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
HKUDS/LightRAG:examples/lightrag_gemini_postgres_demo.py | """
LightRAG Demo with PostgreSQL + Google Gemini
This example demonstrates how to use LightRAG with:
- Google Gemini (LLM + Embeddings)
- PostgreSQL-backed storages for:
- Vector storage
- Graph storage
- KV storage
- Document status storage
Prerequisites:
1. PostgreSQL database running and accessible
2. Required tables will be auto-created by LightRAG
3. Set environment variables (example .env):
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_USER=admin
POSTGRES_PASSWORD=admin
POSTGRES_DATABASE=ai
LIGHTRAG_KV_STORAGE=PGKVStorage
LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
LIGHTRAG_GRAPH_STORAGE=PGGraphStorage
LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
GEMINI_API_KEY=your-api-key
4. Prepare a text file to index (default: Data/book.txt)
Usage:
python examples/lightrag_gemini_postgres_demo.py
"""
import os
import asyncio
import numpy as np
from lightrag import LightRAG, QueryParam
from lightrag.llm.gemini import gemini_model_complete, gemini_embed
from lightrag.utils import setup_logger, wrap_embedding_func_with_attrs
# --------------------------------------------------
# Logger
# --------------------------------------------------
setup_logger("lightrag", level="INFO")
# --------------------------------------------------
# Config
# --------------------------------------------------
WORKING_DIR = "./rag_storage"  # local working directory (created below if absent)
BOOK_FILE = "Data/book.txt"  # document to index
if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)
# Fail fast at import time when the Gemini credential is missing.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    raise ValueError("GEMINI_API_KEY environment variable is not set")
# --------------------------------------------------
# LLM function (Gemini)
# --------------------------------------------------
async def llm_model_func(
    prompt,
    system_prompt=None,
    history_messages=None,
    keyword_extraction=False,
    **kwargs,
) -> str:
    """Complete `prompt` with the Gemini chat model.

    Args:
        prompt: The user prompt to complete.
        system_prompt: Optional system instruction.
        history_messages: Optional prior conversation turns; defaults to an
            empty list per call. (Bug fix: the original used a mutable
            default ``[]``, shared across all calls.)
        keyword_extraction: Accepted for LightRAG's llm_model_func contract.
        **kwargs: Extra options forwarded to gemini_model_complete.
    """
    return await gemini_model_complete(
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages if history_messages is not None else [],
        api_key=GEMINI_API_KEY,
        model_name="gemini-2.0-flash",
        **kwargs,
    )
# --------------------------------------------------
# Embedding function (Gemini)
# --------------------------------------------------
@wrap_embedding_func_with_attrs(
    embedding_dim=768,
    max_token_size=2048,
    model_name="models/text-embedding-004",
)
async def embedding_func(texts: list[str]) -> np.ndarray:
    """Embed a batch of texts with Gemini's text-embedding-004 model."""
    vectors = await gemini_embed.func(
        texts,
        api_key=GEMINI_API_KEY,
        model="models/text-embedding-004",
    )
    return vectors
# --------------------------------------------------
# Initialize RAG with PostgreSQL storages
# --------------------------------------------------
async def initialize_rag() -> LightRAG:
    """Create a LightRAG instance with every storage layer on PostgreSQL."""
    # All four storage layers use the PG-backed implementations.
    pg_storages = {
        "graph_storage": "PGGraphStorage",
        "vector_storage": "PGVectorStorage",
        "doc_status_storage": "PGDocStatusStorage",
        "kv_storage": "PGKVStorage",
    }
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_name="gemini-2.0-flash",
        llm_model_func=llm_model_func,
        embedding_func=embedding_func,
        # Performance tuning
        embedding_func_max_async=4,
        embedding_batch_num=8,
        llm_model_max_async=2,
        # Chunking
        chunk_token_size=1200,
        chunk_overlap_token_size=100,
        **pg_storages,
    )
    # REQUIRED: initialize all storage backends before use.
    await rag.initialize_storages()
    return rag
# --------------------------------------------------
# Main
# --------------------------------------------------
async def main():
    """Index BOOK_FILE into PostgreSQL-backed LightRAG and query in four modes."""
    rag = None
    try:
        print("Initializing LightRAG with PostgreSQL + Gemini...")
        rag = await initialize_rag()

        if not os.path.exists(BOOK_FILE):
            raise FileNotFoundError(
                f"'{BOOK_FILE}' not found. Please provide a text file to index."
            )

        print(f"\nReading document: {BOOK_FILE}")
        with open(BOOK_FILE, "r", encoding="utf-8") as f:
            content = f.read()
        print(f"Loaded document ({len(content)} characters)")

        print("\nInserting document into LightRAG (this may take some time)...")
        await rag.ainsert(content)
        print("Document indexed successfully!")

        print("\n" + "=" * 60)
        print("Running sample queries")
        print("=" * 60)
        query = "What are the top themes in this document?"
        for mode in ["naive", "local", "global", "hybrid"]:
            print(f"\n[{mode.upper()} MODE]")
            answer = await rag.aquery(query, param=QueryParam(mode=mode))
            # Truncate long answers to keep console output readable.
            print(answer[:400] + "..." if len(answer) > 400 else answer)
        print("\nRAG system is ready for use!")
    except Exception as e:
        print("An error occurred:", e)
        import traceback

        traceback.print_exc()
    finally:
        # Always release DB connections, even on failure.
        if rag is not None:
            await rag.finalize_storages()
if __name__ == "__main__":
    # Script entry point: run the async demo to completion.
    asyncio.run(main())
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "examples/lightrag_gemini_postgres_demo.py",
"license": "MIT License",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
HKUDS/LightRAG:tests/test_token_auto_renewal.py | """
Pytest unit tests for token auto-renewal functionality
Tests:
1. Backend token renewal logic
2. Rate limiting for token renewals
3. Token renewal state tracking
"""
import pytest
from datetime import datetime, timedelta, timezone
from unittest.mock import Mock
from fastapi import Response
import time
import sys
# Mock the config before importing utils_api
# (the real modules pull in app config/auth at import time).
sys.modules["lightrag.api.config"] = Mock()
sys.modules["lightrag.api.auth"] = Mock()
# Create a simple token renewal cache for testing:
# maps username -> unix timestamp of the last issued renewal.
_token_renewal_cache = {}
# Minimum seconds that must elapse between renewals for the same user.
_RENEWAL_MIN_INTERVAL = 60
@pytest.mark.offline
class TestTokenRenewal:
    """Tests for token auto-renewal logic.

    Each test simulates the renewal decision inline (threshold check, rate
    limit, token issue) against mocked handler/config/token-info objects.
    """

    @pytest.fixture
    def mock_auth_handler(self):
        """Mock authentication handler"""
        handler = Mock()
        handler.guest_expire_hours = 24
        handler.expire_hours = 24
        handler.create_token = Mock(return_value="new-token-12345")
        return handler

    @pytest.fixture
    def mock_global_args(self):
        """Mock global configuration"""
        args = Mock()
        args.token_auto_renew = True
        args.token_renew_threshold = 0.5
        return args

    @pytest.fixture
    def mock_token_info_guest(self):
        """Mock token info for guest user"""
        # Token with 10 hours remaining (below 50% of 24 hours)
        exp_time = datetime.now(timezone.utc) + timedelta(hours=10)
        return {
            "username": "guest",
            "role": "guest",
            "exp": exp_time,
            "metadata": {"auth_mode": "disabled"},
        }

    @pytest.fixture
    def mock_token_info_user(self):
        """Mock token info for regular user"""
        # Token with 10 hours remaining (below 50% of 24 hours)
        exp_time = datetime.now(timezone.utc) + timedelta(hours=10)
        return {
            "username": "testuser",
            "role": "user",
            "exp": exp_time,
            "metadata": {"auth_mode": "enabled"},
        }

    @pytest.fixture
    def mock_token_info_above_threshold(self):
        """Mock token info with time above renewal threshold"""
        # Token with 20 hours remaining (above 50% of 24 hours)
        exp_time = datetime.now(timezone.utc) + timedelta(hours=20)
        return {
            "username": "testuser",
            "role": "user",
            "exp": exp_time,
            "metadata": {"auth_mode": "enabled"},
        }

    def test_token_renewal_when_below_threshold(
        self, mock_auth_handler, mock_global_args, mock_token_info_user
    ):
        """Test that token is renewed when remaining time < threshold"""
        # Use (and reset) the shared module-level cache.
        global _token_renewal_cache
        _token_renewal_cache.clear()
        response = Mock(spec=Response)
        response.headers = {}
        # Simulate the renewal logic
        expire_time = mock_token_info_user["exp"]
        now = datetime.now(timezone.utc)
        remaining_seconds = (expire_time - now).total_seconds()
        role = mock_token_info_user["role"]
        total_hours = (
            mock_auth_handler.expire_hours
            if role == "user"
            else mock_auth_handler.guest_expire_hours
        )
        total_seconds = total_hours * 3600
        # Should renew because remaining_seconds < total_seconds * 0.5
        should_renew = (
            remaining_seconds < total_seconds * mock_global_args.token_renew_threshold
        )
        assert should_renew is True
        # Simulate renewal
        username = mock_token_info_user["username"]
        current_time = time.time()
        last_renewal = _token_renewal_cache.get(username, 0)
        time_since_last_renewal = current_time - last_renewal
        # Should pass rate limit (first renewal)
        assert time_since_last_renewal >= 60 or last_renewal == 0
        # Perform renewal
        new_token = mock_auth_handler.create_token(
            username=username, role=role, metadata=mock_token_info_user["metadata"]
        )
        response.headers["X-New-Token"] = new_token
        _token_renewal_cache[username] = current_time
        # Verify the renewed token was surfaced and the renewal recorded.
        assert "X-New-Token" in response.headers
        assert response.headers["X-New-Token"] == "new-token-12345"
        assert username in _token_renewal_cache

    def test_token_no_renewal_when_above_threshold(
        self, mock_auth_handler, mock_global_args, mock_token_info_above_threshold
    ):
        """Test that token is NOT renewed when remaining time > threshold"""
        response = Mock(spec=Response)
        response.headers = {}
        expire_time = mock_token_info_above_threshold["exp"]
        now = datetime.now(timezone.utc)
        remaining_seconds = (expire_time - now).total_seconds()
        # (Fix: removed a stray no-op expression statement that read
        # mock_token_info_above_threshold["role"] and discarded the result.)
        total_hours = mock_auth_handler.expire_hours
        total_seconds = total_hours * 3600
        # Should NOT renew because remaining_seconds > total_seconds * 0.5
        should_renew = (
            remaining_seconds < total_seconds * mock_global_args.token_renew_threshold
        )
        assert should_renew is False
        # No renewal should happen
        assert "X-New-Token" not in response.headers

    def test_token_renewal_disabled(
        self, mock_auth_handler, mock_global_args, mock_token_info_user
    ):
        """Test that no renewal happens when TOKEN_AUTO_RENEW=false"""
        mock_global_args.token_auto_renew = False
        response = Mock(spec=Response)
        response.headers = {}
        # Auto-renewal is disabled, so even if below threshold, no renewal
        if not mock_global_args.token_auto_renew:
            # Skip renewal logic
            pass
        assert "X-New-Token" not in response.headers

    def test_token_renewal_for_guest_mode(
        self, mock_auth_handler, mock_global_args, mock_token_info_guest
    ):
        """Test that guest tokens are renewed correctly"""
        # Use (and reset) the shared module-level cache.
        global _token_renewal_cache
        _token_renewal_cache.clear()
        response = Mock(spec=Response)
        response.headers = {}
        expire_time = mock_token_info_guest["exp"]
        now = datetime.now(timezone.utc)
        remaining_seconds = (expire_time - now).total_seconds()
        role = mock_token_info_guest["role"]
        # Guest tokens use the guest-specific lifetime.
        total_hours = mock_auth_handler.guest_expire_hours
        total_seconds = total_hours * 3600
        should_renew = (
            remaining_seconds < total_seconds * mock_global_args.token_renew_threshold
        )
        assert should_renew is True
        # Renewal for guest
        username = mock_token_info_guest["username"]
        new_token = mock_auth_handler.create_token(
            username=username, role=role, metadata=mock_token_info_guest["metadata"]
        )
        response.headers["X-New-Token"] = new_token
        _token_renewal_cache[username] = time.time()
        assert "X-New-Token" in response.headers
        assert username in _token_renewal_cache
@pytest.mark.offline
class TestRateLimiting:
    """Tests for token renewal rate limiting"""
    # These tests read and mutate the module-level _token_renewal_cache and
    # _RENEWAL_MIN_INTERVAL; each test clears the cache before use.
    @pytest.fixture
    def mock_auth_handler(self):
        """Mock authentication handler"""
        handler = Mock()
        handler.expire_hours = 24
        handler.create_token = Mock(return_value="new-token-12345")
        return handler
    def test_rate_limit_prevents_rapid_renewals(self, mock_auth_handler):
        """Test that second renewal within 60s is blocked"""
        # Use global cache and constant
        global _token_renewal_cache, _RENEWAL_MIN_INTERVAL
        username = "testuser"
        _token_renewal_cache.clear()
        # First renewal
        current_time_1 = time.time()
        _token_renewal_cache[username] = current_time_1
        response_1 = Mock(spec=Response)
        response_1.headers = {}
        response_1.headers["X-New-Token"] = "new-token-12345"
        # Immediate second renewal attempt (within 60s)
        current_time_2 = time.time()  # Almost same time
        last_renewal = _token_renewal_cache.get(username, 0)
        time_since_last_renewal = current_time_2 - last_renewal
        # Should be blocked by rate limit
        assert time_since_last_renewal < _RENEWAL_MIN_INTERVAL
        response_2 = Mock(spec=Response)
        response_2.headers = {}
        # No new token should be issued
        if time_since_last_renewal < _RENEWAL_MIN_INTERVAL:
            # Rate limited, skip renewal
            pass
        assert "X-New-Token" not in response_2.headers
    def test_rate_limit_allows_renewal_after_interval(self, mock_auth_handler):
        """Test that renewal succeeds after 60s interval"""
        # Use global cache and constant
        global _token_renewal_cache, _RENEWAL_MIN_INTERVAL
        username = "testuser"
        _token_renewal_cache.clear()
        # First renewal at time T (backdated past the rate-limit window)
        first_renewal_time = time.time() - 61  # 61 seconds ago
        _token_renewal_cache[username] = first_renewal_time
        # Second renewal attempt now
        current_time = time.time()
        last_renewal = _token_renewal_cache.get(username, 0)
        time_since_last_renewal = current_time - last_renewal
        # Should pass rate limit (>60s elapsed)
        assert time_since_last_renewal >= _RENEWAL_MIN_INTERVAL
        response = Mock(spec=Response)
        response.headers = {}
        if time_since_last_renewal >= _RENEWAL_MIN_INTERVAL:
            new_token = mock_auth_handler.create_token(
                username=username, role="user", metadata={}
            )
            response.headers["X-New-Token"] = new_token
            _token_renewal_cache[username] = current_time
        assert "X-New-Token" in response.headers
        assert response.headers["X-New-Token"] == "new-token-12345"
    def test_rate_limit_per_user(self, mock_auth_handler):
        """Test that different users have independent rate limits"""
        # Use global cache
        global _token_renewal_cache
        _token_renewal_cache.clear()
        user1 = "user1"
        user2 = "user2"
        current_time = time.time()
        # User1 gets renewal
        _token_renewal_cache[user1] = current_time
        # User2 should still be able to get renewal (independent cache)
        last_renewal_user2 = _token_renewal_cache.get(user2, 0)
        assert last_renewal_user2 == 0  # No previous renewal
        # User2 can renew
        _token_renewal_cache[user2] = current_time
        # Both users should have entries
        assert user1 in _token_renewal_cache
        assert user2 in _token_renewal_cache
        assert _token_renewal_cache[user1] == _token_renewal_cache[user2]
@pytest.mark.offline
class TestTokenExpirationCalculation:
    """Tests for token expiration time calculation"""

    def test_expiration_extraction_from_jwt(self):
        """Test extracting expiration time from JWT token"""
        import base64
        import json

        # Build a fake JWT whose payload carries a 24h expiry.
        exp_timestamp = int(
            (datetime.now(timezone.utc) + timedelta(hours=24)).timestamp()
        )
        claims = {"sub": "testuser", "role": "user", "exp": exp_timestamp}
        encoded = base64.b64encode(json.dumps(claims).encode()).decode()
        token = f"header.{encoded}.signature"

        # A JWT is always three dot-separated segments.
        segments = token.split(".")
        assert len(segments) == 3
        decoded = json.loads(base64.b64decode(segments[1]))
        assert decoded["exp"] == exp_timestamp
        assert decoded["sub"] == "testuser"

    def test_remaining_time_calculation(self):
        """Test calculation of remaining token time"""
        expiry = datetime.now(timezone.utc) + timedelta(hours=10)
        remaining = (expiry - datetime.now(timezone.utc)).total_seconds()
        # Roughly ten hours (36000 s) should remain.
        assert 35990 < remaining < 36010
        # Fraction of a 24-hour token still left: about 41.67%.
        fraction_left = remaining / (24 * 3600)
        assert 0.41 < fraction_left < 0.42

    def test_threshold_comparison(self):
        """Test threshold-based renewal decision"""
        threshold = 0.5
        total_seconds = 24 * 3600
        # (hours remaining, expected renewal decision); 12h sits exactly at
        # the threshold and therefore must NOT trigger a renewal.
        cases = [(10, True), (20, False), (12, False)]
        for hours_left, expected in cases:
            decision = hours_left * 3600 < total_seconds * threshold
            assert decision is expected
@pytest.mark.offline
def test_renewal_cache_cleanup():
    """Test that renewal cache can be cleared"""
    global _token_renewal_cache

    # Start from a known-empty cache.
    _token_renewal_cache.clear()
    # Populate two entries, then verify clear() removes both.
    _token_renewal_cache["user1"] = time.time()
    _token_renewal_cache["user2"] = time.time()
    assert len(_token_renewal_cache) == 2

    _token_renewal_cache.clear()
    assert len(_token_renewal_cache) == 0
if __name__ == "__main__":
    # Allow running this test module directly: verbose, short tracebacks.
    pytest.main([__file__, "-v", "--tb=short"])
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_token_auto_renewal.py",
"license": "MIT License",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:lightrag/tools/prepare_qdrant_legacy_data.py | #!/usr/bin/env python3
"""
Qdrant Legacy Data Preparation Tool for LightRAG
This tool copies data from new collections to legacy collections for testing
the data migration logic in setup_collection function.
New Collections (with workspace_id):
- lightrag_vdb_chunks
- lightrag_vdb_entities
- lightrag_vdb_relationships
Legacy Collections (without workspace_id, dynamically named as {workspace}_{suffix}):
- {workspace}_chunks (e.g., space1_chunks)
- {workspace}_entities (e.g., space1_entities)
- {workspace}_relationships (e.g., space1_relationships)
The tool:
1. Filters source data by workspace_id
2. Verifies workspace data exists before creating legacy collections
3. Removes workspace_id field to simulate legacy data format
4. Copies only the specified workspace's data to legacy collections
Usage:
python -m lightrag.tools.prepare_qdrant_legacy_data
# or
python lightrag/tools/prepare_qdrant_legacy_data.py
# Specify custom workspace
python -m lightrag.tools.prepare_qdrant_legacy_data --workspace space1
# Process specific collection types only
python -m lightrag.tools.prepare_qdrant_legacy_data --types chunks,entities
# Dry run (preview only, no actual changes)
python -m lightrag.tools.prepare_qdrant_legacy_data --dry-run
"""
import argparse
import asyncio
import configparser
import hashlib
import os
import sys
import time
import uuid
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional

import pipmaster as pm
from dotenv import load_dotenv
from qdrant_client import QdrantClient, models  # type: ignore
# Add project root to path so `lightrag` imports resolve when run as a script
sys.path.insert(
    0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
)

# Load environment variables from .env without overriding already-set ones
load_dotenv(dotenv_path=".env", override=False)

# Ensure qdrant-client is installed before the module-level import is used
if not pm.is_installed("qdrant-client"):
    pm.install("qdrant-client")

# Collection namespace mapping: new collection pattern -> legacy suffix
# Legacy collection will be named as: {workspace}_{suffix}
COLLECTION_NAMESPACES = {
    "chunks": {
        "new": "lightrag_vdb_chunks",
        "suffix": "chunks",
    },
    "entities": {
        "new": "lightrag_vdb_entities",
        "suffix": "entities",
    },
    "relationships": {
        "new": "lightrag_vdb_relationships",
        "suffix": "relationships",
    },
}

# Default number of points copied per scroll/upsert batch
DEFAULT_BATCH_SIZE = 500

# Payload field removed to simulate the legacy (pre-workspace) data format
WORKSPACE_ID_FIELD = "workspace_id"

# ANSI color codes for terminal output
BOLD_CYAN = "\033[1;36m"
BOLD_GREEN = "\033[1;32m"
BOLD_YELLOW = "\033[1;33m"
BOLD_RED = "\033[1;31m"
RESET = "\033[0m"
@dataclass
class CopyStats:
    """Statistics accumulated for a single collection copy operation."""

    collection_type: str
    source_collection: str
    target_collection: str
    total_records: int = 0
    copied_records: int = 0
    failed_records: int = 0
    errors: List[Dict[str, Any]] = field(default_factory=list)
    elapsed_time: float = 0.0

    def add_error(self, batch_idx: int, error: Exception, batch_size: int):
        """Record a failed batch and count every record in it as lost."""
        entry = {
            "batch": batch_idx,
            "error_type": type(error).__name__,
            "error_msg": str(error),
            "records_lost": batch_size,
            "timestamp": time.time(),
        }
        self.errors.append(entry)
        self.failed_records += batch_size
class QdrantLegacyDataPreparationTool:
    """Tool for preparing legacy data in Qdrant for migration testing"""

    def __init__(
        self,
        workspace: str = "space1",
        batch_size: int = DEFAULT_BATCH_SIZE,
        dry_run: bool = False,
        clear_target: bool = False,
    ):
        """
        Initialize the tool.

        Args:
            workspace: Workspace to use for filtering new collection data
            batch_size: Number of records to process per batch
            dry_run: If True, only preview operations without making changes
            clear_target: If True, delete target collection before copying data
        """
        self.workspace = workspace
        self.batch_size = batch_size
        self.dry_run = dry_run
        self.clear_target = clear_target
        # Client is created lazily on first use; see _get_client().
        self._client: Optional[QdrantClient] = None
def _get_client(self) -> QdrantClient:
"""Get or create QdrantClient instance"""
if self._client is None:
config = configparser.ConfigParser()
config.read("config.ini", "utf-8")
self._client = QdrantClient(
url=os.environ.get(
"QDRANT_URL", config.get("qdrant", "uri", fallback=None)
),
api_key=os.environ.get(
"QDRANT_API_KEY",
config.get("qdrant", "apikey", fallback=None),
),
)
return self._client
    def print_header(self):
        """Print the tool banner plus the active run configuration."""
        print("\n" + "=" * 60)
        print("Qdrant Legacy Data Preparation Tool - LightRAG")
        print("=" * 60)
        # Surface destructive / preview modes prominently so they are not missed.
        if self.dry_run:
            print(f"{BOLD_YELLOW}⚠️ DRY RUN MODE - No changes will be made{RESET}")
        if self.clear_target:
            print(
                f"{BOLD_RED}⚠️ CLEAR TARGET MODE - Target collections will be deleted first{RESET}"
            )
        print(f"Workspace: {BOLD_CYAN}{self.workspace}{RESET}")
        print(f"Batch Size: {self.batch_size}")
        print("=" * 60)

    def check_connection(self) -> bool:
        """Return True when the Qdrant server is reachable, False otherwise."""
        try:
            client = self._get_client()
            # Listing collections is a cheap round-trip that proves connectivity.
            client.get_collections()
            print(f"{BOLD_GREEN}✓{RESET} Qdrant connection successful")
            return True
        except Exception as e:
            print(f"{BOLD_RED}✗{RESET} Qdrant connection failed: {e}")
            return False
    def get_collection_info(self, collection_name: str) -> Optional[Dict[str, Any]]:
        """
        Get collection information.

        Args:
            collection_name: Name of the collection

        Returns:
            Dictionary with collection info (name, vector_size, count, distance)
            or None if the collection does not exist. vector_size/distance may
            be None when the vectors config shape is unrecognized.
        """
        client = self._get_client()
        if not client.collection_exists(collection_name):
            return None
        info = client.get_collection(collection_name)
        # exact=True forces a real count rather than an estimate.
        count = client.count(collection_name=collection_name, exact=True).count
        # Handle both object and dict formats for vectors config
        vectors_config = info.config.params.vectors
        if isinstance(vectors_config, dict):
            # Named vectors format or dict format
            if vectors_config:
                first_key = next(iter(vectors_config.keys()), None)
                if first_key and hasattr(vectors_config[first_key], "size"):
                    vector_size = vectors_config[first_key].size
                    distance = vectors_config[first_key].distance
                else:
                    # Fall back to inspecting the first value, which may itself
                    # be a plain dict or an object exposing size/distance.
                    first_val = next(iter(vectors_config.values()), {})
                    vector_size = (
                        first_val.get("size")
                        if isinstance(first_val, dict)
                        else getattr(first_val, "size", None)
                    )
                    distance = (
                        first_val.get("distance")
                        if isinstance(first_val, dict)
                        else getattr(first_val, "distance", None)
                    )
            else:
                vector_size = None
                distance = None
        else:
            # Standard single vector format
            vector_size = vectors_config.size
            distance = vectors_config.distance
        return {
            "name": collection_name,
            "vector_size": vector_size,
            "count": count,
            "distance": distance,
        }

    def delete_collection(self, collection_name: str) -> bool:
        """
        Delete a collection if it exists.

        Args:
            collection_name: Name of the collection to delete

        Returns:
            True if deleted (or it doesn't exist, or dry-run), False on failure
        """
        client = self._get_client()
        if not client.collection_exists(collection_name):
            return True
        if self.dry_run:
            # Preview only: report what would be deleted and how many records.
            target_info = self.get_collection_info(collection_name)
            count = target_info["count"] if target_info else 0
            print(
                f" {BOLD_YELLOW}[DRY RUN]{RESET} Would delete collection '{collection_name}' ({count:,} records)"
            )
            return True
        try:
            # Capture the record count before dropping, for the log line below.
            target_info = self.get_collection_info(collection_name)
            count = target_info["count"] if target_info else 0
            client.delete_collection(collection_name=collection_name)
            print(
                f" {BOLD_RED}✗{RESET} Deleted collection '{collection_name}' ({count:,} records)"
            )
            return True
        except Exception as e:
            print(f" {BOLD_RED}✗{RESET} Failed to delete collection: {e}")
            return False

    def create_legacy_collection(
        self, collection_name: str, vector_size: int, distance: models.Distance
    ) -> bool:
        """
        Create legacy collection if it doesn't exist.

        Args:
            collection_name: Name of the collection to create
            vector_size: Dimension of vectors
            distance: Distance metric

        Returns:
            True if created or already exists (or dry-run), False on failure
        """
        client = self._get_client()
        if client.collection_exists(collection_name):
            print(f" Collection '{collection_name}' already exists")
            return True
        if self.dry_run:
            print(
                f" {BOLD_YELLOW}[DRY RUN]{RESET} Would create collection '{collection_name}' with {vector_size}d vectors"
            )
            return True
        try:
            client.create_collection(
                collection_name=collection_name,
                vectors_config=models.VectorParams(
                    size=vector_size,
                    distance=distance,
                ),
                # NOTE(review): payload_m=16 with m=0 presumably mirrors the
                # HNSW settings used by LightRAG's qdrant_impl — confirm they
                # stay in sync with that module.
                hnsw_config=models.HnswConfigDiff(
                    payload_m=16,
                    m=0,
                ),
            )
            print(
                f" {BOLD_GREEN}✓{RESET} Created collection '{collection_name}' with {vector_size}d vectors"
            )
            return True
        except Exception as e:
            print(f" {BOLD_RED}✗{RESET} Failed to create collection: {e}")
            return False
def _get_workspace_filter(self) -> models.Filter:
"""Create workspace filter for Qdrant queries"""
return models.Filter(
must=[
models.FieldCondition(
key=WORKSPACE_ID_FIELD,
match=models.MatchValue(value=self.workspace),
)
]
)
def get_workspace_count(self, collection_name: str) -> int:
"""
Get count of records for the current workspace in a collection.
Args:
collection_name: Name of the collection
Returns:
Count of records for the workspace
"""
client = self._get_client()
return client.count(
collection_name=collection_name,
count_filter=self._get_workspace_filter(),
exact=True,
).count
def copy_collection_data(
self,
source_collection: str,
target_collection: str,
collection_type: str,
workspace_count: int,
) -> CopyStats:
"""
Copy data from source to target collection.
This filters by workspace_id and removes it from payload to simulate legacy data format.
Args:
source_collection: Source collection name
target_collection: Target collection name
collection_type: Type of collection (chunks, entities, relationships)
workspace_count: Pre-computed count of workspace records
Returns:
CopyStats with operation results
"""
client = self._get_client()
stats = CopyStats(
collection_type=collection_type,
source_collection=source_collection,
target_collection=target_collection,
)
start_time = time.time()
stats.total_records = workspace_count
if workspace_count == 0:
print(f" No records for workspace '{self.workspace}', skipping")
stats.elapsed_time = time.time() - start_time
return stats
print(f" Workspace records: {workspace_count:,}")
if self.dry_run:
print(
f" {BOLD_YELLOW}[DRY RUN]{RESET} Would copy {workspace_count:,} records to '{target_collection}'"
)
stats.copied_records = workspace_count
stats.elapsed_time = time.time() - start_time
return stats
# Batch copy using scroll with workspace filter
workspace_filter = self._get_workspace_filter()
offset = None
batch_idx = 0
while True:
# Scroll source collection with workspace filter
result = client.scroll(
collection_name=source_collection,
scroll_filter=workspace_filter,
limit=self.batch_size,
offset=offset,
with_vectors=True,
with_payload=True,
)
points, next_offset = result
if not points:
break
batch_idx += 1
# Transform points: remove workspace_id from payload
new_points = []
for point in points:
new_payload = dict(point.payload or {})
# Remove workspace_id to simulate legacy format
new_payload.pop(WORKSPACE_ID_FIELD, None)
# Use original id from payload if available, otherwise use point.id
original_id = new_payload.get("id")
if original_id:
# Generate a simple deterministic id for legacy format
# Use original id directly (legacy format didn't have workspace prefix)
import hashlib
import uuid
hashed = hashlib.sha256(original_id.encode("utf-8")).digest()
point_id = uuid.UUID(bytes=hashed[:16], version=4).hex
else:
point_id = str(point.id)
new_points.append(
models.PointStruct(
id=point_id,
vector=point.vector,
payload=new_payload,
)
)
try:
# Upsert to target collection
client.upsert(
collection_name=target_collection, points=new_points, wait=True
)
stats.copied_records += len(new_points)
# Progress bar
progress = (stats.copied_records / workspace_count) * 100
bar_length = 30
filled = int(bar_length * stats.copied_records // workspace_count)
bar = "█" * filled + "░" * (bar_length - filled)
print(
f"\r Copying: {bar} {stats.copied_records:,}/{workspace_count:,} ({progress:.1f}%) ",
end="",
flush=True,
)
except Exception as e:
stats.add_error(batch_idx, e, len(new_points))
print(
f"\n {BOLD_RED}✗{RESET} Batch {batch_idx} failed: {type(e).__name__}: {e}"
)
if next_offset is None:
break
offset = next_offset
print() # New line after progress bar
stats.elapsed_time = time.time() - start_time
return stats
    def process_collection_type(self, collection_type: str) -> Optional[CopyStats]:
        """
        Process a single collection type.

        Args:
            collection_type: Type of collection (chunks, entities, relationships)

        Returns:
            CopyStats, or None when the type is unknown, the source is missing,
            the workspace has no data, or a delete/create step failed.
        """
        namespace_config = COLLECTION_NAMESPACES.get(collection_type)
        if not namespace_config:
            print(f"{BOLD_RED}✗{RESET} Unknown collection type: {collection_type}")
            return None
        source = namespace_config["new"]
        # Generate legacy collection name dynamically: {workspace}_{suffix}
        target = f"{self.workspace}_{namespace_config['suffix']}"
        print(f"\n{'=' * 50}")
        print(f"Processing: {BOLD_CYAN}{collection_type}{RESET}")
        print(f"{'=' * 50}")
        print(f" Source: {source}")
        print(f" Target: {target}")
        # Check source collection
        source_info = self.get_collection_info(source)
        if source_info is None:
            print(
                f" {BOLD_YELLOW}⚠{RESET} Source collection '{source}' does not exist, skipping"
            )
            return None
        print(f" Source vector dimension: {source_info['vector_size']}d")
        print(f" Source distance metric: {source_info['distance']}")
        print(f" Source total records: {source_info['count']:,}")
        # Check workspace data exists BEFORE creating legacy collection,
        # so an empty workspace never leaves behind an empty target.
        workspace_count = self.get_workspace_count(source)
        print(f" Workspace '{self.workspace}' records: {workspace_count:,}")
        if workspace_count == 0:
            print(
                f" {BOLD_YELLOW}⚠{RESET} No data found for workspace '{self.workspace}' in '{source}', skipping"
            )
            return None
        # Clear target collection if requested
        if self.clear_target:
            if not self.delete_collection(target):
                return None
        # Create target collection only after confirming workspace data exists
        if not self.create_legacy_collection(
            target, source_info["vector_size"], source_info["distance"]
        ):
            return None
        # Copy data with workspace filter
        stats = self.copy_collection_data(
            source, target, collection_type, workspace_count
        )
        # Print result
        if stats.failed_records == 0:
            print(
                f" {BOLD_GREEN}✓{RESET} Copied {stats.copied_records:,} records in {stats.elapsed_time:.2f}s"
            )
        else:
            print(
                f" {BOLD_YELLOW}⚠{RESET} Copied {stats.copied_records:,} records, "
                f"{BOLD_RED}{stats.failed_records:,} failed{RESET} in {stats.elapsed_time:.2f}s"
            )
        return stats

    def print_summary(self, all_stats: List[CopyStats]):
        """Print a per-type and aggregate summary of all copy operations."""
        print("\n" + "=" * 60)
        print("Summary")
        print("=" * 60)
        total_copied = sum(s.copied_records for s in all_stats)
        total_failed = sum(s.failed_records for s in all_stats)
        total_time = sum(s.elapsed_time for s in all_stats)
        for stats in all_stats:
            status = (
                f"{BOLD_GREEN}✓{RESET}"
                if stats.failed_records == 0
                else f"{BOLD_YELLOW}⚠{RESET}"
            )
            print(
                f" {status} {stats.collection_type}: {stats.copied_records:,}/{stats.total_records:,} "
                f"({stats.source_collection} → {stats.target_collection})"
            )
        print("-" * 60)
        print(f" Total records copied: {BOLD_CYAN}{total_copied:,}{RESET}")
        if total_failed > 0:
            print(f" Total records failed: {BOLD_RED}{total_failed:,}{RESET}")
        print(f" Total time: {total_time:.2f}s")
        if self.dry_run:
            print(f"\n{BOLD_YELLOW}⚠️ DRY RUN - No actual changes were made{RESET}")
        # Print error details if any (capped at the first five)
        all_errors = []
        for stats in all_stats:
            all_errors.extend(stats.errors)
        if all_errors:
            print(f"\n{BOLD_RED}Errors ({len(all_errors)}){RESET}")
            for i, error in enumerate(all_errors[:5], 1):
                print(
                    f" {i}. Batch {error['batch']}: {error['error_type']}: {error['error_msg']}"
                )
            if len(all_errors) > 5:
                print(f" ... and {len(all_errors) - 5} more errors")
        print("=" * 60)

    async def run(self, collection_types: Optional[List[str]] = None):
        """
        Run the data preparation tool.

        Args:
            collection_types: List of collection types to process (default: all)
        """
        self.print_header()
        # Check connection
        if not self.check_connection():
            return
        # Determine which collection types to process
        if collection_types:
            types_to_process = [t.strip() for t in collection_types]
            invalid_types = [
                t for t in types_to_process if t not in COLLECTION_NAMESPACES
            ]
            if invalid_types:
                print(
                    f"{BOLD_RED}✗{RESET} Invalid collection types: {', '.join(invalid_types)}"
                )
                print(f" Valid types: {', '.join(COLLECTION_NAMESPACES.keys())}")
                return
        else:
            types_to_process = list(COLLECTION_NAMESPACES.keys())
        print(f"\nCollection types to process: {', '.join(types_to_process)}")
        # Process each collection type
        all_stats = []
        for ctype in types_to_process:
            stats = self.process_collection_type(ctype)
            if stats:
                all_stats.append(stats)
        # Print summary
        if all_stats:
            self.print_summary(all_stats)
        else:
            print(f"\n{BOLD_YELLOW}⚠{RESET} No collections were processed")
def parse_args():
    """Parse command line arguments.

    Returns:
        argparse.Namespace with workspace, types, batch_size, dry_run,
        clear_target attributes.
    """
    parser = argparse.ArgumentParser(
        description="Prepare legacy data in Qdrant for migration testing",
        # Raw formatter keeps the epilog's example block as written.
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
python -m lightrag.tools.prepare_qdrant_legacy_data
python -m lightrag.tools.prepare_qdrant_legacy_data --workspace space1
python -m lightrag.tools.prepare_qdrant_legacy_data --types chunks,entities
python -m lightrag.tools.prepare_qdrant_legacy_data --dry-run
""",
    )
    parser.add_argument(
        "--workspace",
        type=str,
        default="space1",
        help="Workspace name (default: space1)",
    )
    parser.add_argument(
        "--types",
        type=str,
        default=None,
        help="Comma-separated list of collection types (chunks, entities, relationships)",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=DEFAULT_BATCH_SIZE,
        help=f"Batch size for copy operations (default: {DEFAULT_BATCH_SIZE})",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Preview operations without making changes",
    )
    parser.add_argument(
        "--clear-target",
        action="store_true",
        help="Delete target collections before copying (for clean test environment)",
    )
    return parser.parse_args()
async def main():
    """CLI entry point: parse arguments, build the tool, and run it."""
    args = parse_args()
    # --types is a comma-separated string; None means "all types".
    collection_types = (
        [t.strip() for t in args.types.split(",")] if args.types else None
    )
    tool = QdrantLegacyDataPreparationTool(
        workspace=args.workspace,
        batch_size=args.batch_size,
        dry_run=args.dry_run,
        clear_target=args.clear_target,
    )
    await tool.run(collection_types=collection_types)
# Script entry point: drive the async tool with a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "lightrag/tools/prepare_qdrant_legacy_data.py",
"license": "MIT License",
"lines": 601,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
HKUDS/LightRAG:tests/test_dimension_mismatch.py | """
Tests for dimension mismatch handling during migration.
This test module verifies that both PostgreSQL and Qdrant storage backends
properly detect and handle vector dimension mismatches when migrating from
legacy collections/tables to new ones with different embedding models.
"""
import json
import pytest
from unittest.mock import MagicMock, AsyncMock, patch
from lightrag.kg.qdrant_impl import QdrantVectorDBStorage
from lightrag.kg.postgres_impl import PGVectorStorage
from lightrag.exceptions import DataMigrationError
# Note: Tests should use proper table names that have DDL templates
# Valid base tables: LIGHTRAG_VDB_CHUNKS, LIGHTRAG_VDB_ENTITIES, LIGHTRAG_VDB_RELATIONSHIPS,
# LIGHTRAG_DOC_CHUNKS, LIGHTRAG_DOC_FULL_DOCS, LIGHTRAG_DOC_TEXT_CHUNKS
class TestQdrantDimensionMismatch:
    """Test suite for Qdrant dimension mismatch handling."""

    def test_qdrant_dimension_mismatch_raises_error(self):
        """
        Test that Qdrant raises DataMigrationError when dimensions don't match.

        Scenario: Legacy collection has 1536d vectors, new model expects 3072d.
        Expected: DataMigrationError is raised to prevent data corruption.
        """
        from qdrant_client import models

        # Setup mock client
        client = MagicMock()
        # Mock legacy collection with 1536d vectors
        legacy_collection_info = MagicMock()
        legacy_collection_info.config.params.vectors.size = 1536

        # Setup collection existence checks: only the legacy collection exists.
        def collection_exists_side_effect(name):
            if (
                name == "lightrag_vdb_chunks"
            ):  # legacy (matches _find_legacy_collection pattern)
                return True
            elif name == "lightrag_chunks_model_3072d":  # new
                return False
            return False

        client.collection_exists.side_effect = collection_exists_side_effect
        client.get_collection.return_value = legacy_collection_info
        client.count.return_value.count = 100  # Legacy has data
        # Patch _find_legacy_collection to return the legacy collection name
        with patch(
            "lightrag.kg.qdrant_impl._find_legacy_collection",
            return_value="lightrag_vdb_chunks",
        ):
            # Call setup_collection with 3072d (different from legacy 1536d)
            # Should raise DataMigrationError due to dimension mismatch
            with pytest.raises(DataMigrationError) as exc_info:
                QdrantVectorDBStorage.setup_collection(
                    client,
                    "lightrag_chunks_model_3072d",
                    namespace="chunks",
                    workspace="test",
                    vectors_config=models.VectorParams(
                        size=3072, distance=models.Distance.COSINE
                    ),
                    hnsw_config=models.HnswConfigDiff(
                        payload_m=16,
                        m=0,
                    ),
                    model_suffix="model_3072d",
                )
        # Verify error message contains dimension information
        assert "3072" in str(exc_info.value) or "1536" in str(exc_info.value)
        # Verify new collection was NOT created (error raised before creation)
        client.create_collection.assert_not_called()
        # Verify migration was NOT attempted
        client.scroll.assert_not_called()
        client.upsert.assert_not_called()
    def test_qdrant_dimension_match_proceed_migration(self):
        """
        Test that Qdrant proceeds with migration when dimensions match.

        Scenario: Legacy collection has 1536d vectors, new model also expects 1536d.
        Expected: Migration proceeds normally.
        """
        from qdrant_client import models

        client = MagicMock()
        # Mock legacy collection with 1536d vectors (matching new)
        legacy_collection_info = MagicMock()
        legacy_collection_info.config.params.vectors.size = 1536

        def collection_exists_side_effect(name):
            if name == "lightrag_chunks":  # legacy
                return True
            elif name == "lightrag_chunks_model_1536d":  # new
                return False
            return False

        client.collection_exists.side_effect = collection_exists_side_effect
        client.get_collection.return_value = legacy_collection_info
        # Track whether upsert has been called (migration occurred); a mutable
        # dict lets the closures below observe and flip the flag.
        migration_done = {"value": False}

        def upsert_side_effect(*args, **kwargs):
            migration_done["value"] = True
            return MagicMock()

        client.upsert.side_effect = upsert_side_effect

        # Mock count to return different values based on collection name and migration state
        # Before migration: new collection has 0 records
        # After migration: new collection has 1 record (matching migrated data)
        def count_side_effect(collection_name, **kwargs):
            result = MagicMock()
            if collection_name == "lightrag_chunks":  # legacy
                result.count = 1  # Legacy has 1 record
            elif collection_name == "lightrag_chunks_model_1536d":  # new
                # Return 0 before migration, 1 after migration
                result.count = 1 if migration_done["value"] else 0
            else:
                result.count = 0
            return result

        client.count.side_effect = count_side_effect
        # Mock scroll to return sample data (1 record for easier verification)
        sample_point = MagicMock()
        sample_point.id = "test_id"
        sample_point.vector = [0.1] * 1536
        sample_point.payload = {"id": "test"}
        client.scroll.return_value = ([sample_point], None)
        # Mock _find_legacy_collection to return the legacy collection name
        with patch(
            "lightrag.kg.qdrant_impl._find_legacy_collection",
            return_value="lightrag_chunks",
        ):
            # Call setup_collection with matching 1536d
            QdrantVectorDBStorage.setup_collection(
                client,
                "lightrag_chunks_model_1536d",
                namespace="chunks",
                workspace="test",
                vectors_config=models.VectorParams(
                    size=1536, distance=models.Distance.COSINE
                ),
                hnsw_config=models.HnswConfigDiff(
                    payload_m=16,
                    m=0,
                ),
                model_suffix="model_1536d",
            )
        # Verify migration WAS attempted
        client.create_collection.assert_called_once()
        client.scroll.assert_called()
        client.upsert.assert_called()
class TestPostgresDimensionMismatch:
    """Test suite for PostgreSQL dimension mismatch handling."""

    async def test_postgres_dimension_mismatch_raises_error_metadata(self):
        """
        Test that PostgreSQL raises DataMigrationError when dimensions don't match.

        Scenario: Legacy table has 1536d vectors, new model expects 3072d.
        Expected: DataMigrationError is raised to prevent data corruption.

        NOTE(review): bare async test — assumes pytest-asyncio auto mode;
        confirm against the project's pytest configuration.
        """
        # Setup mock database
        db = AsyncMock()

        # Mock check_table_exists: only the legacy table exists.
        async def mock_check_table_exists(table_name):
            if table_name == "LIGHTRAG_DOC_CHUNKS":  # legacy
                return True
            elif table_name == "LIGHTRAG_DOC_CHUNKS_model_3072d":  # new
                return False
            return False

        db.check_table_exists = AsyncMock(side_effect=mock_check_table_exists)

        # Mock table existence and dimension checks
        async def query_side_effect(query, params, **kwargs):
            if "COUNT(*)" in query:
                return {"count": 100}  # Legacy has data
            elif "SELECT content_vector FROM" in query:
                # Return sample vector with 1536 dimensions
                return {"content_vector": [0.1] * 1536}
            return {}

        db.query.side_effect = query_side_effect
        db.execute = AsyncMock()
        db._create_vector_index = AsyncMock()
        # Call setup_table with 3072d (different from legacy 1536d)
        # Should raise DataMigrationError due to dimension mismatch
        with pytest.raises(DataMigrationError) as exc_info:
            await PGVectorStorage.setup_table(
                db,
                "LIGHTRAG_DOC_CHUNKS_model_3072d",
                legacy_table_name="LIGHTRAG_DOC_CHUNKS",
                base_table="LIGHTRAG_DOC_CHUNKS",
                embedding_dim=3072,
                workspace="test",
            )
        # Verify error message contains dimension information
        assert "3072" in str(exc_info.value) or "1536" in str(exc_info.value)
    async def test_postgres_dimension_mismatch_raises_error_sampling(self):
        """
        Test that PostgreSQL raises error when dimensions don't match (via sampling).

        Scenario: Legacy table vector sampling detects 1536d vs expected 3072d.
        Expected: DataMigrationError is raised to prevent data corruption.
        """
        db = AsyncMock()

        # Mock check_table_exists: only the legacy table exists.
        async def mock_check_table_exists(table_name):
            if table_name == "LIGHTRAG_DOC_CHUNKS":  # legacy
                return True
            elif table_name == "LIGHTRAG_DOC_CHUNKS_model_3072d":  # new
                return False
            return False

        db.check_table_exists = AsyncMock(side_effect=mock_check_table_exists)

        # Mock table existence and dimension checks
        async def query_side_effect(query, params, **kwargs):
            if "information_schema.tables" in query:
                if params[0] == "LIGHTRAG_DOC_CHUNKS":  # legacy
                    return {"exists": True}
                elif params[0] == "LIGHTRAG_DOC_CHUNKS_model_3072d":  # new
                    return {"exists": False}
            elif "COUNT(*)" in query:
                return {"count": 100}  # Legacy has data
            elif "SELECT content_vector FROM" in query:
                # Return sample vector with 1536 dimensions as a JSON string,
                # exercising the string-parsing branch of the dimension probe.
                return {"content_vector": json.dumps([0.1] * 1536)}
            return {}

        db.query.side_effect = query_side_effect
        db.execute = AsyncMock()
        db._create_vector_index = AsyncMock()
        # Call setup_table with 3072d (different from legacy 1536d)
        # Should raise DataMigrationError due to dimension mismatch
        with pytest.raises(DataMigrationError) as exc_info:
            await PGVectorStorage.setup_table(
                db,
                "LIGHTRAG_DOC_CHUNKS_model_3072d",
                legacy_table_name="LIGHTRAG_DOC_CHUNKS",
                base_table="LIGHTRAG_DOC_CHUNKS",
                embedding_dim=3072,
                workspace="test",
            )
        # Verify error message contains dimension information
        assert "3072" in str(exc_info.value) or "1536" in str(exc_info.value)
    async def test_postgres_dimension_match_proceed_migration(self):
        """
        Test that PostgreSQL proceeds with migration when dimensions match.

        Scenario: Legacy table has 1536d vectors, new model also expects 1536d.
        Expected: Migration proceeds normally.
        """
        db = AsyncMock()
        # Track migration state; closures below read/flip this flag.
        migration_done = {"value": False}
        # Define exactly 2 records for consistency
        mock_records = [
            {
                "id": "test1",
                "content_vector": [0.1] * 1536,
                "workspace": "test",
            },
            {
                "id": "test2",
                "content_vector": [0.2] * 1536,
                "workspace": "test",
            },
        ]

        # Mock check_table_exists
        async def mock_check_table_exists(table_name):
            if table_name == "LIGHTRAG_DOC_CHUNKS":  # legacy exists
                return True
            elif table_name == "LIGHTRAG_DOC_CHUNKS_model_1536d":  # new doesn't exist
                return False
            return False

        db.check_table_exists = AsyncMock(side_effect=mock_check_table_exists)

        async def query_side_effect(query, params, **kwargs):
            multirows = kwargs.get("multirows", False)
            query_upper = query.upper()
            if "information_schema.tables" in query:
                if params[0] == "LIGHTRAG_DOC_CHUNKS":  # legacy
                    return {"exists": True}
                elif params[0] == "LIGHTRAG_DOC_CHUNKS_model_1536d":  # new
                    return {"exists": False}
            elif "COUNT(*)" in query_upper:
                # Return different counts based on table name in query and migration state
                if "LIGHTRAG_DOC_CHUNKS_MODEL_1536D" in query_upper:
                    # After migration: return migrated count, before: return 0
                    return {
                        "count": len(mock_records) if migration_done["value"] else 0
                    }
                # Legacy table always has 2 records (matching mock_records)
                return {"count": len(mock_records)}
            elif "PG_ATTRIBUTE" in query_upper:
                return {"vector_dim": 1536}  # Legacy has matching 1536d
            elif "SELECT" in query_upper and "FROM" in query_upper and multirows:
                # Return sample data for migration using keyset pagination
                # Handle keyset pagination: params = [workspace, limit] or [workspace, last_id, limit]
                if "id >" in query.lower():
                    # Keyset pagination: params = [workspace, last_id, limit]
                    last_id = params[1] if len(params) > 1 else None
                    # Find records after last_id
                    found_idx = -1
                    for i, rec in enumerate(mock_records):
                        if rec["id"] == last_id:
                            found_idx = i
                            break
                    if found_idx >= 0:
                        return mock_records[found_idx + 1 :]
                    return []
                else:
                    # First batch: params = [workspace, limit]
                    return mock_records
            return {}

        db.query.side_effect = query_side_effect
        # Mock _run_with_retry to track when migration happens
        migration_executed = []

        async def mock_run_with_retry(operation, *args, **kwargs):
            migration_executed.append(True)
            migration_done["value"] = True
            return None

        db._run_with_retry = AsyncMock(side_effect=mock_run_with_retry)
        db.execute = AsyncMock()
        db._create_vector_index = AsyncMock()
        # Call setup_table with matching 1536d
        await PGVectorStorage.setup_table(
            db,
            "LIGHTRAG_DOC_CHUNKS_model_1536d",
            legacy_table_name="LIGHTRAG_DOC_CHUNKS",
            base_table="LIGHTRAG_DOC_CHUNKS",
            embedding_dim=1536,
            workspace="test",
        )
        # Verify migration WAS called (via _run_with_retry for batch operations)
        assert len(migration_executed) > 0, "Migration should have been executed"
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_dimension_mismatch.py",
"license": "MIT License",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_no_model_suffix_safety.py | """
Tests for safety when model suffix is absent (no model_name provided).
This test module verifies that the system correctly handles the case when
no model_name is provided, preventing accidental deletion of the only table/collection
on restart.
Critical Bug: When model_suffix is empty, table_name == legacy_table_name.
On second startup, Case 1 logic would delete the only table/collection thinking
it's "legacy", causing all subsequent operations to fail.
"""
from unittest.mock import MagicMock, AsyncMock, patch
from lightrag.kg.qdrant_impl import QdrantVectorDBStorage
from lightrag.kg.postgres_impl import PGVectorStorage
class TestNoModelSuffixSafety:
    """Test suite for preventing data loss when model_suffix is absent."""

    def test_qdrant_no_suffix_second_startup(self):
        """
        Test Qdrant doesn't delete collection on second startup when no model_name.

        Scenario:
        1. First startup: Creates collection without suffix
        2. Collection is empty
        3. Second startup: Should NOT delete the collection

        Bug: Without fix, Case 1 would delete the only collection.
        """
        from qdrant_client import models

        client = MagicMock()
        # Simulate second startup: collection already exists and is empty
        # IMPORTANT: Without suffix, collection_name == legacy collection name
        collection_name = "lightrag_vdb_chunks"  # No suffix, same as legacy
        # Both exist (they're the same collection)
        client.collection_exists.return_value = True
        # Collection is empty
        client.count.return_value.count = 0
        # Patch _find_legacy_collection to return the SAME collection name
        # This simulates the scenario where new collection == legacy collection
        with patch(
            "lightrag.kg.qdrant_impl._find_legacy_collection",
            return_value="lightrag_vdb_chunks",  # Same as collection_name
        ):
            # Call setup_collection
            # This should detect that new == legacy and skip deletion
            QdrantVectorDBStorage.setup_collection(
                client,
                collection_name,
                namespace="chunks",
                workspace="_",
                vectors_config=models.VectorParams(
                    size=1536, distance=models.Distance.COSINE
                ),
                hnsw_config=models.HnswConfigDiff(
                    payload_m=16,
                    m=0,
                ),
                model_suffix="",  # Empty suffix to simulate no model_name provided
            )
        # CRITICAL: Collection should NOT be deleted
        client.delete_collection.assert_not_called()
        # Verify we returned early (skipped Case 1 cleanup)
        # The collection_exists was checked, but we didn't proceed to count
        # because we detected same name
        assert client.collection_exists.call_count >= 1
    async def test_postgres_no_suffix_second_startup(self):
        """
        Test PostgreSQL doesn't delete table on second startup when no model_name.

        Scenario:
        1. First startup: Creates table without suffix
        2. Table is empty
        3. Second startup: Should NOT delete the table

        Bug: Without fix, Case 1 would delete the only table.

        NOTE(review): bare async test — assumes pytest-asyncio auto mode;
        confirm against the project's pytest configuration.
        """
        db = AsyncMock()
        # Configure mock return values to avoid unawaited coroutine warnings
        db.query.return_value = {"count": 0}
        db._create_vector_index.return_value = None
        # Simulate second startup: table already exists and is empty
        # IMPORTANT: table_name and legacy_table_name are THE SAME
        table_name = "LIGHTRAG_VDB_CHUNKS"  # No suffix
        legacy_table_name = "LIGHTRAG_VDB_CHUNKS"  # Same as new

        # Setup mock responses using check_table_exists on db
        async def check_table_exists_side_effect(name):
            # Both tables exist (they're the same)
            return True

        db.check_table_exists = AsyncMock(side_effect=check_table_exists_side_effect)
        # Call setup_table
        # This should detect that new == legacy and skip deletion
        await PGVectorStorage.setup_table(
            db,
            table_name,
            workspace="test_workspace",
            embedding_dim=1536,
            legacy_table_name=legacy_table_name,
            base_table="LIGHTRAG_VDB_CHUNKS",
        )
        # CRITICAL: Table should NOT be deleted (no DROP TABLE)
        drop_calls = [
            call
            for call in db.execute.call_args_list
            if call[0][0] and "DROP TABLE" in call[0][0]
        ]
        assert (
            len(drop_calls) == 0
        ), "Should not drop table when new and legacy are the same"
        # Note: COUNT queries for workspace data are expected behavior in Case 1
        # (for logging/warning purposes when workspace data is empty).
        # The critical safety check is that DROP TABLE is not called.
def test_qdrant_with_suffix_case1_still_works(self):
"""
Test that Case 1 cleanup still works when there IS a suffix.
This ensures our fix doesn't break the normal Case 1 scenario.
"""
from qdrant_client import models
client = MagicMock()
# Different names (normal case)
collection_name = "lightrag_vdb_chunks_ada_002_1536d" # With suffix
legacy_collection = "lightrag_vdb_chunks" # Without suffix
# Setup: both exist
def collection_exists_side_effect(name):
return name in [collection_name, legacy_collection]
client.collection_exists.side_effect = collection_exists_side_effect
# Legacy is empty
client.count.return_value.count = 0
# Call setup_collection
QdrantVectorDBStorage.setup_collection(
client,
collection_name,
namespace="chunks",
workspace="_",
vectors_config=models.VectorParams(
size=1536, distance=models.Distance.COSINE
),
hnsw_config=models.HnswConfigDiff(
payload_m=16,
m=0,
),
model_suffix="ada_002_1536d",
)
# SHOULD delete legacy (normal Case 1 behavior)
client.delete_collection.assert_called_once_with(
collection_name=legacy_collection
)
async def test_postgres_with_suffix_case1_still_works(self):
"""
Test that Case 1 cleanup still works when there IS a suffix.
This ensures our fix doesn't break the normal Case 1 scenario.
"""
db = AsyncMock()
# Different names (normal case)
table_name = "LIGHTRAG_VDB_CHUNKS_ADA_002_1536D" # With suffix
legacy_table_name = "LIGHTRAG_VDB_CHUNKS" # Without suffix
# Setup mock responses using check_table_exists on db
async def check_table_exists_side_effect(name):
# Both tables exist
return True
db.check_table_exists = AsyncMock(side_effect=check_table_exists_side_effect)
# Mock empty table
async def query_side_effect(sql, params, **kwargs):
if "COUNT(*)" in sql:
return {"count": 0}
return {}
db.query.side_effect = query_side_effect
# Call setup_table
await PGVectorStorage.setup_table(
db,
table_name,
workspace="test_workspace",
embedding_dim=1536,
legacy_table_name=legacy_table_name,
base_table="LIGHTRAG_VDB_CHUNKS",
)
# SHOULD delete legacy (normal Case 1 behavior)
drop_calls = [
call
for call in db.execute.call_args_list
if call[0][0] and "DROP TABLE" in call[0][0]
]
assert len(drop_calls) == 1, "Should drop legacy table in normal Case 1"
assert legacy_table_name in drop_calls[0][0][0]
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_no_model_suffix_safety.py",
"license": "MIT License",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_overlap_validation.py | """
Test for overlap_tokens validation to prevent infinite loop.
This test validates the fix for the bug where overlap_tokens >= max_tokens
causes an infinite loop in the chunking function.
"""
from lightrag.rerank import chunk_documents_for_rerank
class TestOverlapValidation:
    """Tests for the overlap_tokens clamp that prevents an infinite chunking loop.

    chunk_documents_for_rerank used to hang whenever overlap_tokens >= max_tokens;
    these cases pin the clamped behavior and verify the normal path is untouched.
    """

    @staticmethod
    def _doc(word_count):
        # Synthetic document of `word_count` distinct space-separated words.
        return " ".join(f"word{i}" for i in range(word_count))

    def test_overlap_greater_than_max_tokens(self):
        """overlap_tokens > max_tokens is clamped (to max_tokens - 1) and terminates."""
        chunks, owners = chunk_documents_for_rerank(
            [self._doc(100)], max_tokens=30, overlap_tokens=32
        )
        assert len(chunks) > 0
        assert all(owner == 0 for owner in owners)

    def test_overlap_equal_to_max_tokens(self):
        """overlap_tokens == max_tokens is clamped and terminates."""
        chunks, owners = chunk_documents_for_rerank(
            [self._doc(100)], max_tokens=30, overlap_tokens=30
        )
        assert len(chunks) > 0
        assert all(owner == 0 for owner in owners)

    def test_overlap_slightly_less_than_max_tokens(self):
        """overlap_tokens just below max_tokens needs no clamping and works."""
        chunks, owners = chunk_documents_for_rerank(
            [self._doc(100)], max_tokens=30, overlap_tokens=29
        )
        assert len(chunks) > 0
        assert all(owner == 0 for owner in owners)

    def test_small_max_tokens_with_large_overlap(self):
        """Very small max_tokens with oversized overlap clamps (10 -> 4) and terminates."""
        chunks, owners = chunk_documents_for_rerank(
            [self._doc(50)], max_tokens=5, overlap_tokens=10
        )
        assert len(chunks) > 0
        assert all(owner == 0 for owner in owners)

    def test_multiple_documents_with_invalid_overlap(self):
        """A mixed batch with overlap >= max_tokens still chunks long docs."""
        batch = [
            self._doc(50),
            "short document",
            self._doc(75),
        ]
        chunks, owners = chunk_documents_for_rerank(
            batch, max_tokens=25, overlap_tokens=30
        )
        # Long documents split into multiple chunks; the short one stays whole.
        assert len(chunks) >= len(batch)
        assert "short document" in chunks

    def test_normal_operation_unaffected(self):
        """The ordinary overlap < max_tokens path behaves as before."""
        batch = [self._doc(100), "short doc"]
        chunks, owners = chunk_documents_for_rerank(
            batch, max_tokens=50, overlap_tokens=10
        )
        # The long document yields at least two chunks plus the short doc.
        assert len(chunks) > 2
        assert "short doc" in chunks
        # The final chunk must map back to the second document.
        assert owners[-1] == 1

    def test_edge_case_max_tokens_one(self):
        """max_tokens=1 clamps any overlap down to 0 and still terminates."""
        chunks, owners = chunk_documents_for_rerank(
            [self._doc(20)], max_tokens=1, overlap_tokens=5
        )
        assert len(chunks) > 0
        assert all(owner == 0 for owner in owners)
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_overlap_validation.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_postgres_index_name.py | """
Unit tests for PostgreSQL safe index name generation.
This module tests the _safe_index_name helper function which prevents
PostgreSQL's silent 63-byte identifier truncation from causing index
lookup failures.
"""
import pytest
# Mark all tests as offline (no external dependencies)
pytestmark = pytest.mark.offline
class TestSafeIndexName:
    """Test suite for _safe_index_name function.

    PostgreSQL silently truncates identifiers longer than 63 bytes, so the
    helper must keep short names unchanged for backward compatibility and
    deterministically hash over-long names to fit the limit.
    """
    def test_short_name_unchanged(self):
        """Short index names should remain unchanged."""
        from lightrag.kg.postgres_impl import _safe_index_name
        # Short table name - should return unchanged
        result = _safe_index_name("lightrag_vdb_entity", "hnsw_cosine")
        assert result == "idx_lightrag_vdb_entity_hnsw_cosine"
        assert len(result.encode("utf-8")) <= 63
    def test_long_name_gets_hashed(self):
        """Long table names exceeding 63 bytes should get hashed."""
        from lightrag.kg.postgres_impl import _safe_index_name
        # Long table name that would exceed 63 bytes
        long_table_name = "LIGHTRAG_VDB_ENTITY_text_embedding_3_large_3072d"
        result = _safe_index_name(long_table_name, "hnsw_cosine")
        # Should be within 63 bytes
        assert len(result.encode("utf-8")) <= 63
        # Should start with idx_ prefix
        assert result.startswith("idx_")
        # Should contain the suffix
        assert result.endswith("_hnsw_cosine")
        # Should NOT be the naive concatenation (which would be truncated)
        naive_name = f"idx_{long_table_name.lower()}_hnsw_cosine"
        assert result != naive_name
    def test_deterministic_output(self):
        """Same input should always produce same output (deterministic)."""
        from lightrag.kg.postgres_impl import _safe_index_name
        table_name = "LIGHTRAG_VDB_CHUNKS_text_embedding_3_large_3072d"
        suffix = "hnsw_cosine"
        # Two independent calls must agree, or index lookups would break.
        result1 = _safe_index_name(table_name, suffix)
        result2 = _safe_index_name(table_name, suffix)
        assert result1 == result2
    def test_different_suffixes_different_results(self):
        """Different suffixes should produce different index names."""
        from lightrag.kg.postgres_impl import _safe_index_name
        table_name = "LIGHTRAG_VDB_ENTITY_text_embedding_3_large_3072d"
        result1 = _safe_index_name(table_name, "hnsw_cosine")
        result2 = _safe_index_name(table_name, "ivfflat_cosine")
        assert result1 != result2
    def test_case_insensitive(self):
        """Table names should be normalized to lowercase."""
        from lightrag.kg.postgres_impl import _safe_index_name
        result_upper = _safe_index_name("LIGHTRAG_VDB_ENTITY", "hnsw_cosine")
        result_lower = _safe_index_name("lightrag_vdb_entity", "hnsw_cosine")
        assert result_upper == result_lower
    def test_boundary_case_exactly_63_bytes(self):
        """Test boundary case where name is exactly at 63-byte limit."""
        from lightrag.kg.postgres_impl import _safe_index_name
        # Create a table name that results in exactly 63 bytes
        # idx_ (4) + table_name + _ (1) + suffix = 63
        # So table_name + suffix = 58
        # Test a name that's just under the limit (should remain unchanged)
        short_suffix = "id"
        # idx_ (4) + 56 chars + _ (1) + id (2) = 63
        table_56 = "a" * 56
        result = _safe_index_name(table_56, short_suffix)
        expected = f"idx_{table_56}_{short_suffix}"
        # Exactly at the limit: no hashing should kick in.
        assert result == expected
        assert len(result.encode("utf-8")) == 63
    def test_unicode_handling(self):
        """Unicode characters should be properly handled (bytes, not chars)."""
        from lightrag.kg.postgres_impl import _safe_index_name
        # Unicode characters can take more bytes than visible chars
        # Chinese characters are 3 bytes each in UTF-8
        table_name = "lightrag_测试_table" # Contains Chinese chars
        result = _safe_index_name(table_name, "hnsw_cosine")
        # Should always be within 63 bytes
        assert len(result.encode("utf-8")) <= 63
    def test_real_world_model_names(self):
        """Test with real-world embedding model names that cause issues."""
        from lightrag.kg.postgres_impl import _safe_index_name
        # These are actual model names that have caused issues
        test_cases = [
            ("LIGHTRAG_VDB_CHUNKS_text_embedding_3_large_3072d", "hnsw_cosine"),
            ("LIGHTRAG_VDB_ENTITY_text_embedding_3_large_3072d", "hnsw_cosine"),
            ("LIGHTRAG_VDB_RELATION_text_embedding_3_large_3072d", "hnsw_cosine"),
            (
                "LIGHTRAG_VDB_ENTITY_bge_m3_1024d",
                "hnsw_cosine",
            ), # Shorter model name
            (
                "LIGHTRAG_VDB_CHUNKS_nomic_embed_text_v1_768d",
                "ivfflat_cosine",
            ), # Different index type
        ]
        for table_name, suffix in test_cases:
            result = _safe_index_name(table_name, suffix)
            # Critical: must be within PostgreSQL's 63-byte limit
            assert (
                len(result.encode("utf-8")) <= 63
            ), f"Index name too long: {result} for table {table_name}"
            # Must have consistent format
            assert result.startswith("idx_"), f"Missing idx_ prefix: {result}"
            assert result.endswith(f"_{suffix}"), f"Missing suffix {suffix}: {result}"
    def test_hash_uniqueness_for_similar_tables(self):
        """Similar but different table names should produce different hashes."""
        from lightrag.kg.postgres_impl import _safe_index_name
        # These tables have similar names but should have different hashes
        tables = [
            "LIGHTRAG_VDB_CHUNKS_model_a_1024d",
            "LIGHTRAG_VDB_CHUNKS_model_b_1024d",
            "LIGHTRAG_VDB_ENTITY_model_a_1024d",
        ]
        results = [_safe_index_name(t, "hnsw_cosine") for t in tables]
        # All results should be unique
        assert len(set(results)) == len(results), "Hash collision detected!"
class TestIndexNameIntegration:
    """Integration-style checks for how generated index names are looked up."""

    def test_pg_indexes_lookup_compatibility(self):
        """
        A generated index name must survive PostgreSQL's 63-byte truncation.

        This is the core problem: PostgreSQL stores identifiers truncated to
        63 bytes, while the code used to look up the untruncated name. The
        fix guarantees the generated name is already within the limit, so the
        stored and queried names coincide.
        """
        from lightrag.kg.postgres_impl import _safe_index_name

        generated = _safe_index_name(
            "LIGHTRAG_VDB_CHUNKS_text_embedding_3_large_3072d", "hnsw_cosine"
        )
        # What PostgreSQL would actually persist: the first 63 bytes.
        persisted = generated.encode("utf-8")[:63].decode("utf-8", errors="ignore")
        assert (
            generated == persisted
        ), "Index name would be truncated by PostgreSQL, causing lookup failures!"

    def test_backward_compatibility_short_names(self):
        """
        Pre-model-suffix-era short index names must come out unchanged.

        Existing deployments have indexes with the naive idx_<table>_<suffix>
        naming; those names must be preserved verbatim.
        """
        from lightrag.kg.postgres_impl import _safe_index_name

        for legacy in (
            "LIGHTRAG_VDB_ENTITY",
            "LIGHTRAG_VDB_RELATION",
            "LIGHTRAG_VDB_CHUNKS",
        ):
            for kind in ("hnsw_cosine", "ivfflat_cosine", "id"):
                produced = _safe_index_name(legacy, kind)
                naive = f"idx_{legacy.lower()}_{kind}"
                # Only enforce when the naive form already fits the limit.
                if len(naive.encode("utf-8")) <= 63:
                    assert (
                        produced == naive
                    ), f"Short name changed unexpectedly: {produced} != {naive}"
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_postgres_index_name.py",
"license": "MIT License",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_postgres_migration.py | import pytest
from unittest.mock import patch, AsyncMock
import numpy as np
from lightrag.utils import EmbeddingFunc
from lightrag.kg.postgres_impl import (
PGVectorStorage,
)
from lightrag.namespace import NameSpace
# Mock PostgreSQLDB
@pytest.fixture
def mock_pg_db():
    """Mock PostgreSQL database connection"""
    fake_db = AsyncMock()
    fake_db.workspace = "test_workspace"

    async def _default_query(sql, params=None, multirows=False, **kwargs):
        # Multirow queries get an empty result set; scalar queries a stub row.
        if multirows:
            return []
        return {"exists": False, "count": 0}

    async def _default_execute(sql, data=None, **kwargs):
        """
        Mimic PostgreSQLDB.execute() behavior:
        - Accepts data as dict[str, Any] | None (second parameter)
        - The real implementation converts dict.values() to a tuple for AsyncPG
        """
        if data is not None and not isinstance(data, dict):
            raise TypeError(
                f"PostgreSQLDB.execute() expects data as dict, got {type(data).__name__}"
            )
        return None

    fake_db.query = AsyncMock(side_effect=_default_query)
    fake_db.execute = AsyncMock(side_effect=_default_execute)
    return fake_db
# Mock get_data_init_lock to avoid async lock issues in tests
@pytest.fixture(autouse=True)
def mock_data_init_lock():
    # Replace the async data-init lock with a stub so tests never block on it.
    with patch("lightrag.kg.postgres_impl.get_data_init_lock") as lock_patch:
        lock_patch.return_value = AsyncMock()
        yield lock_patch
# Mock ClientManager
@pytest.fixture
def mock_client_manager(mock_pg_db):
    # Route ClientManager.get_client/release_client through the fake DB.
    with patch("lightrag.kg.postgres_impl.ClientManager") as manager_patch:
        manager_patch.get_client = AsyncMock(return_value=mock_pg_db)
        manager_patch.release_client = AsyncMock()
        yield manager_patch
# Mock Embedding function
@pytest.fixture
def mock_embedding_func():
    # Constant 768-dim embedding backed by a named test model.
    async def _embed(texts, **kwargs):
        return np.array([[0.1] * 768 for _ in texts])

    return EmbeddingFunc(embedding_dim=768, func=_embed, model_name="test_model")
async def test_postgres_table_naming(
    mock_client_manager, mock_pg_db, mock_embedding_func
):
    """The table name must carry the model suffix; the legacy name must not."""
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    storage = PGVectorStorage(
        namespace=NameSpace.VECTOR_STORE_CHUNKS,
        global_config=config,
        embedding_func=mock_embedding_func,
        workspace="test_ws",
    )
    suffix = "test_model_768d"
    # New-style name: base table plus model suffix.
    assert suffix in storage.table_name
    assert storage.table_name == f"LIGHTRAG_VDB_CHUNKS_{suffix}"
    # Legacy name stays suffix-free.
    assert storage.legacy_table_name == "LIGHTRAG_VDB_CHUNKS"
async def test_postgres_migration_trigger(
    mock_client_manager, mock_pg_db, mock_embedding_func
):
    """Test if migration logic is triggered correctly.

    Setup: only the legacy table exists and holds 100 rows. Initialization is
    expected to create the suffixed table and copy rows across in batches,
    which is observed through ``_run_with_retry`` calls.
    """
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    storage = PGVectorStorage(
        namespace=NameSpace.VECTOR_STORE_CHUNKS,
        global_config=config,
        embedding_func=mock_embedding_func,
        workspace="test_ws",
    )
    # Setup mocks for migration scenario
    # 1. New table does not exist, legacy table exists
    async def mock_check_table_exists(table_name):
        return table_name == storage.legacy_table_name
    mock_pg_db.check_table_exists = AsyncMock(side_effect=mock_check_table_exists)
    # 2. Legacy table has 100 records
    mock_rows = [
        {"id": f"test_id_{i}", "content": f"content_{i}", "workspace": "test_ws"}
        for i in range(100)
    ]
    # Mutable state so the nested mocks can reflect migration progress.
    migration_state = {"new_table_count": 0}
    async def mock_query(sql, params=None, multirows=False, **kwargs):
        if "COUNT(*)" in sql:
            sql_upper = sql.upper()
            legacy_table = storage.legacy_table_name.upper()
            new_table = storage.table_name.upper()
            # The suffixed name contains the legacy name as a substring,
            # so classify as "new" first and exclude it from "legacy".
            is_new_table = new_table in sql_upper
            is_legacy_table = legacy_table in sql_upper and not is_new_table
            if is_new_table:
                return {"count": migration_state["new_table_count"]}
            if is_legacy_table:
                return {"count": 100}
            return {"count": 0}
        elif multirows and "SELECT *" in sql:
            # Mock batch fetch for migration using keyset pagination
            # New pattern: WHERE workspace = $1 AND id > $2 ORDER BY id LIMIT $3
            # or first batch: WHERE workspace = $1 ORDER BY id LIMIT $2
            if "WHERE workspace" in sql:
                if "id >" in sql:
                    # Keyset pagination: params = [workspace, last_id, limit]
                    last_id = params[1] if len(params) > 1 else None
                    # Find rows after last_id
                    start_idx = 0
                    for i, row in enumerate(mock_rows):
                        if row["id"] == last_id:
                            start_idx = i + 1
                            break
                    limit = params[2] if len(params) > 2 else 500
                else:
                    # First batch (no last_id): params = [workspace, limit]
                    start_idx = 0
                    limit = params[1] if len(params) > 1 else 500
            else:
                # No workspace filter with keyset
                if "id >" in sql:
                    last_id = params[0] if params else None
                    start_idx = 0
                    for i, row in enumerate(mock_rows):
                        if row["id"] == last_id:
                            start_idx = i + 1
                            break
                    limit = params[1] if len(params) > 1 else 500
                else:
                    start_idx = 0
                    limit = params[0] if params else 500
            end = min(start_idx + limit, len(mock_rows))
            return mock_rows[start_idx:end]
        return {}
    mock_pg_db.query = AsyncMock(side_effect=mock_query)
    # Track migration through _run_with_retry calls
    migration_executed = []
    async def mock_run_with_retry(operation, **kwargs):
        # Track that migration batch operation was called
        migration_executed.append(True)
        # After the batch runs, the new table reports all 100 rows.
        migration_state["new_table_count"] = 100
        return None
    mock_pg_db._run_with_retry = AsyncMock(side_effect=mock_run_with_retry)
    with patch(
        "lightrag.kg.postgres_impl.PGVectorStorage._pg_create_table", AsyncMock()
    ):
        # Initialize storage (should trigger migration)
        await storage.initialize()
        # Verify migration was executed by checking _run_with_retry was called
        # (batch migration uses _run_with_retry with executemany)
        assert len(migration_executed) > 0, "Migration should have been executed"
async def test_postgres_no_migration_needed(
    mock_client_manager, mock_pg_db, mock_embedding_func
):
    """If the suffixed table already exists, initialization creates nothing."""
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    storage = PGVectorStorage(
        namespace=NameSpace.VECTOR_STORE_CHUNKS,
        global_config=config,
        embedding_func=mock_embedding_func,
        workspace="test_ws",
    )

    async def _only_new_exists(table_name):
        # Only the new (suffixed) table is present.
        return table_name == storage.table_name

    mock_pg_db.check_table_exists = AsyncMock(side_effect=_only_new_exists)
    with patch(
        "lightrag.kg.postgres_impl.PGVectorStorage._pg_create_table", AsyncMock()
    ) as create_spy:
        await storage.initialize()
        # Nothing to create and nothing to migrate.
        create_spy.assert_not_called()
async def test_scenario_1_new_workspace_creation(
    mock_client_manager, mock_pg_db, mock_embedding_func
):
    """
    Scenario 1: brand-new workspace.

    Neither the legacy nor the suffixed table exists yet, so initialization
    must create the suffixed table directly and perform no migration.
    """
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    large_model_func = EmbeddingFunc(
        embedding_dim=3072,
        func=mock_embedding_func.func,
        model_name="text-embedding-3-large",
    )
    storage = PGVectorStorage(
        namespace=NameSpace.VECTOR_STORE_CHUNKS,
        global_config=config,
        embedding_func=large_model_func,
        workspace="new_workspace",
    )

    async def _nothing_exists(table_name):
        # Fresh workspace: no tables at all.
        return False

    mock_pg_db.check_table_exists = AsyncMock(side_effect=_nothing_exists)
    with patch(
        "lightrag.kg.postgres_impl.PGVectorStorage._pg_create_table", AsyncMock()
    ) as create_spy:
        await storage.initialize()
        # Name carries the model suffix and exactly one table is created.
        assert "text_embedding_3_large_3072d" in storage.table_name
        create_spy.assert_called_once()
        # table_name is the second positional argument of _pg_create_table.
        assert create_spy.call_args[0][1] == storage.table_name
async def test_scenario_2_legacy_upgrade_migration(
    mock_client_manager, mock_pg_db, mock_embedding_func
):
    """
    Scenario 2: Upgrade from legacy version
    Expected behavior:
    - Legacy table exists (without model suffix)
    - New table doesn't exist
    - Automatically migrate data to new table with suffix
    """
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    embedding_func = EmbeddingFunc(
        embedding_dim=1536,
        func=mock_embedding_func.func,
        model_name="text-embedding-ada-002",
    )
    storage = PGVectorStorage(
        namespace=NameSpace.VECTOR_STORE_CHUNKS,
        global_config=config,
        embedding_func=embedding_func,
        workspace="legacy_workspace",
    )
    # Mock: only legacy table exists
    async def mock_check_table_exists(table_name):
        return table_name == storage.legacy_table_name
    mock_pg_db.check_table_exists = AsyncMock(side_effect=mock_check_table_exists)
    # Mock: legacy table has 50 records
    mock_rows = [
        {
            "id": f"legacy_id_{i}",
            "content": f"legacy_content_{i}",
            "workspace": "legacy_workspace",
        }
        for i in range(50)
    ]
    # Track which queries have been made for proper response
    # (query_history is a debugging aid; assertions do not read it)
    query_history = []
    migration_state = {"new_table_count": 0}
    async def mock_query(sql, params=None, multirows=False, **kwargs):
        query_history.append(sql)
        if "COUNT(*)" in sql:
            # Determine table type:
            # - Legacy: contains base name but NOT model suffix
            # - New: contains model suffix (e.g., text_embedding_ada_002_1536d)
            sql_upper = sql.upper()
            base_name = storage.legacy_table_name.upper()
            # Check if this is querying the new table (has model suffix)
            has_model_suffix = storage.table_name.upper() in sql_upper
            is_legacy_table = base_name in sql_upper and not has_model_suffix
            has_workspace_filter = "WHERE workspace" in sql
            if is_legacy_table and has_workspace_filter:
                # Count for legacy table with workspace filter (before migration)
                return {"count": 50}
            elif is_legacy_table and not has_workspace_filter:
                # Total count for legacy table
                return {"count": 50}
            else:
                # New table count (before/after migration)
                return {"count": migration_state["new_table_count"]}
        elif multirows and "SELECT *" in sql:
            # Mock batch fetch for migration using keyset pagination
            # New pattern: WHERE workspace = $1 AND id > $2 ORDER BY id LIMIT $3
            # or first batch: WHERE workspace = $1 ORDER BY id LIMIT $2
            if "WHERE workspace" in sql:
                if "id >" in sql:
                    # Keyset pagination: params = [workspace, last_id, limit]
                    last_id = params[1] if len(params) > 1 else None
                    # Find rows after last_id
                    start_idx = 0
                    for i, row in enumerate(mock_rows):
                        if row["id"] == last_id:
                            start_idx = i + 1
                            break
                    limit = params[2] if len(params) > 2 else 500
                else:
                    # First batch (no last_id): params = [workspace, limit]
                    start_idx = 0
                    limit = params[1] if len(params) > 1 else 500
            else:
                # No workspace filter with keyset
                if "id >" in sql:
                    last_id = params[0] if params else None
                    start_idx = 0
                    for i, row in enumerate(mock_rows):
                        if row["id"] == last_id:
                            start_idx = i + 1
                            break
                    limit = params[1] if len(params) > 1 else 500
                else:
                    start_idx = 0
                    limit = params[0] if params else 500
            end = min(start_idx + limit, len(mock_rows))
            return mock_rows[start_idx:end]
        return {}
    mock_pg_db.query = AsyncMock(side_effect=mock_query)
    # Track migration through _run_with_retry calls
    migration_executed = []
    async def mock_run_with_retry(operation, **kwargs):
        # Track that migration batch operation was called
        migration_executed.append(True)
        # After the batch runs, the new table reports all 50 rows.
        migration_state["new_table_count"] = 50
        return None
    mock_pg_db._run_with_retry = AsyncMock(side_effect=mock_run_with_retry)
    with patch(
        "lightrag.kg.postgres_impl.PGVectorStorage._pg_create_table", AsyncMock()
    ) as mock_create:
        await storage.initialize()
        # Verify table name contains ada-002
        assert "text_embedding_ada_002_1536d" in storage.table_name
        # Verify migration was executed (batch migration uses _run_with_retry)
        assert len(migration_executed) > 0, "Migration should have been executed"
        mock_create.assert_called_once()
async def test_scenario_3_multi_model_coexistence(
    mock_client_manager, mock_pg_db, mock_embedding_func
):
    """
    Scenario 3: two embedding models side by side.

    Each model must get its own suffixed table so workspaces using different
    models stay isolated and never interfere with each other.
    """
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    # Workspace A embeds with bge-small (768 dims).
    func_small = EmbeddingFunc(
        embedding_dim=768, func=mock_embedding_func.func, model_name="bge-small"
    )
    storage_small = PGVectorStorage(
        namespace=NameSpace.VECTOR_STORE_CHUNKS,
        global_config=config,
        embedding_func=func_small,
        workspace="workspace_a",
    )

    # Workspace B embeds with bge-large (1024 dims).
    async def _embed_large(texts, **kwargs):
        return np.array([[0.1] * 1024 for _ in texts])

    func_large = EmbeddingFunc(
        embedding_dim=1024, func=_embed_large, model_name="bge-large"
    )
    storage_large = PGVectorStorage(
        namespace=NameSpace.VECTOR_STORE_CHUNKS,
        global_config=config,
        embedding_func=func_large,
        workspace="workspace_b",
    )
    # Distinct, model-specific table names.
    assert storage_small.table_name != storage_large.table_name
    assert "bge_small_768d" in storage_small.table_name
    assert "bge_large_1024d" in storage_large.table_name

    async def _nothing_exists(table_name):
        # Neither table has been created yet.
        return False

    mock_pg_db.check_table_exists = AsyncMock(side_effect=_nothing_exists)
    with patch(
        "lightrag.kg.postgres_impl.PGVectorStorage._pg_create_table", AsyncMock()
    ) as create_spy:
        await storage_small.initialize()
        await storage_large.initialize()
        # One creation per storage, each with its own table name.
        assert create_spy.call_count == 2
        created = [call[0][1] for call in create_spy.call_args_list]
        assert len(set(created)) == 2
        assert storage_small.table_name in created
        assert storage_large.table_name in created
async def test_case1_empty_legacy_auto_cleanup(
    mock_client_manager, mock_pg_db, mock_embedding_func
):
    """
    Case 1a: new and legacy tables both exist, and legacy is EMPTY.

    An empty legacy table carries no data-loss risk, so initialization is
    expected to drop it automatically.
    """
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    embed = EmbeddingFunc(
        embedding_dim=1536,
        func=mock_embedding_func.func,
        model_name="test-model",
    )
    storage = PGVectorStorage(
        namespace=NameSpace.VECTOR_STORE_CHUNKS,
        global_config=config,
        embedding_func=embed,
        workspace="test_ws",
    )

    async def _both_exist(table_name):
        # Both the new and the legacy table are present.
        return True

    mock_pg_db.check_table_exists = AsyncMock(side_effect=_both_exist)

    async def _counts(sql, params=None, multirows=False, **kwargs):
        if "COUNT(*)" not in sql:
            return {}
        # NOTE: substring check on the legacy name, matching the original
        # mock's classification order.
        if storage.legacy_table_name in sql:
            return {"count": 0}  # Empty legacy table
        return {"count": 100}  # New table has data

    mock_pg_db.query = AsyncMock(side_effect=_counts)
    with patch("lightrag.kg.postgres_impl.logger"):
        await storage.initialize()
    # Empty tables are safe to delete without data loss risk.
    drops = [
        c
        for c in mock_pg_db.execute.call_args_list
        if c[0][0] and "DROP TABLE" in c[0][0]
    ]
    assert len(drops) >= 1, "Empty legacy table should be auto-deleted"
    target = storage.legacy_table_name
    assert any(
        target in str(c) for c in drops
    ), f"Expected to drop empty legacy table '{target}'"
    print(
        f"✅ Case 1a: Empty legacy table '{target}' auto-deleted successfully"
    )
async def test_case1_nonempty_legacy_warning(
    mock_client_manager, mock_pg_db, mock_embedding_func
):
    """
    Case 1b: new and legacy tables both exist, and legacy still HAS DATA.

    Tables containing data are never auto-dropped — only a warning is logged —
    to prevent accidental data loss.
    """
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    embed = EmbeddingFunc(
        embedding_dim=1536,
        func=mock_embedding_func.func,
        model_name="test-model",
    )
    storage = PGVectorStorage(
        namespace=NameSpace.VECTOR_STORE_CHUNKS,
        global_config=config,
        embedding_func=embed,
        workspace="test_ws",
    )

    async def _both_exist(table_name):
        # Both the new and the legacy table are present.
        return True

    mock_pg_db.check_table_exists = AsyncMock(side_effect=_both_exist)

    async def _counts(sql, params=None, multirows=False, **kwargs):
        if "COUNT(*)" not in sql:
            return {}
        # NOTE: substring check on the legacy name, matching the original
        # mock's classification order.
        if storage.legacy_table_name in sql:
            return {"count": 50}  # Legacy has data
        return {"count": 100}  # New table has data

    mock_pg_db.query = AsyncMock(side_effect=_counts)
    with patch("lightrag.kg.postgres_impl.logger"):
        await storage.initialize()
    # The legacy table must be preserved: no DROP TABLE targeting it.
    drops = [
        c
        for c in mock_pg_db.execute.call_args_list
        if c[0][0] and "DROP TABLE" in c[0][0]
    ]
    target = storage.legacy_table_name
    assert not any(
        target in str(c) for c in drops
    ), "Legacy table with data should NOT be auto-deleted"
    print(
        f"✅ Case 1b: Legacy table '{target}' with data preserved (warning only)"
    )
async def test_case1_sequential_workspace_migration(
    mock_client_manager, mock_pg_db, mock_embedding_func
):
    """
    Case 1c: sequential workspace migration (multi-tenant scenario).

    Timeline:
        1. The legacy table holds workspace_a (3 rows) + workspace_b (3 rows).
        2. Workspace A initializes first (only legacy exists) and migrates
           its own rows into the suffixed table.
        3. Workspace B initializes later (both tables exist, legacy still
           holds B's rows) and must migrate its own rows as well.

    Verifies that the migration logic handles tenants migrating one after
    another instead of assuming a single all-at-once migration.
    """
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    embedding_func = EmbeddingFunc(
        embedding_dim=1536,
        func=mock_embedding_func.func,
        model_name="test-model",
    )

    # Legacy table contents: 3 rows per workspace, 6 rows in total.
    rows_a = [
        {"id": f"a_{i}", "content": f"A content {i}", "workspace": "workspace_a"}
        for i in range(3)
    ]
    rows_b = [
        {"id": f"b_{i}", "content": f"B content {i}", "workspace": "workspace_b"}
        for i in range(3)
    ]

    # Shared mutable state tracking how far the simulated migration has gone.
    migration_state = {
        "new_table_exists": False,
        "workspace_a_migrated": False,
        "workspace_a_migration_count": 0,
        "workspace_b_migration_count": 0,
    }

    # ---- Step 1: workspace_a initializes (only the legacy table exists) ----
    mock_pg_db.workspace = "workspace_a"  # db.workspace must match the tenant
    storage_a = PGVectorStorage(
        namespace=NameSpace.VECTOR_STORE_CHUNKS,
        global_config=config,
        embedding_func=embedding_func,
        workspace="workspace_a",
    )

    async def table_exists_a(table_name):
        if table_name == storage_a.legacy_table_name:
            return True
        if table_name == storage_a.table_name:
            return migration_state["new_table_exists"]
        return False

    mock_pg_db.check_table_exists = AsyncMock(side_effect=table_exists_a)

    async def query_a(sql, params=None, multirows=False, **kwargs):
        sql_upper = sql.upper()
        base_name = storage_a.legacy_table_name.upper()
        if "COUNT(*)" in sql:
            has_model_suffix = "TEST_MODEL_1536D" in sql_upper
            is_legacy = base_name in sql_upper and not has_model_suffix
            has_workspace_filter = "WHERE workspace" in sql
            if is_legacy and has_workspace_filter:
                workspace = params[0] if params and len(params) > 0 else None
                if workspace == "workspace_a":
                    return {"count": 3}
                elif workspace == "workspace_b":
                    return {"count": 3}
            elif is_legacy and not has_workspace_filter:
                # Global count over the legacy table.
                return {"count": 6}
            elif has_model_suffix:
                if has_workspace_filter:
                    workspace = params[0] if params and len(params) > 0 else None
                    if workspace == "workspace_a":
                        return {"count": migration_state["workspace_a_migration_count"]}
                    if workspace == "workspace_b":
                        return {"count": migration_state["workspace_b_migration_count"]}
                # Global count over the new (suffixed) table.
                return {
                    "count": migration_state["workspace_a_migration_count"]
                    + migration_state["workspace_b_migration_count"]
                }
        elif multirows and "SELECT *" in sql:
            if "WHERE workspace" in sql:
                workspace = params[0] if params and len(params) > 0 else None
                if workspace == "workspace_a":
                    if "id >" in sql:
                        # Keyset pagination: params = [workspace, last_id, limit]
                        last_id = params[1] if len(params) > 1 else None
                        start_idx = 0
                        for i, row in enumerate(rows_a):
                            if row["id"] == last_id:
                                start_idx = i + 1
                                break
                        limit = params[2] if len(params) > 2 else 500
                    else:
                        # First batch: params = [workspace, limit]
                        start_idx = 0
                        limit = params[1] if len(params) > 1 else 500
                    end = min(start_idx + limit, len(rows_a))
                    return rows_a[start_idx:end]
        return {}

    mock_pg_db.query = AsyncMock(side_effect=query_a)

    # Batch migration goes through _run_with_retry; record each invocation.
    migration_a_executed = []

    async def run_with_retry_a(operation, **kwargs):
        migration_a_executed.append(True)
        migration_state["workspace_a_migration_count"] = len(rows_a)
        return None

    mock_pg_db._run_with_retry = AsyncMock(side_effect=run_with_retry_a)

    with patch("lightrag.kg.postgres_impl.logger"):
        await storage_a.initialize()
    migration_state["new_table_exists"] = True
    migration_state["workspace_a_migrated"] = True
    print("✅ Step 1: Workspace A initialized")

    assert (
        len(migration_a_executed) > 0
    ), "Migration should have been executed for workspace_a"
    print(f"✅ Step 1: Migration executed {len(migration_a_executed)} batch(es)")

    # ---- Step 2: workspace_b initializes (both tables exist) ----
    mock_pg_db.workspace = "workspace_b"  # switch the db to tenant B
    storage_b = PGVectorStorage(
        namespace=NameSpace.VECTOR_STORE_CHUNKS,
        global_config=config,
        embedding_func=embedding_func,
        workspace="workspace_b",
    )
    mock_pg_db.reset_mock()

    async def table_exists_b(table_name):
        return True  # both tables exist by now

    mock_pg_db.check_table_exists = AsyncMock(side_effect=table_exists_b)

    async def query_b(sql, params=None, multirows=False, **kwargs):
        sql_upper = sql.upper()
        base_name = storage_b.legacy_table_name.upper()
        if "COUNT(*)" in sql:
            has_model_suffix = "TEST_MODEL_1536D" in sql_upper
            is_legacy = base_name in sql_upper and not has_model_suffix
            has_workspace_filter = "WHERE workspace" in sql
            if is_legacy and has_workspace_filter:
                workspace = params[0] if params and len(params) > 0 else None
                if workspace == "workspace_b":
                    return {"count": 3}  # B's rows still sit in legacy
                elif workspace == "workspace_a":
                    return {"count": 0}  # A's rows were already migrated
            elif is_legacy and not has_workspace_filter:
                # Global count: only workspace_b rows remain in legacy.
                return {"count": 3}
            elif has_model_suffix:
                if has_workspace_filter:
                    workspace = params[0] if params and len(params) > 0 else None
                    if workspace == "workspace_b":
                        return {"count": migration_state["workspace_b_migration_count"]}
                    elif workspace == "workspace_a":
                        return {"count": 3}
                else:
                    return {"count": 3 + migration_state["workspace_b_migration_count"]}
        elif multirows and "SELECT *" in sql:
            if "WHERE workspace" in sql:
                workspace = params[0] if params and len(params) > 0 else None
                if workspace == "workspace_b":
                    if "id >" in sql:
                        # Keyset pagination: params = [workspace, last_id, limit]
                        last_id = params[1] if len(params) > 1 else None
                        start_idx = 0
                        for i, row in enumerate(rows_b):
                            if row["id"] == last_id:
                                start_idx = i + 1
                                break
                        limit = params[2] if len(params) > 2 else 500
                    else:
                        # First batch: params = [workspace, limit]
                        start_idx = 0
                        limit = params[1] if len(params) > 1 else 500
                    end = min(start_idx + limit, len(rows_b))
                    return rows_b[start_idx:end]
        return {}

    mock_pg_db.query = AsyncMock(side_effect=query_b)

    migration_b_executed = []

    async def run_with_retry_b(operation, **kwargs):
        migration_b_executed.append(True)
        migration_state["workspace_b_migration_count"] = len(rows_b)
        return None

    mock_pg_db._run_with_retry = AsyncMock(side_effect=run_with_retry_b)

    with patch("lightrag.kg.postgres_impl.logger"):
        await storage_b.initialize()
    print("✅ Step 2: Workspace B initialized")

    # workspace_b must migrate: the new table has no workspace_b rows while
    # the legacy table still does.
    assert (
        len(migration_b_executed) > 0
    ), "Migration should have been executed for workspace_b"
    print("✅ Step 2: Migration executed for workspace_b")
    print("\n🎉 Case 1c: Sequential workspace migration verification complete!")
    print(" - Workspace A: Migrated successfully (only legacy existed)")
    print(" - Workspace B: Migrated successfully (new table empty for workspace_b)")
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_postgres_migration.py",
"license": "MIT License",
"lines": 714,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_qdrant_migration.py | import pytest
from unittest.mock import MagicMock, patch, AsyncMock
import numpy as np
from qdrant_client import models
from lightrag.utils import EmbeddingFunc
from lightrag.kg.qdrant_impl import QdrantVectorDBStorage
# Mock QdrantClient
@pytest.fixture
def mock_qdrant_client():
    """Patch QdrantClient and yield a MagicMock standing in for the client."""
    with patch("lightrag.kg.qdrant_impl.QdrantClient") as client_cls:
        fake_client = client_cls.return_value
        # Default: no collections exist and every count is zero.
        fake_client.collection_exists.return_value = False
        fake_client.count.return_value.count = 0
        # get_collection() reports an empty payload schema and a vector size
        # matching the 768d mock embedding model.
        info = MagicMock()
        info.payload_schema = {}
        info.config.params.vectors.size = 768
        fake_client.get_collection.return_value = info
        yield fake_client
# Mock get_data_init_lock to avoid async lock issues in tests
@pytest.fixture(autouse=True)
def mock_data_init_lock():
    """Replace get_data_init_lock with an AsyncMock so tests avoid real async locks."""
    with patch("lightrag.kg.qdrant_impl.get_data_init_lock") as lock_factory:
        lock_factory.return_value = AsyncMock()
        yield lock_factory
# Mock Embedding function
@pytest.fixture
def mock_embedding_func():
    """Return an EmbeddingFunc producing constant 768-d vectors for any input."""

    async def embed(texts, **kwargs):
        return np.array([[0.1] * 768 for _ in texts])

    return EmbeddingFunc(embedding_dim=768, func=embed, model_name="test-model")
async def test_qdrant_collection_naming(mock_qdrant_client, mock_embedding_func):
    """The collection name must embed the sanitized model name and dimension."""
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    storage = QdrantVectorDBStorage(
        namespace="chunks",
        global_config=config,
        embedding_func=mock_embedding_func,
        workspace="test_ws",
    )
    # "test-model" + 768 dims -> "test_model_768d" suffix on the base name.
    expected_suffix = "test_model_768d"
    assert expected_suffix in storage.final_namespace
    assert storage.final_namespace == f"lightrag_vdb_chunks_{expected_suffix}"
async def test_qdrant_migration_trigger(mock_qdrant_client, mock_embedding_func):
    """Only the legacy collection exists: initialize() must migrate its data."""
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    storage = QdrantVectorDBStorage(
        namespace="chunks",
        global_config=config,
        embedding_func=mock_embedding_func,
        workspace="test_ws",
    )

    # Legacy collection name carries no model suffix.
    legacy_collection = "lightrag_vdb_chunks"

    # 1. Only the legacy collection exists.
    mock_qdrant_client.collection_exists.side_effect = (
        lambda name: name == legacy_collection
    )

    # 2. Legacy holds 100 points; the new collection grows once upserts land.
    migration_state = {"new_workspace_count": 0}

    def count_mock(collection_name, exact=True, count_filter=None):
        result = MagicMock()
        if collection_name == legacy_collection:
            result.count = 100
        elif collection_name == storage.final_namespace:
            result.count = migration_state["new_workspace_count"]
        else:
            result.count = 0
        return result

    mock_qdrant_client.count.side_effect = count_mock

    # 3. Scroll feed. With an empty payload_schema the implementation first
    # samples payloads (limit=10) to detect workspace_id, then migrates:
    # sampling pass, one data batch, then end-of-data.
    point = MagicMock()
    point.id = "old_id"
    point.vector = [0.1] * 768
    point.payload = {"content": "test"}  # no workspace_id key
    mock_qdrant_client.scroll.side_effect = [
        ([point], "_"),
        ([point], "next_offset"),
        ([], None),
    ]

    def upsert_mock(*args, **kwargs):
        migration_state["new_workspace_count"] = 100
        return None

    mock_qdrant_client.upsert.side_effect = upsert_mock

    # Initialization triggers the migration.
    await storage.initialize()

    # Legacy count was inspected.
    mock_qdrant_client.count.assert_any_call(
        collection_name=legacy_collection, exact=True
    )
    # The new collection was created.
    mock_qdrant_client.create_collection.assert_called()
    # Sampling scroll (limit=10) followed by a migration scroll (limit=500).
    assert mock_qdrant_client.scroll.call_count >= 2
    sampling_call = mock_qdrant_client.scroll.call_args_list[0]
    assert sampling_call.kwargs["collection_name"] == legacy_collection
    assert sampling_call.kwargs["limit"] == 10
    migration_call = mock_qdrant_client.scroll.call_args_list[1]
    assert migration_call.kwargs["collection_name"] == legacy_collection
    assert migration_call.kwargs["limit"] == 500
    # Points landed in the new collection and the tenant index was created.
    mock_qdrant_client.upsert.assert_called()
    mock_qdrant_client.create_payload_index.assert_called()
async def test_qdrant_no_migration_needed(mock_qdrant_client, mock_embedding_func):
    """
    Case 1 in setup_collection: only the new (suffixed) collection exists.

    initialize() must (re)create the payload index on the new collection and
    must not attempt any data migration — scroll() is never called.
    """
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    storage = QdrantVectorDBStorage(
        namespace="chunks",
        global_config=config,
        embedding_func=mock_embedding_func,
        workspace="test_ws",
    )

    # Only the new collection exists; there is no legacy collection.
    mock_qdrant_client.collection_exists.side_effect = (
        lambda name: name == storage.final_namespace
    )

    await storage.initialize()

    # The tenant payload index is ensured on the new collection.
    mock_qdrant_client.create_payload_index.assert_called_with(
        collection_name=storage.final_namespace,
        field_name="workspace_id",
        field_schema=models.KeywordIndexParams(
            type=models.KeywordIndexType.KEYWORD,
            is_tenant=True,
        ),
    )
    # No migration: scroll() must never have been invoked.
    mock_qdrant_client.scroll.assert_not_called()
# ============================================================================
# Tests for scenarios described in design document (Lines 606-649)
# ============================================================================
async def test_scenario_1_new_workspace_creation(
    mock_qdrant_client, mock_embedding_func
):
    """
    Scenario 1: brand-new workspace (no existing collections).

    Expected: lightrag_vdb_chunks_text_embedding_3_large_3072d is created
    directly and no migration is attempted.
    """
    # Use a large embedding model (3072 dimensions).
    large_model_func = EmbeddingFunc(
        embedding_dim=3072,
        func=mock_embedding_func.func,
        model_name="text-embedding-3-large",
    )
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    storage = QdrantVectorDBStorage(
        namespace="chunks",
        global_config=config,
        embedding_func=large_model_func,
        workspace="test_new",
    )

    # Case 3: neither the legacy nor the new collection exists.
    mock_qdrant_client.collection_exists.return_value = False

    await storage.initialize()

    # The new collection carries the model suffix.
    expected_collection = "lightrag_vdb_chunks_text_embedding_3_large_3072d"
    assert storage.final_namespace == expected_collection

    # Fix: the original asserted `create_calls[0][0][0] == ... or .kwargs...`,
    # which raises IndexError (not AssertionError) when create_collection is
    # invoked with keyword arguments only, defeating the intended fallback.
    # Extract the name safely from positional or keyword form, and drop the
    # redundant `[call for call in ...]` copy of call_args_list.
    create_calls = mock_qdrant_client.create_collection.call_args_list
    assert len(create_calls) > 0
    first_call = create_calls[0]
    created_name = (
        first_call[0][0] if first_call[0] else first_call.kwargs.get("collection_name")
    )
    assert created_name == expected_collection

    # No migration should have been attempted for a brand-new workspace.
    mock_qdrant_client.scroll.assert_not_called()
    print(
        f"✅ Scenario 1: New workspace created with collection '{expected_collection}'"
    )
async def test_scenario_2_legacy_upgrade_migration(
    mock_qdrant_client, mock_embedding_func
):
    """
    Scenario 2: upgrade from a legacy deployment.

    Only the un-suffixed collection ``lightrag_vdb_chunks`` exists. Its data
    must be migrated automatically into
    ``lightrag_vdb_chunks_text_embedding_ada_002_1536d``. The legacy
    collection is NOT deleted afterwards; manual cleanup is required.
    """
    # Use the ada-002 model (1536 dimensions).
    ada_func = EmbeddingFunc(
        embedding_dim=1536,
        func=mock_embedding_func.func,
        model_name="text-embedding-ada-002",
    )
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    storage = QdrantVectorDBStorage(
        namespace="chunks",
        global_config=config,
        embedding_func=ada_func,
        workspace="test_legacy",
    )

    legacy_collection = "lightrag_vdb_chunks"
    new_collection = storage.final_namespace

    # Case 4: only the legacy collection exists.
    mock_qdrant_client.collection_exists.side_effect = (
        lambda name: name == legacy_collection
    )

    # The legacy collection stores 1536-d vectors and has no payload schema.
    legacy_info = MagicMock()
    legacy_info.payload_schema = {}
    legacy_info.config.params.vectors.size = 1536
    mock_qdrant_client.get_collection.return_value = legacy_info

    migration_state = {"new_workspace_count": 0}

    def count_mock(collection_name, exact=True, count_filter=None):
        result = MagicMock()
        if collection_name == legacy_collection:
            result.count = 150
        elif collection_name == new_collection:
            result.count = migration_state["new_workspace_count"]
        else:
            result.count = 0
        return result

    mock_qdrant_client.count.side_effect = count_mock

    # Ten legacy points without a workspace_id payload key.
    legacy_points = []
    for i in range(10):
        point = MagicMock()
        point.id = f"legacy-{i}"
        point.vector = [0.1] * 1536
        point.payload = {"content": f"Legacy document {i}", "id": f"doc-{i}"}
        legacy_points.append(point)

    # Scroll feed: with an empty payload_schema the implementation first
    # samples payloads (limit=10), then migrates in batches:
    # sampling pass, one migration batch, then end-of-data.
    mock_qdrant_client.scroll.side_effect = [
        (legacy_points, "_"),
        (legacy_points, "offset1"),
        ([], None),
    ]

    def upsert_mock(*args, **kwargs):
        migration_state["new_workspace_count"] = 150
        return None

    mock_qdrant_client.upsert.side_effect = upsert_mock

    # Initialization triggers the migration.
    await storage.initialize()

    expected_new_collection = "lightrag_vdb_chunks_text_embedding_ada_002_1536d"
    assert storage.final_namespace == expected_new_collection

    # 1. The legacy count was checked.
    mock_qdrant_client.count.assert_any_call(
        collection_name=legacy_collection, exact=True
    )
    # 2. The new collection was created.
    mock_qdrant_client.create_collection.assert_called()
    # 3. Legacy data was scrolled.
    scroll_calls = mock_qdrant_client.scroll.call_args_list
    assert len(scroll_calls) >= 1
    assert scroll_calls[0].kwargs["collection_name"] == legacy_collection
    # 4. Data was upserted into the new collection.
    upsert_calls = mock_qdrant_client.upsert.call_args_list
    assert len(upsert_calls) >= 1
    assert upsert_calls[0].kwargs["collection_name"] == new_collection
    # The legacy collection is left in place for manual cleanup.
    print(
        f"✅ Scenario 2: Legacy data migrated from '{legacy_collection}' to '{expected_new_collection}'"
    )
async def test_scenario_3_multi_model_coexistence(mock_qdrant_client):
    """
    Scenario 3: two embedding models coexist.

    Expected: each model gets its own independent collection; neither
    interferes with the other.
    """

    # Model A: bge-small, 768 dimensions.
    async def embed_small(texts, **kwargs):
        return np.array([[0.1] * 768 for _ in texts])

    model_a_func = EmbeddingFunc(
        embedding_dim=768, func=embed_small, model_name="bge-small"
    )

    # Model B: bge-large, 1024 dimensions.
    async def embed_large(texts, **kwargs):
        return np.array([[0.2] * 1024 for _ in texts])

    model_b_func = EmbeddingFunc(
        embedding_dim=1024, func=embed_large, model_name="bge-large"
    )

    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    storage_a = QdrantVectorDBStorage(
        namespace="chunks",
        global_config=config,
        embedding_func=model_a_func,
        workspace="workspace_a",
    )
    storage_b = QdrantVectorDBStorage(
        namespace="chunks",
        global_config=config,
        embedding_func=model_b_func,
        workspace="workspace_b",
    )

    # The two storages resolve to distinct, model-specific collections.
    assert storage_a.final_namespace != storage_b.final_namespace
    expected_collection_a = "lightrag_vdb_chunks_bge_small_768d"
    assert storage_a.final_namespace == expected_collection_a
    expected_collection_b = "lightrag_vdb_chunks_bge_large_1024d"
    assert storage_b.final_namespace == expected_collection_b
    # Each storage keeps its own embedding dimension.
    assert storage_a.embedding_func.embedding_dim == 768
    assert storage_b.embedding_func.embedding_dim == 1024
    print("✅ Scenario 3: Multi-model coexistence verified")
    print(f" - Workspace A: {expected_collection_a} (768d)")
    print(f" - Workspace B: {expected_collection_b} (1024d)")
    print(" - Collections are independent")
async def test_case1_empty_legacy_auto_cleanup(mock_qdrant_client, mock_embedding_func):
    """
    Case 1a: both collections exist and the legacy one is EMPTY.

    Expected: the empty legacy collection is deleted automatically —
    dropping an empty collection carries no data-loss risk.
    """
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    storage = QdrantVectorDBStorage(
        namespace="chunks",
        global_config=config,
        embedding_func=mock_embedding_func,
        workspace="test_ws",
    )

    legacy_collection = "lightrag_vdb_chunks"
    new_collection = storage.final_namespace

    # Both collections exist.
    mock_qdrant_client.collection_exists.side_effect = lambda name: name in [
        legacy_collection,
        new_collection,
    ]

    # Legacy is empty; the new collection holds 100 points.
    def count_mock(collection_name, exact=True, count_filter=None):
        result = MagicMock()
        result.count = 0 if collection_name == legacy_collection else 100
        return result

    mock_qdrant_client.count.side_effect = count_mock

    # get_collection reports the tenant index already present (Case 2 check).
    info = MagicMock()
    info.payload_schema = {"workspace_id": True}
    mock_qdrant_client.get_collection.return_value = info

    await storage.initialize()

    # The empty legacy collection must have been dropped.
    delete_calls = mock_qdrant_client.delete_collection.call_args_list
    assert len(delete_calls) >= 1, "Empty legacy collection should be auto-deleted"
    first_delete = delete_calls[0]
    deleted_collection = (
        first_delete[0][0]
        if first_delete[0]
        else first_delete.kwargs.get("collection_name")
    )
    assert (
        deleted_collection == legacy_collection
    ), f"Expected to delete '{legacy_collection}', but deleted '{deleted_collection}'"
    print(
        f"✅ Case 1a: Empty legacy collection '{legacy_collection}' auto-deleted successfully"
    )
async def test_case1_nonempty_legacy_warning(mock_qdrant_client, mock_embedding_func):
    """
    Case 1b: both collections exist and the legacy one still HOLDS DATA.

    Expected: warn only — a legacy collection containing data is never
    deleted automatically.
    """
    config = {
        "embedding_batch_num": 10,
        "vector_db_storage_cls_kwargs": {"cosine_better_than_threshold": 0.8},
    }
    storage = QdrantVectorDBStorage(
        namespace="chunks",
        global_config=config,
        embedding_func=mock_embedding_func,
        workspace="test_ws",
    )

    legacy_collection = "lightrag_vdb_chunks"
    new_collection = storage.final_namespace

    # Both collections exist.
    mock_qdrant_client.collection_exists.side_effect = lambda name: name in [
        legacy_collection,
        new_collection,
    ]

    # Legacy holds 50 points; the new collection holds 100.
    def count_mock(collection_name, exact=True, count_filter=None):
        result = MagicMock()
        result.count = 50 if collection_name == legacy_collection else 100
        return result

    mock_qdrant_client.count.side_effect = count_mock

    # get_collection reports the tenant index already present (Case 2 check).
    info = MagicMock()
    info.payload_schema = {"workspace_id": True}
    mock_qdrant_client.get_collection.return_value = info

    await storage.initialize()

    # The legacy collection must not appear among delete_collection calls,
    # whether addressed positionally or by keyword.
    delete_calls = mock_qdrant_client.delete_collection.call_args_list
    legacy_deleted = any(
        (call[0][0] if call[0] else call.kwargs.get("collection_name"))
        == legacy_collection
        for call in delete_calls
    )
    assert not legacy_deleted, "Legacy collection with data should NOT be auto-deleted"
    print(
        f"✅ Case 1b: Legacy collection '{legacy_collection}' with data preserved (warning only)"
    )
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_qdrant_migration.py",
"license": "MIT License",
"lines": 456,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_rerank_chunking.py | """
Unit tests for rerank document chunking functionality.
Tests the chunk_documents_for_rerank and aggregate_chunk_scores functions
in lightrag/rerank.py to ensure proper document splitting and score aggregation.
"""
import pytest
from unittest.mock import Mock, patch, AsyncMock
from lightrag.rerank import (
chunk_documents_for_rerank,
aggregate_chunk_scores,
cohere_rerank,
)
class TestChunkDocumentsForRerank:
    """Tests for chunk_documents_for_rerank."""

    def test_no_chunking_needed_for_short_docs(self):
        """Documents shorter than max_tokens pass through unchanged."""
        documents = ["Short doc 1", "Short doc 2", "Short doc 3"]
        chunked_docs, doc_indices = chunk_documents_for_rerank(
            documents, max_tokens=100, overlap_tokens=10
        )
        # Nothing was split: identity mapping to the originals.
        assert len(chunked_docs) == 3
        assert chunked_docs == documents
        assert doc_indices == [0, 1, 2]

    def test_chunking_with_character_fallback(self):
        """Falls back to character-based chunking when the tokenizer is unavailable."""
        long_doc = "a" * 2000  # well past the character limit
        documents = [long_doc, "short doc"]
        with patch("lightrag.utils.TiktokenTokenizer", side_effect=ImportError):
            chunked_docs, doc_indices = chunk_documents_for_rerank(
                documents,
                max_tokens=100,  # ~400 chars
                overlap_tokens=10,  # ~40 chars
            )
        # The long doc splits into several chunks; the short one stays whole
        # and comes last, mapped back to document index 1.
        assert len(chunked_docs) > 2
        assert chunked_docs[-1] == "short doc"
        assert doc_indices[-1] == 1

    def test_chunking_with_tiktoken_tokenizer(self):
        """Token-based chunking splits long docs and keeps overlap between chunks."""
        long_doc = " ".join([f"word{i}" for i in range(200)])  # ~200 tokens
        chunked_docs, doc_indices = chunk_documents_for_rerank(
            [long_doc, "short"], max_tokens=50, overlap_tokens=10
        )
        assert len(chunked_docs) > 2
        assert doc_indices[-1] == 1  # last chunk is the short document
        if len(chunked_docs) > 2:
            # Consecutive chunks of the same document must share content
            # because of the configured overlap.
            for i in range(len(doc_indices) - 1):
                if doc_indices[i] == doc_indices[i + 1] == 0:
                    tail_words = chunked_docs[i].split()[-5:]
                    next_words = chunked_docs[i + 1].split()
                    assert any(word in next_words for word in tail_words)

    def test_empty_documents(self):
        """An empty input list yields empty outputs."""
        chunked_docs, doc_indices = chunk_documents_for_rerank([])
        assert chunked_docs == []
        assert doc_indices == []

    def test_single_document_chunking(self):
        """A single long document is split into multiple chunks of itself."""
        long_doc = " ".join([f"token{i}" for i in range(100)])  # ~100 tokens
        chunked_docs, doc_indices = chunk_documents_for_rerank(
            [long_doc], max_tokens=30, overlap_tokens=5
        )
        assert len(chunked_docs) > 1
        # Every chunk maps back to the one and only document.
        assert all(idx == 0 for idx in doc_indices)
class TestAggregateChunkScores:
"""Test suite for aggregate_chunk_scores function"""
def test_no_chunking_simple_aggregation(self):
"""Test aggregation when no chunking occurred (1:1 mapping)"""
chunk_results = [
{"index": 0, "relevance_score": 0.9},
{"index": 1, "relevance_score": 0.7},
{"index": 2, "relevance_score": 0.5},
]
doc_indices = [0, 1, 2] # 1:1 mapping
num_original_docs = 3
aggregated = aggregate_chunk_scores(
chunk_results, doc_indices, num_original_docs, aggregation="max"
)
# Results should be sorted by score
assert len(aggregated) == 3
assert aggregated[0]["index"] == 0
assert aggregated[0]["relevance_score"] == 0.9
assert aggregated[1]["index"] == 1
assert aggregated[1]["relevance_score"] == 0.7
assert aggregated[2]["index"] == 2
assert aggregated[2]["relevance_score"] == 0.5
def test_max_aggregation_with_chunks(self):
"""Test max aggregation strategy with multiple chunks per document"""
# 5 chunks: first 3 from doc 0, last 2 from doc 1
chunk_results = [
{"index": 0, "relevance_score": 0.5},
{"index": 1, "relevance_score": 0.8},
{"index": 2, "relevance_score": 0.6},
{"index": 3, "relevance_score": 0.7},
{"index": 4, "relevance_score": 0.4},
]
doc_indices = [0, 0, 0, 1, 1]
num_original_docs = 2
aggregated = aggregate_chunk_scores(
chunk_results, doc_indices, num_original_docs, aggregation="max"
)
# Should take max score for each document
assert len(aggregated) == 2
assert aggregated[0]["index"] == 0
assert aggregated[0]["relevance_score"] == 0.8 # max of 0.5, 0.8, 0.6
assert aggregated[1]["index"] == 1
assert aggregated[1]["relevance_score"] == 0.7 # max of 0.7, 0.4
def test_mean_aggregation_with_chunks(self):
"""Test mean aggregation strategy"""
chunk_results = [
{"index": 0, "relevance_score": 0.6},
{"index": 1, "relevance_score": 0.8},
{"index": 2, "relevance_score": 0.4},
]
doc_indices = [0, 0, 1] # First two chunks from doc 0, last from doc 1
num_original_docs = 2
aggregated = aggregate_chunk_scores(
chunk_results, doc_indices, num_original_docs, aggregation="mean"
)
assert len(aggregated) == 2
assert aggregated[0]["index"] == 0
assert aggregated[0]["relevance_score"] == pytest.approx(0.7) # (0.6 + 0.8) / 2
assert aggregated[1]["index"] == 1
assert aggregated[1]["relevance_score"] == 0.4
def test_first_aggregation_with_chunks(self):
"""Test first aggregation strategy"""
chunk_results = [
{"index": 0, "relevance_score": 0.6},
{"index": 1, "relevance_score": 0.8},
{"index": 2, "relevance_score": 0.4},
]
doc_indices = [0, 0, 1]
num_original_docs = 2
aggregated = aggregate_chunk_scores(
chunk_results, doc_indices, num_original_docs, aggregation="first"
)
assert len(aggregated) == 2
# First should use first score seen for each doc
assert aggregated[0]["index"] == 0
assert aggregated[0]["relevance_score"] == 0.6 # First score for doc 0
assert aggregated[1]["index"] == 1
assert aggregated[1]["relevance_score"] == 0.4
def test_empty_chunk_results(self):
"""Test handling of empty results"""
aggregated = aggregate_chunk_scores([], [], 3, aggregation="max")
assert aggregated == []
def test_documents_with_no_scores(self):
"""Test when some documents have no chunks/scores"""
chunk_results = [
{"index": 0, "relevance_score": 0.9},
{"index": 1, "relevance_score": 0.7},
]
doc_indices = [0, 0] # Both chunks from document 0
num_original_docs = 3 # But we have 3 documents total
aggregated = aggregate_chunk_scores(
chunk_results, doc_indices, num_original_docs, aggregation="max"
)
# Only doc 0 should appear in results
assert len(aggregated) == 1
assert aggregated[0]["index"] == 0
def test_unknown_aggregation_strategy(self):
    """An unrecognized strategy name should fall back to max aggregation."""
    scores = [
        {"index": 0, "relevance_score": 0.6},
        {"index": 1, "relevance_score": 0.8},
    ]
    # Deliberately pass a strategy the implementation does not know.
    merged = aggregate_chunk_scores(scores, [0, 0], 1, aggregation="invalid")
    # Fallback to max: the larger of 0.6 / 0.8 wins.
    assert merged[0]["relevance_score"] == 0.8
@pytest.mark.offline
class TestTopNWithChunking:
    """Tests for top_n behavior when chunking is enabled (Bug fix verification)"""

    @pytest.mark.asyncio
    async def test_top_n_limits_documents_not_chunks(self):
        """
        Test that top_n correctly limits documents (not chunks) when chunking is enabled.
        Bug scenario: 10 docs expand to 50 chunks. With old behavior, top_n=5 would
        return scores for only 5 chunks (possibly all from 1-2 docs). After aggregation,
        fewer than 5 documents would be returned.
        Fixed behavior: top_n=5 should return exactly 5 documents after aggregation.
        """
        # Setup: 5 documents, each producing multiple chunks when chunked
        # Using small max_tokens to force chunking
        long_docs = [" ".join([f"doc{i}_word{j}" for j in range(50)]) for i in range(5)]
        query = "test query"
        # First, determine how many chunks will be created by actual chunking
        # so the mocked API response covers every chunk the code will send.
        _, doc_indices = chunk_documents_for_rerank(
            long_docs, max_tokens=50, overlap_tokens=10
        )
        num_chunks = len(doc_indices)
        # Mock API returns scores for ALL chunks (simulating disabled API-level top_n)
        # Give different scores to ensure doc 0 gets highest, doc 1 second, etc.
        # Assign scores based on original document index (lower doc index = higher
        # score); chunks of the same document all share that document's score.
        mock_chunk_scores = []
        for i in range(num_chunks):
            original_doc = doc_indices[i]
            base_score = 0.9 - (original_doc * 0.1)
            mock_chunk_scores.append({"index": i, "relevance_score": base_score})
        # Build an aiohttp-style response mock that also works as an async
        # context manager (`async with session.post(...) as response`).
        mock_response = Mock()
        mock_response.status = 200
        mock_response.json = AsyncMock(return_value={"results": mock_chunk_scores})
        mock_response.request_info = None
        mock_response.history = None
        mock_response.headers = {}
        mock_response.__aenter__ = AsyncMock(return_value=mock_response)
        mock_response.__aexit__ = AsyncMock(return_value=None)
        mock_session = Mock()
        mock_session.post = Mock(return_value=mock_response)
        mock_session.__aenter__ = AsyncMock(return_value=mock_session)
        mock_session.__aexit__ = AsyncMock(return_value=None)
        with patch("lightrag.rerank.aiohttp.ClientSession", return_value=mock_session):
            result = await cohere_rerank(
                query=query,
                documents=long_docs,
                api_key="test-key",
                base_url="http://test.com/rerank",
                enable_chunking=True,
                max_tokens_per_doc=50,  # Match chunking above
                top_n=3,  # Request top 3 documents
            )
        # Verify: should get exactly 3 documents (not unlimited chunks)
        assert len(result) == 3
        # All results should have valid document indices (0-4)
        assert all(0 <= r["index"] < 5 for r in result)
        # Results should be sorted by score (descending)
        assert all(
            result[i]["relevance_score"] >= result[i + 1]["relevance_score"]
            for i in range(len(result) - 1)
        )
        # The top 3 docs should be 0, 1, 2 (highest scores)
        result_indices = [r["index"] for r in result]
        assert set(result_indices) == {0, 1, 2}

    @pytest.mark.asyncio
    async def test_api_receives_no_top_n_when_chunking_enabled(self):
        """
        Test that the API request does NOT include top_n when chunking is enabled.
        This ensures all chunk scores are retrieved for proper aggregation.
        """
        documents = [" ".join([f"word{i}" for i in range(100)]), "short doc"]
        query = "test query"
        # Filled in by capture_post below with the JSON payload sent to the API.
        captured_payload = {}
        mock_response = Mock()
        mock_response.status = 200
        mock_response.json = AsyncMock(
            return_value={
                "results": [
                    {"index": 0, "relevance_score": 0.9},
                    {"index": 1, "relevance_score": 0.8},
                    {"index": 2, "relevance_score": 0.7},
                ]
            }
        )
        mock_response.request_info = None
        mock_response.history = None
        mock_response.headers = {}
        mock_response.__aenter__ = AsyncMock(return_value=mock_response)
        mock_response.__aexit__ = AsyncMock(return_value=None)

        def capture_post(*args, **kwargs):
            # Record the request body so the assertion below can inspect it.
            captured_payload.update(kwargs.get("json", {}))
            return mock_response

        mock_session = Mock()
        mock_session.post = Mock(side_effect=capture_post)
        mock_session.__aenter__ = AsyncMock(return_value=mock_session)
        mock_session.__aexit__ = AsyncMock(return_value=None)
        with patch("lightrag.rerank.aiohttp.ClientSession", return_value=mock_session):
            await cohere_rerank(
                query=query,
                documents=documents,
                api_key="test-key",
                base_url="http://test.com/rerank",
                enable_chunking=True,
                max_tokens_per_doc=30,
                top_n=1,  # User wants top 1 document
            )
        # Verify: API payload should NOT have top_n (disabled for chunking)
        assert "top_n" not in captured_payload

    @pytest.mark.asyncio
    async def test_top_n_not_modified_when_chunking_disabled(self):
        """
        Test that top_n is passed through to API when chunking is disabled.
        """
        documents = ["doc1", "doc2"]
        query = "test query"
        # Filled in by capture_post below with the JSON payload sent to the API.
        captured_payload = {}
        mock_response = Mock()
        mock_response.status = 200
        mock_response.json = AsyncMock(
            return_value={
                "results": [
                    {"index": 0, "relevance_score": 0.9},
                ]
            }
        )
        mock_response.request_info = None
        mock_response.history = None
        mock_response.headers = {}
        mock_response.__aenter__ = AsyncMock(return_value=mock_response)
        mock_response.__aexit__ = AsyncMock(return_value=None)

        def capture_post(*args, **kwargs):
            # Record the request body so the assertion below can inspect it.
            captured_payload.update(kwargs.get("json", {}))
            return mock_response

        mock_session = Mock()
        mock_session.post = Mock(side_effect=capture_post)
        mock_session.__aenter__ = AsyncMock(return_value=mock_session)
        mock_session.__aexit__ = AsyncMock(return_value=None)
        with patch("lightrag.rerank.aiohttp.ClientSession", return_value=mock_session):
            await cohere_rerank(
                query=query,
                documents=documents,
                api_key="test-key",
                base_url="http://test.com/rerank",
                enable_chunking=False,  # Chunking disabled
                top_n=1,
            )
        # Verify: API payload should have top_n when chunking is disabled
        assert captured_payload.get("top_n") == 1
@pytest.mark.offline
class TestCohereRerankChunking:
    """Integration tests for cohere_rerank with chunking enabled"""

    @pytest.mark.asyncio
    async def test_cohere_rerank_with_chunking_disabled(self):
        """Test that chunking can be disabled"""
        documents = ["doc1", "doc2"]
        query = "test query"
        # Mock the generic_rerank_api so no HTTP call happens.
        with patch(
            "lightrag.rerank.generic_rerank_api", new_callable=AsyncMock
        ) as mock_api:
            mock_api.return_value = [
                {"index": 0, "relevance_score": 0.9},
                {"index": 1, "relevance_score": 0.7},
            ]
            result = await cohere_rerank(
                query=query,
                documents=documents,
                api_key="test-key",
                enable_chunking=False,
                max_tokens_per_doc=100,
            )
            # Verify generic_rerank_api was called with correct parameters
            mock_api.assert_called_once()
            call_kwargs = mock_api.call_args[1]
            assert call_kwargs["enable_chunking"] is False
            assert call_kwargs["max_tokens_per_doc"] == 100
            # Result should mirror mocked scores
            assert len(result) == 2
            assert result[0]["index"] == 0
            assert result[0]["relevance_score"] == 0.9
            assert result[1]["index"] == 1
            assert result[1]["relevance_score"] == 0.7

    @pytest.mark.asyncio
    async def test_cohere_rerank_with_chunking_enabled(self):
        """Test that chunking parameters are passed through"""
        documents = ["doc1", "doc2"]
        query = "test query"
        with patch(
            "lightrag.rerank.generic_rerank_api", new_callable=AsyncMock
        ) as mock_api:
            mock_api.return_value = [
                {"index": 0, "relevance_score": 0.9},
                {"index": 1, "relevance_score": 0.7},
            ]
            result = await cohere_rerank(
                query=query,
                documents=documents,
                api_key="test-key",
                enable_chunking=True,
                max_tokens_per_doc=480,
            )
            # Verify parameters were passed through to the underlying API helper.
            call_kwargs = mock_api.call_args[1]
            assert call_kwargs["enable_chunking"] is True
            assert call_kwargs["max_tokens_per_doc"] == 480
            # Result should mirror mocked scores
            assert len(result) == 2
            assert result[0]["index"] == 0
            assert result[0]["relevance_score"] == 0.9
            assert result[1]["index"] == 1
            assert result[1]["relevance_score"] == 0.7

    @pytest.mark.asyncio
    async def test_cohere_rerank_default_parameters(self):
        """Test default parameter values for cohere_rerank"""
        documents = ["doc1"]
        query = "test"
        with patch(
            "lightrag.rerank.generic_rerank_api", new_callable=AsyncMock
        ) as mock_api:
            mock_api.return_value = [{"index": 0, "relevance_score": 0.9}]
            result = await cohere_rerank(
                query=query, documents=documents, api_key="test-key"
            )
            # Verify default values (chunking off, 4096-token limit, rerank-v3.5).
            call_kwargs = mock_api.call_args[1]
            assert call_kwargs["enable_chunking"] is False
            assert call_kwargs["max_tokens_per_doc"] == 4096
            assert call_kwargs["model"] == "rerank-v3.5"
            # Result should mirror mocked scores
            assert len(result) == 1
            assert result[0]["index"] == 0
            assert result[0]["relevance_score"] == 0.9
@pytest.mark.offline
class TestEndToEndChunking:
    """End-to-end tests for chunking workflow"""

    @pytest.mark.asyncio
    async def test_end_to_end_chunking_workflow(self):
        """Test complete chunking workflow from documents to aggregated results"""
        # Create documents where first one needs chunking
        long_doc = " ".join([f"word{i}" for i in range(100)])
        documents = [long_doc, "short doc"]
        query = "test query"
        # Mock the HTTP call inside generic_rerank_api
        mock_response = Mock()
        mock_response.status = 200
        mock_response.json = AsyncMock(
            return_value={
                "results": [
                    {"index": 0, "relevance_score": 0.5},  # chunk 0 from doc 0
                    {"index": 1, "relevance_score": 0.8},  # chunk 1 from doc 0
                    {"index": 2, "relevance_score": 0.6},  # chunk 2 from doc 0
                    {"index": 3, "relevance_score": 0.7},  # doc 1 (short)
                ]
            }
        )
        mock_response.request_info = None
        mock_response.history = None
        mock_response.headers = {}
        # Make mock_response an async context manager (for `async with session.post() as response`)
        mock_response.__aenter__ = AsyncMock(return_value=mock_response)
        mock_response.__aexit__ = AsyncMock(return_value=None)
        mock_session = Mock()
        # session.post() returns an async context manager, so return mock_response which is now one
        mock_session.post = Mock(return_value=mock_response)
        mock_session.__aenter__ = AsyncMock(return_value=mock_session)
        mock_session.__aexit__ = AsyncMock(return_value=None)
        with patch("lightrag.rerank.aiohttp.ClientSession", return_value=mock_session):
            result = await cohere_rerank(
                query=query,
                documents=documents,
                api_key="test-key",
                base_url="http://test.com/rerank",
                enable_chunking=True,
                max_tokens_per_doc=30,  # Force chunking of long doc
            )
        # Should get 2 results (one per original document)
        # The long doc's chunks should be aggregated
        assert len(result) <= len(documents)
        # Results should be sorted by score (descending pairwise check).
        assert all(
            result[i]["relevance_score"] >= result[i + 1]["relevance_score"]
            for i in range(len(result) - 1)
        )
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_rerank_chunking.py",
"license": "MIT License",
"lines": 477,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_unified_lock_safety.py | """
Tests for UnifiedLock safety when lock is None.
This test module verifies that get_internal_lock() and get_data_init_lock()
raise RuntimeError when shared data is not initialized, preventing false
security and potential race conditions.
Design: The None check has been moved from UnifiedLock.__aenter__/__enter__
to the lock factory functions (get_internal_lock, get_data_init_lock) for
early failure detection.
Critical Bug 1 (Fixed): When self._lock is None, the code would fail with
AttributeError. Now the check is in factory functions for clearer errors.
Critical Bug 2: In __aexit__, when async_lock.release() fails, the error
recovery logic would attempt to release it again, causing double-release issues.
"""
from unittest.mock import MagicMock, AsyncMock
import pytest
from lightrag.kg.shared_storage import (
UnifiedLock,
get_internal_lock,
get_data_init_lock,
finalize_share_data,
)
class TestUnifiedLockSafety:
    """Test suite for UnifiedLock None safety checks."""

    def setup_method(self):
        """Ensure shared data is finalized before each test."""
        finalize_share_data()

    def teardown_method(self):
        """Clean up after each test."""
        finalize_share_data()

    def test_get_internal_lock_raises_when_not_initialized(self):
        """
        Test that get_internal_lock() raises RuntimeError when shared data is not initialized.
        Scenario: Call get_internal_lock() before initialize_share_data() is called.
        Expected: RuntimeError raised with clear error message.
        This test verifies the None check has been moved to the factory function.
        """
        with pytest.raises(
            RuntimeError, match="Shared data not initialized.*initialize_share_data"
        ):
            get_internal_lock()

    def test_get_data_init_lock_raises_when_not_initialized(self):
        """
        Test that get_data_init_lock() raises RuntimeError when shared data is not initialized.
        Scenario: Call get_data_init_lock() before initialize_share_data() is called.
        Expected: RuntimeError raised with clear error message.
        This test verifies the None check has been moved to the factory function.
        """
        with pytest.raises(
            RuntimeError, match="Shared data not initialized.*initialize_share_data"
        ):
            get_data_init_lock()

    @pytest.mark.offline
    async def test_aexit_no_double_release_on_async_lock_failure(self):
        """
        Test that __aexit__ doesn't attempt to release async_lock twice when it fails.
        Scenario: async_lock.release() fails during normal release.
        Expected: Recovery logic should NOT attempt to release async_lock again,
        preventing double-release issues.
        This tests Bug 2 fix: async_lock_released tracking prevents double release.
        """
        # Create mock locks: a plain sync lock plus an async lock whose
        # release() always fails, to exercise the __aexit__ recovery path.
        main_lock = MagicMock()
        main_lock.acquire = MagicMock()
        main_lock.release = MagicMock()
        async_lock = AsyncMock()
        async_lock.acquire = AsyncMock()
        # Make async_lock.release() fail, counting how often it is invoked.
        release_call_count = 0

        def mock_release_fail():
            nonlocal release_call_count
            release_call_count += 1
            raise RuntimeError("Async lock release failed")

        async_lock.release = MagicMock(side_effect=mock_release_fail)
        # Create UnifiedLock with both locks (sync mode with async_lock)
        lock = UnifiedLock(
            lock=main_lock,
            is_async=False,
            name="test_double_release",
            enable_logging=False,
        )
        lock._async_lock = async_lock
        # FIX: the original try/except allowed the test to pass vacuously if no
        # exception was raised at all. pytest.raises makes the expected failure
        # during __aexit__ mandatory and still checks the message.
        with pytest.raises(RuntimeError, match="Async lock release failed"):
            async with lock:
                pass
        # Verify async_lock.release() was called only ONCE, not twice
        assert (
            release_call_count == 1
        ), f"async_lock.release() should be called only once, but was called {release_call_count} times"
        # Main lock should have been released successfully
        main_lock.release.assert_called_once()
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_unified_lock_safety.py",
"license": "MIT License",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_workspace_migration_isolation.py | """
Tests for workspace isolation during PostgreSQL migration.
This test module verifies that setup_table() properly filters migration data
by workspace, preventing cross-workspace data leakage during legacy table migration.
Critical Bug: Migration copied ALL records from legacy table regardless of workspace,
causing workspace A to receive workspace B's data, violating multi-tenant isolation.
"""
import pytest
from unittest.mock import AsyncMock
from lightrag.kg.postgres_impl import PGVectorStorage
class TestWorkspaceMigrationIsolation:
    """Test suite for workspace-scoped migration in PostgreSQL."""

    async def test_migration_filters_by_workspace(self):
        """
        Test that migration only copies data from the specified workspace.
        Scenario: Legacy table contains data from multiple workspaces.
        Migrate only workspace_a's data to new table.
        Expected: New table contains only workspace_a data, workspace_b data excluded.
        """
        db = AsyncMock()
        # Configure mock return values to avoid unawaited coroutine warnings
        db._create_vector_index.return_value = None
        # Track state for new table count (starts at 0, increases after migration)
        new_table_record_count = {"count": 0}

        # Mock table existence checks
        # NOTE(review): defined but never wired to the mock — the active hook is
        # check_table_exists_side_effect below; confirm this one can be removed.
        async def table_exists_side_effect(db_instance, name):
            if name.lower() == "lightrag_doc_chunks":  # legacy
                return True
            elif name.lower() == "lightrag_doc_chunks_model_1536d":  # new
                return False  # New table doesn't exist initially
            return False

        # Mock data for workspace_a
        mock_records_a = [
            {
                "id": "a1",
                "workspace": "workspace_a",
                "content": "content_a1",
                "content_vector": [0.1] * 1536,
            },
            {
                "id": "a2",
                "workspace": "workspace_a",
                "content": "content_a2",
                "content_vector": [0.2] * 1536,
            },
        ]

        # Mock query responses. Branch order matters: the most specific SQL
        # patterns must be matched before the generic COUNT/SELECT fallbacks.
        async def query_side_effect(sql, params, **kwargs):
            multirows = kwargs.get("multirows", False)
            sql_upper = sql.upper()
            # Count query for new table workspace data (verification before migration)
            if (
                "COUNT(*)" in sql_upper
                and "MODEL_1536D" in sql_upper
                and "WHERE WORKSPACE" in sql_upper
            ):
                return new_table_record_count  # Initially 0
            # Count query with workspace filter (legacy table) - for workspace count
            elif "COUNT(*)" in sql_upper and "WHERE WORKSPACE" in sql_upper:
                if params and params[0] == "workspace_a":
                    return {"count": 2}  # workspace_a has 2 records
                elif params and params[0] == "workspace_b":
                    return {"count": 3}  # workspace_b has 3 records
                return {"count": 0}
            # Count query for legacy table (total, no workspace filter)
            elif (
                "COUNT(*)" in sql_upper
                and "LIGHTRAG" in sql_upper
                and "WHERE WORKSPACE" not in sql_upper
            ):
                return {"count": 5}  # Total records in legacy
            # SELECT with workspace filter for migration (multirows)
            elif "SELECT" in sql_upper and "FROM" in sql_upper and multirows:
                workspace = params[0] if params else None
                if workspace == "workspace_a":
                    # Handle keyset pagination: check for "id >" pattern
                    if "id >" in sql.lower():
                        # Keyset pagination: params = [workspace, last_id, limit]
                        last_id = params[1] if len(params) > 1 else None
                        # Find records after last_id
                        found_idx = -1
                        for i, rec in enumerate(mock_records_a):
                            if rec["id"] == last_id:
                                found_idx = i
                                break
                        if found_idx >= 0:
                            return mock_records_a[found_idx + 1 :]
                        return []
                    else:
                        # First batch: params = [workspace, limit]
                        return mock_records_a
                return []  # No data for other workspaces
            return {}

        db.query.side_effect = query_side_effect
        db.execute = AsyncMock()

        # Mock check_table_exists on db
        async def check_table_exists_side_effect(name):
            if name.lower() == "lightrag_doc_chunks":  # legacy
                return True
            elif name.lower() == "lightrag_doc_chunks_model_1536d":  # new
                return False  # New table doesn't exist initially
            return False

        db.check_table_exists = AsyncMock(side_effect=check_table_exists_side_effect)
        # Track migration through _run_with_retry calls
        migration_executed = []

        async def mock_run_with_retry(operation, *args, **kwargs):
            migration_executed.append(True)
            new_table_record_count["count"] = 2  # Simulate 2 records migrated
            return None

        db._run_with_retry = AsyncMock(side_effect=mock_run_with_retry)
        # Migrate for workspace_a only - correct parameter order
        await PGVectorStorage.setup_table(
            db,
            "LIGHTRAG_DOC_CHUNKS_model_1536d",
            workspace="workspace_a",  # CRITICAL: Only migrate workspace_a
            embedding_dim=1536,
            legacy_table_name="LIGHTRAG_DOC_CHUNKS",
            base_table="LIGHTRAG_DOC_CHUNKS",
        )
        # Verify the migration was triggered
        assert (
            len(migration_executed) > 0
        ), "Migration should have been executed for workspace_a"

    async def test_migration_without_workspace_raises_error(self):
        """
        Test that migration without workspace parameter raises ValueError.
        Scenario: setup_table called without workspace parameter.
        Expected: ValueError is raised because workspace is required.
        """
        db = AsyncMock()
        # workspace is now a required parameter - calling with None should raise ValueError
        with pytest.raises(ValueError, match="workspace must be provided"):
            await PGVectorStorage.setup_table(
                db,
                "lightrag_doc_chunks_model_1536d",
                workspace=None,  # No workspace - should raise ValueError
                embedding_dim=1536,
                legacy_table_name="lightrag_doc_chunks",
                base_table="lightrag_doc_chunks",
            )

    async def test_no_cross_workspace_contamination(self):
        """
        Test that workspace B's migration doesn't include workspace A's data.
        Scenario: Migration for workspace_b only.
        Expected: Only workspace_b data is queried, workspace_a data excluded.
        """
        db = AsyncMock()
        # Configure mock return values to avoid unawaited coroutine warnings
        db._create_vector_index.return_value = None
        # Track which workspace is being queried
        queried_workspace = None
        new_table_count = {"count": 0}
        # Mock data for workspace_b
        mock_records_b = [
            {
                "id": "b1",
                "workspace": "workspace_b",
                "content": "content_b1",
                "content_vector": [0.3] * 1536,
            },
        ]

        # NOTE(review): defined but never wired to the mock — the active hook is
        # check_table_exists_side_effect below; confirm this one can be removed.
        async def table_exists_side_effect(db_instance, name):
            if name.lower() == "lightrag_doc_chunks":  # legacy
                return True
            elif name.lower() == "lightrag_doc_chunks_model_1536d":  # new
                return False
            return False

        async def query_side_effect(sql, params, **kwargs):
            nonlocal queried_workspace
            multirows = kwargs.get("multirows", False)
            sql_upper = sql.upper()
            # Count query for new table workspace data (should be 0 initially)
            if (
                "COUNT(*)" in sql_upper
                and "MODEL_1536D" in sql_upper
                and "WHERE WORKSPACE" in sql_upper
            ):
                return new_table_count
            # Count query with workspace filter (legacy table)
            elif "COUNT(*)" in sql_upper and "WHERE WORKSPACE" in sql_upper:
                queried_workspace = params[0] if params else None
                return {"count": 1}  # 1 record for the queried workspace
            # Count query for legacy table total (no workspace filter)
            elif (
                "COUNT(*)" in sql_upper
                and "LIGHTRAG" in sql_upper
                and "WHERE WORKSPACE" not in sql_upper
            ):
                return {"count": 3}  # 3 total records in legacy
            # SELECT with workspace filter for migration (multirows)
            elif "SELECT" in sql_upper and "FROM" in sql_upper and multirows:
                workspace = params[0] if params else None
                if workspace == "workspace_b":
                    # Handle keyset pagination: check for "id >" pattern
                    if "id >" in sql.lower():
                        # Keyset pagination: params = [workspace, last_id, limit]
                        last_id = params[1] if len(params) > 1 else None
                        # Find records after last_id
                        found_idx = -1
                        for i, rec in enumerate(mock_records_b):
                            if rec["id"] == last_id:
                                found_idx = i
                                break
                        if found_idx >= 0:
                            return mock_records_b[found_idx + 1 :]
                        return []
                    else:
                        # First batch: params = [workspace, limit]
                        return mock_records_b
                return []  # No data for other workspaces
            return {}

        db.query.side_effect = query_side_effect
        db.execute = AsyncMock()

        # Mock check_table_exists on db
        async def check_table_exists_side_effect(name):
            if name.lower() == "lightrag_doc_chunks":  # legacy
                return True
            elif name.lower() == "lightrag_doc_chunks_model_1536d":  # new
                return False
            return False

        db.check_table_exists = AsyncMock(side_effect=check_table_exists_side_effect)
        # Track migration through _run_with_retry calls
        migration_executed = []

        async def mock_run_with_retry(operation, *args, **kwargs):
            migration_executed.append(True)
            new_table_count["count"] = 1  # Simulate migration
            return None

        db._run_with_retry = AsyncMock(side_effect=mock_run_with_retry)
        # Migrate workspace_b - correct parameter order
        await PGVectorStorage.setup_table(
            db,
            "LIGHTRAG_DOC_CHUNKS_model_1536d",
            workspace="workspace_b",  # Only migrate workspace_b
            embedding_dim=1536,
            legacy_table_name="LIGHTRAG_DOC_CHUNKS",
            base_table="LIGHTRAG_DOC_CHUNKS",
        )
        # Verify only workspace_b was queried
        assert queried_workspace == "workspace_b", "Should only query workspace_b"
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_workspace_migration_isolation.py",
"license": "MIT License",
"lines": 239,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_chunking.py | import pytest
from lightrag.exceptions import ChunkTokenLimitExceededError
from lightrag.operate import chunking_by_token_size
from lightrag.utils import Tokenizer, TokenizerInterface
class DummyTokenizer(TokenizerInterface):
    """Trivial tokenizer: each character maps to exactly one token (its code point)."""

    def encode(self, content: str):
        # One token per character: the character's Unicode code point.
        return list(map(ord, content))

    def decode(self, tokens):
        # Exact inverse of encode: turn each code point back into its character.
        chars = [chr(t) for t in tokens]
        return "".join(chars)
class MultiTokenCharacterTokenizer(TokenizerInterface):
    """
    Tokenizer with a deliberately non-uniform character-to-token ratio.

    Useful for catching bugs where code counts characters instead of tokens.
    Encoding scheme:
      - Uppercase letters   -> 2 tokens (code, code + 1000)
      - Punctuation !, ?, . -> 3 tokens (code, code + 2000, code + 3000)
      - Everything else     -> 1 token (code)
    """

    def encode(self, content: str):
        tokens = []
        for ch in content:
            code = ord(ch)
            if ch.isupper():
                # Uppercase = 2 tokens
                tokens += [code, code + 1000]
            elif ch in ["!", "?", "."]:
                # Punctuation = 3 tokens
                tokens += [code, code + 2000, code + 3000]
            else:
                # Regular chars = 1 token
                tokens.append(code)
        return tokens

    def decode(self, tokens):
        # Best-effort inverse of encode — sufficient for the tests.
        pieces = []
        pos = 0
        total = len(tokens)
        while pos < total:
            head = tokens[pos]
            # Assume a single-token character unless a marker sequence follows.
            step = 1
            if (
                pos + 2 < total
                and tokens[pos + 1] == head + 2000
                and tokens[pos + 2] == head + 3000
            ):
                step = 3  # punctuation encoded as three tokens
            elif pos + 1 < total and tokens[pos + 1] == head + 1000:
                step = 2  # uppercase letter encoded as two tokens
            pieces.append(chr(head))
            pos += step
        return "".join(pieces)
def make_tokenizer() -> Tokenizer:
    """Build a Tokenizer wrapping the 1:1 DummyTokenizer."""
    return Tokenizer(tokenizer=DummyTokenizer(), model_name="dummy")
def make_multi_token_tokenizer() -> Tokenizer:
    """Build a Tokenizer wrapping the non-uniform MultiTokenCharacterTokenizer."""
    return Tokenizer(tokenizer=MultiTokenCharacterTokenizer(), model_name="multi")
# ============================================================================
# Tests for split_by_character_only=True (raises error on oversized chunks)
# ============================================================================
@pytest.mark.offline
def test_split_by_character_only_within_limit():
    """Delimiter-only splitting succeeds when every piece fits the token budget."""
    chunks = chunking_by_token_size(
        make_tokenizer(),
        "alpha\n\nbeta",
        split_by_character="\n\n",
        split_by_character_only=True,
        chunk_token_size=10,
    )
    contents = [piece["content"] for piece in chunks]
    assert contents == ["alpha", "beta"]
@pytest.mark.offline
def test_split_by_character_only_exceeding_limit_raises():
    """A piece longer than the limit must raise ChunkTokenLimitExceededError."""
    too_long = "a" * 12
    with pytest.raises(ChunkTokenLimitExceededError) as excinfo:
        chunking_by_token_size(
            make_tokenizer(),
            too_long,
            split_by_character="\n\n",
            split_by_character_only=True,
            chunk_token_size=5,
        )
    raised = excinfo.value
    # Error carries both the measured token count and the configured limit.
    assert raised.chunk_tokens == len(too_long)
    assert raised.chunk_token_limit == 5
@pytest.mark.offline
def test_chunk_error_includes_preview():
    """The raised error should carry an 80-character preview of the offending chunk."""
    payload = "x" * 100
    with pytest.raises(ChunkTokenLimitExceededError) as excinfo:
        chunking_by_token_size(
            make_tokenizer(),
            payload,
            split_by_character="\n\n",
            split_by_character_only=True,
            chunk_token_size=10,
        )
    raised = excinfo.value
    # Preview is capped at the first 80 characters of the 100-char input.
    assert raised.chunk_preview == "x" * 80
    assert "Preview:" in str(raised)
@pytest.mark.offline
def test_split_by_character_only_at_exact_limit():
    """A chunk of exactly chunk_token_size tokens is accepted untouched."""
    text = "a" * 10
    chunks = chunking_by_token_size(
        make_tokenizer(),
        text,
        split_by_character="\n\n",
        split_by_character_only=True,
        chunk_token_size=10,
    )
    assert len(chunks) == 1
    only = chunks[0]
    assert only["content"] == text
    assert only["tokens"] == 10
@pytest.mark.offline
def test_split_by_character_only_one_over_limit():
    """Exceeding the token limit by a single token must still raise."""
    with pytest.raises(ChunkTokenLimitExceededError) as excinfo:
        chunking_by_token_size(
            make_tokenizer(),
            "a" * 11,
            split_by_character="\n\n",
            split_by_character_only=True,
            chunk_token_size=10,
        )
    raised = excinfo.value
    assert raised.chunk_tokens == 11
    assert raised.chunk_token_limit == 10
# ============================================================================
# Tests for split_by_character_only=False (recursive splitting)
# ============================================================================
@pytest.mark.offline
def test_split_recursive_oversized_chunk():
    """With split_by_character_only=False an oversized piece is re-split by tokens."""
    # 30 single-token characters with chunk size 10 and no overlap.
    chunks = chunking_by_token_size(
        make_tokenizer(),
        "a" * 30,
        split_by_character="\n\n",
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=0,
    )
    assert len(chunks) == 3
    for piece in chunks:
        assert piece["tokens"] == 10
        assert piece["content"] == "a" * 10
@pytest.mark.offline
def test_split_with_chunk_overlap():
    """
    Verify overlap positions exactly, using content where every character is unique.

    A misaligned overlap would produce the wrong substring and fail the
    per-chunk content comparison below.
    """
    text = "0123456789abcdefghijklmno"  # 25 unique characters
    chunks = chunking_by_token_size(
        make_tokenizer(),
        text,
        split_by_character="\n\n",
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=3,
    )
    # Step size = chunk_size - overlap = 7, so chunks begin at 0, 7, 14, 21.
    expected = [
        ("0123456789", 10),  # [0:10]
        ("789abcdefg", 10),  # [7:17] - shares "789" with previous chunk
        ("efghijklmn", 10),  # [14:24] - shares "efg"
        ("lmno", 4),  # [21:25] - shares "lmn"
    ]
    assert len(chunks) == len(expected)
    for piece, (content, token_count) in zip(chunks, expected):
        assert piece["content"] == content
        assert piece["tokens"] == token_count
@pytest.mark.offline
def test_split_multiple_chunks_with_mixed_sizes():
    """Only oversized delimiter pieces get re-split; small pieces pass through."""
    # Pieces: "small" (5 tokens), 16 x "a" (16 tokens), "medium" (6 tokens).
    text = "small\n\n" + "a" * 16 + "\n\nmedium"
    chunks = chunking_by_token_size(
        make_tokenizer(),
        text,
        split_by_character="\n\n",
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=2,
    )
    # The 16-token middle piece splits in two; the other pieces are untouched,
    # giving 4 chunks in total.
    assert len(chunks) == 4
    head = chunks[0]
    assert head["content"] == "small"
    assert head["tokens"] == 5
@pytest.mark.offline
def test_split_exact_boundary():
    """20 tokens with chunk size 10 and no overlap yields two full chunks."""
    chunks = chunking_by_token_size(
        make_tokenizer(),
        "a" * 20,
        split_by_character="\n\n",
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=0,
    )
    assert [piece["tokens"] for piece in chunks] == [10, 10]
@pytest.mark.offline
def test_split_very_large_text():
    """100 tokens / size 10 / no overlap -> exactly ten 10-token chunks."""
    chunks = chunking_by_token_size(
        make_tokenizer(),
        "a" * 100,
        split_by_character="\n\n",
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=0,
    )
    assert len(chunks) == 10
    assert all(piece["tokens"] == 10 for piece in chunks)
# ============================================================================
# Edge Cases
# ============================================================================
@pytest.mark.offline
def test_empty_content():
    """Empty input should yield a single empty zero-token chunk."""
    chunks = chunking_by_token_size(
        make_tokenizer(),
        "",
        split_by_character="\n\n",
        split_by_character_only=True,
        chunk_token_size=10,
    )
    assert len(chunks) == 1
    empty_chunk = chunks[0]
    assert empty_chunk["content"] == ""
    assert empty_chunk["tokens"] == 0
@pytest.mark.offline
def test_single_character():
    """A one-character input becomes a single one-token chunk."""
    chunks = chunking_by_token_size(
        make_tokenizer(),
        "a",
        split_by_character="\n\n",
        split_by_character_only=True,
        chunk_token_size=10,
    )
    assert len(chunks) == 1
    single = chunks[0]
    assert single["content"] == "a"
    assert single["tokens"] == 1
@pytest.mark.offline
def test_no_delimiter_in_content():
    """Falls back to token-based splitting when the delimiter never occurs."""
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "a" * 30,
        split_by_character="\n\n",  # never present in the input
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=0,
    )
    # Token-size splitting still applies: three even 10-token chunks.
    assert [piece["tokens"] for piece in result] == [10, 10, 10]
@pytest.mark.offline
def test_no_split_character():
    """split_by_character=None means pure token-size chunking."""
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "a" * 30,
        split_by_character=None,
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=0,
    )
    assert [piece["tokens"] for piece in result] == [10, 10, 10]
# ============================================================================
# Parameter Combinations
# ============================================================================
@pytest.mark.offline
def test_different_delimiter_newline():
    """A single "\\n" delimiter splits the text into one chunk per line."""
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "alpha\nbeta\ngamma",
        split_by_character="\n",
        split_by_character_only=True,
        chunk_token_size=10,
    )
    assert [piece["content"] for piece in result] == ["alpha", "beta", "gamma"]
@pytest.mark.offline
def test_delimiter_based_splitting_verification():
    """Chunks must be cut exactly at the "||" delimiter positions.

    Guards against an implementation that splits at arbitrary offsets
    even though a delimiter was supplied.
    """
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "part1||part2||part3||part4",
        split_by_character="||",
        split_by_character_only=True,
        chunk_token_size=20,
    )
    expected = [f"part{i}" for i in range(1, 5)]
    assert [piece["content"] for piece in result] == expected
    # The delimiter itself must never leak into any chunk.
    assert not any("||" in piece["content"] for piece in result)
@pytest.mark.offline
def test_multi_character_delimiter_splitting():
    """
    Verify that multi-character delimiters are correctly recognized and not partially matched.
    Tests various multi-character delimiter scenarios to ensure the entire delimiter
    sequence is used for splitting, not individual characters.
    """
    tokenizer = make_tokenizer()
    # Test 1: Multi-character delimiter that contains single chars also present elsewhere
    content = "data<SEP>more<SEP>final"
    chunks = chunking_by_token_size(
        tokenizer,
        content,
        split_by_character="<SEP>",
        split_by_character_only=True,
        chunk_token_size=50,
    )
    assert len(chunks) == 3
    assert chunks[0]["content"] == "data"
    assert chunks[1]["content"] == "more"
    assert chunks[2]["content"] == "final"
    # Verify full delimiter is not in chunks, not just parts
    for chunk in chunks:
        assert "<SEP>" not in chunk["content"]
    # Test 2: Delimiter appears in middle of content
    content = "first><second><third"
    chunks = chunking_by_token_size(
        tokenizer,
        content,
        split_by_character="><",  # Multi-char delimiter
        split_by_character_only=True,
        chunk_token_size=50,
    )
    # Should split at "><" delimiter
    assert len(chunks) == 3
    assert chunks[0]["content"] == "first"
    assert chunks[1]["content"] == "second"
    assert chunks[2]["content"] == "third"
    # Test 3: Bracketed "[***]" delimiter (five characters including the brackets)
    content = "section1[***]section2[***]section3"
    chunks = chunking_by_token_size(
        tokenizer,
        content,
        split_by_character="[***]",
        split_by_character_only=True,
        chunk_token_size=50,
    )
    assert len(chunks) == 3
    assert chunks[0]["content"] == "section1"
    assert chunks[1]["content"] == "section2"
    assert chunks[2]["content"] == "section3"
    # Test 4: Delimiter with special regex characters (should be treated literally)
    content = "partA...partB...partC"
    chunks = chunking_by_token_size(
        tokenizer,
        content,
        split_by_character="...",
        split_by_character_only=True,
        chunk_token_size=50,
    )
    assert len(chunks) == 3
    assert chunks[0]["content"] == "partA"
    assert chunks[1]["content"] == "partB"
    assert chunks[2]["content"] == "partC"
@pytest.mark.offline
def test_delimiter_partial_match_not_split():
    """A lone "|" must not trigger a split; only the full "||" sequence does."""
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "data|single||data|with|pipes||final",
        split_by_character="||",  # only the doubled pipe is a delimiter
        split_by_character_only=True,
        chunk_token_size=50,
    )
    assert [piece["content"] for piece in result] == [
        "data|single",
        "data|with|pipes",
        "final",
    ]
    for piece in result[:2]:
        # single pipes survive inside chunks; doubled pipes are consumed
        assert "|" in piece["content"]
        assert "||" not in piece["content"]
@pytest.mark.offline
def test_no_delimiter_forces_token_based_split():
    """Without the delimiter present, chunking degrades to plain token slicing."""
    tok = make_tokenizer()
    text = "0123456789abcdefghijklmnop"  # 26 chars, contains no "\n\n"
    result = chunking_by_token_size(
        tok,
        text,
        split_by_character="\n\n",
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=0,
    )
    # Pure token windows: [0:10], [10:20], [20:26]
    assert [piece["content"] for piece in result] == [
        "0123456789",
        "abcdefghij",
        "klmnop",
    ]
    # A delimiter that was never there cannot appear in the output either.
    assert all("\n\n" not in piece["content"] for piece in result)
@pytest.mark.offline
def test_delimiter_at_exact_chunk_boundary():
    """Delimiter-based splitting wins even when segments sit near the limit."""
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "12345\n\nabcde",
        split_by_character="\n\n",
        split_by_character_only=True,
        chunk_token_size=10,
    )
    # Split happens at the delimiter, not at the token count.
    assert [piece["content"] for piece in result] == ["12345", "abcde"]
@pytest.mark.offline
def test_different_delimiter_comma():
    """A comma works as a delimiter just like whitespace-based ones."""
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "one,two,three",
        split_by_character=",",
        split_by_character_only=True,
        chunk_token_size=10,
    )
    assert [piece["content"] for piece in result] == ["one", "two", "three"]
@pytest.mark.offline
def test_zero_overlap():
    """overlap=0 produces disjoint, back-to-back chunks."""
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "a" * 20,
        split_by_character=None,
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=0,
    )
    assert [piece["tokens"] for piece in result] == [10, 10]
@pytest.mark.offline
def test_large_overlap():
    """
    Test with overlap close to chunk size using distinctive content.
    Large overlap (9 out of 10) means step size is only 1, creating many overlapping chunks.
    Distinctive characters ensure each chunk has correct positioning.
    """
    tokenizer = make_tokenizer()
    # Use distinctive characters to verify exact positions
    content = "0123456789abcdefghijklmnopqrst"  # 30 chars
    chunks = chunking_by_token_size(
        tokenizer,
        content,
        split_by_character=None,
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=9,
    )
    # Step size = chunk_token_size - overlap = 10 - 9 = 1; window starts come
    # from range(0, len(tokens), step) = range(0, 30, 1), i.e. one chunk per
    # token offset 0..29 for 30 chunks in total. Windows starting within the
    # final 9 tokens are progressively shorter than 10 tokens.
    assert len(chunks) == 30
    # Verify first few chunks have correct content with proper overlap
    assert chunks[0]["content"] == "0123456789"  # [0:10]
    assert (
        chunks[1]["content"] == "123456789a"
    )  # [1:11] - overlaps 9 chars with previous
    assert (
        chunks[2]["content"] == "23456789ab"
    )  # [2:12] - overlaps 9 chars with previous
    assert chunks[3]["content"] == "3456789abc"  # [3:13]
    # Verify last chunk
    assert chunks[-1]["content"] == "t"  # [29:30] - last char only
# ============================================================================
# Chunk Order Index Tests
# ============================================================================
@pytest.mark.offline
def test_chunk_order_index_simple():
    """chunk_order_index counts up from zero in emission order."""
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "a\n\nb\n\nc",
        split_by_character="\n\n",
        split_by_character_only=True,
        chunk_token_size=10,
    )
    assert [piece["chunk_order_index"] for piece in result] == [0, 1, 2]
@pytest.mark.offline
def test_chunk_order_index_with_splitting():
    """Indices stay sequential even when chunks come from recursive splits."""
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "a" * 30,
        split_by_character=None,
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=0,
    )
    assert [piece["chunk_order_index"] for piece in result] == [0, 1, 2]
# ============================================================================
# Integration Tests
# ============================================================================
@pytest.mark.offline
def test_mixed_size_chunks_no_error():
    """Recursive mode copes with a mix of tiny and oversized segments."""
    tok = make_tokenizer()
    text = "small\n\n" + "a" * 50 + "\n\nmedium"
    result = chunking_by_token_size(
        tok,
        text,
        split_by_character="\n\n",
        split_by_character_only=False,
        chunk_token_size=10,
        chunk_overlap_token_size=2,
    )
    # Nothing blew up and something came back.
    assert len(result) > 0
    # The small leading segment stays intact.
    assert result[0]["content"] == "small"
    # The oversized middle segment got re-split into 10-token runs of "a".
    assert any(piece["content"] == "a" * 10 for piece in result)
    # The trailing segment survived somewhere in the output.
    assert any("medium" in piece["content"] for piece in result)
@pytest.mark.offline
def test_whitespace_handling():
    """Leading/trailing whitespace is stripped from each chunk's content."""
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "  alpha  \n\n  beta  ",
        split_by_character="\n\n",
        split_by_character_only=True,
        chunk_token_size=20,
    )
    assert result[0]["content"] == "alpha"
    assert result[1]["content"] == "beta"
@pytest.mark.offline
def test_consecutive_delimiters():
    """Back-to-back delimiters may yield empty chunks but keep real content."""
    tok = make_tokenizer()
    result = chunking_by_token_size(
        tok,
        "alpha\n\n\n\nbeta",
        split_by_character="\n\n",
        split_by_character_only=True,
        chunk_token_size=20,
    )
    contents = [piece["content"] for piece in result]
    # At least the two real segments must be present; extras may be empty.
    assert len(result) >= 2
    assert "alpha" in contents
    assert "beta" in contents
# ============================================================================
# Token vs Character Counting Tests (Multi-Token Characters)
# ============================================================================
@pytest.mark.offline
def test_token_counting_not_character_counting():
    """The limit check must count tokens, never characters.

    With the multi-token tokenizer "aXa" is 3 characters but 4 tokens
    (a=1, X=2, a=1). A character-based check against limit 3 would pass,
    so the expected failure here proves token counting is in effect.
    """
    tok = make_multi_token_tokenizer()
    with pytest.raises(ChunkTokenLimitExceededError) as excinfo:
        chunking_by_token_size(
            tok,
            "aXa",
            split_by_character="\n\n",
            split_by_character_only=True,
            chunk_token_size=3,  # 3 token limit
        )
    assert excinfo.value.chunk_tokens == 4  # tokens, not characters
    assert excinfo.value.chunk_token_limit == 3
@pytest.mark.offline
def test_token_limit_with_punctuation():
    """Punctuation token expansion counts toward the limit.

    "Hi!" is 3 characters but 6 tokens (H=2, i=1, !=3), so a 4-token
    limit must be reported as exceeded.
    """
    tok = make_multi_token_tokenizer()
    with pytest.raises(ChunkTokenLimitExceededError) as excinfo:
        chunking_by_token_size(
            tok,
            "Hi!",
            split_by_character="\n\n",
            split_by_character_only=True,
            chunk_token_size=4,
        )
    assert excinfo.value.chunk_tokens == 6
    assert excinfo.value.chunk_token_limit == 4
@pytest.mark.offline
def test_multi_token_within_limit():
    """Multi-token characters are fine while the total stays under the limit."""
    tok = make_multi_token_tokenizer()
    result = chunking_by_token_size(
        tok,
        "Hi",  # H=2 tokens, i=1 token -> 3 tokens total
        split_by_character="\n\n",
        split_by_character_only=True,
        chunk_token_size=5,
    )
    assert len(result) == 1
    assert result[0]["tokens"] == 3
    assert result[0]["content"] == "Hi"
@pytest.mark.offline
def test_recursive_split_with_multi_token_chars():
    """Recursive splitting cuts at token offsets, not character offsets.

    "AAAAA" is 5 characters but 10 tokens (each A = 2 tokens); with chunk
    size 6 the split must land at token slices [0:6] and [6:10]. A
    character-based implementation would produce 6- and 4-token chunks at
    the wrong places instead.
    """
    tok = make_multi_token_tokenizer()
    result = chunking_by_token_size(
        tok,
        "AAAAA",
        split_by_character="\n\n",
        split_by_character_only=False,
        chunk_token_size=6,
        chunk_overlap_token_size=0,
    )
    assert [piece["tokens"] for piece in result] == [6, 4]
@pytest.mark.offline
def test_overlap_uses_token_count():
    """Overlap stepping happens in token space, not character space.

    "aAaAa" is 5 characters but 7 tokens (a=1, A=2, a=1, A=2, a=1); with
    size=4 and overlap=2 the windows start at token offsets 0, 2, 4, 6.
    """
    tok = make_multi_token_tokenizer()
    result = chunking_by_token_size(
        tok,
        "aAaAa",
        split_by_character="\n\n",
        split_by_character_only=False,
        chunk_token_size=4,
        chunk_overlap_token_size=2,
    )
    # Token windows [0:4], [2:6], [4:7], [6:7]
    assert [piece["tokens"] for piece in result] == [4, 4, 3, 1]
@pytest.mark.offline
def test_mixed_multi_token_content():
    """Token totals are correct for mixed single- and multi-token characters."""
    tok = make_multi_token_tokenizer()
    # hello = 5 tokens, WORLD = 10 tokens (5 chars x 2), ! = 3 tokens
    result = chunking_by_token_size(
        tok,
        "hello\n\nWORLD!",
        split_by_character="\n\n",
        split_by_character_only=True,
        chunk_token_size=20,
    )
    assert [(piece["content"], piece["tokens"]) for piece in result] == [
        ("hello", 5),
        ("WORLD!", 13),  # 10 + 3
    ]
@pytest.mark.offline
def test_exact_token_boundary_multi_token():
    """A chunk landing exactly on the limit (6 tokens for "AAA") stays whole."""
    tok = make_multi_token_tokenizer()
    result = chunking_by_token_size(
        tok,
        "AAA",  # 3 chars, 6 tokens (each A = 2 tokens)
        split_by_character="\n\n",
        split_by_character_only=True,
        chunk_token_size=6,
    )
    assert len(result) == 1
    assert result[0]["tokens"] == 6
    assert result[0]["content"] == "AAA"
@pytest.mark.offline
def test_multi_token_overlap_with_distinctive_content():
    """
    Verify overlap works correctly with multi-token characters using distinctive content.
    With non-uniform tokenization, overlap must be calculated in token space, not character space.
    Distinctive characters ensure we catch any misalignment.
    Content: "abcABCdef"
    - "abc" = 3 tokens (1+1+1)
    - "ABC" = 6 tokens (2+2+2)
    - "def" = 3 tokens (1+1+1)
    - Total = 12 tokens
    """
    tokenizer = make_multi_token_tokenizer()
    # Distinctive content with mixed single and multi-token chars
    content = "abcABCdef"  # 9 chars, 12 tokens
    chunks = chunking_by_token_size(
        tokenizer,
        content,
        split_by_character=None,
        split_by_character_only=False,
        chunk_token_size=6,
        chunk_overlap_token_size=2,
    )
    # Token stream: [a, b, c, A1, A2, B1, B2, C1, C2, d, e, f] (12 tokens).
    # step = chunk_token_size - overlap = 6 - 2 = 4, so the windows are the
    # token slices [0:6], [4:10], and [8:12] -> 6, 6, and 4 tokens.
    # A multi-token character (e.g. A = [A1, A2]) can be cut in half at a
    # window edge, so each chunk's decoded text depends on how the tokenizer
    # decodes partial characters; only the token counts are asserted here.
    assert len(chunks) == 3
    # Just verify token counts are correct - content may vary due to character splitting
    assert chunks[0]["tokens"] == 6
    assert chunks[1]["tokens"] == 6
    assert chunks[2]["tokens"] == 4
@pytest.mark.offline
def test_decode_preserves_content():
    """encode -> decode must round-trip every sample string unchanged."""
    tok = make_multi_token_tokenizer()
    samples = [
        "Hello",
        "WORLD",
        "Test!",
        "Mixed?Case.",
        "ABC123xyz",
    ]
    for text in samples:
        assert tok.decode(tok.encode(text)) == text, f"Failed to decode: {text}"
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_chunking.py",
"license": "MIT License",
"lines": 849,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_workspace_isolation.py | #!/usr/bin/env python
"""
Test script for Workspace Isolation Feature
Comprehensive test suite covering workspace isolation in LightRAG:
1. Pipeline Status Isolation - Data isolation between workspaces
2. Lock Mechanism - Parallel execution for different workspaces, serial for same workspace
3. Backward Compatibility - Legacy code without workspace parameters
4. Multi-Workspace Concurrency - Concurrent operations on different workspaces
5. NamespaceLock Re-entrance Protection - Prevents deadlocks
6. Different Namespace Lock Isolation - Locks isolated by namespace
7. Error Handling - Invalid workspace configurations
8. Update Flags Workspace Isolation - Update flags properly isolated
9. Empty Workspace Standardization - Empty workspace handling
10. JsonKVStorage Workspace Isolation - Integration test for KV storage
11. LightRAG End-to-End Workspace Isolation - Complete E2E test with two instances
Total: 11 test scenarios
"""
import asyncio
import time
import os
import shutil
import numpy as np
import pytest
from pathlib import Path
from typing import List, Tuple, Dict
from lightrag.kg.shared_storage import (
get_final_namespace,
get_namespace_lock,
get_default_workspace,
set_default_workspace,
initialize_share_data,
finalize_share_data,
initialize_pipeline_status,
get_namespace_data,
set_all_update_flags,
clear_all_update_flags,
get_all_update_flags_status,
get_update_flag,
)
# =============================================================================
# Test Configuration
# =============================================================================
# Test configuration is handled via pytest fixtures in conftest.py
# - Use CLI options: --keep-artifacts, --stress-test, --test-workers=N
# - Or environment variables: LIGHTRAG_KEEP_ARTIFACTS, LIGHTRAG_STRESS_TEST, LIGHTRAG_TEST_WORKERS
# Priority: CLI options > Environment variables > Default values
# =============================================================================
# Pytest Fixtures
# =============================================================================
@pytest.fixture(autouse=True)
def setup_shared_data():
    """Provide fresh shared-storage state around every test in this module."""
    initialize_share_data()
    try:
        yield
    finally:
        # Teardown runs even if the test body raised.
        finalize_share_data()
async def _measure_lock_parallelism(
    workload: List[Tuple[str, str, str]], hold_time: float = 0.05
) -> Tuple[int, List[Tuple[str, str]], Dict[str, float]]:
    """Execute a lock-acquisition workload and report observed concurrency.

    Args:
        workload: (name, workspace, namespace) triples, one per worker.
        hold_time: Seconds each worker sleeps while holding its lock.

    Returns:
        Tuple of (peak, events, metrics) where:
        - peak: Maximum number of workers holding a lock simultaneously
        - events: Ordered (name, "start"/"end") records of lock holds
        - metrics: Timing figures (total_duration, max_concurrency, ...)
    """
    active = 0
    peak = 0
    events: List[Tuple[str, str]] = []
    t0 = time.time()
    async def run_one(name: str, workspace: str, namespace: str) -> None:
        nonlocal active, peak
        # Acquire the keyed lock, record the hold window, then release.
        async with get_namespace_lock(namespace, workspace):
            active += 1
            peak = max(peak, active)
            events.append((name, "start"))
            await asyncio.sleep(hold_time)
            events.append((name, "end"))
            active -= 1
    await asyncio.gather(*(run_one(*spec) for spec in workload))
    metrics = {
        "total_duration": time.time() - t0,
        "max_concurrency": peak,
        "avg_hold_time": hold_time,
        "num_workers": len(workload),
    }
    return peak, events, metrics
def _assert_no_timeline_overlap(timeline: List[Tuple[str, str]]) -> None:
"""Ensure that timeline events never overlap for sequential execution.
This function implements a finite state machine that validates:
- No overlapping lock acquisitions (only one task active at a time)
- Proper lock release order (task releases its own lock)
- All locks are properly released
Args:
timeline: List of (name, event) tuples where event is "start" or "end"
Raises:
AssertionError: If timeline shows overlapping execution or improper locking
"""
active_task = None
for name, event in timeline:
if event == "start":
if active_task is not None:
raise AssertionError(
f"Task '{name}' started before '{active_task}' released the lock"
)
active_task = name
else:
if active_task != name:
raise AssertionError(
f"Task '{name}' finished while '{active_task}' was expected to hold the lock"
)
active_task = None
if active_task is not None:
raise AssertionError(f"Task '{active_task}' did not release the lock properly")
# =============================================================================
# Test 1: Pipeline Status Isolation Test
# =============================================================================
@pytest.mark.offline
async def test_pipeline_status_isolation():
    """
    Test that pipeline status is isolated between different workspaces.
    """
    # Purpose: Ensure pipeline_status shared data remains unique per workspace.
    # Scope: initialize_pipeline_status and get_namespace_data interactions.
    print("\n" + "=" * 60)
    print("TEST 1: Pipeline Status Isolation")
    print("=" * 60)
    # Initialize shared storage
    # NOTE(review): the autouse setup_shared_data fixture already called this;
    # the repeat here presumably relies on it being idempotent — confirm.
    initialize_share_data()
    # Initialize pipeline status for two different workspaces
    workspace1 = "test_workspace_1"
    workspace2 = "test_workspace_2"
    await initialize_pipeline_status(workspace1)
    await initialize_pipeline_status(workspace2)
    # Get pipeline status data for both workspaces
    data1 = await get_namespace_data("pipeline_status", workspace=workspace1)
    data2 = await get_namespace_data("pipeline_status", workspace=workspace2)
    # Verify they are independent objects
    assert (
        data1 is not data2
    ), "Pipeline status data objects are the same (should be different)"
    # Modify workspace1's data and verify workspace2 is not affected
    data1["test_key"] = "workspace1_value"
    # Re-fetch to ensure we get the latest data (guards against stale handles)
    data1_check = await get_namespace_data("pipeline_status", workspace=workspace1)
    data2_check = await get_namespace_data("pipeline_status", workspace=workspace2)
    assert "test_key" in data1_check, "test_key not found in workspace1"
    assert (
        data1_check["test_key"] == "workspace1_value"
    ), f"workspace1 test_key value incorrect: {data1_check.get('test_key')}"
    # The write must NOT be visible through the other workspace's view
    assert (
        "test_key" not in data2_check
    ), f"test_key leaked to workspace2: {data2_check.get('test_key')}"
    print("✅ PASSED: Pipeline Status Isolation")
    print(" Different workspaces have isolated pipeline status")
# =============================================================================
# Test 2: Lock Mechanism Test (No Deadlocks)
# =============================================================================
@pytest.mark.offline
async def test_lock_mechanism(stress_test_mode, parallel_workers):
    """
    Test that the new keyed lock mechanism works correctly without deadlocks.
    Tests both parallel execution for different workspaces and serialization
    for the same workspace.
    """
    # Purpose: Validate that keyed locks isolate workspaces while serializing
    # requests within the same workspace. Scope: get_namespace_lock scheduling
    # semantics for both cross-workspace and single-workspace cases.
    print("\n" + "=" * 60)
    print("TEST 2: Lock Mechanism (No Deadlocks)")
    print("=" * 60)
    # Test 2.1: Different workspaces should run in parallel
    print("\nTest 2.1: Different workspaces locks should be parallel")
    # Support stress testing with configurable number of workers
    num_workers = parallel_workers if stress_test_mode else 3
    # One worker per workspace ("ws_a", "ws_b", ...) all on the SAME namespace,
    # so any observed concurrency must come from workspace-level isolation.
    parallel_workload = [
        (f"ws_{chr(97+i)}", f"ws_{chr(97+i)}", "test_namespace")
        for i in range(num_workers)
    ]
    max_parallel, timeline_parallel, metrics = await _measure_lock_parallelism(
        parallel_workload
    )
    # >= 2 rather than == num_workers: scheduling jitter may keep some workers
    # from overlapping, but at least two must overlap if the locks are distinct.
    assert max_parallel >= 2, (
        "Locks for distinct workspaces should overlap; "
        f"observed max concurrency: {max_parallel}, timeline={timeline_parallel}"
    )
    print("✅ PASSED: Lock Mechanism - Parallel (Different Workspaces)")
    print(
        f" Locks overlapped for different workspaces (max concurrency={max_parallel})"
    )
    print(
        f" Performance: {metrics['total_duration']:.3f}s for {metrics['num_workers']} workers"
    )
    # Test 2.2: Same workspace should serialize
    print("\nTest 2.2: Same workspace locks should serialize")
    serial_workload = [
        ("serial_run_1", "ws_same", "test_namespace"),
        ("serial_run_2", "ws_same", "test_namespace"),
    ]
    (
        max_parallel_serial,
        timeline_serial,
        metrics_serial,
    ) = await _measure_lock_parallelism(serial_workload)
    assert max_parallel_serial == 1, (
        "Same workspace locks should not overlap; "
        f"observed {max_parallel_serial} with timeline {timeline_serial}"
    )
    # Beyond peak concurrency, the event ordering itself must be strictly serial
    _assert_no_timeline_overlap(timeline_serial)
    print("✅ PASSED: Lock Mechanism - Serial (Same Workspace)")
    print(" Same workspace operations executed sequentially with no overlap")
    print(
        f" Performance: {metrics_serial['total_duration']:.3f}s for {metrics_serial['num_workers']} tasks"
    )
# =============================================================================
# Test 3: Backward Compatibility Test
# =============================================================================
@pytest.mark.offline
async def test_backward_compatibility():
    """
    Test that legacy code without workspace parameter still works correctly.
    """
    # Purpose: Validate backward-compatible defaults when workspace arguments
    # are omitted. Scope: get_final_namespace, set/get_default_workspace and
    # initialize_pipeline_status fallback behavior.
    print("\n" + "=" * 60)
    print("TEST 3: Backward Compatibility")
    print("=" * 60)
    # Test 3.1: get_final_namespace with None should use default workspace
    print("\nTest 3.1: get_final_namespace with workspace=None")
    set_default_workspace("my_default_workspace")
    final_ns = get_final_namespace("pipeline_status")
    # Final namespace format is "<workspace>:<namespace>"
    expected = "my_default_workspace:pipeline_status"
    assert final_ns == expected, f"Expected {expected}, got {final_ns}"
    print("✅ PASSED: Backward Compatibility - get_final_namespace")
    print(f" Correctly uses default workspace: {final_ns}")
    # Test 3.2: get_default_workspace
    print("\nTest 3.2: get/set default workspace")
    set_default_workspace("test_default")
    retrieved = get_default_workspace()
    assert retrieved == "test_default", f"Expected 'test_default', got {retrieved}"
    print("✅ PASSED: Backward Compatibility - default workspace")
    print(f" Default workspace set/get correctly: {retrieved}")
    # Test 3.3: Empty workspace handling
    print("\nTest 3.3: Empty workspace handling")
    set_default_workspace("")
    final_ns_empty = get_final_namespace("pipeline_status", workspace=None)
    expected_empty = "pipeline_status"  # Should be just the namespace without ':'
    assert (
        final_ns_empty == expected_empty
    ), f"Expected '{expected_empty}', got '{final_ns_empty}'"
    print("✅ PASSED: Backward Compatibility - empty workspace")
    print(f" Empty workspace handled correctly: '{final_ns_empty}'")
    # Test 3.4: None workspace with default set
    print("\nTest 3.4: initialize_pipeline_status with workspace=None")
    set_default_workspace("compat_test_workspace")
    initialize_share_data()
    await initialize_pipeline_status(workspace=None)  # Should use default
    # Try to get data using the default workspace explicitly
    data = await get_namespace_data(
        "pipeline_status", workspace="compat_test_workspace"
    )
    assert (
        data is not None
    ), "Failed to initialize pipeline status with default workspace"
    print("✅ PASSED: Backward Compatibility - pipeline init with None")
    print(" Pipeline status initialized with default workspace")
# =============================================================================
# Test 4: Multi-Workspace Concurrency Test
# =============================================================================
@pytest.mark.offline
async def test_multi_workspace_concurrency():
    """
    Test that multiple workspaces can operate concurrently without interference.
    Simulates concurrent operations on different workspaces.
    """
    # Purpose: Simulate concurrent workloads touching pipeline_status across
    # workspaces. Scope: initialize_pipeline_status, get_namespace_lock, and
    # shared dictionary mutation while ensuring isolation.
    print("\n" + "=" * 60)
    print("TEST 4: Multi-Workspace Concurrency")
    print("=" * 60)
    initialize_share_data()
    async def workspace_operations(workspace_id):
        """Simulate operations on a specific workspace"""
        print(f"\n [{workspace_id}] Starting operations")
        # Initialize pipeline status
        await initialize_pipeline_status(workspace_id)
        # Get lock and perform operations
        lock = get_namespace_lock("test_operations", workspace_id)
        async with lock:
            # Get workspace data
            data = await get_namespace_data("pipeline_status", workspace=workspace_id)
            # Modify data (workspace-tagged key lets isolation be verified below)
            data[f"{workspace_id}_key"] = f"{workspace_id}_value"
            data["timestamp"] = time.time()
            # Simulate some work
            await asyncio.sleep(0.1)
        print(f" [{workspace_id}] Completed operations")
        return workspace_id
    # Run multiple workspaces concurrently
    workspaces = ["concurrent_ws_1", "concurrent_ws_2", "concurrent_ws_3"]
    start = time.time()
    results_list = await asyncio.gather(
        *[workspace_operations(ws) for ws in workspaces]
    )
    elapsed = time.time() - start
    print(f"\n All workspaces completed in {elapsed:.2f}s")
    # Verify all workspaces completed
    assert set(results_list) == set(workspaces), "Not all workspaces completed"
    print("✅ PASSED: Multi-Workspace Concurrency - Execution")
    print(
        f" All {len(workspaces)} workspaces completed successfully in {elapsed:.2f}s"
    )
    # Verify data isolation - each workspace should have its own data
    print("\n Verifying data isolation...")
    for ws in workspaces:
        data = await get_namespace_data("pipeline_status", workspace=ws)
        expected_key = f"{ws}_key"
        expected_value = f"{ws}_value"
        assert (
            expected_key in data
        ), f"Data not properly isolated for {ws}: missing {expected_key}"
        assert (
            data[expected_key] == expected_value
        ), f"Data not properly isolated for {ws}: {expected_key}={data[expected_key]} (expected {expected_value})"
        print(f" [{ws}] Data correctly isolated: {expected_key}={data[expected_key]}")
    print("✅ PASSED: Multi-Workspace Concurrency - Data Isolation")
    print(" All workspaces have properly isolated data")
# =============================================================================
# Test 5: NamespaceLock Re-entrance Protection
# =============================================================================
@pytest.mark.offline
async def test_namespace_lock_reentrance():
    """
    Test that NamespaceLock prevents re-entrance in the same coroutine
    and allows concurrent use in different coroutines.
    """
    # Purpose: Ensure NamespaceLock enforces single entry per coroutine while
    # allowing concurrent reuse through ContextVar isolation. Scope: lock
    # re-entrance checks and concurrent gather semantics.
    print("\n" + "=" * 60)
    print("TEST 5: NamespaceLock Re-entrance Protection")
    print("=" * 60)
    # Test 5.1: acquiring the same lock twice in one coroutine must fail
    print("\nTest 5.1: Same coroutine re-entrance should raise RuntimeError")
    guard = get_namespace_lock("test_reentrance", "test_ws")
    blocked = False
    try:
        async with guard:
            print(" Acquired lock first time")
            # A second acquisition from the same coroutine must be rejected
            async with guard:
                print(" ERROR: Should not reach here - re-entrance succeeded!")
    except RuntimeError as err:
        # Only the expected re-entrance error is acceptable; anything else bubbles up
        if "already acquired" not in str(err).lower():
            raise
        print(f" ✓ Re-entrance correctly blocked: {err}")
        blocked = True
    assert blocked, "Re-entrance protection not working"
    print("✅ PASSED: NamespaceLock Re-entrance Protection")
    print(" Re-entrance correctly raises RuntimeError")
    # Test 5.2: the same lock object may be used by different coroutines
    print("\nTest 5.2: Same NamespaceLock instance in different coroutines")
    shared_guard = get_namespace_lock("test_concurrent", "test_ws")
    events = []

    async def worker(idx):
        """Use the same NamespaceLock instance"""
        async with shared_guard:
            events.append(f"coroutine_{idx}_start")
            await asyncio.sleep(0.1)
            events.append(f"coroutine_{idx}_end")

    # Works because each coroutine carries its own ContextVar state
    await asyncio.gather(worker(1), worker(2))
    # Both coroutines must have recorded a start and an end event
    expected_entries = 4  # 2 starts + 2 ends
    assert (
        len(events) == expected_entries
    ), f"Expected {expected_entries} entries, got {len(events)}"
    print("✅ PASSED: NamespaceLock Concurrent Reuse")
    print(
        f" Same NamespaceLock instance used successfully in {expected_entries//2} concurrent coroutines"
    )
# =============================================================================
# Test 6: Different Namespace Lock Isolation
# =============================================================================
@pytest.mark.offline
async def test_different_namespace_lock_isolation():
    """
    Test that locks for different namespaces (same workspace) are independent.
    """
    # Purpose: Confirm that namespace isolation is enforced even when workspace
    # is the same. Scope: get_namespace_lock behavior when namespaces differ.
    print("\n" + "=" * 60)
    print("TEST 6: Different Namespace Lock Isolation")
    print("=" * 60)
    print("\nTesting locks with same workspace but different namespaces")
    # Three workers share one workspace but target distinct namespaces
    tasks = [
        ("ns_a", "same_ws", "namespace_a"),
        ("ns_b", "same_ws", "namespace_b"),
        ("ns_c", "same_ws", "namespace_c"),
    ]
    peak, timeline, metrics = await _measure_lock_parallelism(tasks)
    # At least two workers overlapping in time proves the locks are independent
    assert peak >= 2, (
        "Different namespaces within the same workspace should run concurrently; "
        f"observed max concurrency {peak} with timeline {timeline}"
    )
    print("✅ PASSED: Different Namespace Lock Isolation")
    print(
        f" Different namespace locks ran in parallel (max concurrency={peak})"
    )
    print(
        f" Performance: {metrics['total_duration']:.3f}s for {metrics['num_workers']} namespaces"
    )
# =============================================================================
# Test 7: Error Handling
# =============================================================================
@pytest.mark.offline
async def test_error_handling():
    """
    Test error handling for invalid workspace configurations.
    """
    # Purpose: Validate guardrails for workspace normalization and namespace
    # derivation. Scope: set_default_workspace conversions and get_final_namespace
    # failure paths when configuration is invalid.
    print("\n" + "=" * 60)
    print("TEST 7: Error Handling")
    print("=" * 60)
    # Test 7.0: with no default workspace configured, None must be rejected
    print("\nTest 7.0: Missing workspace raises ValueError")
    with pytest.raises(ValueError):
        get_final_namespace("test_namespace", workspace=None)
    # Test 7.1: None passed to set_default_workspace is normalized to ""
    print("\nTest 7.1: set_default_workspace(None) converts to empty string")
    set_default_workspace(None)
    resolved = get_default_workspace()
    # None must be stored as "" rather than kept verbatim
    assert resolved == "", f"Expected empty string, got: '{resolved}'"
    print("✅ PASSED: Error Handling - None to Empty String")
    print(
        f" set_default_workspace(None) correctly converts to empty string: '{resolved}'"
    )
    # Test 7.2: an empty workspace yields the bare namespace (no ':' separator)
    print("\nTest 7.2: Empty string workspace creates valid namespace")
    namespace = get_final_namespace("test_namespace", workspace="")
    assert namespace == "test_namespace", f"Unexpected namespace: '{namespace}'"
    print("✅ PASSED: Error Handling - Empty Workspace Namespace")
    print(f" Empty workspace creates valid namespace: '{namespace}'")
    # Restore default workspace for other tests
    set_default_workspace("")
# =============================================================================
# Test 8: Update Flags Workspace Isolation
# =============================================================================
@pytest.mark.offline
async def test_update_flags_workspace_isolation():
    """
    Test that update flags are properly isolated between workspaces.
    """
    # Purpose: Confirm update flag setters/readers respect workspace scoping.
    # Scope: set_all_update_flags, clear_all_update_flags, get_all_update_flags_status,
    # and get_update_flag interactions across namespaces.
    print("\n" + "=" * 60)
    print("TEST 8: Update Flags Workspace Isolation")
    print("=" * 60)
    initialize_share_data()
    workspace1 = "update_flags_ws1"
    workspace2 = "update_flags_ws2"
    test_namespace = "test_update_flags_ns"
    # Initialize namespaces for both workspaces
    await initialize_pipeline_status(workspace1)
    await initialize_pipeline_status(workspace2)
    # Test 8.1: set_all_update_flags isolation
    print("\nTest 8.1: set_all_update_flags workspace isolation")
    # Create flags for both workspaces (simulating workers)
    # NOTE(review): flag objects appear to be live views into shared state —
    # later set/clear calls are observed through .value without re-fetching.
    flag1_obj = await get_update_flag(test_namespace, workspace=workspace1)
    flag2_obj = await get_update_flag(test_namespace, workspace=workspace2)
    # Initial state should be False
    assert flag1_obj.value is False, "Flag1 initial value should be False"
    assert flag2_obj.value is False, "Flag2 initial value should be False"
    # Set all flags for workspace1
    await set_all_update_flags(test_namespace, workspace=workspace1)
    # Check that only workspace1's flags are set; workspace2 must be untouched
    assert (
        flag1_obj.value is True
    ), f"Flag1 should be True after set_all_update_flags, got {flag1_obj.value}"
    assert (
        flag2_obj.value is False
    ), f"Flag2 should still be False, got {flag2_obj.value}"
    print("✅ PASSED: Update Flags - set_all_update_flags Isolation")
    print(
        f" set_all_update_flags isolated: ws1={flag1_obj.value}, ws2={flag2_obj.value}"
    )
    # Test 8.2: clear_all_update_flags isolation
    print("\nTest 8.2: clear_all_update_flags workspace isolation")
    # Set flags for both workspaces
    await set_all_update_flags(test_namespace, workspace=workspace1)
    await set_all_update_flags(test_namespace, workspace=workspace2)
    # Verify both are set
    assert flag1_obj.value is True, "Flag1 should be True"
    assert flag2_obj.value is True, "Flag2 should be True"
    # Clear only workspace1
    await clear_all_update_flags(test_namespace, workspace=workspace1)
    # Check that only workspace1's flags are cleared; workspace2 stays set
    assert (
        flag1_obj.value is False
    ), f"Flag1 should be False after clear, got {flag1_obj.value}"
    assert flag2_obj.value is True, f"Flag2 should still be True, got {flag2_obj.value}"
    print("✅ PASSED: Update Flags - clear_all_update_flags Isolation")
    print(
        f" clear_all_update_flags isolated: ws1={flag1_obj.value}, ws2={flag2_obj.value}"
    )
    # Test 8.3: get_all_update_flags_status workspace filtering
    print("\nTest 8.3: get_all_update_flags_status workspace filtering")
    # Initialize more namespaces for testing
    await get_update_flag("ns_a", workspace=workspace1)
    await get_update_flag("ns_b", workspace=workspace1)
    await get_update_flag("ns_c", workspace=workspace2)
    # Set flags for workspace1
    await set_all_update_flags("ns_a", workspace=workspace1)
    await set_all_update_flags("ns_b", workspace=workspace1)
    # Set flags for workspace2
    await set_all_update_flags("ns_c", workspace=workspace2)
    # Get status for workspace1 only
    status1 = await get_all_update_flags_status(workspace=workspace1)
    # Check that workspace1's namespaces are present
    # The keys should include workspace1's namespaces but not workspace2's
    # (keys are substring-matched on the workspace name here)
    workspace1_keys = [k for k in status1.keys() if workspace1 in k]
    workspace2_keys = [k for k in status1.keys() if workspace2 in k]
    assert (
        len(workspace1_keys) > 0
    ), f"workspace1 keys should be present, got {len(workspace1_keys)}"
    assert (
        len(workspace2_keys) == 0
    ), f"workspace2 keys should not be present, got {len(workspace2_keys)}"
    # Every flag recorded under workspace1 was set above, so all must be True
    for key, values in status1.items():
        assert all(values), f"All flags in {key} should be True, got {values}"
    # Workspace2 query should only surface workspace2 namespaces
    # (expected key format is "<workspace>:<namespace>")
    status2 = await get_all_update_flags_status(workspace=workspace2)
    expected_ws2_keys = {
        f"{workspace2}:{test_namespace}",
        f"{workspace2}:ns_c",
    }
    assert (
        set(status2.keys()) == expected_ws2_keys
    ), f"Unexpected namespaces for workspace2: {status2.keys()}"
    for key, values in status2.items():
        assert all(values), f"All flags in {key} should be True, got {values}"
    print("✅ PASSED: Update Flags - get_all_update_flags_status Filtering")
    print(
        f" Status correctly filtered: ws1 keys={len(workspace1_keys)}, ws2 keys={len(workspace2_keys)}"
    )
# =============================================================================
# Test 9: Empty Workspace Standardization
# =============================================================================
@pytest.mark.offline
async def test_empty_workspace_standardization():
    """
    Test that empty workspace is properly standardized to "" instead of "_".
    """
    # Purpose: Verify namespace formatting when workspace is an empty string.
    # Scope: get_final_namespace output and initialize_pipeline_status behavior
    # between empty and non-empty workspaces.
    print("\n" + "=" * 60)
    print("TEST 9: Empty Workspace Standardization")
    print("=" * 60)
    # Test 9.1: with an empty default workspace, the namespace has no ':' prefix
    print("\nTest 9.1: Empty string workspace namespace format")
    set_default_workspace("")
    namespace = get_final_namespace("test_namespace", workspace=None)
    assert (
        namespace == "test_namespace"
    ), f"Unexpected namespace format: '{namespace}' (expected 'test_namespace')"
    print("✅ PASSED: Empty Workspace Standardization - Format")
    print(f" Empty workspace creates correct namespace: '{namespace}'")
    # Test 9.2: empty and non-empty workspaces must not share a data store
    print("\nTest 9.2: Empty vs non-empty workspace behavior")
    initialize_share_data()
    # Initialize both flavors, then compare the objects they hand back
    await initialize_pipeline_status(workspace="")
    blank_data = await get_namespace_data("pipeline_status", workspace="")
    await initialize_pipeline_status(workspace="test_ws")
    named_data = await get_namespace_data("pipeline_status", workspace="test_ws")
    # Identity check: the two workspaces must be backed by distinct objects
    assert (
        blank_data is not named_data
    ), "Empty and non-empty workspaces share data (should be independent)"
    print("✅ PASSED: Empty Workspace Standardization - Behavior")
    print(" Empty and non-empty workspaces have independent data")
# =============================================================================
# Test 10: JsonKVStorage Workspace Isolation (Integration Test)
# =============================================================================
@pytest.mark.offline
async def test_json_kv_storage_workspace_isolation(keep_test_artifacts):
    """
    Integration test: Verify JsonKVStorage properly isolates data between workspaces.
    Creates two JsonKVStorage instances with different workspaces, writes different data,
    and verifies they don't mix.
    """
    # Purpose: Ensure JsonKVStorage respects workspace-specific directories and data.
    # Scope: storage initialization, upsert/get_by_id operations, and filesystem layout
    # inside the temporary working directory.
    print("\n" + "=" * 60)
    print("TEST 10: JsonKVStorage Workspace Isolation (Integration)")
    print("=" * 60)
    # Create temporary test directory under project temp/; wipe any stale copy
    # from a previous run so the test starts from a clean slate.
    test_dir = str(
        Path(__file__).parent.parent / "temp/test_json_kv_storage_workspace_isolation"
    )
    if os.path.exists(test_dir):
        shutil.rmtree(test_dir)
    os.makedirs(test_dir, exist_ok=True)
    print(f"\n Using test directory: {test_dir}")
    try:
        initialize_share_data()

        # Mock embedding function — random vectors are fine here because the
        # assertions below only compare stored KV content, never embeddings.
        async def mock_embedding_func(texts: list[str]) -> np.ndarray:
            return np.random.rand(len(texts), 384)  # 384-dimensional vectors

        # Global config
        global_config = {
            "working_dir": test_dir,
            "embedding_batch_num": 10,
        }
        # Test 10.1: Create two JsonKVStorage instances with different workspaces
        print(
            "\nTest 10.1: Create two JsonKVStorage instances with different workspaces"
        )
        # Deferred import keeps module import light for tests that skip this case
        from lightrag.kg.json_kv_impl import JsonKVStorage

        # Same namespace ("entities") on purpose — only the workspace differs,
        # so any data bleed-through would surface immediately.
        storage1 = JsonKVStorage(
            namespace="entities",
            workspace="workspace1",
            global_config=global_config,
            embedding_func=mock_embedding_func,
        )
        storage2 = JsonKVStorage(
            namespace="entities",
            workspace="workspace2",
            global_config=global_config,
            embedding_func=mock_embedding_func,
        )
        # Initialize both storages
        await storage1.initialize()
        await storage2.initialize()
        print(" Storage1 created: workspace=workspace1, namespace=entities")
        print(" Storage2 created: workspace=workspace2, namespace=entities")
        # Test 10.2: Write different data to each storage
        print("\nTest 10.2: Write different data to each storage")
        # Write to storage1 (upsert expects dict[str, dict])
        await storage1.upsert(
            {
                "entity1": {
                    "content": "Data from workspace1 - AI Research",
                    "type": "entity",
                },
                "entity2": {
                    "content": "Data from workspace1 - Machine Learning",
                    "type": "entity",
                },
            }
        )
        print(" Written to storage1: entity1, entity2")
        # Persist data to disk
        await storage1.index_done_callback()
        print(" Persisted storage1 data to disk")
        # Write to storage2 — identical keys, different content, to prove the
        # two workspaces do not clobber each other.
        await storage2.upsert(
            {
                "entity1": {
                    "content": "Data from workspace2 - Deep Learning",
                    "type": "entity",
                },
                "entity2": {
                    "content": "Data from workspace2 - Neural Networks",
                    "type": "entity",
                },
            }
        )
        print(" Written to storage2: entity1, entity2")
        # Persist data to disk
        await storage2.index_done_callback()
        print(" Persisted storage2 data to disk")
        # Test 10.3: Read data from each storage and verify isolation
        print("\nTest 10.3: Read data and verify isolation")
        # Read from storage1
        result1_entity1 = await storage1.get_by_id("entity1")
        result1_entity2 = await storage1.get_by_id("entity2")
        # Read from storage2
        result2_entity1 = await storage2.get_by_id("entity1")
        result2_entity2 = await storage2.get_by_id("entity2")
        print(f" Storage1 entity1: {result1_entity1}")
        print(f" Storage1 entity2: {result1_entity2}")
        print(f" Storage2 entity1: {result2_entity1}")
        print(f" Storage2 entity2: {result2_entity2}")
        # Verify isolation (get_by_id returns dict)
        assert result1_entity1 is not None, "Storage1 entity1 should not be None"
        assert result1_entity2 is not None, "Storage1 entity2 should not be None"
        assert result2_entity1 is not None, "Storage2 entity1 should not be None"
        assert result2_entity2 is not None, "Storage2 entity2 should not be None"
        assert (
            result1_entity1.get("content") == "Data from workspace1 - AI Research"
        ), "Storage1 entity1 content mismatch"
        assert (
            result1_entity2.get("content") == "Data from workspace1 - Machine Learning"
        ), "Storage1 entity2 content mismatch"
        assert (
            result2_entity1.get("content") == "Data from workspace2 - Deep Learning"
        ), "Storage2 entity1 content mismatch"
        assert (
            result2_entity2.get("content") == "Data from workspace2 - Neural Networks"
        ), "Storage2 entity2 content mismatch"
        assert result1_entity1.get("content") != result2_entity1.get(
            "content"
        ), "Storage1 and Storage2 entity1 should have different content"
        assert result1_entity2.get("content") != result2_entity2.get(
            "content"
        ), "Storage1 and Storage2 entity2 should have different content"
        print("✅ PASSED: JsonKVStorage - Data Isolation")
        print(
            " Two storage instances correctly isolated: ws1 and ws2 have different data"
        )
        # Test 10.4: Verify file structure — each workspace gets its own
        # subdirectory under working_dir.
        print("\nTest 10.4: Verify file structure")
        ws1_dir = Path(test_dir) / "workspace1"
        ws2_dir = Path(test_dir) / "workspace2"
        ws1_exists = ws1_dir.exists()
        ws2_exists = ws2_dir.exists()
        print(f" workspace1 directory exists: {ws1_exists}")
        print(f" workspace2 directory exists: {ws2_exists}")
        assert ws1_exists, "workspace1 directory should exist"
        assert ws2_exists, "workspace2 directory should exist"
        print("✅ PASSED: JsonKVStorage - File Structure")
        print(f" Workspace directories correctly created: {ws1_dir} and {ws2_dir}")
    finally:
        # Cleanup test directory (unless keep_test_artifacts is set)
        if os.path.exists(test_dir) and not keep_test_artifacts:
            shutil.rmtree(test_dir)
            print(f"\n Cleaned up test directory: {test_dir}")
        elif keep_test_artifacts:
            print(f"\n Kept test directory for inspection: {test_dir}")
# =============================================================================
# Test 11: LightRAG End-to-End Integration Test
# =============================================================================
@pytest.mark.offline
async def test_lightrag_end_to_end_workspace_isolation(keep_test_artifacts):
    """
    End-to-end test: Create two LightRAG instances with different workspaces,
    insert different data, and verify file separation.
    Uses mock LLM and embedding functions to avoid external API calls.
    """
    # Purpose: Validate that full LightRAG flows keep artifacts scoped per workspace.
    # Scope: LightRAG.initialize_storages + ainsert side effects plus filesystem
    # verification for generated storage files.
    print("\n" + "=" * 60)
    print("TEST 11: LightRAG End-to-End Workspace Isolation")
    print("=" * 60)
    # Create temporary test directory under project temp/; remove stale runs first
    test_dir = str(
        Path(__file__).parent.parent
        / "temp/test_lightrag_end_to_end_workspace_isolation"
    )
    if os.path.exists(test_dir):
        shutil.rmtree(test_dir)
    os.makedirs(test_dir, exist_ok=True)
    print(f"\n Using test directory: {test_dir}")
    try:
        # Factory function to create different mock LLM functions for each workspace
        def create_mock_llm_func(workspace_name):
            """Create a mock LLM function that returns different content based on workspace"""

            async def mock_llm_func(
                prompt, system_prompt=None, history_messages=None, **kwargs
            ) -> str:
                # FIX: default changed from the mutable `[]` to `None` — a mutable
                # default list is created once and shared across all calls (classic
                # Python pitfall). The mock never reads history_messages, so the
                # change is behavior-identical for every caller.
                # Add coroutine switching to simulate async I/O and allow concurrent execution
                await asyncio.sleep(0)
                # Return different responses based on workspace
                # Format: entity<|#|>entity_name<|#|>entity_type<|#|>entity_description
                # Format: relation<|#|>source_entity<|#|>target_entity<|#|>keywords<|#|>description
                if workspace_name == "project_a":
                    return """entity<|#|>Artificial Intelligence<|#|>concept<|#|>AI is a field of computer science focused on creating intelligent machines.
entity<|#|>Machine Learning<|#|>concept<|#|>Machine Learning is a subset of AI that enables systems to learn from data.
relation<|#|>Machine Learning<|#|>Artificial Intelligence<|#|>subset, related field<|#|>Machine Learning is a key component and subset of Artificial Intelligence.
<|COMPLETE|>"""
                else:  # project_b
                    return """entity<|#|>Deep Learning<|#|>concept<|#|>Deep Learning is a subset of machine learning using neural networks with multiple layers.
entity<|#|>Neural Networks<|#|>concept<|#|>Neural Networks are computing systems inspired by biological neural networks.
relation<|#|>Deep Learning<|#|>Neural Networks<|#|>uses, composed of<|#|>Deep Learning uses multiple layers of Neural Networks to learn representations.
<|COMPLETE|>"""

            return mock_llm_func

        # Mock embedding function — random vectors; assertions below only look
        # at stored document text, never at embedding values.
        async def mock_embedding_func(texts: list[str]) -> np.ndarray:
            # Add coroutine switching to simulate async I/O and allow concurrent execution
            await asyncio.sleep(0)
            return np.random.rand(len(texts), 384)  # 384-dimensional vectors

        # Test 11.1: Create two LightRAG instances with different workspaces
        print("\nTest 11.1: Create two LightRAG instances with different workspaces")
        from lightrag import LightRAG
        from lightrag.utils import EmbeddingFunc, Tokenizer

        # Create different mock LLM functions for each workspace
        mock_llm_func_a = create_mock_llm_func("project_a")
        mock_llm_func_b = create_mock_llm_func("project_b")

        class _SimpleTokenizerImpl:
            """Minimal tokenizer stub: one token per character (ordinal <-> char)."""

            def encode(self, content: str) -> list[int]:
                return [ord(ch) for ch in content]

            def decode(self, tokens: list[int]) -> str:
                return "".join(chr(t) for t in tokens)

        tokenizer = Tokenizer("mock-tokenizer", _SimpleTokenizerImpl())
        # Two instances share working_dir but differ only in workspace — any
        # cross-contamination would show up in the shared directory tree.
        rag1 = LightRAG(
            working_dir=test_dir,
            workspace="project_a",
            llm_model_func=mock_llm_func_a,
            embedding_func=EmbeddingFunc(
                embedding_dim=384,
                max_token_size=8192,
                func=mock_embedding_func,
            ),
            tokenizer=tokenizer,
        )
        rag2 = LightRAG(
            working_dir=test_dir,
            workspace="project_b",
            llm_model_func=mock_llm_func_b,
            embedding_func=EmbeddingFunc(
                embedding_dim=384,
                max_token_size=8192,
                func=mock_embedding_func,
            ),
            tokenizer=tokenizer,
        )
        # Initialize storages
        await rag1.initialize_storages()
        await rag2.initialize_storages()
        print(" RAG1 created: workspace=project_a")
        print(" RAG2 created: workspace=project_b")
        # Test 11.2: Insert different data to each RAG instance (CONCURRENTLY)
        print("\nTest 11.2: Insert different data to each RAG instance (concurrently)")
        text_for_project_a = "This document is about Artificial Intelligence and Machine Learning. AI is transforming the world."
        text_for_project_b = "This document is about Deep Learning and Neural Networks. Deep learning uses multiple layers."
        # Insert to both projects concurrently to test workspace isolation under concurrent load
        print(" Starting concurrent insert operations...")
        start_time = time.time()
        await asyncio.gather(
            rag1.ainsert(text_for_project_a), rag2.ainsert(text_for_project_b)
        )
        elapsed_time = time.time() - start_time
        print(f" Inserted to project_a: {len(text_for_project_a)} chars (concurrent)")
        print(f" Inserted to project_b: {len(text_for_project_b)} chars (concurrent)")
        print(f" Total concurrent execution time: {elapsed_time:.3f}s")
        # Test 11.3: Verify file structure — each workspace must own a
        # subdirectory under the shared working_dir.
        print("\nTest 11.3: Verify workspace directory structure")
        project_a_dir = Path(test_dir) / "project_a"
        project_b_dir = Path(test_dir) / "project_b"
        project_a_exists = project_a_dir.exists()
        project_b_exists = project_b_dir.exists()
        print(f" project_a directory: {project_a_dir}")
        print(f" project_a exists: {project_a_exists}")
        print(f" project_b directory: {project_b_dir}")
        print(f" project_b exists: {project_b_exists}")
        assert project_a_exists, "project_a directory should exist"
        assert project_b_exists, "project_b directory should exist"
        # List files in each directory (informational only)
        print("\n Files in project_a/:")
        for file in sorted(project_a_dir.glob("*")):
            if file.is_file():
                size = file.stat().st_size
                print(f" - {file.name} ({size} bytes)")
        print("\n Files in project_b/:")
        for file in sorted(project_b_dir.glob("*")):
            if file.is_file():
                size = file.stat().st_size
                print(f" - {file.name} ({size} bytes)")
        print("✅ PASSED: LightRAG E2E - File Structure")
        print(" Workspace directories correctly created and separated")
        # Test 11.4: Verify data isolation by checking file contents
        print("\nTest 11.4: Verify data isolation (check file contents)")
        # Check if full_docs storage files exist and contain different content
        docs_a_file = project_a_dir / "kv_store_full_docs.json"
        docs_b_file = project_b_dir / "kv_store_full_docs.json"
        if docs_a_file.exists() and docs_b_file.exists():
            import json

            with open(docs_a_file, "r") as f:
                docs_a_content = json.load(f)
            with open(docs_b_file, "r") as f:
                docs_b_content = json.load(f)
            print(f" project_a doc count: {len(docs_a_content)}")
            print(f" project_b doc count: {len(docs_b_content)}")
            # Verify they contain different data
            assert (
                docs_a_content != docs_b_content
            ), "Document storage not properly isolated"
            # Verify each workspace contains its own text content
            docs_a_str = json.dumps(docs_a_content)
            docs_b_str = json.dumps(docs_b_content)
            # Check project_a contains its text and NOT project_b's text
            assert (
                "Artificial Intelligence" in docs_a_str
            ), "project_a should contain 'Artificial Intelligence'"
            assert (
                "Machine Learning" in docs_a_str
            ), "project_a should contain 'Machine Learning'"
            assert (
                "Deep Learning" not in docs_a_str
            ), "project_a should NOT contain 'Deep Learning' from project_b"
            assert (
                "Neural Networks" not in docs_a_str
            ), "project_a should NOT contain 'Neural Networks' from project_b"
            # Check project_b contains its text and NOT project_a's text
            assert (
                "Deep Learning" in docs_b_str
            ), "project_b should contain 'Deep Learning'"
            assert (
                "Neural Networks" in docs_b_str
            ), "project_b should contain 'Neural Networks'"
            assert (
                "Artificial Intelligence" not in docs_b_str
            ), "project_b should NOT contain 'Artificial Intelligence' from project_a"
            # Note: "Machine Learning" might appear in project_b's text, so we skip that check
            print("✅ PASSED: LightRAG E2E - Data Isolation")
            print(" Document storage correctly isolated between workspaces")
            print(" project_a contains only its own data")
            print(" project_b contains only its own data")
        else:
            print(" Document storage files not found (may not be created yet)")
            print("✅ PASSED: LightRAG E2E - Data Isolation")
            print(" Skipped file content check (files not created)")
        print("\n ✓ Test complete - workspace isolation verified at E2E level")
    finally:
        # Cleanup test directory (unless keep_test_artifacts is set)
        if os.path.exists(test_dir) and not keep_test_artifacts:
            shutil.rmtree(test_dir)
            print(f"\n Cleaned up test directory: {test_dir}")
        elif keep_test_artifacts:
            print(f"\n Kept test directory for inspection: {test_dir}")
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_workspace_isolation.py",
"license": "MIT License",
"lines": 940,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
HKUDS/LightRAG:tests/test_write_json_optimization.py | """
Test suite for write_json optimization
This test verifies:
1. Fast path works for clean data (no sanitization)
2. Slow path applies sanitization for dirty data
3. Sanitization is done during encoding (memory-efficient)
4. Reloading updates shared memory with cleaned data
"""
import os
import json
import tempfile
import pytest
from lightrag.utils import write_json, load_json, SanitizingJSONEncoder
@pytest.mark.offline
class TestWriteJsonOptimization:
"""Test write_json optimization with two-stage approach"""
def test_fast_path_clean_data(self):
    """Test that clean data takes the fast path without sanitization"""
    payload = {
        "name": "John Doe",
        "age": 30,
        "items": ["apple", "banana", "cherry"],
        "nested": {"key": "value", "number": 42},
    }
    # Reserve a temp file path; the file itself is written by write_json
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".json") as handle:
        target = handle.name
    try:
        # Clean data must report that no sanitization pass was needed
        needs_reload = write_json(payload, target)
        assert not needs_reload, "Clean data should not require sanitization"
        # Round-trip must be lossless on the fast path
        reloaded = load_json(target)
        assert reloaded == payload, "Loaded data should match original"
    finally:
        os.unlink(target)
def test_slow_path_dirty_data(self):
    """Test that dirty data triggers sanitization"""
    # A lone surrogate (range U+D800..U+DFFF) cannot be encoded as UTF-8
    payload = {"text": "Hello\ud800World", "number": 123}
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".json") as handle:
        target = handle.name
    try:
        # The writer must report that sanitization was applied
        needs_reload = write_json(payload, target)
        assert needs_reload, "Dirty data should trigger sanitization"
        reloaded = load_json(target)
        assert reloaded is not None, "Data should be written"
        # Clean fields pass through untouched
        assert reloaded["number"] == 123, "Clean fields should remain unchanged"
        # The offending surrogate must not survive the round trip
        assert "\ud800" not in reloaded["text"], "Surrogate character should be removed"
    finally:
        os.unlink(target)
def test_sanitizing_encoder_removes_surrogates(self):
    """Test that SanitizingJSONEncoder removes surrogate characters"""
    payload = {
        "text": "Hello\ud800\udc00World",  # Contains surrogate pair
        "clean": "Clean text",
        "nested": {"dirty_key\ud801": "value", "clean_key": "clean\ud802value"},
    }
    # Encode through the sanitizing encoder
    encoded = json.dumps(payload, cls=SanitizingJSONEncoder, ensure_ascii=False)
    # Every surrogate must have been stripped during encoding, whether it
    # appeared in a value, a key, or mid-string
    for surrogate, label in (
        ("\ud800", "U+D800"),
        ("\udc00", "U+DC00"),
        ("\ud801", "U+D801"),
        ("\ud802", "U+D802"),
    ):
        assert surrogate not in encoded, f"Surrogate {label} should be removed"
    # Clean content survives untouched
    assert "Clean text" in encoded, "Clean text should remain"
    assert "clean_key" in encoded, "Clean keys should remain"
def test_nested_structure_sanitization(self):
    """Test sanitization of deeply nested structures"""
    payload = {
        "level1": {
            "level2": {
                "level3": {"dirty": "text\ud800here", "clean": "normal text"},
                "list": ["item1", "item\ud801dirty", "item3"],
            }
        }
    }
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".json") as handle:
        target = handle.name
    try:
        needs_reload = write_json(payload, target)
        assert needs_reload, "Nested dirty data should trigger sanitization"
        reloaded = load_json(target)
        # The nesting hierarchy must survive the sanitizing pass intact
        assert "level1" in reloaded
        assert "level2" in reloaded["level1"]
        assert "level3" in reloaded["level1"]["level2"]
        inner = reloaded["level1"]["level2"]
        # Surrogates are stripped from nested dict values...
        assert "\ud800" not in inner["level3"]["dirty"], "Nested surrogate should be removed"
        # ...and from list elements as well
        assert "\ud801" not in inner["list"][1], "List item surrogates should be removed"
    finally:
        os.unlink(target)
def test_unicode_non_characters_removed(self):
"""Test that Unicode non-characters (U+FFFE, U+FFFF) don't cause encoding errors
Note: U+FFFE and U+FFFF are valid UTF-8 characters (though discouraged),
so they don't trigger sanitization. They only get removed when explicitly
using the SanitizingJSONEncoder.
"""
data_with_nonchars = {"text1": "Hello\ufffeWorld", "text2": "Test\uffffString"}
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".json") as f:
temp_file = f.name
try:
# These characters are valid UTF-8, so they take the fast path
needs_reload = write_json(data_with_nonchars, temp_file)
assert not needs_reload, "U+FFFE/U+FFFF are valid UTF-8 characters"
loaded_data = load_json(temp_file)
# They're written as-is in the fast path
assert loaded_data == data_with_nonchars
finally:
os.unlink(temp_file)
def test_mixed_clean_dirty_data(self):
"""Test data with both clean and dirty fields"""
mixed_data = {
"clean_field": "This is perfectly fine",
"dirty_field": "This has\ud800issues",
"number": 42,
"boolean": True,
"null_value": None,
"clean_list": [1, 2, 3],
"dirty_list": ["clean", "dirty\ud801item"],
}
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".json") as f:
temp_file = f.name
try:
needs_reload = write_json(mixed_data, temp_file)
assert (
needs_reload
), "Mixed data with dirty fields should trigger sanitization"
loaded_data = load_json(temp_file)
# Clean fields should remain unchanged
assert loaded_data["clean_field"] == "This is perfectly fine"
assert loaded_data["number"] == 42
assert loaded_data["boolean"]
assert loaded_data["null_value"] is None
assert loaded_data["clean_list"] == [1, 2, 3]
# Dirty fields should be sanitized
assert "\ud800" not in loaded_data["dirty_field"]
assert "\ud801" not in loaded_data["dirty_list"][1]
finally:
os.unlink(temp_file)
def test_empty_and_none_strings(self):
"""Test handling of empty and None values"""
data = {
"empty": "",
"none": None,
"zero": 0,
"false": False,
"empty_list": [],
"empty_dict": {},
}
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".json") as f:
temp_file = f.name
try:
needs_reload = write_json(data, temp_file)
assert (
not needs_reload
), "Clean empty values should not trigger sanitization"
loaded_data = load_json(temp_file)
assert loaded_data == data, "Empty/None values should be preserved"
finally:
os.unlink(temp_file)
def test_specific_surrogate_udc9a(self):
"""Test specific surrogate character \\udc9a mentioned in the issue"""
# Test the exact surrogate character from the error message:
# UnicodeEncodeError: 'utf-8' codec can't encode character '\\udc9a'
data_with_udc9a = {
"text": "Some text with surrogate\udc9acharacter",
"position": 201, # As mentioned in the error
"clean_field": "Normal text",
}
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".json") as f:
temp_file = f.name
try:
# Write data - should trigger sanitization
needs_reload = write_json(data_with_udc9a, temp_file)
assert needs_reload, "Data with \\udc9a should trigger sanitization"
# Verify surrogate was removed
loaded_data = load_json(temp_file)
assert loaded_data is not None
assert "\udc9a" not in loaded_data["text"], "\\udc9a should be removed"
assert (
loaded_data["clean_field"] == "Normal text"
), "Clean fields should remain"
finally:
os.unlink(temp_file)
def test_migration_with_surrogate_sanitization(self):
"""Test that migration process handles surrogate characters correctly
This test simulates the scenario where legacy cache contains surrogate
characters and ensures they are cleaned during migration.
"""
# Simulate legacy cache data with surrogate characters
legacy_data_with_surrogates = {
"cache_entry_1": {
"return": "Result with\ud800surrogate",
"cache_type": "extract",
"original_prompt": "Some\udc9aprompt",
},
"cache_entry_2": {
"return": "Clean result",
"cache_type": "query",
"original_prompt": "Clean prompt",
},
}
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".json") as f:
temp_file = f.name
try:
# First write the dirty data directly (simulating legacy cache file)
# Use custom encoder to force write even with surrogates
with open(temp_file, "w", encoding="utf-8") as f:
json.dump(
legacy_data_with_surrogates,
f,
cls=SanitizingJSONEncoder,
ensure_ascii=False,
)
# Load and verify surrogates were cleaned during initial write
loaded_data = load_json(temp_file)
assert loaded_data is not None
# The data should be sanitized
assert (
"\ud800" not in loaded_data["cache_entry_1"]["return"]
), "Surrogate in return should be removed"
assert (
"\udc9a" not in loaded_data["cache_entry_1"]["original_prompt"]
), "Surrogate in prompt should be removed"
# Clean data should remain unchanged
assert (
loaded_data["cache_entry_2"]["return"] == "Clean result"
), "Clean data should remain"
finally:
os.unlink(temp_file)
def test_empty_values_after_sanitization(self):
"""Test that data with empty values after sanitization is properly handled
Critical edge case: When sanitization results in data with empty string values,
we must use 'if cleaned_data is not None' instead of 'if cleaned_data' to ensure
proper reload, since truthy check on dict depends on content, not just existence.
"""
# Create data where ALL values are only surrogate characters
all_dirty_data = {
"key1": "\ud800\udc00\ud801",
"key2": "\ud802\ud803",
}
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".json") as f:
temp_file = f.name
try:
# Write dirty data - should trigger sanitization
needs_reload = write_json(all_dirty_data, temp_file)
assert needs_reload, "All-dirty data should trigger sanitization"
# Load the sanitized data
cleaned_data = load_json(temp_file)
# Critical assertions for the edge case
assert cleaned_data is not None, "Cleaned data should not be None"
# Sanitization removes surrogates but preserves keys with empty values
assert cleaned_data == {
"key1": "",
"key2": "",
}, "Surrogates should be removed, keys preserved"
# This dict is truthy because it has keys (even with empty values)
assert cleaned_data, "Dict with keys is truthy"
# Test the actual edge case: empty dict
empty_data = {}
needs_reload2 = write_json(empty_data, temp_file)
assert not needs_reload2, "Empty dict is clean"
reloaded_empty = load_json(temp_file)
assert reloaded_empty is not None, "Empty dict should not be None"
assert reloaded_empty == {}, "Empty dict should remain empty"
assert (
not reloaded_empty
), "Empty dict evaluates to False (the critical check)"
finally:
os.unlink(temp_file)
if __name__ == "__main__":
    # Run the suite sequentially without pytest, printing one status
    # line per test; any failing assertion aborts the run.
    suite = TestWriteJsonOptimization()
    for method_name in (
        "test_fast_path_clean_data",
        "test_slow_path_dirty_data",
        "test_sanitizing_encoder_removes_surrogates",
        "test_nested_structure_sanitization",
        "test_unicode_non_characters_removed",
        "test_mixed_clean_dirty_data",
        "test_empty_and_none_strings",
        "test_specific_surrogate_udc9a",
        "test_migration_with_surrogate_sanitization",
        "test_empty_values_after_sanitization",
    ):
        print(f"Running {method_name}...")
        getattr(suite, method_name)()
        print("✓ Passed")
    print("\n✅ All tests passed!")
| {
"repo_id": "HKUDS/LightRAG",
"file_path": "tests/test_write_json_optimization.py",
"license": "MIT License",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.