repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/mod.rs | crates/wgparry/src/shapes/mod.rs | //! Geometric shape definitions and their GPU shader implementations.
//!
//! This module provides GPU-accelerated geometric primitives used for collision detection
//! and physics simulation.
mod ball;
mod capsule;
mod convex_polyhedron;
mod cuboid;
mod polyline;
mod segment;
mod triangle;
#[cfg(feature = "dim3")]
mod cone;
#[cfg(feature = "dim3")]
mod cylinder;
#[cfg(feature = "dim3")]
mod tetrahedron;
mod shape;
mod trimesh;
mod vtx_idx;
pub use ball::*;
pub use capsule::*;
pub use convex_polyhedron::*;
pub use cuboid::*;
pub use polyline::*;
pub use segment::*;
pub use shape::*;
pub use triangle::*;
pub use trimesh::*;
pub use vtx_idx::*;
#[cfg(feature = "dim3")]
pub use cone::*;
#[cfg(feature = "dim3")]
pub use cylinder::*;
#[cfg(feature = "dim3")]
pub use tetrahedron::*;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/trimesh.rs | crates/wgparry/src/shapes/trimesh.rs | //! Triangle mesh shape.
use crate::bounding_volumes::WgAabb;
use crate::queries::{WgPolygonalFeature, WgProjection, WgRay};
use crate::shapes::{WgConvexPolyhedron, WgTriangle, WgVtxIdx};
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
derive(
WgSim3,
WgSim2,
WgRay,
WgProjection,
WgPolygonalFeature,
WgAabb,
WgTriangle,
WgConvexPolyhedron,
WgVtxIdx,
),
src = "trimesh.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// GPU shader for the trimesh shape.
///
/// The trimesh is defined by a BVH, vertex buffer and a triangle index buffer.
/// The BVH is stored as part of the vertex and index buffer, before the actual vertices and indices.
/// It stores two vectors and one index per AABB.
pub struct WgTriMesh;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/segment.rs | crates/wgparry/src/shapes/segment.rs | //! Line segment shape.
//!
//! A segment is a straight line connecting two points A and B. It's a fundamental
//! building block used by other shapes like capsules and is useful for ray-casting
//! and distance queries.
use crate::queries::{WgProjection, WgRay};
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
derive(WgSim3, WgSim2, WgRay, WgProjection),
src = "segment.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// GPU shader for the line segment shape.
///
/// This shader provides WGSL implementations for:
/// - Ray-casting against segments.
/// - Point projection onto segments (finding the closest point on the segment).
///
/// A segment is defined by two endpoints A and B. Point projection operations
/// find the closest point on the segment to a given query point, which may be
/// one of the endpoints or an interior point.
pub struct WgSegment;
// TODO:
// #[cfg(test)]
// mod test {
// use super::WgSegment;
// use parry::shape::Segment;
// use wgcore::tensor::GpuVector;
//
// #[futures_test::test]
// #[serial_test::serial]
// async fn gpu_segment() {
// crate::projection::test_utils::test_point_projection::<WgSegment, _>(
// "Segment",
// Segment::new(1.0, 0.5),
// |device, shapes, usages| GpuVector::encase(device, shapes, usages).into_inner(),
// )
// .await;
// }
// }
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/capsule.rs | crates/wgparry/src/shapes/capsule.rs | //! Capsule shape - swept sphere along a line segment.
//!
//! A capsule is defined by a line segment (from point A to point B) and a radius.
//! It can be visualized as a sphere swept along the segment, or as a cylinder with
//! hemispherical caps at both ends.
use crate::queries::{WgPolygonalFeature, WgProjection, WgRay};
use crate::shapes::WgSegment;
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
derive(WgSim3, WgSim2, WgRay, WgProjection, WgSegment, WgPolygonalFeature),
src = "capsule.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// GPU shader for the capsule shape.
///
/// This shader provides WGSL implementations for:
/// - Ray-casting against capsules.
/// - Point projection onto capsule surfaces.
///
/// A capsule is parameterized by:
/// - A line segment (two endpoints A and B).
/// - A radius.
///
/// The capsule surface is the set of all points at distance `radius` from the segment.
pub struct WgCapsule;
#[cfg(test)]
mod test {
use super::WgCapsule;
use parry::shape::Capsule;
use wgcore::tensor::GpuVector;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_capsule() {
crate::queries::test_utils::test_point_projection::<WgCapsule, _>(
"Capsule",
Capsule::new_y(1.0, 0.5),
|device, shapes, usages| GpuVector::encase(device, shapes, usages).into_inner(),
)
.await;
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/triangle.rs | crates/wgparry/src/shapes/triangle.rs | //! Triangle shape.
//!
//! A triangle is defined by three vertices (A, B, C). Triangles are the fundamental
//! building blocks of triangle meshes and are essential for representing complex
//! geometric surfaces in collision detection.
use crate::bounding_volumes::WgAabb;
use crate::queries::{WgPolygonalFeature, WgProjection, WgRay};
use crate::shapes::WgSegment;
use crate::substitute_aliases;
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
derive(
WgSim3,
WgSim2,
WgRay,
WgProjection,
WgSegment,
WgPolygonalFeature,
WgAabb
),
src = "triangle.wgsl",
src_fn = "substitute_aliases"
)]
/// GPU shader for the triangle shape.
///
/// This shader provides WGSL implementations for geometric operations on triangles,
/// including ray-casting and point projection.
///
/// A triangle is defined by three vertices (A, B, C) in local space.
pub struct WgTriangle;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/ball.rs | crates/wgparry/src/shapes/ball.rs | //! Ball shape - sphere (3D) or circle (2D).
//!
//! The ball is the simplest geometric primitive, defined by a single radius parameter.
//! In 3D, this represents a sphere; in 2D, a circle.
use crate::queries::{WgProjection, WgRay};
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
derive(WgSim3, WgSim2, WgRay, WgProjection),
src = "ball.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// GPU shader for the ball (sphere/circle) shape.
///
/// This shader provides WGSL implementations for:
/// - Ray-casting against balls
/// - Point projection onto ball surfaces
///
/// The ball is defined by a single radius parameter and is centered at the origin
/// in local space. Use transformations to position and scale balls in world space.
pub struct WgBall;
#[cfg(test)]
mod test {
use super::WgBall;
#[cfg(feature = "dim2")]
use parry2d::shape::Ball;
#[cfg(feature = "dim3")]
use parry3d::shape::Ball;
use wgcore::tensor::GpuVector;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_ball() {
crate::queries::test_utils::test_point_projection::<WgBall, _>(
"Ball",
Ball::new(0.5),
|device, shapes, usages| GpuVector::init(device, shapes, usages).into_inner(),
)
.await;
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/lib.rs | crates/wgebra/src/lib.rs | #![doc = include_str!("../README.md")]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::result_large_err)]
#![warn(missing_docs)]
pub use geometry::*;
pub use linalg::*;
pub mod geometry;
pub mod linalg;
pub mod utils;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/lu.rs | crates/wgebra/src/geometry/lu.rs | use nalgebra::{SMatrix, SVector};
use wgcore::re_exports::encase::ShaderType;
use wgcore::{test_shader_compilation, Shader};
fn substitute2(src: &str) -> String {
src.replace("NROWS", "2u")
.replace("NCOLS", "2u")
.replace("PERM", "vec2<u32>")
.replace("MAT", "mat2x2<f32>")
.replace("IMPORT_PATH", "wgebra::lu2")
}
fn substitute3(src: &str) -> String {
src.replace("NROWS", "3u")
.replace("NCOLS", "3u")
.replace("PERM", "vec3<u32>")
.replace("MAT", "mat3x3<f32>")
.replace("IMPORT_PATH", "wgebra::lu3")
}
fn substitute4(src: &str) -> String {
src.replace("NROWS", "4u")
.replace("NCOLS", "4u")
.replace("PERM", "vec4<u32>")
.replace("MAT", "mat4x4<f32>")
.replace("IMPORT_PATH", "wgebra::lu4")
}
macro_rules! gpu_output_types(
($GpuPermutation: ident, $GpuLU: ident, $R: literal, $C: literal, $Perm: literal) => {
/// Structure describing a permutation sequence applied by the LU decomposition.
#[derive(ShaderType, Copy, Clone, PartialEq)]
#[repr(C)]
pub struct $GpuPermutation {
/// First permutation indices (row `ia[i]` is permuted with row`ib[i]`].
pub ia: SVector<u32, $Perm>,
/// Second permutation indices (row `ia[i]` is permuted with row`ib[i]`].
pub ib: SVector<u32, $Perm>,
/// The number of permutations in `self`. Only the first `len` elements of
/// [`Self::ia`] and [`Self::ib`] need to be taken into account.
pub len: u32,
}
/// GPU representation of a matrix LU decomposition (with partial pivoting).
///
/// See the [nalgebra](https://nalgebra.rs/docs/user_guide/decompositions_and_lapack#lu-with-partial-or-full-pivoting) documentation
/// for details on the LU decomposition.
#[derive(ShaderType, Copy, Clone, PartialEq)]
#[repr(C)]
pub struct $GpuLU {
/// The LU decomposition where both lower and upper-triangular matrices are stored
/// in the same matrix. In particular the diagonal full of `1` of the lower-triangular
/// matrix isn’t stored explicitly.
pub lu: SMatrix<f32, $R, $C>,
/// The row permutations applied during the decomposition.
pub p: $GpuPermutation,
}
}
);
gpu_output_types!(GpuPermutations2, GpuLU2, 2, 2, 2);
gpu_output_types!(GpuPermutations3, GpuLU3, 3, 3, 3);
gpu_output_types!(GpuPermutations4, GpuLU4, 4, 4, 4);
// TODO: rectangular matrices
#[derive(Shader)]
#[shader(src = "lu.wgsl", src_fn = "substitute2")]
/// Shader for computing the LU decomposition of a 2x2 matrix.
pub struct WgLU2;
#[derive(Shader)]
#[shader(src = "lu.wgsl", src_fn = "substitute3")]
/// Shader for computing the LU decomposition of a 3x3 matrix.
pub struct WgLU3;
#[derive(Shader)]
#[shader(src = "lu.wgsl", src_fn = "substitute4")]
/// Shader for computing the LU decomposition of a 4x4 matrix.
pub struct WgLU4;
test_shader_compilation!(WgLU2);
test_shader_compilation!(WgLU3);
test_shader_compilation!(WgLU4);
#[cfg(test)]
mod test {
use super::{GpuLU2, GpuLU3, GpuLU4};
use approx::assert_relative_eq;
use naga_oil::compose::Composer;
use nalgebra::{DVector, Matrix2, Matrix4, Matrix4x3};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgcore::Shader;
use wgpu::BufferUsages;
use {
naga_oil::compose::NagaModuleDescriptor,
wgpu::{ComputePipeline, Device},
};
pub fn test_pipeline<S: Shader>(
device: &Device,
substitute: fn(&str) -> String,
) -> ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> in: array<MAT>;
@group(0) @binding(1)
var<storage, read_write> out: array<LU>;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
out[i] = lu(in[i]);
}
"#;
let src = substitute(&format!("{}\n{}", S::src(), test_kernel));
let module = Composer::default()
.make_naga_module(NagaModuleDescriptor {
source: &src,
file_path: "",
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
macro_rules! gen_test {
($name: ident, $kernel: ident, $mat: ident, $out: ident, $substitute: ident, $dim: expr) => {
#[futures_test::test]
#[serial_test::serial]
async fn $name() {
let gpu = GpuInstance::new().await.unwrap();
let lu = test_pipeline::<super::$kernel>(gpu.device(), super::$substitute);
let mut encoder = gpu.device().create_command_encoder(&Default::default());
type Mat = $mat<f32>;
type GpuOut = $out;
const LEN: usize = 345;
let mut matrices: DVector<Mat> = DVector::new_random(LEN);
for i in 0..matrices.len() {
let sdp = matrices[i].fixed_rows::<$dim>(0).transpose()
* matrices[i].fixed_rows::<$dim>(0);
matrices[i].fixed_rows_mut::<$dim>(0).copy_from(&sdp);
}
let inputs = GpuVector::init(gpu.device(), &matrices, BufferUsages::STORAGE);
let result: GpuVector<GpuOut> = GpuVector::uninit_encased(
gpu.device(),
matrices.len() as u32,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let staging: GpuVector<GpuOut> = GpuVector::uninit_encased(
gpu.device(),
matrices.len() as u32,
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
);
// Dispatch the test.
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &lu)
.bind0([inputs.buffer(), result.buffer()])
.dispatch(matrices.len() as u32);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
// Submit.
staging.copy_from_encased(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
// Check the result is correct.
let gpu_result = staging.read_encased(gpu.device()).await.unwrap();
for (m, lu) in matrices.iter().zip(gpu_result.iter()) {
let lu_cpu = m.fixed_rows::<$dim>(0).lu();
assert_relative_eq!(lu_cpu.lu_internal(), &lu.lu, epsilon = 1.0e-3);
// TODO: check the permutation vectors
}
}
};
}
gen_test!(gpu_lu2, WgLU2, Matrix2, GpuLU2, substitute2, 2);
// NOTE: for the 3x3 test we need Matrix4x3 to account for the WGSL mat4x3 padding/alignment.
gen_test!(gpu_lu3, WgLU3, Matrix4x3, GpuLU3, substitute3, 3);
gen_test!(gpu_lu4, WgLU4, Matrix4, GpuLU4, substitute4, 4);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/quat.rs | crates/wgebra/src/geometry/quat.rs | use wgcore::Shader;
// NOTE: interesting perf. comparison between quaternions and matrices:
// https://tech.metail.com/performance-quaternions-gpu/
#[derive(Shader)]
#[shader(src = "quat.wgsl")]
/// Shader exposing a quaternion type and operations for representing 3D rotations.
pub struct WgQuat;
wgcore::test_shader_compilation!(WgQuat);
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/inv.rs | crates/wgebra/src/geometry/inv.rs | use wgcore::Shader;
#[derive(Shader)]
#[shader(src = "inv.wgsl")]
/// Shader exposing small matrix inverses.
pub struct WgInv;
wgcore::test_shader_compilation!(WgInv);
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/svd3.rs | crates/wgebra/src/geometry/svd3.rs | use crate::WgQuat;
use nalgebra::{Matrix4x3, Vector4};
use wgcore::Shader;
#[cfg(test)]
use {
naga_oil::compose::NagaModuleDescriptor,
wgpu::{ComputePipeline, Device},
};
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
/// A 3D SVD as represented on the gpu, with padding (every fourth rows
/// can be ignored).
// TODO: switch to encase?
pub struct GpuSvd3 {
/// First orthogonal matrix of the SVD.
u: Matrix4x3<f32>,
/// Singular values.
s: Vector4<f32>,
/// Second orthogonal matrix of the SVD.
vt: Matrix4x3<f32>,
}
#[derive(Shader)]
#[shader(derive(WgQuat), src = "svd3.wgsl")]
/// Shader for computing the Singular Value Decomposition of 3x3 matrices.
pub struct WgSvd3;
impl WgSvd3 {
#[cfg(test)]
#[doc(hidden)]
pub fn tests(device: &Device) -> ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> in: array<mat3x3<f32>>;
@group(0) @binding(1)
var<storage, read_write> out: array<Svd>;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
out[i] = svd(in[i]);
}
"#;
let src = format!("{}\n{}", Self::src(), test_kernel);
let module = WgQuat::composer()
.unwrap()
.make_naga_module(NagaModuleDescriptor {
source: &src,
file_path: Self::FILE_PATH,
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
}
#[cfg(test)]
mod test {
use super::GpuSvd3;
use approx::assert_relative_eq;
use nalgebra::{DVector, Matrix3, Matrix4x3};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_svd3() {
let gpu = GpuInstance::new().await.unwrap();
let svd = super::WgSvd3::tests(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: usize = 345;
let matrices: DVector<Matrix4x3<f32>> = DVector::new_random(LEN);
let inputs = GpuVector::init(gpu.device(), &matrices, BufferUsages::STORAGE);
let result: GpuVector<GpuSvd3> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let staging: GpuVector<GpuSvd3> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
);
// Dispatch the test.
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &svd)
.bind0([inputs.buffer(), result.buffer()])
.dispatch(matrices.len() as u32);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
// Submit.
staging.copy_from(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
// Check the result is correct.
let gpu_result = staging.read(gpu.device()).await.unwrap();
for (m, svd) in matrices.iter().zip(gpu_result.iter()) {
let m = m.fixed_rows::<3>(0).into_owned();
let reconstructed = svd.u.fixed_rows::<3>(0).into_owned()
* Matrix3::from_diagonal(&svd.s.fixed_rows::<3>(0))
* svd.vt.fixed_rows::<3>(0).into_owned();
assert_relative_eq!(m, reconstructed, epsilon = 1.0e-4);
}
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/qr3.rs | crates/wgebra/src/geometry/qr3.rs | use nalgebra::Matrix3;
use wgcore::{test_shader_compilation, Shader};
#[cfg(test)]
use {
naga_oil::compose::NagaModuleDescriptor,
wgpu::{ComputePipeline, Device},
};
#[derive(Copy, Clone, Debug, encase::ShaderType)]
#[repr(C)]
/// GPU representation of a 3x3 matrix QR decomposition.
///
/// See the [nalgebra](https://nalgebra.rs/docs/user_guide/decompositions_and_lapack#qr)
/// documentation for details on the QR decomposition.
pub struct GpuQR3 {
/// The QR decomposition’s 3x3 unitary matrix.
pub q: Matrix3<f32>,
/// The QR decomposition’s 3x3 upper-triangular matrix.
pub r: Matrix3<f32>,
}
#[derive(Shader)]
#[shader(src = "qr3.wgsl")]
/// Shader for computing the Singular Value Decomposition of 3x3 matrices.
pub struct WgQR3;
test_shader_compilation!(WgQR3);
impl WgQR3 {
#[doc(hidden)]
#[cfg(test)]
pub fn tests(device: &Device) -> ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> in: array<mat3x3<f32>>;
@group(0) @binding(1)
var<storage, read_write> out: array<QR>;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
out[i] = qr(in[i]);
}
"#;
let src = format!("{}\n{}", Self::src(), test_kernel);
let module = WgQR3::composer()
.unwrap()
.make_naga_module(NagaModuleDescriptor {
source: &src,
file_path: Self::FILE_PATH,
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
}
#[cfg(test)]
mod test {
use super::GpuQR3;
use approx::{assert_relative_eq, relative_eq};
use nalgebra::{DVector, Matrix3};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_qr3() {
let gpu = GpuInstance::new().await.unwrap();
let svd = super::WgQR3::tests(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: usize = 345;
let matrices: DVector<Matrix3<f32>> = DVector::new_random(LEN);
let inputs = GpuVector::encase(gpu.device(), &matrices, BufferUsages::STORAGE);
let result: GpuVector<GpuQR3> = GpuVector::uninit_encased(
gpu.device(),
matrices.len() as u32,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let staging: GpuVector<GpuQR3> = GpuVector::uninit_encased(
gpu.device(),
matrices.len() as u32,
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
);
// Dispatch the test.
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &svd)
.bind0([inputs.buffer(), result.buffer()])
.dispatch(matrices.len() as u32);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging.copy_from_encased(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
// Check the result is correct.
let gpu_result = staging.read_encased(gpu.device()).await.unwrap();
let mut allowed_fails = 0;
for (m, qr) in matrices.iter().zip(gpu_result.iter()) {
let qr_na = m.qr();
// NOTE: we allow about 1% of the decompositions to fail, to account for occasionally
// bad random matrices that will fail the test due to an unsuitable epsilon.
// Ideally this percentage should be kept as low as possible, but likely not
// removable entirely.
if allowed_fails == matrices.len() * 2 / 100 {
assert_relative_eq!(qr_na.q(), qr.q, epsilon = 1.0e-4);
assert_relative_eq!(qr_na.r(), qr.r, epsilon = 1.0e-4);
} else if !relative_eq!(qr_na.q(), qr.q, epsilon = 1.0e-4)
|| !relative_eq!(qr_na.r(), qr.r, epsilon = 1.0e-4)
{
allowed_fails += 1;
}
}
println!("Num fails: {}/{}", allowed_fails, matrices.len());
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/qr4.rs | crates/wgebra/src/geometry/qr4.rs | use nalgebra::Matrix4;
use wgcore::{test_shader_compilation, Shader};
#[cfg(test)]
use {
naga_oil::compose::NagaModuleDescriptor,
wgpu::{ComputePipeline, Device},
};
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
/// GPU representation of a 4x4 matrix QR decomposition.
///
/// See the [nalgebra](https://nalgebra.rs/docs/user_guide/decompositions_and_lapack#qr) documentation
/// for details on the QR decomposition.
pub struct GpuQR4 {
/// The QR decomposition’s 4x4 unitary matrix.
pub q: Matrix4<f32>,
/// The QR decomposition’s 4x4 upper-triangular matrix.
pub r: Matrix4<f32>,
}
#[derive(Shader)]
#[shader(src = "qr4.wgsl")]
/// Shader for computing the Singular Value Decomposition of 4x4 matrices.
pub struct WgQR4;
test_shader_compilation!(WgQR4);
impl WgQR4 {
#[doc(hidden)]
#[cfg(test)]
pub fn tests(device: &Device) -> ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> in: array<mat4x4<f32>>;
@group(0) @binding(1)
var<storage, read_write> out: array<QR>;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
out[i] = qr(in[i]);
}
"#;
let src = format!("{}\n{}", Self::src(), test_kernel);
let module = WgQR4::composer()
.unwrap()
.make_naga_module(NagaModuleDescriptor {
source: &src,
file_path: Self::FILE_PATH,
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
}
#[cfg(test)]
mod test {
use super::GpuQR4;
use approx::{assert_relative_eq, relative_eq};
use nalgebra::{DVector, Matrix4};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_qr4() {
let gpu = GpuInstance::new().await.unwrap();
let svd = super::WgQR4::tests(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: usize = 345;
let matrices: DVector<Matrix4<f32>> = DVector::new_random(LEN);
let inputs = GpuVector::init(gpu.device(), &matrices, BufferUsages::STORAGE);
let result: GpuVector<GpuQR4> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let staging: GpuVector<GpuQR4> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
);
// Dispatch the test.
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &svd)
.bind0([inputs.buffer(), result.buffer()])
.dispatch(matrices.len() as u32);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging.copy_from(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
// Check the result is correct.
let gpu_result = staging.read(gpu.device()).await.unwrap();
let mut allowed_fails = 0;
for (m, qr) in matrices.iter().zip(gpu_result.iter()) {
let qr_na = m.qr();
// NOTE: we allow about 1% of the decompositions to fail, to account for occasionally
// bad random matrices that will fail the test due to an unsuitable epsilon.
// Ideally this percentage should be kept as low as possible, but likely not
// removable entirely.
if allowed_fails == matrices.len() * 2 / 100 {
assert_relative_eq!(qr_na.q(), qr.q, epsilon = 1.0e-4);
assert_relative_eq!(qr_na.r(), qr.r, epsilon = 1.0e-4);
} else if !relative_eq!(qr_na.q(), qr.q, epsilon = 1.0e-4)
|| !relative_eq!(qr_na.r(), qr.r, epsilon = 1.0e-4)
{
allowed_fails += 1;
}
}
println!("Num fails: {}/{}", allowed_fails, matrices.len());
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/mod.rs | crates/wgebra/src/geometry/mod.rs | //! Geometric transformations.
pub use cholesky::*;
pub use eig2::*;
pub use eig3::*;
pub use eig4::*;
pub use inv::*;
pub use lu::*;
pub use qr2::*;
pub use qr3::*;
pub use qr4::*;
pub use quat::*;
pub use rot2::*;
pub use sim2::*;
pub use sim3::*;
pub use svd2::*;
pub use svd3::*;
mod cholesky;
mod eig2;
mod eig3;
mod eig4;
mod inv;
mod lu;
mod qr2;
mod qr3;
mod qr4;
mod quat;
mod rot2;
mod sim2;
mod sim3;
mod svd2;
mod svd3;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/rot2.rs | crates/wgebra/src/geometry/rot2.rs | use crate::utils::WgTrig;
use wgcore::Shader;
#[derive(Shader)]
#[shader(derive(WgTrig), src = "rot2.wgsl")]
/// Shader exposing a 2D rotation type and operations.
pub struct WgRot2;
wgcore::test_shader_compilation!(WgRot2);
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/eig3.rs | crates/wgebra/src/geometry/eig3.rs | use crate::utils::WgMinMax;
use crate::{WgRot2, WgSymmetricEigen2};
use nalgebra::{Matrix3, Vector3};
use wgcore::{test_shader_compilation, Shader};
#[cfg(test)]
use {
naga_oil::compose::NagaModuleDescriptor,
wgpu::{ComputePipeline, Device},
};
#[derive(Copy, Clone, Debug, encase::ShaderType)]
#[repr(C)]
/// GPU representation of a symmetric 3x3 matrix eigendecomposition.
///
/// See the [nalgebra](https://nalgebra.rs/docs/user_guide/decompositions_and_lapack/#eigendecomposition-of-a-hermitian-matrix)
/// documentation for details on the eigendecomposition
pub struct GpuSymmetricEigen3 {
/// Eigenvectors of the matrix.
pub eigenvectors: Matrix3<f32>,
/// Eigenvalues of the matrix.
pub eigenvalues: Vector3<f32>,
}
#[derive(Shader)]
#[shader(derive(WgMinMax, WgSymmetricEigen2, WgRot2), src = "eig3.wgsl")]
/// Shader for computing the eigendecomposition of symmetric 3x3 matrices.
pub struct WgSymmetricEigen3;
test_shader_compilation!(WgSymmetricEigen3);
impl WgSymmetricEigen3 {
#[doc(hidden)]
#[cfg(test)]
pub fn tests(device: &Device) -> ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> in: array<mat3x3<f32>>;
@group(0) @binding(1)
var<storage, read_write> out: array<SymmetricEigen>;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
out[i] = symmetric_eigen(in[i]);
}
"#;
let src = format!("{}\n{}", Self::src(), test_kernel);
let module = WgSymmetricEigen3::composer()
.unwrap()
.make_naga_module(NagaModuleDescriptor {
source: &src,
file_path: Self::FILE_PATH,
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
}
#[cfg(test)]
mod test {
use super::GpuSymmetricEigen3;
use approx::{assert_relative_eq, relative_eq};
use nalgebra::{DVector, Matrix3};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_eig3() {
let gpu = GpuInstance::new().await.unwrap();
let svd = super::WgSymmetricEigen3::tests(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: usize = 345;
let mut matrices: DVector<Matrix3<f32>> = DVector::new_random(LEN);
for mat in matrices.iter_mut() {
*mat = mat.transpose() * *mat; // Make it symmetric.
}
let inputs = GpuVector::encase(gpu.device(), &matrices, BufferUsages::STORAGE);
let result: GpuVector<GpuSymmetricEigen3> = GpuVector::uninit_encased(
gpu.device(),
matrices.len() as u32,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let staging: GpuVector<GpuSymmetricEigen3> = GpuVector::uninit_encased(
gpu.device(),
matrices.len() as u32,
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
);
// Dispatch the test.
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &svd)
.bind0([inputs.buffer(), result.buffer()])
.dispatch(matrices.len() as u32);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging.copy_from_encased(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
// Check the result is correct.
let gpu_result = staging.read_encased(gpu.device()).await.unwrap();
let mut allowed_fails = 0;
for (m, eigen) in matrices.iter().zip(gpu_result.iter()) {
println!("eig: (gpu) {:?}", eigen);
println!("eig (na): {:?}", m.symmetric_eigen());
let reconstructed = eigen.eigenvectors
* Matrix3::from_diagonal(&eigen.eigenvalues)
* eigen.eigenvectors.transpose();
println!("reconstructed: {:?}", m.symmetric_eigen().recompose());
// NOTE: we allow about 2% of the decompositions to fail, to account for occasionally
// bad random matrices that will fail the test due to an unsuitable epsilon.
// Ideally this percentage should be kept as low as possible, but likely not
// removable entirely.
if allowed_fails == matrices.len() * 2 / 100 {
assert_relative_eq!(*m, reconstructed, epsilon = 1.0e-4);
} else if !relative_eq!(*m, reconstructed, epsilon = 1.0e-4) {
allowed_fails += 1;
}
}
println!("Num fails: {}/{}", allowed_fails, matrices.len());
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/qr2.rs | crates/wgebra/src/geometry/qr2.rs | use nalgebra::Matrix2;
use wgcore::{test_shader_compilation, Shader};
#[cfg(test)]
use {
naga_oil::compose::NagaModuleDescriptor,
wgpu::{ComputePipeline, Device},
};
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
/// GPU representation of a 2x2 matrix QR decomposition.
///
/// See the [nalgebra](https://nalgebra.rs/docs/user_guide/decompositions_and_lapack#qr)
/// documentation for details on the QR decomposition.
pub struct GpuQR2 {
/// The QR decomposition’s 2x2 unitary matrix.
pub q: Matrix2<f32>,
/// The QR decomposition’s 2x2 upper-triangular matrix.
pub r: Matrix2<f32>,
}
#[derive(Shader)]
#[shader(src = "qr2.wgsl")]
/// Shader for computing the Singular Value Decomposition of 2x2 matrices.
pub struct WgQR2;
test_shader_compilation!(WgQR2);
impl WgQR2 {
#[doc(hidden)]
#[cfg(test)]
pub fn tests(device: &Device) -> ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> in: array<mat2x2<f32>>;
@group(0) @binding(1)
var<storage, read_write> out: array<QR>;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
out[i] = qr(in[i]);
}
"#;
let src = format!("{}\n{}", Self::src(), test_kernel);
let module = WgQR2::composer()
.unwrap()
.make_naga_module(NagaModuleDescriptor {
source: &src,
file_path: Self::FILE_PATH,
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
}
#[cfg(test)]
mod test {
use super::GpuQR2;
use approx::{assert_relative_eq, relative_eq};
use nalgebra::{DVector, Matrix2};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_qr2() {
let gpu = GpuInstance::new().await.unwrap();
let svd = super::WgQR2::tests(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: usize = 345;
let matrices: DVector<Matrix2<f32>> = DVector::new_random(LEN);
let inputs = GpuVector::init(gpu.device(), &matrices, BufferUsages::STORAGE);
let result: GpuVector<GpuQR2> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let staging: GpuVector<GpuQR2> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
);
// Dispatch the test.
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &svd)
.bind0([inputs.buffer(), result.buffer()])
.dispatch(matrices.len() as u32);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging.copy_from(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
// Check the result is correct.
let gpu_result = staging.read(gpu.device()).await.unwrap();
let mut allowed_fails = 0;
for (m, qr) in matrices.iter().zip(gpu_result.iter()) {
let qr_na = m.qr();
// NOTE: we allow about 1% of the decompositions to fail, to account for occasionally
// bad random matrices that will fail the test due to an unsuitable epsilon.
// Ideally this percentage should be kept as low as possible, but likely not
// removable entirely.
if allowed_fails == matrices.len() * 2 / 100 {
assert_relative_eq!(qr_na.q(), qr.q, epsilon = 1.0e-4);
assert_relative_eq!(qr_na.r(), qr.r, epsilon = 1.0e-4);
} else if !relative_eq!(qr_na.q(), qr.q, epsilon = 1.0e-4)
|| !relative_eq!(qr_na.r(), qr.r, epsilon = 1.0e-4)
{
allowed_fails += 1;
}
}
println!("Num fails: {}/{}", allowed_fails, matrices.len());
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/sim3.rs | crates/wgebra/src/geometry/sim3.rs | use crate::WgQuat;
use nalgebra::Similarity3;
use wgcore::Shader;
/// A GPU-compatible 3d similarity (uniform scale + rotation + translation).
pub type GpuSim3 = Similarity3<f32>;
#[derive(Shader)]
#[shader(derive(WgQuat), src = "sim3.wgsl")]
/// Shader exposing a 2D similarity (uniform scale + rotation + translation) type and operations.
pub struct WgSim3;
impl WgSim3 {
#[doc(hidden)]
#[cfg(test)]
pub fn tests(device: &wgpu::Device) -> wgpu::ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> test_s1: array<Sim3>;
@group(0) @binding(1)
var<storage, read_write> test_s2: array<Sim3>;
@group(0) @binding(2)
var<storage, read_write> test_p1: array<vec3<f32>>;
@group(0) @binding(3)
var<storage, read_write> test_p2: array<vec3<f32>>;
@group(0) @binding(4)
var<storage, read_write> test_v1: array<vec3<f32>>;
@group(0) @binding(5)
var<storage, read_write> test_v2: array<vec3<f32>>;
@group(0) @binding(6)
var<storage, read_write> test_id: Sim3;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
test_p1[i] = mulPt(test_s1[i], test_p1[i]);
test_p2[i] = invMulPt(test_s2[i],test_p2[i]);
test_v1[i] = mulVec(test_s1[i], test_v1[i]);
test_v2[i] = invMulVec(test_s2[i],test_v2[i]);
test_s1[i] = mul(test_s1[i], test_s2[i]);
test_s2[i] = inv(test_s2[i]);
if i == 0 {
test_id = identity();
}
}
"#;
let src = format!("{}\n{}", Self::src(), test_kernel);
let module = Self::composer()
.unwrap()
.make_naga_module(naga_oil::compose::NagaModuleDescriptor {
source: &src,
file_path: Self::FILE_PATH,
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
}
#[cfg(test)]
mod test {
use crate::GpuSim3;
use approx::assert_relative_eq;
use nalgebra::{DVector, Point4, Similarity3, Vector4};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::{GpuScalar, GpuVector};
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_sim3() {
let gpu = GpuInstance::new().await.unwrap();
let sim3 = super::WgSim3::tests(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: u32 = 345;
// Remove large values from translation and scaling as this can (ligitimately)
// throw off the test epsilon.
let clamp_large_values = |mut sim: Similarity3<f32>| {
sim.isometry.translation.vector /= sim.isometry.translation.vector.amax();
sim.set_scaling(sim.scaling() % 10.0);
sim
};
let test_s1: DVector<GpuSim3> = DVector::new_random(LEN as usize).map(clamp_large_values);
let test_s2: DVector<GpuSim3> = DVector::new_random(LEN as usize).map(clamp_large_values);
let test_p1: DVector<Point4<f32>> = DVector::new_random(LEN as usize);
let test_p2: DVector<Point4<f32>> = DVector::new_random(LEN as usize);
let test_v1: DVector<Vector4<f32>> = DVector::new_random(LEN as usize);
let test_v2: DVector<Vector4<f32>> = DVector::new_random(LEN as usize);
let test_id = GpuSim3::identity();
let usages = BufferUsages::STORAGE | BufferUsages::COPY_SRC;
let gpu_test_s1 = GpuVector::init(gpu.device(), &test_s1, usages);
let gpu_test_s2 = GpuVector::init(gpu.device(), &test_s2, usages);
let gpu_test_p1 = GpuVector::init(gpu.device(), &test_p1, usages);
let gpu_test_p2 = GpuVector::init(gpu.device(), &test_p2, usages);
let gpu_test_v1 = GpuVector::init(gpu.device(), &test_v1, usages);
let gpu_test_v2 = GpuVector::init(gpu.device(), &test_v2, usages);
let gpu_test_id = GpuScalar::init(gpu.device(), test_id, usages);
let usages = BufferUsages::MAP_READ | BufferUsages::COPY_DST;
let staging_test_s1 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_s2 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_p1 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_p2 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_v1 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_v2 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_id = GpuScalar::uninit(gpu.device(), usages);
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &sim3)
.bind0([
gpu_test_s1.buffer(),
gpu_test_s2.buffer(),
gpu_test_p1.buffer(),
gpu_test_p2.buffer(),
gpu_test_v1.buffer(),
gpu_test_v2.buffer(),
gpu_test_id.buffer(),
])
.dispatch(LEN);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging_test_s1.copy_from(&mut encoder, &gpu_test_s1);
staging_test_s2.copy_from(&mut encoder, &gpu_test_s2);
staging_test_p1.copy_from(&mut encoder, &gpu_test_p1);
staging_test_p2.copy_from(&mut encoder, &gpu_test_p2);
staging_test_v1.copy_from(&mut encoder, &gpu_test_v1);
staging_test_v2.copy_from(&mut encoder, &gpu_test_v2);
staging_test_id.copy_from(&mut encoder, &gpu_test_id);
gpu.queue().submit(Some(encoder.finish()));
let result_s1 = staging_test_s1.read(gpu.device()).await.unwrap();
let result_s2 = staging_test_s2.read(gpu.device()).await.unwrap();
let result_p1 = staging_test_p1.read(gpu.device()).await.unwrap();
let result_p2 = staging_test_p2.read(gpu.device()).await.unwrap();
let result_v1 = staging_test_v1.read(gpu.device()).await.unwrap();
let result_v2 = staging_test_v2.read(gpu.device()).await.unwrap();
let result_id = staging_test_id.read(gpu.device()).await.unwrap();
for i in 0..LEN as usize {
assert_relative_eq!(result_s1[i], test_s1[i] * test_s2[i], epsilon = 1.0e-5);
assert_relative_eq!(result_s2[i], test_s2[i].inverse(), epsilon = 1.0e-3);
assert_relative_eq!(
result_p1[i].xyz(),
test_s1[i] * test_p1[i].xyz(),
epsilon = 1.0e-4
);
assert_relative_eq!(
result_p2[i].xyz(),
test_s2[i].inverse_transform_point(&test_p2[i].xyz()),
epsilon = 1.0e-3
);
assert_relative_eq!(
result_v1[i].xyz(),
test_s1[i] * test_v1[i].xyz(),
epsilon = 1.0e-4
);
assert_relative_eq!(
result_v2[i].xyz(),
test_s2[i].inverse_transform_vector(&test_v2[i].xyz()),
epsilon = 1.0e-3
);
}
assert_eq!(result_id[0], Similarity3::identity());
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/svd2.rs | crates/wgebra/src/geometry/svd2.rs | use crate::utils::WgTrig;
use nalgebra::{Matrix2, Vector2};
use wgcore::Shader;
#[cfg(test)]
use {
naga_oil::compose::NagaModuleDescriptor,
wgpu::{ComputePipeline, Device},
};
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
/// GPU representation of a 2x2 matrix SVD.
pub struct GpuSvd2 {
/// First orthogonal matrix of the SVD.
pub u: Matrix2<f32>,
/// Singular values.
pub s: Vector2<f32>,
/// Second orthogonal matrix of the SVD.
pub vt: Matrix2<f32>,
}
#[derive(Shader)]
#[shader(derive(WgTrig), src = "svd2.wgsl")]
/// Shader for computing the Singular Value Decomposition of 2x2 matrices.
pub struct WgSvd2;
impl WgSvd2 {
#[cfg(test)]
#[doc(hidden)]
pub fn tests(device: &Device) -> ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> in: array<mat2x2<f32>>;
@group(0) @binding(1)
var<storage, read_write> out: array<Svd>;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
out[i] = svd(in[i]);
}
"#;
let src = format!("{}\n{}", Self::src(), test_kernel);
let module = WgTrig::composer()
.unwrap()
.make_naga_module(NagaModuleDescriptor {
source: &src,
file_path: Self::FILE_PATH,
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
}
#[cfg(test)]
mod test {
use super::GpuSvd2;
use approx::assert_relative_eq;
use nalgebra::{DVector, Matrix2};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_svd2() {
let gpu = GpuInstance::new().await.unwrap();
let svd = super::WgSvd2::tests(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: usize = 345;
let mut matrices: DVector<Matrix2<f32>> = DVector::new_random(LEN);
matrices[0] = Matrix2::zeros(); // The zero matrix can cause issues on some platforms (like macos) with unspecified atan2 on (0, 0).
matrices[1] = Matrix2::identity(); // The identity matrix can cause issues on some platforms.
let inputs = GpuVector::init(gpu.device(), &matrices, BufferUsages::STORAGE);
let result: GpuVector<GpuSvd2> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let staging: GpuVector<GpuSvd2> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
);
// Dispatch the test.
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &svd)
.bind0([inputs.buffer(), result.buffer()])
.dispatch(matrices.len() as u32);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging.copy_from(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
// Check the result is correct.
let gpu_result = staging.read(gpu.device()).await.unwrap();
for (m, svd) in matrices.iter().zip(gpu_result.iter()) {
let reconstructed = svd.u * Matrix2::from_diagonal(&svd.s) * svd.vt;
assert_relative_eq!(*m, reconstructed, epsilon = 1.0e-4);
}
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/cholesky.rs | crates/wgebra/src/geometry/cholesky.rs | use wgcore::{test_shader_compilation, Shader};
fn substitute2(src: &str) -> String {
src.replace("DIM", "2")
.replace("MAT", "mat2x2<f32>")
.replace("IMPORT_PATH", "wgebra::cholesky2")
}
fn substitute3(src: &str) -> String {
src.replace("DIM", "3")
.replace("MAT", "mat3x3<f32>")
.replace("IMPORT_PATH", "wgebra::cholesky3")
}
fn substitute4(src: &str) -> String {
src.replace("DIM", "4")
.replace("MAT", "mat4x4<f32>")
.replace("IMPORT_PATH", "wgebra::cholesky4")
}
#[derive(Shader)]
#[shader(src = "cholesky.wgsl", src_fn = "substitute2")]
/// Shader for computing the Cholesky decomposition of a symmetric-definite-positive 2x2 matrix.
pub struct WgCholesky2;
#[derive(Shader)]
#[shader(src = "cholesky.wgsl", src_fn = "substitute3")]
/// Shader for computing the Cholesky decomposition of a symmetric-definite-positive 2x2 matrix.
pub struct WgCholesky3;
#[derive(Shader)]
#[shader(src = "cholesky.wgsl", src_fn = "substitute4")]
/// Shader for computing the Cholesky decomposition of a symmetric-definite-positive 2x2 matrix.
pub struct WgCholesky4;
test_shader_compilation!(WgCholesky2);
test_shader_compilation!(WgCholesky3);
test_shader_compilation!(WgCholesky4);
#[cfg(test)]
mod test {
use approx::assert_relative_eq;
use naga_oil::compose::Composer;
use nalgebra::{DVector, Matrix2, Matrix4, Matrix4x3};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgcore::Shader;
use wgpu::BufferUsages;
use {
naga_oil::compose::NagaModuleDescriptor,
wgpu::{ComputePipeline, Device},
};
pub fn test_pipeline<S: Shader>(
device: &Device,
substitute: fn(&str) -> String,
) -> ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> in: array<MAT>;
@group(0) @binding(1)
var<storage, read_write> out: array<MAT>;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
out[i] = cholesky(in[i]);
}
"#;
let src = substitute(&format!("{}\n{}", S::src(), test_kernel));
let module = Composer::default()
.make_naga_module(NagaModuleDescriptor {
source: &src,
file_path: "",
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
macro_rules! gen_test {
($name: ident, $kernel: ident, $mat: ident, $substitute: ident, $dim: expr) => {
#[futures_test::test]
#[serial_test::serial]
async fn $name() {
let gpu = GpuInstance::new().await.unwrap();
let chol = test_pipeline::<super::$kernel>(gpu.device(), super::$substitute);
let mut encoder = gpu.device().create_command_encoder(&Default::default());
type Mat = $mat<f32>;
const LEN: usize = 345;
let mut matrices: DVector<Mat> = DVector::new_random(LEN);
for i in 0..matrices.len() {
let sdp = matrices[i].fixed_rows::<$dim>(0).transpose()
* matrices[i].fixed_rows::<$dim>(0);
matrices[i].fixed_rows_mut::<$dim>(0).copy_from(&sdp);
}
let inputs = GpuVector::init(gpu.device(), &matrices, BufferUsages::STORAGE);
let result: GpuVector<Mat> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let staging: GpuVector<Mat> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
);
// Dispatch the test.
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &chol)
.bind0([inputs.buffer(), result.buffer()])
.dispatch(matrices.len() as u32);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
// Submit.
staging.copy_from(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
// Check the result is correct.
let gpu_result = staging.read(gpu.device()).await.unwrap();
let mut allowed_fails = 0;
for (m, chol) in matrices.iter().zip(gpu_result.iter()) {
if let Some(chol_cpu) = m.fixed_rows::<$dim>(0).cholesky() {
let chol = chol.fixed_rows::<$dim>(0).into_owned();
if allowed_fails == matrices.len() / 100 {
assert_relative_eq!(chol_cpu.unpack_dirty(), chol, epsilon = 1.0e-3);
} else if !approx::relative_eq!(
chol_cpu.unpack_dirty(),
chol,
epsilon = 1.0e-3
) {
allowed_fails += 1;
}
}
}
println!("Num fails: {}/{}", allowed_fails, matrices.len());
}
};
}
gen_test!(gpu_cholesky2, WgCholesky2, Matrix2, substitute2, 2);
// NOTE: for the 3x3 test we need Matrix4x3 to account for the WGSL mat4x3 padding/alignment.
gen_test!(gpu_cholesky3, WgCholesky3, Matrix4x3, substitute3, 3);
gen_test!(gpu_cholesky4, WgCholesky4, Matrix4, substitute4, 4);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/sim2.rs | crates/wgebra/src/geometry/sim2.rs | use crate::WgRot2;
use nalgebra::{Isometry2, Similarity2};
use wgcore::Shader;
#[derive(Copy, Clone, PartialEq, Debug, Default, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
/// A GPU-compatible 2D similarity (uniform scale + rotation + translation).
pub struct GpuSim2 {
/// The similarity value.
pub similarity: Similarity2<f32>,
// TODO: use encase instead of an explicit padding.
/// Extra padding to fit the layout on the gpu. Its value is irrelevant.
pub padding: f32,
}
impl GpuSim2 {
/// The identity similarity (scale = 1, rotation = identity, translation = 0).
pub fn identity() -> Self {
Self {
similarity: Similarity2::identity(),
padding: 0.0,
}
}
}
impl From<Similarity2<f32>> for GpuSim2 {
fn from(value: Similarity2<f32>) -> Self {
Self {
similarity: value,
padding: 0.0,
}
}
}
impl From<Isometry2<f32>> for GpuSim2 {
fn from(value: Isometry2<f32>) -> Self {
Self {
similarity: Similarity2::from_isometry(value, 1.0),
padding: 0.0,
}
}
}
#[derive(Shader)]
#[shader(derive(WgRot2), src = "sim2.wgsl")]
/// Shader exposing a 2D similarity (uniform scale + rotation + translation) type and operations.
pub struct WgSim2;
impl WgSim2 {
#[doc(hidden)]
#[cfg(test)]
pub fn tests(device: &wgpu::Device) -> wgpu::ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> test_s1: array<Sim2>;
@group(0) @binding(1)
var<storage, read_write> test_s2: array<Sim2>;
@group(0) @binding(2)
var<storage, read_write> test_p1: array<vec2<f32>>;
@group(0) @binding(3)
var<storage, read_write> test_p2: array<vec2<f32>>;
@group(0) @binding(4)
var<storage, read_write> test_v1: array<vec2<f32>>;
@group(0) @binding(5)
var<storage, read_write> test_v2: array<vec2<f32>>;
@group(0) @binding(6)
var<storage, read_write> test_id: Sim2;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
test_p1[i] = mulPt(test_s1[i], test_p1[i]);
test_p2[i] = invMulPt(test_s2[i],test_p2[i]);
test_v1[i] = mulVec(test_s1[i], test_v1[i]);
test_v2[i] = invMulVec(test_s2[i],test_v2[i]);
test_s1[i] = mul(test_s1[i], test_s2[i]);
test_s2[i] = inv(test_s2[i]);
if i == 0 {
test_id = identity();
}
}
"#;
let src = format!("{}\n{}", Self::src(), test_kernel);
let module = Self::composer()
.unwrap()
.make_naga_module(naga_oil::compose::NagaModuleDescriptor {
source: &src,
file_path: Self::FILE_PATH,
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
}
#[cfg(test)]
mod test {
use crate::GpuSim2;
use approx::assert_relative_eq;
use nalgebra::{DVector, Point2, Similarity2, Vector2};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::{GpuScalar, GpuVector};
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_sim2() {
let gpu = GpuInstance::new().await.unwrap();
let sim2 = super::WgSim2::tests(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: u32 = 345;
// Remove large values from translation and scaling as this can (ligitimately)
// throw off the test epsilon.
let clamp_large_values = |mut sim: Similarity2<f32>| {
sim.isometry.translation.vector /= sim.isometry.translation.vector.amax();
sim.set_scaling(sim.scaling() % 10.0);
sim
};
let unaligned_s1: DVector<Similarity2<f32>> =
DVector::new_random(LEN as usize).map(clamp_large_values);
let unaligned_s2: DVector<Similarity2<f32>> =
DVector::new_random(LEN as usize).map(clamp_large_values);
let test_s1: DVector<GpuSim2> = unaligned_s1.map(GpuSim2::from);
let test_s2: DVector<GpuSim2> = unaligned_s2.map(GpuSim2::from);
let test_p1: DVector<Point2<f32>> = DVector::new_random(LEN as usize);
let test_p2: DVector<Point2<f32>> = DVector::new_random(LEN as usize);
let test_v1: DVector<Vector2<f32>> = DVector::new_random(LEN as usize);
let test_v2: DVector<Vector2<f32>> = DVector::new_random(LEN as usize);
let test_id = GpuSim2::identity();
let usages = BufferUsages::STORAGE | BufferUsages::COPY_SRC;
let gpu_test_s1 = GpuVector::init(gpu.device(), &test_s1, usages);
let gpu_test_s2 = GpuVector::init(gpu.device(), &test_s2, usages);
let gpu_test_p1 = GpuVector::init(gpu.device(), &test_p1, usages);
let gpu_test_p2 = GpuVector::init(gpu.device(), &test_p2, usages);
let gpu_test_v1 = GpuVector::init(gpu.device(), &test_v1, usages);
let gpu_test_v2 = GpuVector::init(gpu.device(), &test_v2, usages);
let gpu_test_id = GpuScalar::init(gpu.device(), test_id, usages);
let usages = BufferUsages::MAP_READ | BufferUsages::COPY_DST;
let staging_test_s1 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_s2 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_p1 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_p2 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_v1 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_v2 = GpuVector::uninit(gpu.device(), LEN, usages);
let staging_test_id = GpuScalar::uninit(gpu.device(), usages);
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &sim2)
.bind0([
gpu_test_s1.buffer(),
gpu_test_s2.buffer(),
gpu_test_p1.buffer(),
gpu_test_p2.buffer(),
gpu_test_v1.buffer(),
gpu_test_v2.buffer(),
gpu_test_id.buffer(),
])
.dispatch(LEN);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging_test_s1.copy_from(&mut encoder, &gpu_test_s1);
staging_test_s2.copy_from(&mut encoder, &gpu_test_s2);
staging_test_p1.copy_from(&mut encoder, &gpu_test_p1);
staging_test_p2.copy_from(&mut encoder, &gpu_test_p2);
staging_test_v1.copy_from(&mut encoder, &gpu_test_v1);
staging_test_v2.copy_from(&mut encoder, &gpu_test_v2);
staging_test_id.copy_from(&mut encoder, &gpu_test_id);
gpu.queue().submit(Some(encoder.finish()));
let result_s1 = staging_test_s1.read(gpu.device()).await.unwrap();
let result_s2 = staging_test_s2.read(gpu.device()).await.unwrap();
let result_p1 = staging_test_p1.read(gpu.device()).await.unwrap();
let result_p2 = staging_test_p2.read(gpu.device()).await.unwrap();
let result_v1 = staging_test_v1.read(gpu.device()).await.unwrap();
let result_v2 = staging_test_v2.read(gpu.device()).await.unwrap();
let result_id = staging_test_id.read(gpu.device()).await.unwrap();
for i in 0..LEN as usize {
assert_relative_eq!(
result_s1[i].similarity,
unaligned_s1[i] * unaligned_s2[i],
epsilon = 1.0e-5
);
assert_relative_eq!(
result_s2[i].similarity,
unaligned_s2[i].inverse(),
epsilon = 1.0e-3
);
assert_relative_eq!(result_p1[i], unaligned_s1[i] * test_p1[i], epsilon = 1.0e-4);
assert_relative_eq!(
result_p2[i],
unaligned_s2[i].inverse_transform_point(&test_p2[i]),
epsilon = 1.0e-3
);
assert_relative_eq!(result_v1[i], unaligned_s1[i] * test_v1[i], epsilon = 1.0e-4);
assert_relative_eq!(
result_v2[i],
unaligned_s2[i].inverse_transform_vector(&test_v2[i]),
epsilon = 1.0e-3
);
}
assert_eq!(result_id[0].similarity, Similarity2::identity());
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/eig2.rs | crates/wgebra/src/geometry/eig2.rs | use nalgebra::{Matrix2, Vector2};
use wgcore::Shader;
#[cfg(test)]
use {
crate::utils::WgTrig,
naga_oil::compose::NagaModuleDescriptor,
wgpu::{ComputePipeline, Device},
};
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
/// GPU representation of a symmetric 2x2 matrix eigendecomposition.
///
/// See the [nalgebra](https://nalgebra.rs/docs/user_guide/decompositions_and_lapack/#eigendecomposition-of-a-hermitian-matrix)
/// documentation for details on the eigendecomposition
pub struct GpuSymmetricEigen2 {
/// Eigenvectors of the matrix.
pub eigenvectors: Matrix2<f32>,
/// Eigenvalues of the matrix.
pub eigenvalues: Vector2<f32>,
}
#[derive(Shader)]
#[shader(src = "eig2.wgsl")]
/// Shader for computing the eigendecomposition of symmetric 2x2 matrices.
pub struct WgSymmetricEigen2;
impl WgSymmetricEigen2 {
#[doc(hidden)]
#[cfg(test)]
pub fn tests(device: &Device) -> ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> in: array<mat2x2<f32>>;
@group(0) @binding(1)
var<storage, read_write> out: array<SymmetricEigen>;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
out[i] = symmetric_eigen(in[i]);
}
"#;
let src = format!("{}\n{}", Self::src(), test_kernel);
let module = WgTrig::composer()
.unwrap()
.make_naga_module(NagaModuleDescriptor {
source: &src,
file_path: Self::FILE_PATH,
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
}
#[cfg(test)]
mod test {
use super::GpuSymmetricEigen2;
use approx::assert_relative_eq;
use nalgebra::{DVector, Matrix2};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_eig2() {
let gpu = GpuInstance::new().await.unwrap();
let svd = super::WgSymmetricEigen2::tests(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: usize = 345;
let mut matrices: DVector<Matrix2<f32>> = DVector::new_random(LEN);
// matrices[0] = Matrix2::zeros(); // The zero matrix can cause issues on some platforms (like macos) with unspecified atan2 on (0, 0).
// matrices[1] = Matrix2::identity(); // The identity matrix can cause issues on some platforms.
for mat in matrices.iter_mut() {
*mat = mat.transpose() * *mat; // Make it symmetric.
}
let inputs = GpuVector::init(gpu.device(), &matrices, BufferUsages::STORAGE);
let result: GpuVector<GpuSymmetricEigen2> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let staging: GpuVector<GpuSymmetricEigen2> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
);
// Dispatch the test.
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &svd)
.bind0([inputs.buffer(), result.buffer()])
.dispatch(matrices.len() as u32);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging.copy_from(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
// Check the result is correct.
let gpu_result = staging.read(gpu.device()).await.unwrap();
for (m, eigen) in matrices.iter().zip(gpu_result.iter()) {
let reconstructed = eigen.eigenvectors
* Matrix2::from_diagonal(&eigen.eigenvalues)
* eigen.eigenvectors.transpose();
assert_relative_eq!(*m, reconstructed, epsilon = 1.0e-4);
}
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/geometry/eig4.rs | crates/wgebra/src/geometry/eig4.rs | use crate::utils::WgMinMax;
use crate::{WgRot2, WgSymmetricEigen2};
use nalgebra::{Matrix4, Vector4};
use wgcore::{test_shader_compilation, Shader};
#[cfg(test)]
use {
naga_oil::compose::NagaModuleDescriptor,
wgpu::{ComputePipeline, Device},
};
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
/// GPU representation of a symmetric 4x4 matrix eigendecomposition.
///
/// See the [nalgebra](https://nalgebra.rs/docs/user_guide/decompositions_and_lapack/#eigendecomposition-of-a-hermitian-matrix)
/// documentation for details on the eigendecomposition
pub struct GpuSymmetricEigen4 {
/// Eigenvectors of the matrix.
pub eigenvectors: Matrix4<f32>,
/// Eigenvalues of the matrix.
pub eigenvalues: Vector4<f32>,
}
#[derive(Shader)]
#[shader(derive(WgMinMax, WgSymmetricEigen2, WgRot2), src = "eig4.wgsl")]
/// Shader for computing the eigendecomposition of symmetric 4x4 matrices.
pub struct WgSymmetricEigen4;
test_shader_compilation!(WgSymmetricEigen4);
impl WgSymmetricEigen4 {
#[doc(hidden)]
#[cfg(test)]
pub fn tests(device: &Device) -> ComputePipeline {
let test_kernel = r#"
@group(0) @binding(0)
var<storage, read_write> in: array<mat4x4<f32>>;
@group(0) @binding(1)
var<storage, read_write> out: array<SymmetricEigen>;
@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {
let i = invocation_id.x;
out[i] = symmetric_eigen(in[i]);
}
"#;
let src = format!("{}\n{}", Self::src(), test_kernel);
let module = WgSymmetricEigen4::composer()
.unwrap()
.make_naga_module(NagaModuleDescriptor {
source: &src,
file_path: Self::FILE_PATH,
..Default::default()
})
.unwrap();
wgcore::utils::load_module(device, "test", module)
}
}
#[cfg(test)]
mod test {
use super::GpuSymmetricEigen4;
use approx::{assert_relative_eq, relative_eq};
use nalgebra::{DVector, Matrix4};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_eig4() {
let gpu = GpuInstance::new().await.unwrap();
let svd = super::WgSymmetricEigen4::tests(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: usize = 345;
let mut matrices: DVector<Matrix4<f32>> = DVector::new_random(LEN);
for mat in matrices.iter_mut() {
*mat = mat.transpose() * *mat; // Make it symmetric.
}
let inputs = GpuVector::init(gpu.device(), &matrices, BufferUsages::STORAGE);
let result: GpuVector<GpuSymmetricEigen4> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let staging: GpuVector<GpuSymmetricEigen4> = GpuVector::uninit(
gpu.device(),
matrices.len() as u32,
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
);
// Dispatch the test.
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &svd)
.bind0([inputs.buffer(), result.buffer()])
.dispatch(matrices.len() as u32);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging.copy_from(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
// Check the result is correct.
let gpu_result = staging.read(gpu.device()).await.unwrap();
let mut allowed_fails = 0;
for (m, eigen) in matrices.iter().zip(gpu_result.iter()) {
println!("eig: (gpu) {:?}", eigen);
println!("eig (na): {:?}", m.symmetric_eigen());
let reconstructed = eigen.eigenvectors
* Matrix4::from_diagonal(&eigen.eigenvalues)
* eigen.eigenvectors.transpose();
println!("reconstructed: {:?}", m.symmetric_eigen().recompose());
// NOTE: we allow about 2% of the decompositions to fail, to account for occasionally
// bad random matrices that will fail the test due to an unsuitable epsilon.
// Ideally this percentage should be kept as low as possible, but likely not
// removable entirely.
if allowed_fails == matrices.len() * 2 / 100 {
assert_relative_eq!(*m, reconstructed, epsilon = 1.0e-4);
} else if !relative_eq!(*m, reconstructed, epsilon = 1.0e-4) {
allowed_fails += 1;
}
}
println!("Num fails: {}/{}", allowed_fails, matrices.len());
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/utils/mod.rs | crates/wgebra/src/utils/mod.rs | //! Utilities to address some platform-dependent differences
//! (e.g. for some trigonometric functions).
pub use self::min_max::WgMinMax;
pub use self::trig::WgTrig;
mod min_max;
mod trig;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/utils/min_max.rs | crates/wgebra/src/utils/min_max.rs | use wgcore::Shader;
/// Helper shader functions for calculating the min/max elements of a vector or matrix.
#[derive(Shader)]
#[shader(src = "min_max.wgsl")]
pub struct WgMinMax;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/utils/trig.rs | crates/wgebra/src/utils/trig.rs | use wgcore::Shader;
/// Alternative implementations of some geometric functions on the gpu.
///
/// Some platforms (Metal in particular) has implementations of some trigonometric functions
/// that are not numerically stable. This is the case for example for `atan2` and `atanh` that
/// may occasionally lead to NaNs. This shader exposes alternative implementations for numerically
/// stable versions of these functions to ensure good behavior across all platforms.
#[derive(Shader)]
#[shader(src = "trig.wgsl")]
pub struct WgTrig;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/linalg/shape.rs | crates/wgebra/src/linalg/shape.rs | use naga_oil::compose::ShaderDefValue;
use std::collections::HashMap;
use wgcore::Shader;
#[derive(Shader)]
#[shader(src = "shape.wgsl")]
/// A shader for handling matrix/vector indexing based on their shape of type
/// [`wgcore::shapes::ViewShape`].
pub struct Shape;
/// Shader definitions setting the `ROW_MAJOR` boolean macro for shaders supporting conditional
/// compilation for switching row-major and column-major matrix handling.
pub fn row_major_shader_defs() -> HashMap<String, ShaderDefValue> {
[("ROW_MAJOR".to_string(), ShaderDefValue::Bool(true))].into()
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/linalg/op_assign.rs | crates/wgebra/src/linalg/op_assign.rs | use crate::linalg::shape::Shape;
use bytemuck::Pod;
use naga_oil::compose::{ComposerError, NagaModuleDescriptor};
use naga_oil::redirect::Redirector;
use wgcore::kernel::KernelDispatch;
use wgcore::shapes::ViewShapeBuffers;
use wgcore::tensor::GpuVectorView;
use wgcore::utils;
use wgcore::Shader;
use wgpu::{ComputePass, ComputePipeline, Device};
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[non_exhaustive]
/// The desired operation for the [`OpAssign`] kernel.
pub enum OpAssignVariant {
/// Sum: `a += b`
Add,
/// Subtraction: `a -= b`
Sub,
/// Product: `a *= b`
Mul,
/// Division: `a /= b`
Div,
/// Copy: `a = b`
Copy,
}
impl OpAssignVariant {
fn kernel_fn(self) -> &'static str {
match self {
Self::Add => "add_f32",
Self::Sub => "sub_f32",
Self::Mul => "mul_f32",
Self::Div => "div_f32",
Self::Copy => "copy_f32",
}
}
}
// TODO: we could probably use proc-macros to specify the modules this
// has to be composed with?
/// A GPU kernel for performing the operation described by [`OpAssignVariant`].
pub struct OpAssign(pub ComputePipeline, pub OpAssignVariant);
impl OpAssign {
/// WGSL source file for `OpAssign`.
pub const SRC: &'static str = include_str!("op_assign.wgsl");
/// The WGSL file path.
pub const FILE_PATH: &'static str = "wgebra/src/op_assign.wgsl";
/// Creates the compute pipeline for the operation described by the given [`OpAssignVariant`].
pub fn new(device: &Device, op: OpAssignVariant) -> Result<Self, ComposerError> {
let module = Shape::composer()?.make_naga_module(NagaModuleDescriptor {
source: Self::SRC,
file_path: Self::FILE_PATH,
..Default::default()
})?;
let mut redirector = Redirector::new(module);
redirector
.redirect_function("placeholder", op.kernel_fn(), &Default::default())
.unwrap();
Ok(OpAssign(
utils::load_module(device, "main", redirector.into_module().unwrap()),
op,
))
}
/// Queues the operation for computing `in_out_a ?= in_b` where `?` depends on the
/// [`OpAssignVariant`] selected when creating `self`.
pub fn dispatch<'a, 'b, T: Pod>(
&'a self,
device: &Device,
shapes: &ViewShapeBuffers,
pass: &mut ComputePass,
in_out_a: impl Into<GpuVectorView<'b, T>>,
in_b: impl Into<GpuVectorView<'b, T>>,
) {
let in_out_a = in_out_a.into();
let in_b = in_b.into();
assert_eq!(
in_out_a.shape().size[0],
in_b.shape().size[0],
"Op-assign: dimension mismatch."
);
let a_shape_buf = shapes.get(device, in_out_a.shape());
let b_shape_buf = shapes.get(device, in_b.shape());
KernelDispatch::new(device, pass, &self.0)
.bind0([&a_shape_buf, &b_shape_buf, in_out_a.buffer(), in_b.buffer()])
.dispatch(in_out_a.shape().size[0].div_ceil(64));
}
}
#[cfg(test)]
mod test {
use super::{OpAssign, OpAssignVariant};
use nalgebra::DVector;
use wgcore::gpu::GpuInstance;
use wgcore::kernel::CommandEncoderExt;
use wgcore::shapes::ViewShapeBuffers;
use wgcore::tensor::TensorBuilder;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_op_assign() {
let ops = [
OpAssignVariant::Add,
OpAssignVariant::Sub,
OpAssignVariant::Mul,
OpAssignVariant::Div,
];
let gpu = GpuInstance::new().await.unwrap();
let shapes = ViewShapeBuffers::new();
for op in ops {
let op_assign = OpAssign::new(gpu.device(), op).unwrap();
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: u32 = 1757;
let v0 = DVector::from_fn(LEN as usize, |i, _| i as f32 + 0.1);
let v1 = DVector::from_fn(LEN as usize, |i, _| i as f32 * 10.0 + 0.1);
let gpu_v0 = TensorBuilder::vector(LEN, BufferUsages::STORAGE | BufferUsages::COPY_SRC)
.build_init(gpu.device(), v0.as_slice());
let gpu_v1 = TensorBuilder::vector(LEN, BufferUsages::STORAGE)
.build_init(gpu.device(), v1.as_slice());
let staging =
TensorBuilder::vector(LEN, BufferUsages::MAP_READ | BufferUsages::COPY_DST)
.build(gpu.device());
let mut pass = encoder.compute_pass("test", None);
op_assign.dispatch(gpu.device(), &shapes, &mut pass, &gpu_v0, &gpu_v1);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging.copy_from(&mut encoder, &gpu_v0);
gpu.queue().submit(Some(encoder.finish()));
let cpu_result = match op {
OpAssignVariant::Add => v0 + v1,
OpAssignVariant::Sub => v0 - v1,
OpAssignVariant::Mul => v0.component_mul(&v1),
OpAssignVariant::Div => v0.component_div(&v1),
OpAssignVariant::Copy => v1.clone(),
};
approx::assert_relative_eq!(
DVector::from(staging.read(gpu.device()).await.unwrap()),
cpu_result,
epsilon = 1.0e-7
);
}
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/linalg/reduce.rs | crates/wgebra/src/linalg/reduce.rs | use crate::linalg::shape::Shape;
use bytemuck::Pod;
use naga_oil::compose::{ComposerError, NagaModuleDescriptor};
use naga_oil::redirect::Redirector;
use nalgebra::DVector;
use wgcore::kernel::KernelDispatch;
use wgcore::shapes::ViewShapeBuffers;
use wgcore::tensor::{GpuScalar, GpuVectorView};
use wgcore::utils;
use wgcore::Shader;
use wgpu::{ComputePass, ComputePipeline, Device};
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[non_exhaustive]
/// The desired operation for the [`Reduce`] kernel.
pub enum ReduceOp {
/// Minimum: `result = min(input[0], min(input[1], ...))`
Min,
/// Maximum: `result = max(input[0], max(input[1], ...))`
Max,
/// Sum: `result = input[0] + input[1] ...`
Sum,
/// Product: `result = input[0] * input[1] ...`
Prod,
/// Squared norm: `result = input[0] * input[0] + input[1] * input[1] ...`
SqNorm,
}
impl ReduceOp {
const fn init_fn(self) -> &'static str {
match self {
Self::Min => "init_max_f32",
Self::Max => "init_min_f32",
Self::Sum => "init_zero",
Self::Prod => "init_one",
Self::SqNorm => "init_zero",
}
}
const fn workspace_fn(self) -> &'static str {
match self {
Self::Min => "reduce_min_f32",
Self::Max => "reduce_max_f32",
Self::Sum => "reduce_sum_f32",
Self::Prod => "reduce_prod_f32",
Self::SqNorm => "reduce_sqnorm_f32",
}
}
const fn reduce_fn(self) -> &'static str {
match self {
Self::Min => "reduce_min_f32",
Self::Max => "reduce_max_f32",
Self::Sum => "reduce_sum_f32",
Self::Prod => "reduce_prod_f32",
Self::SqNorm => "reduce_sum_f32", // reduce_sqnorm only happens in workspace
}
}
}
/// A GPU kernel for performing the operation described by [`ReduceOp`].
pub struct Reduce(pub ComputePipeline, pub ReduceOp);
impl Reduce {
/// WGSL source file for `Reduce`.
pub const SRC: &'static str = include_str!("reduce.wgsl");
/// The WGSL file path.
pub const FILE_PATH: &'static str = "wgebra/src/reduce.wgsl";
/// Creates the compute pipeline for the operation described by the given [`ReduceOp`].
pub fn new(device: &Device, op: ReduceOp) -> Result<Self, ComposerError> {
let module = Shape::composer()?.make_naga_module(NagaModuleDescriptor {
source: Self::SRC,
file_path: Self::FILE_PATH,
..Default::default()
})?;
let mut redirector = Redirector::new(module);
redirector
.redirect_function(
"workspace_placeholder",
op.workspace_fn(),
&Default::default(),
)
.unwrap();
redirector
.redirect_function("init_placeholder", op.init_fn(), &Default::default())
.unwrap();
redirector
.redirect_function("reduce_placeholder", op.reduce_fn(), &Default::default())
.unwrap();
Ok(Self(
utils::load_module(device, "main", redirector.into_module().unwrap()),
op,
))
}
/// Dispatch the operation for computing `result = reduce(value)` where `reduce` depends on the
/// [`ReduceOp`] selected when creating `self`.
pub fn dispatch<'a, T: Pod>(
&self,
device: &Device,
shapes: &ViewShapeBuffers,
pass: &mut ComputePass,
value: impl Into<GpuVectorView<'a, T>>,
result: &GpuScalar<T>,
) {
let value = value.into();
let shape_buf = shapes.get(device, value.shape());
KernelDispatch::new(device, pass, &self.0)
.bind0([&shape_buf, value.buffer(), result.buffer()])
.dispatch(1);
}
#[doc(hidden)]
pub fn eval_cpu(&self, val: &DVector<f32>) -> f32 {
match self.1 {
ReduceOp::Min => val.min(),
ReduceOp::Max => val.max(),
ReduceOp::Prod => val.product(),
ReduceOp::Sum => val.sum(),
ReduceOp::SqNorm => val.norm_squared(),
}
}
}
#[cfg(test)]
mod test {
use super::ReduceOp;
use nalgebra::DVector;
use wgcore::gpu::GpuInstance;
use wgcore::kernel::CommandEncoderExt;
use wgcore::shapes::ViewShapeBuffers;
use wgcore::tensor::TensorBuilder;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_reduce() {
let gpu = GpuInstance::new().await.unwrap();
let shapes = ViewShapeBuffers::new();
let ops = [
ReduceOp::Min,
ReduceOp::Max,
ReduceOp::Sum,
ReduceOp::SqNorm,
ReduceOp::Prod,
];
for op in ops {
println!("Testing: {:?}", op);
let reduce = super::Reduce::new(gpu.device(), op).unwrap();
let mut encoder = gpu.device().create_command_encoder(&Default::default());
const LEN: usize = 345;
let numbers: DVector<f32> = DVector::new_random(LEN);
let vector = TensorBuilder::vector(numbers.len() as u32, BufferUsages::STORAGE)
.build_init(gpu.device(), numbers.as_slice());
let result = TensorBuilder::scalar(BufferUsages::STORAGE | BufferUsages::COPY_SRC)
.build(gpu.device());
let staging = TensorBuilder::scalar(BufferUsages::MAP_READ | BufferUsages::COPY_DST)
.build(gpu.device());
let mut pass = encoder.compute_pass("test", None);
reduce.dispatch(gpu.device(), &shapes, &mut pass, &vector, &result);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging.copy_from(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
approx::assert_relative_eq!(
staging.read(gpu.device()).await.unwrap()[0],
reduce.eval_cpu(&numbers),
epsilon = 1.0e-3
);
}
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/linalg/mod.rs | crates/wgebra/src/linalg/mod.rs | //! Fundamental linear-algebra matrix/vector operations.
mod gemm;
mod gemv;
mod op_assign;
mod reduce;
mod shape;
pub use gemm::{Gemm, GemmVariant};
pub use gemv::{Gemv, GemvVariant};
pub use op_assign::{OpAssign, OpAssignVariant};
pub use reduce::{Reduce, ReduceOp};
pub use shape::{row_major_shader_defs, Shape};
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/linalg/gemv.rs | crates/wgebra/src/linalg/gemv.rs | use crate::linalg::shape::Shape;
use bytemuck::Pod;
use wgcore::kernel::KernelDispatch;
use wgcore::shapes::ViewShapeBuffers;
use wgcore::tensor::GpuCubeView;
use wgcore::Shader;
use wgpu::{ComputePass, ComputePipeline, Device};
#[derive(Shader)]
#[shader(derive(Shape), src = "gemv.wgsl", composable = false)]
/// Shader for computing the product of a matrix and a vector.
pub struct Gemv {
/// The compute pipeline for `matrix * vector`.
pub gemv: ComputePipeline,
/// A compute pipeline for `matrix * vector` leveraging workgroup reduction.
pub gemv_fast: ComputePipeline,
/// The compute pipeline for `transpose(matrix) * vector`.
pub gemv_tr: ComputePipeline,
/// A compute pipeline for `transpose(matrix) * vector` leveraging workgroup reduction.
pub gemv_tr_fast: ComputePipeline,
}
/// Variants used to select the specific kernel to dispatch from the [`Gemv`] shader.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum GemvVariant {
/// Multiplication of a vector by a matrix.
Gemv,
/// An optimized version for multiplication of a vector by a matrix.
GemvFast,
/// Multiplication of a vector by a transposed matrix.
GemvTr,
/// An optimized version for multiplication of a vector by a transposed matrix.
GemvTrFast,
}
impl Gemv {
/// Dispatches this shader to compute `out = m * v`.
pub fn dispatch<'a, 'b, T: Pod>(
&'a self,
device: &Device,
shapes: &ViewShapeBuffers,
pass: &mut ComputePass,
out: impl Into<GpuCubeView<'b, T>>,
m: impl Into<GpuCubeView<'b, T>>,
v: impl Into<GpuCubeView<'b, T>>,
) {
self.dispatch_generic(device, shapes, pass, out, m, v, GemvVariant::Gemv)
}
/// Dispatches this shader to compute `out = tr(m) * v`.
pub fn dispatch_tr<'a, 'b, T: Pod>(
&'a self,
device: &Device,
shapes: &ViewShapeBuffers,
pass: &mut ComputePass,
out: impl Into<GpuCubeView<'b, T>>,
m: impl Into<GpuCubeView<'b, T>>,
v: impl Into<GpuCubeView<'b, T>>,
) {
self.dispatch_generic(device, shapes, pass, out, m, v, GemvVariant::GemvTr)
}
/// Dispatches the matrix-vector multiplication variant indicated by the given [`GemvVariant`].
pub fn dispatch_generic<'a, 'b, T: Pod>(
&'a self,
device: &Device,
shapes: &ViewShapeBuffers,
pass: &mut ComputePass,
out: impl Into<GpuCubeView<'b, T>>,
m: impl Into<GpuCubeView<'b, T>>,
v: impl Into<GpuCubeView<'b, T>>,
mut variant: GemvVariant,
) {
let out = out.into();
let m = m.into();
let v = v.into();
let [out_nrows, out_ncols, out_nmats] = out.shape().size;
// Check dimensions.
{
let v_rows = v.shape().size[0];
let (m_rows, m_cols) = match variant {
GemvVariant::Gemv | GemvVariant::GemvFast => (m.shape().size[0], m.shape().size[1]),
GemvVariant::GemvTr | GemvVariant::GemvTrFast => {
(m.shape().size[1], m.shape().size[0])
}
};
assert_eq!(m_cols, v_rows, "Gemv: dimension mismatch.");
assert_eq!(m_rows, out_nrows, "Gemv: dimension mismatch.");
}
let out_shape_buf = shapes.get(device, out.shape());
let m_shape_buf = shapes.get(device, m.shape());
let v_shape_buf = shapes.get(device, v.shape());
// More compatibility check.
// TODO: switch to a fallback version when any of these check don’t pass.
if variant == GemvVariant::GemvTrFast {
// Switch to the non-fast version if we dont have the right alignment.
if m.shape().size[0] % (WORKGROUP_SIZE * 4) != 0 {
variant = GemvVariant::GemvTr;
}
}
let pipeline = match variant {
GemvVariant::Gemv => &self.gemv,
GemvVariant::GemvFast => &self.gemv_fast,
GemvVariant::GemvTr => &self.gemv_tr,
GemvVariant::GemvTrFast => &self.gemv_tr_fast,
};
const WORKGROUP_SIZE: u32 = 32;
let dispatch = match variant {
// Each thread handles 4 rows of the matrix, there is no special
// consideration of workgroup threads.
GemvVariant::Gemv | GemvVariant::GemvTr => out_nrows.div_ceil(WORKGROUP_SIZE),
// Each workgroup handles 4 entire rows of the matrix.
GemvVariant::GemvFast | GemvVariant::GemvTrFast => {
// TODO: automatically fallback to the non-fast version if this condition isn’t met?
assert_eq!(out_nrows % 4, 0);
out_nrows.div_ceil(4)
}
};
KernelDispatch::new(device, pass, pipeline)
.bind0([
&out_shape_buf,
&m_shape_buf,
&v_shape_buf,
out.buffer(),
m.buffer(),
v.buffer(),
])
.dispatch([dispatch, out_ncols, out_nmats]);
}
}
#[cfg(test)]
mod test {
use crate::GemvVariant;
use nalgebra::{DMatrix, DVector};
use wgcore::gpu::GpuInstance;
use wgcore::kernel::CommandEncoderExt;
use wgcore::shapes::ViewShapeBuffers;
use wgcore::tensor::TensorBuilder;
use wgcore::Shader;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_gemv() {
let gpu = GpuInstance::new().await.unwrap();
let gemv = super::Gemv::from_device(gpu.device()).unwrap();
let shapes = ViewShapeBuffers::new();
const NROWS: u32 = 1024;
const NCOLS: u32 = 1024;
let m_cpu = DMatrix::<f32>::new_random(NROWS as usize, NCOLS as usize);
let v_cpu = DVector::<f32>::new_random(NCOLS as usize);
let lhs_cpu = DVector::<f32>::new_random(NROWS as usize);
let m = TensorBuilder::matrix(NROWS, NCOLS, BufferUsages::STORAGE)
.build_init(gpu.device(), m_cpu.as_slice());
let v = TensorBuilder::vector(v_cpu.nrows() as u32, BufferUsages::STORAGE)
.build_init(gpu.device(), v_cpu.as_slice());
let result = TensorBuilder::vector(NROWS, BufferUsages::STORAGE | BufferUsages::COPY_SRC)
.build_init(gpu.device(), lhs_cpu.as_slice());
let staging = TensorBuilder::vector(NROWS, BufferUsages::MAP_READ | BufferUsages::COPY_DST)
.build(gpu.device());
for variant in [
GemvVariant::Gemv,
GemvVariant::GemvTr,
GemvVariant::GemvFast,
GemvVariant::GemvTrFast,
] {
println!("Checking variant: {:?}", variant);
let mut encoder = gpu.device().create_command_encoder(&Default::default());
let mut pass = encoder.compute_pass("test", None);
gemv.dispatch_generic(gpu.device(), &shapes, &mut pass, &result, &m, &v, variant);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging.copy_from(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
let gpu_result = staging.read(gpu.device()).await.unwrap();
let cpu_result = match variant {
GemvVariant::Gemv | GemvVariant::GemvFast => &m_cpu * &v_cpu,
GemvVariant::GemvTr | GemvVariant::GemvTrFast => m_cpu.tr_mul(&v_cpu),
};
approx::assert_relative_eq!(DVector::from(gpu_result), cpu_result, epsilon = 1.0e-3);
}
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgebra/src/linalg/gemm.rs | crates/wgebra/src/linalg/gemm.rs | use crate::linalg::shape::Shape;
use bytemuck::Pod;
use wgcore::kernel::KernelDispatch;
use wgcore::shapes::ViewShapeBuffers;
use wgcore::tensor::GpuCubeView;
use wgcore::Shader;
use wgpu::{ComputePass, ComputePipeline, Device};
#[derive(Shader)]
#[shader(derive(Shape), src = "gemm.wgsl", composable = false)]
/// Shader for computing the product of two matrices.
pub struct Gemm {
/// The compute pipeline for `matrix1 * matrix2`.
pub gemm: ComputePipeline,
/// A compute pipeline for `matrix1 * matrix2` leveraging workgroup reduction.
pub gemm_fast: ComputePipeline,
/// The compute pipeline for `transpose(matrix1) * matrix2`.
pub gemm_tr: ComputePipeline,
/// A compute pipeline for `transpose(matrix1) * matrix2` leveraging workgroup reduction.
pub gemm_tr_fast: ComputePipeline,
}
/// Variants used to select the specific kernel to dispatch from the [`Gemm`] shader.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum GemmVariant {
/// The compute pipeline for `matrix1 * matrix2`.
Gemm,
/// A compute pipeline for `matrix1 * matrix2` leveraging workgroup reduction.
GemmFast,
/// The compute pipeline for `transpose(matrix1) * matrix2`.
GemmTr,
/// A compute pipeline for `transpose(matrix1) * matrix2` leveraging workgroup reduction.
GemmTrFast,
}
impl Gemm {
/// Dispatch this shader to compute `out = m1 * m2`.
pub fn dispatch<'a, 'b, T: Pod>(
&'a self,
device: &Device,
shapes: &ViewShapeBuffers,
pass: &mut ComputePass,
out: impl Into<GpuCubeView<'b, T>>,
m1: impl Into<GpuCubeView<'b, T>>,
m2: impl Into<GpuCubeView<'b, T>>,
) {
self.dispatch_generic(device, shapes, pass, out, m1, m2, GemmVariant::Gemm)
}
/// Dispatch this shader to compute `out = tr(m1) * m2`.
pub fn dispatch_tr<'a, 'b, T: Pod>(
&'a self,
device: &Device,
shapes: &ViewShapeBuffers,
pass: &mut ComputePass,
out: impl Into<GpuCubeView<'b, T>>,
m1: impl Into<GpuCubeView<'b, T>>,
m2: impl Into<GpuCubeView<'b, T>>,
) {
self.dispatch_generic(device, shapes, pass, out, m1, m2, GemmVariant::GemmTr)
}
/// Dispatches the matrix-vector multiplication variant indicated by the given [`GemmVariant`].
pub fn dispatch_generic<'a, 'b, T: Pod>(
&'a self,
device: &Device,
shapes: &ViewShapeBuffers,
pass: &mut ComputePass,
out: impl Into<GpuCubeView<'b, T>>,
m1: impl Into<GpuCubeView<'b, T>>,
m2: impl Into<GpuCubeView<'b, T>>,
variant: GemmVariant,
) {
let out = out.into();
let m1 = m1.into();
let m2 = m2.into();
let [out_rows, out_cols, out_mats] = out.shape().size;
// Check dimensions.
{
let (m_rows, m_cols) = match variant {
GemmVariant::Gemm | GemmVariant::GemmFast => {
(m1.shape().size[0], m1.shape().size[1])
}
GemmVariant::GemmTr | GemmVariant::GemmTrFast => {
(m1.shape().size[1], m1.shape().size[0])
}
};
assert_eq!(m_cols, m2.shape().size[0], "Gemm: dimension mismatch.");
assert_eq!(m_rows, out_rows, "Gemm: dimension mismatch.");
assert_eq!(out_cols, m2.shape().size[1], "Gemm: dimension mismatch.");
assert_eq!(out_mats, m1.shape().size[2], "Gemm: dimension mismatch.");
assert_eq!(out_mats, m2.shape().size[2], "Gemm: dimension mismatch.");
}
let out_shape_buf = shapes.get(device, out.shape());
let m1_shape_buf = shapes.get(device, m1.shape());
let m2_shape_buf = shapes.get(device, m2.shape());
let pipeline = match variant {
GemmVariant::Gemm => &self.gemm,
GemmVariant::GemmFast => &self.gemm_fast,
GemmVariant::GemmTr => &self.gemm_tr,
GemmVariant::GemmTrFast => &self.gemm_tr_fast,
};
let dispatch = match variant {
// Each thread handles 4 rows of the matrix, there is no special
// consideration of workgroup threads.
GemmVariant::Gemm | GemmVariant::GemmTr => out_rows.div_ceil(64),
// Each workgroup handles 4 entire rows of the matrix.
GemmVariant::GemmFast | GemmVariant::GemmTrFast => out_rows.div_ceil(4),
};
KernelDispatch::new(device, pass, pipeline)
.bind0([
&out_shape_buf,
&m1_shape_buf,
&m2_shape_buf,
out.buffer(),
m1.buffer(),
m2.buffer(),
])
.dispatch([dispatch, out_mats, 1]);
}
}
#[cfg(test)]
mod test {
use crate::GemmVariant;
use approx::assert_relative_eq;
use nalgebra::DMatrix;
use wgcore::gpu::GpuInstance;
use wgcore::kernel::CommandEncoderExt;
use wgcore::shapes::ViewShapeBuffers;
use wgcore::tensor::TensorBuilder;
use wgcore::Shader;
use wgpu::BufferUsages;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_gemm() {
let gpu = GpuInstance::new().await.unwrap();
let gemm = super::Gemm::from_device(gpu.device()).unwrap();
let shapes = ViewShapeBuffers::new();
const NROWS: u32 = 256;
const NCOLS: u32 = 256;
let m1_cpu = DMatrix::<f32>::new_random(NROWS as usize, NCOLS as usize);
let m2_cpu = DMatrix::<f32>::new_random(NCOLS as usize, NROWS as usize);
let lhs_cpu = DMatrix::<f32>::zeros(NROWS as usize, NROWS as usize);
let m1 = TensorBuilder::matrix(NROWS, NCOLS, BufferUsages::STORAGE)
.build_init(gpu.device(), m1_cpu.as_slice());
let m2 = TensorBuilder::matrix(NCOLS, NROWS, BufferUsages::STORAGE)
.build_init(gpu.device(), m2_cpu.as_slice());
let result =
TensorBuilder::matrix(NROWS, NROWS, BufferUsages::STORAGE | BufferUsages::COPY_SRC)
.build_init(gpu.device(), lhs_cpu.as_slice());
let staging = TensorBuilder::matrix(
NROWS,
NROWS,
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
)
.build(gpu.device());
for variant in [
GemmVariant::Gemm,
GemmVariant::GemmTr,
GemmVariant::GemmFast,
GemmVariant::GemmTrFast,
] {
println!("Checking variant: {:?}", variant);
let mut encoder = gpu.device().create_command_encoder(&Default::default());
let mut pass = encoder.compute_pass("test", None);
gemm.dispatch_generic(
gpu.device(),
&shapes,
&mut pass,
result.as_embedded_view(),
m1.as_embedded_view(),
m2.as_embedded_view(),
variant,
);
drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
staging.copy_from(&mut encoder, &result);
gpu.queue().submit(Some(encoder.finish()));
let gpu_result = staging.read(gpu.device()).await.unwrap();
let cpu_result = match variant {
GemmVariant::Gemm | GemmVariant::GemmFast => &m1_cpu * &m2_cpu,
GemmVariant::GemmTr | GemmVariant::GemmTrFast => m1_cpu.tr_mul(&m2_cpu),
};
let gpu_result = DMatrix::from_vec(NROWS as usize, NROWS as usize, gpu_result);
assert_relative_eq!(gpu_result, cpu_result, epsilon = 1.0e-3);
}
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/src/hot_reloading.rs | crates/wgcore/src/hot_reloading.rs | //! Utility to detect changed files for shader hot-reloading.
use async_channel::Receiver;
use notify::{Event, EventKind};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
#[cfg(not(target_family = "wasm"))]
use notify::Watcher;
#[cfg(doc)]
use crate::Shader;
/// State for tracking file changes.
pub struct HotReloadState {
#[cfg(not(target_family = "wasm"))]
watcher: notify::RecommendedWatcher,
rcv: Receiver<notify::Result<Event>>,
file_changed: HashMap<PathBuf, bool>,
}
impl HotReloadState {
/// Initializes the file-tracking context.
///
/// To register a shader for change-tracking call [`Shader::watch_sources`] once with the state
/// returned by this function.
/// To register a file for change-tracking, call [`HotReloadState::watch_file`].
pub fn new() -> notify::Result<Self> {
let (_snd, _rcv) = async_channel::unbounded();
Ok(Self {
#[cfg(not(target_family = "wasm"))]
watcher: notify::recommended_watcher(move |msg| {
// TODO: does hot-reloading make sense on wasm anyway?
let _ = _snd.send_blocking(msg);
})?,
rcv: _rcv,
file_changed: Default::default(),
})
}
/// Saves in `self` the set of watched files that changed since the last time this function
/// was called.
///
/// Once this call completes, the [`Self::file_changed`] method can be used to check if a
/// particular file (assuming it was added to the watch list with [`Self::watch_file`]) has
/// changed since the last time [`Self::update_changes`] was called.
pub fn update_changes(&mut self) {
for changed in self.file_changed.values_mut() {
*changed = false;
}
while let Ok(event) = self.rcv.try_recv() {
if let Ok(event) = event {
if event.need_rescan() || matches!(event.kind, EventKind::Modify(_)) {
for path in event.paths {
self.file_changed.insert(path, true);
}
}
}
}
}
/// Registers a files for change-tracking.
pub fn watch_file(&mut self, _path: &Path) -> notify::Result<()> {
#[cfg(not(target_family = "wasm"))]
if !self.file_changed.contains_key(_path) {
self.watcher
.watch(_path, notify::RecursiveMode::NonRecursive)?;
// NOTE: this won’t insert if the watch failed.
self.file_changed.insert(_path.to_path_buf(), false);
}
Ok(())
}
/// Checks if the specified file change was detected at the time of calling [`Self::update_changes`].
pub fn file_changed(&self, path: &Path) -> bool {
self.file_changed.get(path).copied().unwrap_or_default()
}
/// Gets the list of files watched for hot-reloading, as well as there last known change status.
pub fn watched_files(&self) -> impl Iterator<Item = (&PathBuf, bool)> {
self.file_changed
.iter()
.map(|(path, changed)| (path, *changed))
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/src/lib.rs | crates/wgcore/src/lib.rs | #![doc = include_str!("../README.md")]
// #![warn(missing_docs)]
#![allow(clippy::result_large_err)]
pub mod composer;
pub mod gpu;
pub mod hot_reloading;
pub mod indirect;
pub mod kernel;
pub mod shader;
pub mod shapes;
pub mod tensor;
pub mod timestamps;
pub mod utils;
pub use bytemuck::Pod;
pub use shader::{Shader, ShaderRegistry};
#[cfg(feature = "derive")]
pub use wgcore_derive::*;
/// Third-party modules re-exports.
pub mod re_exports {
pub use bytemuck;
pub use encase;
pub use naga_oil::{
self,
compose::{ComposableModuleDescriptor, Composer, ComposerError, NagaModuleDescriptor},
};
pub use notify;
pub use paste;
pub use wgpu::{self, Device};
}
/// A macro that declares a test that will check compilation of the shader identified by the given
/// struct implementing `Shader`.
#[macro_export]
macro_rules! test_shader_compilation {
($ty: ident) => {
wgcore::test_shader_compilation!($ty, wgcore);
};
($ty: ident, $wgcore: ident) => {
wgcore::test_shader_compilation!($ty, wgcore, Default::default());
};
($ty: ident, $wgcore: ident, $shader_defs: expr) => {
$wgcore::re_exports::paste::paste! {
#[cfg(test)]
mod [<test_shader_compiles_ $ty>] {
use super::$ty;
use naga_oil::compose::NagaModuleDescriptor;
use $wgcore::Shader;
use $wgcore::gpu::GpuInstance;
use $wgcore::utils;
#[futures_test::test]
#[serial_test::serial]
async fn shader_compiles() {
// Add a dumb entry point for testing.
let src = format!(
"{}
@compute @workgroup_size(1, 1, 1)
fn macro_generated_test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {{}}
",
$ty::src()
);
let gpu = GpuInstance::new().await.unwrap();
let module = $ty::composer()
.unwrap()
.make_naga_module(NagaModuleDescriptor {
source: &src,
file_path: $ty::FILE_PATH,
shader_defs: $shader_defs,
..Default::default()
})
.unwrap();
let _ = utils::load_module(gpu.device(), "macro_generated_test", module);
}
}
}
};
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/src/gpu.rs | crates/wgcore/src/gpu.rs | //! Utilities struct to initialize a gpu device.
use std::sync::Arc;
use wgpu::{Adapter, Backends, Device, Instance, InstanceDescriptor, Queue};
/// Helper struct to initialize a device and its queue.
pub struct GpuInstance {
_instance: Instance, // TODO: do we have to keep this around?
adapter: Adapter, // TODO: do we have to keep this around?
device: Arc<Device>,
queue: Queue,
}
impl GpuInstance {
pub async fn new() -> anyhow::Result<Self> {
Self::with_backends(Backends::all()).await
}
pub async fn without_gl() -> anyhow::Result<Self> {
Self::with_backends(Backends::all() & (!Backends::GL)).await
}
/// Initializes a wgpu instance and create its queue.
pub async fn with_backends(backends: Backends) -> anyhow::Result<Self> {
let instance_desc = InstanceDescriptor {
backends,
..Default::default()
};
let instance = wgpu::Instance::new(&instance_desc);
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::HighPerformance,
..Default::default()
})
.await
.map_err(|_| anyhow::anyhow!("Failed to initialize gpu adapter."))?;
let (device, queue) = adapter
.request_device(&wgpu::DeviceDescriptor {
label: None,
required_features: wgpu::Features::TIMESTAMP_QUERY,
required_limits: wgpu::Limits {
max_buffer_size: 600_000_000,
max_storage_buffer_binding_size: 600_000_000,
..Default::default()
},
memory_hints: Default::default(),
trace: wgpu::Trace::Off,
})
.await
.map_err(|e| anyhow::anyhow!("{:?}", e))?;
Ok(Self {
_instance: instance,
adapter,
device: Arc::new(device),
queue,
})
}
/// The `wgpu` adapter.
pub fn adapter(&self) -> &Adapter {
&self.adapter
}
/// The `wgpu` device.
pub fn device(&self) -> &Device {
&self.device
}
/// The shared `wgpu` device.
pub fn device_arc(&self) -> Arc<Device> {
self.device.clone()
}
/// The `wgpu` queue.
pub fn queue(&self) -> &Queue {
&self.queue
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/src/kernel.rs | crates/wgcore/src/kernel.rs | //! Utilities for queueing and dispatching kernels.
use crate::timestamps::GpuTimestamps;
use std::sync::Arc;
use wgpu::{Buffer, CommandEncoder, ComputePass, ComputePassDescriptor, ComputePipeline, Device};
pub trait CommandEncoderExt {
fn compute_pass<'encoder>(
&'encoder mut self,
label: &str,
timestamps: Option<&mut GpuTimestamps>,
) -> ComputePass<'encoder>;
}
impl CommandEncoderExt for CommandEncoder {
fn compute_pass<'encoder>(
&'encoder mut self,
label: &str,
timestamps: Option<&mut GpuTimestamps>,
) -> ComputePass<'encoder> {
let desc = ComputePassDescriptor {
label: Some(label),
timestamp_writes: timestamps.and_then(|ts| ts.next_compute_pass_timestamp_writes()),
};
self.begin_compute_pass(&desc)
}
}
/// Trait implemented for workgroup sizes in gpu kernel invocations.
///
/// The purpose of this trait is mainly to be able to pass both a single `u32` or an
/// array `[u32; 3]` as the workgorup size in [`KernelDispatch::dispatch`].
pub trait WorkgroupSize {
/// Converts `self` into the actual workgroup sizes passed to the kernel invocation.
fn into_workgroups_size(self) -> [u32; 3];
}
impl WorkgroupSize for u32 {
fn into_workgroups_size(self) -> [u32; 3] {
[self, 1, 1]
}
}
impl WorkgroupSize for [u32; 3] {
fn into_workgroups_size(self) -> [u32; 3] {
self
}
}
// TODO: remove the other KernelInvocation*.
pub struct KernelDispatch<'a, 'encoder> {
device: &'a Device,
pass: &'a mut ComputePass<'encoder>,
pipeline: &'a ComputePipeline,
queueable: bool,
}
impl<'a, 'encoder> KernelDispatch<'a, 'encoder> {
pub fn new(
device: &'a Device,
pass: &'a mut ComputePass<'encoder>,
pipeline: &'a ComputePipeline,
) -> Self {
pass.set_pipeline(pipeline);
Self {
device,
pass,
pipeline,
queueable: true,
}
}
pub fn pass(&mut self) -> &mut ComputePass<'encoder> {
self.pass
}
/// Binds `INPUTS` consecutive buffers to the bind group with id 0.
///
/// This method is less versatile than [`Self::bind`] and [`Self::bind_at`] but covers one
/// of the most common cases. This will bind `ipunts[i]` to the storage binding `i` of bind
/// group 0.
pub fn bind0<const INPUTS: usize>(self, inputs: [&Buffer; INPUTS]) -> Self {
self.bind(0, inputs)
}
/// Binds `INPUTS` consecutive buffers to the bind group with id `bind_group_id`.
///
/// This method is more versatile than [`Self::bind0`], but less than [`Self::bind_at`]. This
/// will bind `ipunts[i]` to the storage binding `i` of bind group `bind_group_id`.
pub fn bind<const INPUTS: usize>(self, bind_group_id: u32, inputs: [&Buffer; INPUTS]) -> Self {
let mut inputs = inputs.map(|b| (b, 0));
for (id, input) in inputs.iter_mut().enumerate() {
input.1 = id as u32;
}
self.bind_at(bind_group_id, inputs)
}
/// Binds `INPUTS` buffers with arbitrary storage binding ids, to the bind group with id
/// `bind_group_id`.
///
/// This method is more versatile than [`Self::bind0`], and [`Self::bind`]. This
/// will bind `inputs[i].0` to the storage binding `inputs[i].1` of bind group `bind_group_id`.
pub fn bind_at<const INPUTS: usize>(
mut self,
bind_group_id: u32,
inputs: [(&Buffer, u32); INPUTS],
) -> Self {
let entries = inputs.map(|(input, binding)| {
// TODO: 0 is not the only invalid binding size.
// See https://github.com/gfx-rs/wgpu/issues/253
if input.size() == 0 {
self.queueable = false;
}
wgpu::BindGroupEntry {
binding,
resource: input.as_entire_binding(),
}
});
if !self.queueable {
return self;
}
let bind_group_layout = self.pipeline.get_bind_group_layout(bind_group_id);
let bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &bind_group_layout,
entries: &entries,
});
self.pass.set_bind_group(bind_group_id, &bind_group, &[]);
self
}
/// Queues the kernel invocation into the compute pass that was given to
/// [`KernelDispatch::new`].
///
/// The invocation will be configured with the given `workgroups` size (typically specified as
/// a single `u32` or a `[u32; 3]`).
pub fn dispatch(self, workgroups: impl WorkgroupSize) {
let workgroup_size = workgroups.into_workgroups_size();
// NOTE: we don’t need to queue if the workgroup is empty.
if self.queueable && workgroup_size[0] * workgroup_size[1] * workgroup_size[2] > 0 {
self.pass
.dispatch_workgroups(workgroup_size[0], workgroup_size[1], workgroup_size[2]);
}
}
/// Queues the indirect kernel invocation into the compute pass that was given to
/// [`KernelDispatch::new`].
///
/// The invocation will be configured with an indirect `workgroups` size specified with a
/// `Buffer` that must contain exactly one instance of [`wgpu::util::DispatchIndirectArgs`].
pub fn dispatch_indirect(self, workgroups: &Buffer) {
if !self.queueable {
return;
}
self.pass.dispatch_workgroups_indirect(workgroups, 0);
}
}
/// Workgroup sizes for a direct or indirect dispatch.
pub enum Workgroups {
/// Workgroup size for direct dispatch. Each element must be non-zero.
Direct([u32; 3]),
/// Workgroup for indirect dispatch. Must be a buffer containing exactly one instance of
/// [`wgpu::util::DispatchIndirectArgs`].
Indirect(Arc<Buffer>),
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/src/tensor.rs | crates/wgcore/src/tensor.rs | //! Utilities for initializing and slicing tensors, matrices, vectors, and scalars gpu storage
//! buffers.
use crate::gpu::GpuInstance;
use crate::shapes::ViewShape;
use bytemuck::Pod;
use encase::internal::{CreateFrom, ReadFrom, WriteInto};
use encase::{ShaderSize, ShaderType, StorageBuffer};
use nalgebra::{Dim, IsContiguous, Matrix, Storage};
use std::marker::PhantomData;
use std::mem::size_of;
use wgpu::{
util::{BufferInitDescriptor, DeviceExt},
Buffer, BufferAddress, BufferDescriptor, BufferUsages, BufferView, CommandEncoder, Device,
};
#[derive(Copy, Clone)]
pub struct ColumnMajor;
#[derive(Copy, Clone)]
pub struct RowMajor;
pub trait MatrixOrdering: Copy + Clone {
fn is_row_major() -> bool;
fn is_column_major() -> bool {
!Self::is_row_major()
}
}
impl MatrixOrdering for ColumnMajor {
fn is_row_major() -> bool {
false
}
}
impl MatrixOrdering for RowMajor {
fn is_row_major() -> bool {
true
}
}
/// A storage buffer containing a single value.
pub type GpuScalar<T> = GpuTensor<T, 0>;
/// A storage buffer containing a vector.
pub type GpuVector<T> = GpuTensor<T, 1>;
/// A storage buffer containing a matrix.
pub type GpuMatrix<T> = GpuTensor<T, 2>;
/// A storage buffer containing a cube (order-3 tensor).
pub type GpuCube<T> = GpuTensor<T, 3>;
/// A view, over a storage buffer, containing a single value.
pub type GpuScalarView<'a, T, Ordering = ColumnMajor> = GpuTensorView<'a, T, Ordering, 0>;
/// A view, over a storage buffer, containing a vector.
pub type GpuVectorView<'a, T, Ordering = ColumnMajor> = GpuTensorView<'a, T, Ordering, 1>;
/// A view, over a storage buffer, containing a matrix.
pub type GpuMatrixView<'a, T, Ordering = ColumnMajor> = GpuTensorView<'a, T, Ordering, 2>;
/// A view, over a storage buffer, containing a cube (order-3 tensor).
pub type GpuCubeView<'a, T, Ordering = ColumnMajor> = GpuTensorView<'a, T, Ordering, 3>;
/// Helper struct for creating gpu storage buffers (scalars, vectors, matrices, tensors).
///
/// When building a scalar, vector, or matrix tensor, it might be more convenient to call
/// [`GpuScalar::init`], [`GpuVector::init`], [`GpuMatrix::init`] (or their `encase` variants:
/// [`GpuScalar::encase`], [`GpuVector::encase`], [`GpuMatrix::encase`]; or their uninitialized
/// variants [`GpuScalar::uninit`], [`GpuVector::uninit`], [`GpuMatrix::uninit`]).
pub struct TensorBuilder<const DIM: usize> {
shape: [u32; DIM],
usage: BufferUsages,
label: Option<String>,
}
impl TensorBuilder<0> {
/// Starts building a storage buffer containing a single scalar value.
pub fn scalar(usage: BufferUsages) -> Self {
Self::tensor([], usage)
}
}
impl TensorBuilder<1> {
/// Starts building a storage buffer containing a vector.
pub fn vector(dim: u32, usage: BufferUsages) -> Self {
Self::tensor([dim], usage)
}
}
impl TensorBuilder<2> {
/// Starts building a storage buffer containing a single matrix with `nrows` rows and
/// `ncols` columns.
pub fn matrix(nrows: u32, ncols: u32, usage: BufferUsages) -> Self {
Self::tensor([nrows, ncols], usage)
}
}
impl<const DIM: usize> TensorBuilder<DIM> {
/// Starts building a storage buffer containing a tensor with the specified `shape`.
pub fn tensor(shape: [u32; DIM], usage: BufferUsages) -> Self {
Self {
shape,
usage,
label: None,
}
}
/// The number of elements in this tensor.
fn len(&self) -> u64 {
self.shape.into_iter().map(|s| s as u64).product()
}
/// Sets the debug label of this tensor.
pub fn label(mut self, label: String) -> Self {
self.label = Some(label);
self
}
/// Builds the gpu tensor.
pub fn build<T: Pod>(self, device: &Device) -> GpuTensor<T, DIM> {
let bytes_len = std::mem::size_of::<T>() as u64 * self.len();
let buffer = device.create_buffer(&BufferDescriptor {
label: self.label.as_deref(),
size: bytes_len,
usage: self.usage,
mapped_at_creation: false,
});
GpuTensor {
shape: self.shape,
buffer,
phantom: PhantomData,
}
}
/// Builds the gpu tensor.
pub fn build_uninit_encased<T: ShaderType>(self, device: &Device) -> GpuTensor<T, DIM> {
let bytes_len = T::min_size().get() * self.len();
let buffer = device.create_buffer(&BufferDescriptor {
label: self.label.as_deref(),
size: bytes_len,
usage: self.usage,
mapped_at_creation: false,
});
GpuTensor {
shape: self.shape,
buffer,
phantom: PhantomData,
}
}
/// Builds this tensor with raw bytes given for its initial value.
pub fn build_bytes<T>(self, device: &Device, data: &[u8]) -> GpuTensor<T, DIM> {
let buffer = device.create_buffer_init(&BufferInitDescriptor {
label: self.label.as_deref(),
contents: bytemuck::cast_slice(data),
usage: self.usage,
});
GpuTensor {
shape: self.shape,
buffer,
phantom: PhantomData,
}
}
/// Builds this tensor with raw bytes given for its initial value.
pub fn build_encase<T>(self, device: &Device, data: impl AsRef<[T]>) -> GpuTensor<T, DIM>
where
T: ShaderType + ShaderSize + WriteInto,
{
let vector = data.as_ref();
let mut bytes = vec![]; // TODO: can we avoid the allocation?
let mut buffer = StorageBuffer::new(&mut bytes);
buffer.write(vector).unwrap();
self.build_bytes(device, &bytes)
}
/// Builds this tensor with an array of values given for its initial value.
pub fn build_init<T: Pod>(self, device: &Device, data: &[T]) -> GpuTensor<T, DIM> {
assert!(
data.len() as u64 >= self.len(),
"Incorrect number of elements provided for initializing Tensor.\
Expected at least {}, found {}",
self.len(),
data.len()
);
let len = self.len();
self.build_bytes::<T>(device, bytemuck::cast_slice(&data[..len as usize]))
}
}
/// A tensor stored in the GPU.
///
/// When the tensor is a matrix, they are generally seen as being column-major.
pub struct GpuTensor<T, const DIM: usize> {
shape: [u32; DIM],
buffer: Buffer,
phantom: PhantomData<T>,
}
impl<T, const DIM: usize> GpuTensor<T, DIM> {
/// Does this tensor contain zero elements?
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// The number of elements in this tensor.
pub fn len(&self) -> u64 {
self.shape.into_iter().map(|s| s as u64).product()
}
/// The size, in bytes, of this tensor’s content.
pub fn bytes_len(&self) -> u64
where
T: Pod,
{
std::mem::size_of::<T>() as u64 * self.len()
}
/// The size, in bytes, of this tensor’s content.
pub fn bytes_len_encased(&self) -> u64
where
T: ShaderType,
{
T::min_size().get() * self.len()
}
/// Queues a buffer-to-buffer copy from `source` to `self`.
///
/// Panics if the lengths do not match.
pub fn copy_from(&self, encoder: &mut CommandEncoder, source: &GpuTensor<T, DIM>)
where
T: Pod,
{
assert_eq!(self.len(), source.len());
encoder.copy_buffer_to_buffer(&source.buffer, 0, &self.buffer, 0, self.bytes_len())
}
pub fn copy_from_encased(&self, encoder: &mut CommandEncoder, source: &GpuTensor<T, DIM>)
where
T: ShaderType,
{
assert_eq!(self.len(), source.len());
encoder.copy_buffer_to_buffer(&source.buffer, 0, &self.buffer, 0, self.bytes_len_encased())
}
/// Queues a buffer-to-buffer copy from `source` to `self`.
pub fn copy_from_view<'a, Ordering>(
&self,
encoder: &mut CommandEncoder,
source: impl Into<GpuTensorView<'a, T, Ordering, DIM>>,
) where
T: Pod,
{
let source = source.into();
assert_eq!(
source.view_shape.size[0],
if DIM == 0 { 1 } else { self.shape[0] }
);
encoder.copy_buffer_to_buffer(
source.buffer,
source.view_shape.offset as BufferAddress * size_of::<T>() as BufferAddress,
&self.buffer,
0,
self.bytes_len(),
)
}
/// The tensor’s shape (typically `[num_rows, num_cols, ...]`).
pub fn shape(&self) -> [u32; DIM] {
self.shape
}
/// The tensor’s underlying wgpu buffer.
pub fn buffer(&self) -> &Buffer {
&self.buffer
}
/// Extracts the underlying buffer.
pub fn into_inner(self) -> Buffer {
self.buffer
}
/// Builds a tensor view sharing the same shape, stride, and buffer, as `self`.
pub fn as_view<Ordering: MatrixOrdering>(&self) -> GpuTensorView<'_, T, Ordering, DIM> {
self.into()
}
// TODO: not sure if there is an official name for this operation.
pub fn as_embedded_view<Ordering: MatrixOrdering, const DIM2: usize>(
&self,
) -> GpuTensorView<'_, T, Ordering, DIM2> {
assert!(
DIM2 >= DIM,
"Can only embed into a higher-order tensor view."
);
let mut embedded_shape = [1; DIM2];
embedded_shape[..DIM].copy_from_slice(&self.shape[..DIM]);
self.reshape(embedded_shape, None, None)
}
/// Reads the buffer’s content into a vector.
pub async fn read_bytes<'a>(&'a self, device: &'a Device) -> anyhow::Result<BufferView<'a>> {
// TODO: could probably be optimized?
let buffer_slice = self.buffer.slice(..);
#[cfg(not(target_arch = "wasm32"))]
{
let (sender, receiver) = async_channel::bounded(1);
buffer_slice.map_async(wgpu::MapMode::Read, move |v| {
sender.send_blocking(v).unwrap()
});
let _ = device.poll(wgpu::PollType::wait());
receiver.recv().await?.unwrap();
}
#[cfg(target_arch = "wasm32")]
{
let (sender, receiver) = async_channel::bounded(1);
buffer_slice.map_async(wgpu::MapMode::Read, move |v| {
let _ = sender.force_send(v).unwrap();
});
device.poll(wgpu::PollType::wait())?;
receiver.recv().await?.unwrap();
}
let data = buffer_slice.get_mapped_range();
Ok(data)
}
/// Reads the buffer’s content into a slice.
pub async fn read_to(&self, device: &Device, out: &mut [T]) -> anyhow::Result<()>
where
T: Pod,
{
let data = self.read_bytes(device).await?;
let result = bytemuck::try_cast_slice(&data)?;
out.copy_from_slice(result);
drop(data);
self.buffer.unmap();
Ok(())
}
pub async fn slow_read(&self, gpu: &GpuInstance) -> Vec<T>
where
T: Pod,
{
// Create staging buffer and copy into it.
let staging: GpuTensor<T, DIM> = TensorBuilder::tensor(
self.shape(),
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
)
.build(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
staging.copy_from(&mut encoder, self);
gpu.queue().submit(Some(encoder.finish()));
staging.read(gpu.device()).await.unwrap()
}
pub async fn slow_read_encased(&self, gpu: &GpuInstance) -> Vec<T>
where
T: ShaderType + ReadFrom + ShaderSize + CreateFrom,
{
// Create staging buffer and copy into it.
let staging: GpuTensor<T, DIM> = TensorBuilder::tensor(
self.shape(),
BufferUsages::MAP_READ | BufferUsages::COPY_DST,
)
.build_uninit_encased(gpu.device());
let mut encoder = gpu.device().create_command_encoder(&Default::default());
staging.copy_from_encased(&mut encoder, self);
gpu.queue().submit(Some(encoder.finish()));
staging.read_encased(gpu.device()).await.unwrap()
}
/// Reads the buffer’s content into a vector.
pub async fn read(&self, device: &Device) -> anyhow::Result<Vec<T>>
where
T: Pod,
{
let data = self.read_bytes(device).await?;
let result = bytemuck::try_cast_slice(&data)?.to_vec();
drop(data);
self.buffer.unmap();
Ok(result)
}
/// Reads the buffer’s content into a vector.
pub async fn read_encased(&self, device: &Device) -> anyhow::Result<Vec<T>>
where
T: ShaderType + ReadFrom + ShaderSize + CreateFrom,
{
let data = self.read_bytes(device).await?;
let mut result = vec![];
let bytes = data.as_ref();
let buffer = StorageBuffer::new(&bytes);
buffer.read(&mut result)?;
drop(data);
self.buffer.unmap();
Ok(result)
}
}
// TODO: add a compile-time constraint for DIM1 <= DIM2
impl<'a, T, Ordering: MatrixOrdering, const DIM1: usize, const DIM2: usize>
From<&'a GpuTensor<T, DIM1>> for GpuTensorView<'a, T, Ordering, DIM2>
{
fn from(val: &'a GpuTensor<T, DIM1>) -> Self {
val.as_embedded_view()
}
}
/// A view over a tensor.
///
/// This is typically useful to extract a single matrix or column from a tensor. Note that,
/// currently, two elements from the same rows are required to be consecutive (row stride = 1).
#[derive(Copy, Clone)]
pub struct GpuTensorView<'a, T, Ordering, const DIM: usize> {
view_shape: ViewShape,
buffer: &'a Buffer,
phantom: PhantomData<(T, Ordering)>,
}
impl<'a, T, Ordering, const DIM: usize> GpuTensorView<'a, T, Ordering, DIM> {
/// The view’s shape.
pub fn shape(&self) -> ViewShape {
self.view_shape
}
/// The view’s underlying buffer.
pub fn buffer(&self) -> &'a Buffer {
self.buffer
}
}
impl<T> GpuVectorView<'_, T> {
/// Is this view empty?
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// The number of elements in this vector view.
pub fn len(&self) -> u32 {
self.view_shape.size[0]
}
pub fn rows(&self, i: u32, nrows: u32) -> Self {
assert!(
i + nrows <= self.len(),
"Rows slice range out of bounds: {}..{}",
i,
i + nrows
);
GpuTensorView {
view_shape: ViewShape {
size: [nrows, 1, 1],
stride: self.view_shape.stride,
stride_mat: self.view_shape.stride_mat,
offset: self.view_shape.offset + i,
},
buffer: self.buffer,
phantom: PhantomData,
}
}
}
impl<'a, T, Ordering> GpuCubeView<'a, T, Ordering> {
pub fn matrix(&self, matrix_id: u32) -> GpuMatrixView<'a, T, Ordering> {
let [nrows, ncols, nmats] = self.view_shape.size;
assert!(matrix_id < nmats);
GpuTensorView {
view_shape: ViewShape {
size: [nrows, ncols, 1],
stride: self.view_shape.stride,
stride_mat: 1,
offset: self.view_shape.offset + self.view_shape.stride_mat * matrix_id,
},
buffer: self.buffer,
phantom: PhantomData,
}
}
}
impl<T, Ordering> GpuMatrixView<'_, T, Ordering> {
pub fn columns(&self, first_col: u32, ncols: u32) -> Self {
let nrows = self.view_shape.size[0];
GpuTensorView {
view_shape: ViewShape {
size: [nrows, ncols, 1],
stride: self.view_shape.stride,
stride_mat: self.view_shape.stride_mat,
offset: self.view_shape.offset + self.view_shape.stride * first_col,
},
buffer: self.buffer,
phantom: PhantomData,
}
}
pub fn rows(&self, first_row: u32, nrows: u32) -> Self {
let ncols = self.view_shape.size[1];
GpuTensorView {
view_shape: ViewShape {
size: [nrows, ncols, 1],
stride: self.view_shape.stride,
stride_mat: self.view_shape.stride_mat,
offset: self.view_shape.offset + first_row,
},
buffer: self.buffer,
phantom: PhantomData,
}
}
}
impl<T, const DIM: usize> GpuTensor<T, DIM> {
pub fn reshape<Ordering: MatrixOrdering, const DIM2: usize>(
&self,
shape: [u32; DIM2],
stride: Option<u32>,
stride_mat: Option<u32>,
) -> GpuTensorView<'_, T, Ordering, DIM2> {
assert!(shape.iter().product::<u32>() <= self.shape.iter().product::<u32>());
let mut size = [1; 3];
size[..DIM2].copy_from_slice(&shape[..DIM2]);
let default_stride = if Ordering::is_column_major() {
shape[0]
} else {
shape.get(1).copied().unwrap_or(1)
};
GpuTensorView {
view_shape: ViewShape {
size,
stride: stride.unwrap_or(default_stride),
stride_mat: stride_mat.unwrap_or(shape[0] * shape.get(1).copied().unwrap_or(1)),
offset: 0,
},
buffer: &self.buffer,
phantom: PhantomData,
}
}
}
impl<T> GpuMatrix<T> {
/// Allocates a new matrix on the gpu with uninitialized elements.
pub fn uninit(device: &Device, nrows: u32, ncols: u32, usage: BufferUsages) -> Self
where
T: Pod,
{
TensorBuilder::matrix(nrows, ncols, usage).build(device)
}
pub fn uninit_encased(device: &Device, nrows: u32, ncols: u32, usage: BufferUsages) -> Self
where
T: ShaderType,
{
TensorBuilder::matrix(nrows, ncols, usage).build_uninit_encased(device)
}
/// Allocates a new matrix on the gpu initialized from `matrix`.
pub fn init<R: Dim, C: Dim, S: Storage<T, R, C> + IsContiguous>(
device: &Device,
matrix: &Matrix<T, R, C, S>,
usage: BufferUsages,
) -> Self
where
T: Pod + nalgebra::Scalar,
{
TensorBuilder::matrix(matrix.nrows() as u32, matrix.ncols() as u32, usage)
.build_init(device, matrix.as_slice())
}
/// Takes a view over the `i`-th column of `self`.
pub fn column(&self, i: u32) -> GpuVectorView<'_, T> {
GpuTensorView {
view_shape: ViewShape {
size: [self.shape[0], 1, 1],
stride: 1,
stride_mat: 1,
offset: self.shape[0] * i,
},
buffer: &self.buffer,
phantom: PhantomData,
}
}
pub fn slice(&self, (i, j): (u32, u32), (nrows, ncols): (u32, u32)) -> GpuMatrixView<'_, T> {
GpuTensorView {
view_shape: ViewShape {
size: [nrows, ncols, 1],
stride: self.shape[0],
stride_mat: self.shape[0] * self.shape[1],
offset: i + j * nrows,
},
buffer: &self.buffer,
phantom: PhantomData,
}
}
pub fn columns(&self, first_col: u32, ncols: u32) -> GpuMatrixView<'_, T> {
let nrows = self.shape[0];
GpuTensorView {
view_shape: ViewShape {
size: [nrows, ncols, 1],
stride: nrows,
stride_mat: self.shape[0] * self.shape[1],
offset: first_col * nrows,
},
buffer: &self.buffer,
phantom: PhantomData,
}
}
pub fn rows(&self, first_row: u32, nrows: u32) -> GpuMatrixView<'_, T> {
let ncols = self.shape[1];
GpuTensorView {
view_shape: ViewShape {
size: [nrows, ncols, 1],
stride: self.shape[0],
stride_mat: self.shape[0] * self.shape[1],
offset: first_row,
},
buffer: &self.buffer,
phantom: PhantomData,
}
}
}
impl<T> GpuVector<T> {
/// Allocates a new vector on the gpu initialized from `vector`.
///
/// If `T` implements `Pod`, use [`GpuMatrix::init`] instead.
pub fn encase(device: &Device, vector: impl AsRef<[T]>, usage: BufferUsages) -> Self
where
T: ShaderType + ShaderSize + WriteInto,
{
let vector = vector.as_ref();
TensorBuilder::vector(vector.len() as u32, usage).build_encase(device, vector)
}
/// Allocates a new uninitialized vector on the gpu for `len` elements of type `T`.
pub fn uninit(device: &Device, len: u32, usage: BufferUsages) -> Self
where
T: Pod,
{
TensorBuilder::vector(len, usage).build(device)
}
/// Allocates a new uninitialized vector on the gpu for `len` elements of type `T`.
pub fn uninit_encased(device: &Device, len: u32, usage: BufferUsages) -> Self
where
T: ShaderType,
{
TensorBuilder::vector(len, usage).build_uninit_encased(device)
}
/// Allocates a new vector on the gpu initialized from `vector`.
///
/// If `T` does not implement `Pod`, use [`GpuMatrix::encase`] instead.
pub fn init(device: &Device, vector: impl AsRef<[T]>, usage: BufferUsages) -> Self
where
T: Pod,
{
let v = vector.as_ref();
TensorBuilder::vector(v.len() as u32, usage).build_init(device, v.as_ref())
}
/// Takes a view, over this vector, with `num_rows` rows starting at row `first_row`.
pub fn rows(&self, first_row: u32, num_rows: u32) -> GpuVectorView<'_, T> {
GpuTensorView {
view_shape: ViewShape {
size: [num_rows, 1, 1],
stride: self.shape[0],
stride_mat: self.shape[0],
offset: first_row,
},
buffer: &self.buffer,
phantom: PhantomData,
}
}
}
impl<T> GpuScalar<T> {
/// Allocates a new gpu storage buffer with a single uninitialized element.
pub fn uninit(device: &Device, usage: BufferUsages) -> Self
where
T: Pod,
{
TensorBuilder::scalar(usage).build(device)
}
pub fn uninit_encased(device: &Device, usage: BufferUsages) -> Self
where
T: ShaderType,
{
TensorBuilder::scalar(usage).build_uninit_encased(device)
}
/// Allocates a new gpu storage buffer with a single element initialized to `value`.
pub fn init(device: &Device, value: T, usage: BufferUsages) -> Self
where
T: Pod,
{
TensorBuilder::scalar(usage).build_init(device, &[value])
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/src/indirect.rs | crates/wgcore/src/indirect.rs | #[derive(Copy, Clone, Debug, PartialEq, Eq, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
pub struct DispatchIndirectArgs {
pub x: u32,
pub y: u32,
pub z: u32,
}
#[cfg(feature = "derive")]
use crate::Shader;
#[cfg(feature = "derive")]
#[derive(Shader)]
#[shader(src = "indirect.wgsl", krate = "crate")]
pub struct WgIndirect;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/src/utils.rs | crates/wgcore/src/utils.rs | //! Utilities for creating a ComputePipeline from source or from a naga module.
use wgpu::naga::Module;
use wgpu::{
ComputePipeline, ComputePipelineDescriptor, Device, PipelineCompilationOptions,
ShaderRuntimeChecks,
};
/// Creates a compute pipeline from the shader sources `content` and the name of its `entry_point`.
pub fn load_shader(device: &Device, entry_point: &str, content: &str) -> ComputePipeline {
let shader = unsafe {
device.create_shader_module_trusted(
wgpu::ShaderModuleDescriptor {
label: None,
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(content)),
},
ShaderRuntimeChecks::unchecked(),
)
};
device.create_compute_pipeline(&ComputePipelineDescriptor {
label: Some(entry_point),
layout: None,
module: &shader,
entry_point: Some(entry_point),
compilation_options: Default::default(),
cache: None,
})
}
/// Creates a compute pipeline from the shader `module` and the name of its `entry_point`.
pub fn load_module(device: &Device, entry_point: &str, module: Module) -> ComputePipeline {
let shader = unsafe {
device.create_shader_module_trusted(
wgpu::ShaderModuleDescriptor {
label: None,
source: wgpu::ShaderSource::Naga(std::borrow::Cow::Owned(module)),
},
ShaderRuntimeChecks::unchecked(),
)
};
device.create_compute_pipeline(&ComputePipelineDescriptor {
label: Some(entry_point),
layout: None,
module: &shader,
entry_point: Some(entry_point),
compilation_options: PipelineCompilationOptions {
zero_initialize_workgroup_memory: false,
..Default::default()
},
cache: None,
})
}
/// Convents a naga module to its WGSL string representation.
pub fn naga_module_to_wgsl(module: &Module) -> String {
use wgpu::naga;
let mut validator =
naga::valid::Validator::new(naga::valid::ValidationFlags::all(), Default::default());
let info = validator.validate(module).unwrap();
naga::back::wgsl::write_string(module, &info, naga::back::wgsl::WriterFlags::EXPLICIT_TYPES)
.unwrap()
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/src/composer.rs | crates/wgcore/src/composer.rs | //! Extensions over naga-oil’s Composer.
use naga_oil::compose::{
ComposableModuleDefinition, ComposableModuleDescriptor, Composer, ComposerError,
};
/// An extension trait for the naga-oil `Composer` to work around some of its limitations.
pub trait ComposerExt {
/// Adds a composable module to `self` only if it hasn’t been added yet.
///
/// Currently, `naga-oil` behaves strangely (some symbols stop resolving) if the same module is
/// added twice. This function checks if the module has already been added. If it was already
/// added, then `self` is left unchanged and `Ok(None)` is returned.
fn add_composable_module_once(
&mut self,
desc: ComposableModuleDescriptor<'_>,
) -> Result<Option<&ComposableModuleDefinition>, ComposerError>;
}
impl ComposerExt for Composer {
fn add_composable_module_once(
&mut self,
desc: ComposableModuleDescriptor<'_>,
) -> Result<Option<&ComposableModuleDefinition>, ComposerError> {
// NOTE: extract the module name manually for avoiding duplicate. This is **much** faster
// than retrieving the name through `Preprocessor::get_preprocessor_metadata`.
let module_name = desc
.source
.lines()
.find(|line| line.contains("define_import_path"))
.map(|line| {
line.replace("#define_import_path", "")
.replace(";", "")
.trim()
.to_string()
});
if let Some(name) = &module_name {
if self.contains_module(name) {
// Module already exists, don’t insert it.
return Ok(None);
}
}
self.add_composable_module(desc).map(Some)
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/src/shapes.rs | crates/wgcore/src/shapes.rs | //! Tensor shape definition.
use crate::tensor::MatrixOrdering;
use dashmap::DashMap;
use std::sync::{Arc, Mutex};
use wgpu::util::{BufferInitDescriptor, DeviceExt};
use wgpu::{Buffer, BufferUsages, Device, Queue};
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
/// The shape of a matrix view over a GPU tensor.
pub struct ViewShape {
/// The tensor view’s number of rows, columns, and matrices.
pub size: [u32; 3],
/// The view’s column stride (number of elements between two columns).
pub stride: u32,
/// The view’s matrix stride (number of elements between two matrices in the tensor).
pub stride_mat: u32,
/// Index of the first element of the view on the underlying buffer.
pub offset: u32,
}
impl ViewShape {
/// Converts the shape `self` for a buffer `&[f32]` to a buffer `&[vec4f]`.
pub fn f32_to_vec4<Ordering: MatrixOrdering>(self) -> Self {
let size = if Ordering::is_column_major() {
[self.size[0] / 4, self.size[1], self.size[2]]
} else {
[self.size[0], self.size[1] / 4, self.size[2]]
};
Self {
size,
stride: self.stride / 4,
stride_mat: self.stride_mat / 4,
offset: self.offset / 4,
}
}
}
/// A map between a `ViewShape` and an uniform storage `Buffer` containing its value on the gpu.
///
/// Ideally, we should use push-constants for view shapes. Unfortunately, push-constants is an
/// optional extension, so we have to emulate them with uniforms for maximum portability.
#[derive(Default)]
pub struct ViewShapeBuffers {
// TODO: once we switch to wgpu 14, we can store a `Buffer` directly instead of
// `Arc<Buffer>` (they will be clonable), and we can also store the `Device`
// here to simplify `self.get` and the kernel dispatch apis.
buffers: DashMap<ViewShape, Arc<Buffer>>,
tmp_buffers: DashMap<ViewShape, Arc<Buffer>>,
recycled: Mutex<Vec<Arc<Buffer>>>,
}
impl ViewShapeBuffers {
/// Creates an empty map.
pub fn new() -> Self {
Self {
buffers: DashMap::new(),
tmp_buffers: DashMap::new(),
recycled: Mutex::new(vec![]),
}
}
pub fn clear_tmp(&self) {
let mut recycled = self.recycled.lock().unwrap();
self.tmp_buffers.retain(|_, buffer| {
recycled.push(buffer.clone());
false
})
}
pub fn put_tmp(&self, device: &Device, queue: &Queue, shape: ViewShape) {
if self.contains(shape) {
return;
}
let mut recycled = self.recycled.lock().unwrap();
let buffer = if let Some(buffer) = recycled.pop() {
queue.write_buffer(&buffer, 0, bytemuck::cast_slice(&[shape]));
buffer
} else {
drop(recycled);
Self::make_buffer(
device,
shape,
BufferUsages::UNIFORM | BufferUsages::COPY_DST,
)
};
self.tmp_buffers.insert(shape, buffer);
}
fn make_buffer(device: &Device, shape: ViewShape, usage: BufferUsages) -> Arc<Buffer> {
Arc::new(device.create_buffer_init(&BufferInitDescriptor {
label: None,
contents: bytemuck::cast_slice(&[shape]),
usage,
}))
}
pub fn contains(&self, shape: ViewShape) -> bool {
self.buffers.contains_key(&shape) || self.tmp_buffers.contains_key(&shape)
}
/// Gets of insert the gpu uniform storage `Buffer` containing the value of `shape`.
pub fn get(&self, device: &Device, shape: ViewShape) -> Arc<Buffer> {
if let Some(buffer) = self.tmp_buffers.get(&shape) {
return buffer.value().clone();
}
self.buffers
.entry(shape)
.or_insert_with(|| Self::make_buffer(device, shape, BufferUsages::UNIFORM))
.clone()
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/src/shader.rs | crates/wgcore/src/shader.rs | //! Trait for reusable gpu shaders.
use crate::hot_reloading::HotReloadState;
use dashmap::DashMap;
use naga_oil::compose::{Composer, ComposerError};
use std::any::TypeId;
use std::path::{Path, PathBuf};
use std::sync::OnceLock;
use wgpu::naga::Module;
use wgpu::{Device, Label, ShaderModule, ShaderRuntimeChecks};
/// The global shader registry used by various auto-implemented method of `Shader` for loading the
/// shader.
///
/// To access the global shader registry, call [`ShaderRegistry::get`].
/// Whenever a shader source is needed (e.g. as a dependency of another, for instantiating one
/// of its kernel, for hot-reloading), the path registered in this map will take precedence in the
/// automatically-generated implementation of [`Shader::wgsl_path`]. If no path is provided
/// by this registry, the absolute path detected automatically by the `derive(Shader)` will be
/// applied. If neither exist, the shader loading code will default to the shader sources that
/// were embedded at the time of compilation of the module.
#[derive(Debug, Default, Clone)]
pub struct ShaderRegistry {
paths: DashMap<TypeId, PathBuf>,
}
impl ShaderRegistry {
/// Gets the global shader registry used by various auto-implemented method of `Shader` for loading the
/// shader.
pub fn get() -> &'static ShaderRegistry {
static SHADER_REGISTRY: OnceLock<ShaderRegistry> = OnceLock::new();
SHADER_REGISTRY.get_or_init(ShaderRegistry::default)
}
/// Registers the path for the given shader.
///
/// Whenever the shader sources is needed as a dependency or as a kernel, it will be loaded
/// from disk from this file path. This overwrites any previously registered path.
pub fn set_path<T: Shader>(&self, path: PathBuf) {
self.paths.insert(TypeId::of::<T>(), path);
}
/// Gets the registered path, if any, for the shader `T`.
pub fn get_path<T: Shader>(&self) -> Option<PathBuf> {
self.paths.get(&TypeId::of::<T>()).map(|p| p.clone())
}
/// Unregisters the path for the given shader.
pub fn remove_path<T: Shader>(&self) {
self.paths.remove(&TypeId::of::<T>());
}
}
/// A composable gpu shader (with or without associated compute pipelines).
///
/// This trait serves as the basis for the shader compatibility feature of `wgcore`. If the
/// implementor of this trait is a struct and has no fields of type other than `ComputePipeline`,
/// thin this trait can be automatically derive using the `Shader` proc-macro:
///
/// ```ignore
/// #[derive(Shader)]
/// #[shader(src = "compose_dependency.wgsl")]
/// struct ComposableShader;
/// ```
pub trait Shader: Sized + 'static {
/// Path of the shader’s `.wgsl` file.
const FILE_PATH: &'static str;
/// Instantiates this `Shader` from a gpu `device`.
///
/// This is generally used to instantiate all the `ComputeShader` fields of `self`.
fn from_device(device: &wgpu::Device) -> Result<Self, ComposerError>;
/// This shader’s sources (before dependency and macro resolution).
fn src() -> String;
/// This shader’s WGSL sources as a single file (after dependency and macro resolution).
fn flat_wgsl() -> Result<String, ComposerError> {
let module = Self::naga_module()?;
Ok(crate::utils::naga_module_to_wgsl(&module))
}
/// The naga [`Module`] built from this shader.
fn naga_module() -> Result<Module, ComposerError>;
/// The [`ShaderModule`] built from this shader.
fn shader_module(device: &wgpu::Device, label: Label) -> Result<ShaderModule, ComposerError> {
Ok(unsafe {
device.create_shader_module_trusted(
wgpu::ShaderModuleDescriptor {
label,
source: wgpu::ShaderSource::Naga(std::borrow::Cow::Owned(Self::naga_module()?)),
},
ShaderRuntimeChecks::unchecked(),
)
})
}
/// Add to `composer` the composable module definition of `Self` (if there are any) and all its
/// shader dependencies .
fn compose(composer: &mut Composer) -> Result<(), ComposerError>;
/// A composer filled with the module definition of `Self` (if there is any) and all its
/// shader dependencies.
fn composer() -> Result<Composer, ComposerError> {
let mut composer = Composer::default();
Self::compose(&mut composer)?;
Ok(composer)
}
/// The path of this wgsl shader source file.
///
/// This returns the path from the global [`ShaderRegistry`] if it was set. Otherwise, this returns
/// the path automatically-computed by the `derive(Shader)`. If that failed too, returns `None`.
fn wgsl_path() -> Option<PathBuf>;
/// Registers in the global [`ShaderRegistry`] known path for this shader.
///
/// Any function form `Self` relying on the shader’s path, including hot-reloading,
/// will rely on this path. Note that calling [`Self::watch_sources`] is necessary for
/// hot-reloading to automatically detect changes at the new path.
fn set_wgsl_path<P: AsRef<Path>>(path: P) {
ShaderRegistry::get()
.paths
.insert(TypeId::of::<Self>(), path.as_ref().to_path_buf());
}
/// Registers all the source files, for `Self` and all its shader dependencies, for change
/// detection.
fn watch_sources(state: &mut HotReloadState) -> notify::Result<()>;
/// Checks if this shader (or any of its dependencies) need to be reloaded due to a change
/// from disk.
fn needs_reload(state: &HotReloadState) -> bool;
/// Reloads this shader if it on any of its dependencies have been changed from disk.
fn reload_if_changed(
&mut self,
device: &Device,
state: &HotReloadState,
) -> Result<bool, ComposerError> {
if Self::needs_reload(state) {
*self = Self::from_device(device)?;
Ok(true)
} else {
Ok(false)
}
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/src/timestamps.rs | crates/wgcore/src/timestamps.rs | //! A convenient wrapper for handling gpu timestamps.
//!
//! Note that this is strongly inspired from wgpu’s timestamp queries example:
//! <https://github.com/gfx-rs/wgpu/blob/trunk/examples/src/timestamp_queries/mod.rs>
use wgpu::{BufferAsyncError, ComputePass, ComputePassTimestampWrites, Device, QuerySet, Queue};
/// A set of gpu timestamps, generally useful to determine shader execution times.
pub struct GpuTimestamps {
set: wgpu::QuerySet,
resolve_buffer: wgpu::Buffer,
destination_buffer: wgpu::Buffer,
capacity: u32,
len: u32,
}
impl GpuTimestamps {
/// Creates a set of gpu timestamps that has room for at most `capacity` timestamps.
pub fn new(device: &wgpu::Device, capacity: u32) -> Self {
GpuTimestamps {
set: device.create_query_set(&wgpu::QuerySetDescriptor {
label: Some("timestamps queries"),
count: capacity,
ty: wgpu::QueryType::Timestamp,
}),
resolve_buffer: device.create_buffer(&wgpu::BufferDescriptor {
label: Some("timestamps resolve buffer"),
size: std::mem::size_of::<u64>() as u64 * capacity as u64,
usage: wgpu::BufferUsages::COPY_SRC | wgpu::BufferUsages::QUERY_RESOLVE,
mapped_at_creation: false,
}),
destination_buffer: device.create_buffer(&wgpu::BufferDescriptor {
label: Some("timestamps dest buffer"),
size: std::mem::size_of::<u64>() as u64 * capacity as u64,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
}),
capacity,
len: 0,
}
}
/// Returns `true` if no timestamps was registered in this set.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// How many timestamps are registered in this set.
pub fn len(&self) -> usize {
self.len as usize
}
/// The underlying wgpu `QuerySet`.
pub fn query_set(&self) -> &QuerySet {
&self.set
}
/// Reserves the next two timestamp slots from this set and returns the corresponding
/// `ComputePassTimestampWrites` descriptor to be given to a compute pass creation to measure
/// its execution times.
///
/// Returns `None` if there is no room form two additional timestamps in `self`.
pub fn next_compute_pass_timestamp_writes(&mut self) -> Option<ComputePassTimestampWrites<'_>> {
let ids = self.next_query_indices::<2>()?;
Some(wgpu::ComputePassTimestampWrites {
query_set: &self.set,
beginning_of_pass_write_index: Some(ids[0]),
end_of_pass_write_index: Some(ids[1]),
})
}
/// Allocate a single timestamp into this set and return its index.
///
/// Returns `None` if adding one timestamp would exceed this set’s capacity.
pub fn next_query_index(&mut self) -> Option<u32> {
self.next_query_indices::<1>().map(|idx| idx[0])
}
/// Allocate `COUNT` timestamp to this set and return their indices.
///
/// Returns `None` if adding `COUNT` timestamp would exceed this set’s capacity.
pub fn next_query_indices<const COUNT: usize>(&mut self) -> Option<[u32; COUNT]> {
if COUNT == 0 {
return Some([0; COUNT]);
}
if self.len + (COUNT as u32) - 1 < self.capacity {
Some([0; COUNT].map(|_| {
self.len += 1;
self.len - 1
}))
} else {
None
}
}
/// Allocate a single timestamp into this set, and write it into the given `compute_pass`
/// with [`ComputePass::write_timestamp`].
pub fn write_next_timestamp(&mut self, compute_pass: &mut ComputePass) -> Option<u32> {
let id = self.next_query_index()?;
compute_pass.write_timestamp(&self.set, id);
Some(id)
}
/// Writes the timestamp identified by `query_index` into the `compute_pass` using
/// [`ComputePass::write_timestamp`]. It is assumed that the `query_index` has already
/// been allocated into this set, such that `query_index < self.len()`.
pub fn write_timestamp_at(&mut self, compute_pass: &mut ComputePass, query_index: u32) -> bool {
if query_index < self.capacity {
compute_pass.write_timestamp(&self.set, query_index);
true
} else {
false
}
}
/// Appends to the `encoder` commands to resolve the underlying query set and to retrieve the
/// timestamp information from the gpu.
pub fn resolve(&self, encoder: &mut wgpu::CommandEncoder) {
encoder.resolve_query_set(
&self.set,
// TODO(https://github.com/gfx-rs/wgpu/issues/3993): Musn't be larger than the number valid queries in the set.
0..self.len,
&self.resolve_buffer,
0,
);
encoder.copy_buffer_to_buffer(
&self.resolve_buffer,
0,
&self.destination_buffer,
0,
self.resolve_buffer.size(),
);
}
/// Wait for the timestamps to be readable as a CPU/RAM buffer and return their raw (integer)
/// values.
///
/// Because this method is async, it is more suitable than `GpuTimestamps::wait_for_results`
/// to be called from an async context, or when targeting web platforms.
///
/// Note that the result is given as a vector or raw integer timestamps. To convert them
/// into actual time measurements they need be multiplied by `Queue::get_timestamp_period`. See
/// [`GpuTimestamps::wait_for_results_ms_async`] for a method that applies that multiplication
/// automatically.
pub async fn wait_for_results_async(
&self,
_device: &Device,
) -> Result<Vec<u64>, BufferAsyncError> {
let (snd, rcv) = async_channel::bounded(1);
self.destination_buffer
.slice(..)
.map_async(wgpu::MapMode::Read, move |r| {
#[cfg(not(target_arch = "wasm32"))]
{
snd.send_blocking(r).unwrap();
}
#[cfg(target_arch = "wasm32")]
{
let _ = snd.force_send(r).unwrap();
}
});
#[cfg(not(target_arch = "wasm32"))]
let _ = _device.poll(wgpu::PollType::wait());
rcv.recv().await.unwrap()?;
let timestamps = {
let timestamp_view = self
.destination_buffer
.slice(
..(std::mem::size_of::<u64>() as wgpu::BufferAddress
* self.capacity as wgpu::BufferAddress),
)
.get_mapped_range();
bytemuck::cast_slice(×tamp_view).to_vec()
};
self.destination_buffer.unmap();
Ok(timestamps)
}
/// Wait for the timestamps to be readable as a CPU/RAM buffer and return their values in
/// milliseconds.
///
/// Because this method is async, it is more suitable than `GpuTimestamps::wait_for_results`
/// to be called from an async context, or when targeting web platforms.
pub async fn wait_for_results_ms_async(
&self,
queue: &Queue,
device: &Device,
) -> Result<Vec<f64>, BufferAsyncError> {
let timestamps = self.wait_for_results_async(device).await?;
let period = queue.get_timestamp_period();
Ok(Self::timestamps_to_ms(×tamps, period))
}
/// The blocking counterpart of [`GpuTimestamps::wait_for_results_async`].
///
/// This is not compatible with web platforms.
pub fn wait_for_results(&self, device: &wgpu::Device) -> Vec<u64> {
self.destination_buffer
.slice(..)
.map_async(wgpu::MapMode::Read, |_| ());
let _ = device.poll(wgpu::PollType::wait());
let timestamps = {
let timestamp_view = self
.destination_buffer
.slice(
..(std::mem::size_of::<u64>() as wgpu::BufferAddress
* self.capacity as wgpu::BufferAddress),
)
.get_mapped_range();
bytemuck::cast_slice(×tamp_view).to_vec()
};
self.destination_buffer.unmap();
timestamps
}
/// The blocking counterpart of [`GpuTimestamps::wait_for_results_ms`].
///
/// This is not compatible with web platforms.
pub fn wait_for_results_ms(&self, device: &Device, queue: &Queue) -> Vec<f64> {
let timestamps = self.wait_for_results(device);
let period = queue.get_timestamp_period();
Self::timestamps_to_ms(×tamps, period)
}
/// Converts a set of raw timestamps into milliseconds.
///
/// The `timestamp_period` should be the result of a call to [`Queue::get_timestamp_period`].
pub fn timestamps_to_ms(timestamps: &[u64], timestamp_period: f32) -> Vec<f64> {
timestamps
.iter()
.map(|t| *t as f64 * timestamp_period as f64 / 1_000_000.0)
.collect()
}
/// Clears this set of timestamp.
///
/// This sets the logical length to zero but the capacity/gpu buffer sizes are not modified.
pub fn clear(&mut self) {
self.len = 0;
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/examples/hot_reloading.rs | crates/wgcore/examples/hot_reloading.rs | #[cfg(not(feature = "derive"))]
std::compile_error!(
r#"
###############################################################
## The `derive` feature must be enabled to run this example. ##
###############################################################
"#
);
use wgcore::gpu::GpuInstance;
use wgcore::hot_reloading::HotReloadState;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuScalar;
use wgcore::Shader;
use wgpu::{BufferUsages, ComputePipeline};
#[derive(Shader)]
#[shader(src = "hot_reloading.wgsl", composable = false)]
struct ShaderHotReloading {
main: ComputePipeline,
}
#[async_std::main]
async fn main() -> anyhow::Result<()> {
// Initialize the gpu device and its queue.
//
// Note that `GpuInstance` is just a simple helper struct for initializing the gpu resources.
// You are free to initialize them independently if more control is needed, or reuse the ones
// that were already created/owned by e.g., a game engine.
let gpu = GpuInstance::new().await?;
// Load and compile our kernel. The `from_device` function was generated by the `Shader` derive.
// Note that its dependency to `Composable` is automatically resolved by the `Shader` derive
// too.
let mut kernel = ShaderHotReloading::from_device(gpu.device())?;
// Create the buffers.
let buffer = GpuScalar::init(
gpu.device(),
0u32,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let staging = GpuScalar::init(
gpu.device(),
0u32,
BufferUsages::COPY_DST | BufferUsages::MAP_READ,
);
// Init hot-reloading.
let mut hot_reload = HotReloadState::new()?;
ShaderHotReloading::watch_sources(&mut hot_reload)?;
// Queue the operation.
println!("#############################");
println!("Edit the file `hot_reloading.wgsl`.\nThe updated result will be printed below whenever a change is detected.");
println!("#############################");
for loop_id in 0.. {
// Detect & apply changes.
hot_reload.update_changes();
match kernel.reload_if_changed(gpu.device(), &hot_reload) {
Ok(changed) => {
if changed || loop_id == 0 {
// We detected a change (or this is the first loop).
// Encode & submit the operation to the gpu.
let mut encoder = gpu.device().create_command_encoder(&Default::default());
// Run our kernel.
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &kernel.main)
.bind0([buffer.buffer()])
.dispatch(1);
drop(pass);
// Copy the result to the staging buffer.
staging.copy_from(&mut encoder, &buffer);
gpu.queue().submit(Some(encoder.finish()));
let result_read = staging.read(gpu.device()).await.unwrap();
println!("Current result value: {}", result_read[0]);
}
}
Err(e) => {
// Hot-reloading failed, likely due to a syntax error in the shader.
println!("Hot reloading error: {:?}", e);
}
}
}
Ok(())
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/examples/timestamp_queries.rs | crates/wgcore/examples/timestamp_queries.rs | #[cfg(not(feature = "derive"))]
std::compile_error!(
r#"
###############################################################
## The `derive` feature must be enabled to run this example. ##
###############################################################
"#
);
use wgcore::gpu::GpuInstance;
use wgcore::hot_reloading::HotReloadState;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgcore::timestamps::GpuTimestamps;
use wgcore::Shader;
use wgpu::{BufferUsages, ComputePipeline};
#[derive(Shader)]
#[shader(src = "timestamp_queries.wgsl", composable = false)]
struct ShaderTimestampQueries {
main: ComputePipeline,
}
#[async_std::main]
async fn main() -> anyhow::Result<()> {
// Initialize the gpu device and its queue.
//
// Note that `GpuInstance` is just a simple helper struct for initializing the gpu resources.
// You are free to initialize them independently if more control is needed, or reuse the ones
// that were already created/owned by e.g., a game engine.
let gpu = GpuInstance::new().await?;
// Load and compile our kernel. The `from_device` function was generated by the `Shader` derive.
// Note that its dependency to `Composable` is automatically resolved by the `Shader` derive
// too.
let mut kernel = ShaderTimestampQueries::from_device(gpu.device())?;
// Create the buffers.
const LEN: u32 = 2_000_000;
let buffer = GpuVector::init(
gpu.device(),
vec![0u32; LEN as usize],
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
// Init hot-reloading.
// We are setting up hot-reloading so that we can change somme elements in the shader
// (like the iteration count) and see how that affects performances live.
let mut hot_reload = HotReloadState::new()?;
ShaderTimestampQueries::watch_sources(&mut hot_reload)?;
// Init timestamp queries.
// To measure the time of one kernel, we need two timestamps (one for when it starts and one for
// when it stopped).
let mut timestamps = GpuTimestamps::new(gpu.device(), 2);
// Queue the operation.
println!("#############################");
println!("Edit the file `timestamp_queries.wgsl` (for example by multiplying or dividing NUM_ITERS by 10).\nThe updated runtime will be printed below whenever a change is detected.");
println!("#############################");
for _loop_id in 0.. {
// Detect & apply changes.
hot_reload.update_changes();
match kernel.reload_if_changed(gpu.device(), &hot_reload) {
Ok(changed) => {
if changed {
// Clear the timestamps to reuse in the next loop.
timestamps.clear();
// We detected a change (or this is the first loop).
// Encode & submit the operation to the gpu.
let mut encoder = gpu.device().create_command_encoder(&Default::default());
// Declare a compute pass with timestamps enabled.
let mut pass =
encoder.compute_pass("timestamp_queries_test", Some(&mut timestamps));
// Dispatch our kernel.
KernelDispatch::new(gpu.device(), &mut pass, &kernel.main)
.bind0([buffer.buffer()])
.dispatch(LEN.div_ceil(64));
drop(pass);
// Resolve the timestamp queries.
timestamps.resolve(&mut encoder);
gpu.queue().submit(Some(encoder.finish()));
// Read and print the kernel’s runtime.
let timestamps_read = timestamps.wait_for_results_ms(gpu.device(), gpu.queue());
println!(
"Current run time: {}ms",
timestamps_read[1] - timestamps_read[0]
);
}
}
Err(e) => {
// Hot-reloading failed, likely due to a syntax error in the shader.
println!("Hot reloading error: {:?}", e);
}
}
}
Ok(())
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/examples/overwrite.rs | crates/wgcore/examples/overwrite.rs | #[cfg(not(feature = "derive"))]
std::compile_error!(
r#"
###############################################################
## The `derive` feature must be enabled to run this example. ##
###############################################################
"#
);
use nalgebra::DVector;
use std::fmt::Debug;
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgcore::Shader;
use wgpu::{BufferUsages, ComputePipeline};
// Declare our shader module that contains our composable functions.
// Note that we don’t build any compute pipeline from this wgsl file.
#[derive(Shader)]
#[shader(
src = "compose_dependency.wgsl" // Shader source code, will be embedded in the exe with `include_str!`
)]
struct Composable;
#[derive(Shader)]
#[shader(
derive(Composable), // This shader depends on the `Composable` shader.
src = "compose_kernel.wgsl", // Shader source code, will be embedded in the exe with `include_str!`.
composable = false // This shader doesn’t export any symbols reusable from other wgsl shaders.
)]
struct WgKernel {
// This ComputePipeline field indicates that the Shader macro needs to generate the boilerplate
// for loading the compute pipeline in `WgKernel::from_device`.
main: ComputePipeline,
}
#[derive(Copy, Clone, PartialEq, Default, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
pub struct MyStruct {
value: f32,
}
// Optional: makes the debug output more concise.
impl Debug for MyStruct {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.value)
}
}
#[async_std::main]
async fn main() -> anyhow::Result<()> {
// Initialize the gpu device and its queue.
//
// Note that `GpuInstance` is just a simple helper struct for initializing the gpu resources.
// You are free to initialize them independently if more control is needed, or reuse the ones
// that were already created/owned by e.g., a game engine.
let gpu = GpuInstance::new().await?;
// Load and compile our kernel. The `from_device` function was generated by the `Shader` derive.
// Note that its dependency to `Composable` is automatically resolved by the `Shader` derive
// too.
let kernel_before_overwrite = WgKernel::from_device(gpu.device())?;
// Run the original shader.
let result_before_overwrite = run_kernel(&gpu, &kernel_before_overwrite).await;
// Overwrite the sources of the dependency module.
// Since we are running this with `cargo run --example`, the path is relative to the
// `target/debug` folder.
Composable::set_wgsl_path("../../crates/wgcore/examples/overwritten_dependency.wgsl");
// Recompile our kernel.
let kernel_after_overwrite = WgKernel::from_device(gpu.device())?;
// Run the modified kernel.
let result_after_overwrite = run_kernel(&gpu, &kernel_after_overwrite).await;
println!("Result before overwrite: {:?}", result_before_overwrite);
println!("Result after overwrite: {:?}", result_after_overwrite);
Ok(())
}
async fn run_kernel(gpu: &GpuInstance, kernel: &WgKernel) -> Vec<MyStruct> {
// Create the buffers.
const LEN: u32 = 10;
let a_data = DVector::from_fn(LEN as usize, |i, _| MyStruct { value: i as f32 });
let b_data = DVector::from_fn(LEN as usize, |i, _| MyStruct {
value: i as f32 * 10.0,
});
let a_buf = GpuVector::init(
gpu.device(),
&a_data,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let b_buf = GpuVector::init(gpu.device(), &b_data, BufferUsages::STORAGE);
let staging = GpuVector::uninit(
gpu.device(),
LEN,
BufferUsages::COPY_DST | BufferUsages::MAP_READ,
);
// Encode & submit the operation to the gpu.
let mut encoder = gpu.device().create_command_encoder(&Default::default());
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &kernel.main)
.bind0([a_buf.buffer(), b_buf.buffer()])
.dispatch(LEN.div_ceil(64));
drop(pass);
// Copy the result to the staging buffer.
staging.copy_from(&mut encoder, &a_buf);
gpu.queue().submit(Some(encoder.finish()));
// Read the result back from the gpu.
staging
.read(gpu.device())
.await
.expect("Failed to read result from the GPU.")
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/examples/compose.rs | crates/wgcore/examples/compose.rs | #[cfg(not(feature = "derive"))]
std::compile_error!(
r#"
###############################################################
## The `derive` feature must be enabled to run this example. ##
###############################################################
"#
);
use nalgebra::DVector;
use std::fmt::Debug;
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgcore::Shader;
use wgpu::{BufferUsages, ComputePipeline};
// Declare our shader module that contains our composable functions.
// Note that we don’t build any compute pipeline from this wgsl file.
#[derive(Shader)]
#[shader(
src = "compose_dependency.wgsl" // Shader source code, will be embedded in the exe with `include_str!`
)]
struct Composable;
#[derive(Shader)]
#[shader(
derive(Composable), // This shader depends on the `Composable` shader.
src = "compose_kernel.wgsl", // Shader source code, will be embedded in the exe with `include_str!`.
composable = false // This shader doesn’t export any symbols reusable from other wgsl shaders.
)]
struct WgKernel {
// This ComputePipeline field indicates that the Shader macro needs to generate the boilerplate
// for loading the compute pipeline in `WgKernel::from_device`.
main: ComputePipeline,
}
#[derive(Copy, Clone, PartialEq, Default, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
pub struct MyStruct {
value: f32,
}
// Optional: makes the debug output more concise.
impl Debug for MyStruct {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.value)
}
}
#[async_std::main]
async fn main() -> anyhow::Result<()> {
// Initialize the gpu device and its queue.
//
// Note that `GpuInstance` is just a simple helper struct for initializing the gpu resources.
// You are free to initialize them independently if more control is needed, or reuse the ones
// that were already created/owned by e.g., a game engine.
let gpu = GpuInstance::new().await?;
// Load and compile our kernel. The `from_device` function was generated by the `Shader` derive.
// Note that its dependency to `Composable` is automatically resolved by the `Shader` derive
// too.
let kernel = WgKernel::from_device(gpu.device())?;
println!("######################################");
println!("###### Composed shader sources: ######");
println!("######################################");
println!("{}", WgKernel::flat_wgsl()?);
// Now, let’s actually run our kernel.
let result = run_kernel(&gpu, &kernel).await;
println!("Result: {:?}", result);
Ok(())
}
async fn run_kernel(gpu: &GpuInstance, kernel: &WgKernel) -> Vec<MyStruct> {
// Create the buffers.
const LEN: u32 = 10;
let a_data = DVector::from_fn(LEN as usize, |i, _| MyStruct { value: i as f32 });
let b_data = DVector::from_fn(LEN as usize, |i, _| MyStruct {
value: i as f32 * 10.0,
});
let a_buf = GpuVector::init(
gpu.device(),
&a_data,
BufferUsages::STORAGE | BufferUsages::COPY_SRC,
);
let b_buf = GpuVector::init(gpu.device(), &b_data, BufferUsages::STORAGE);
let staging = GpuVector::uninit(
gpu.device(),
LEN,
BufferUsages::COPY_DST | BufferUsages::MAP_READ,
);
// Encode & submit the operation to the gpu.
let mut encoder = gpu.device().create_command_encoder(&Default::default());
let mut pass = encoder.compute_pass("test", None);
KernelDispatch::new(gpu.device(), &mut pass, &kernel.main)
.bind0([a_buf.buffer(), b_buf.buffer()])
.dispatch(LEN.div_ceil(64));
drop(pass);
// Copy the result to the staging buffer.
staging.copy_from(&mut encoder, &a_buf);
gpu.queue().submit(Some(encoder.finish()));
// Read the result back from the gpu.
staging
.read(gpu.device())
.await
.expect("Failed to read result from the GPU.")
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/examples/buffer_readback.rs | crates/wgcore/examples/buffer_readback.rs | use nalgebra::DVector;
use wgcore::gpu::GpuInstance;
use wgcore::tensor::GpuVector;
use wgpu::BufferUsages;
/// Minimal example: upload a buffer, copy it into a mappable staging buffer, and read it back.
#[async_std::main]
async fn main() -> anyhow::Result<()> {
    // Initialize the gpu device and its queue.
    //
    // Note that `GpuInstance` is just a simple helper struct for initializing the gpu resources.
    // You are free to initialize them independently if more control is needed, or reuse the ones
    // that were already created/owned by e.g., a game engine.
    let gpu = GpuInstance::new().await?;
    // Create the buffers: a storage buffer holding `0..LEN`, and a CPU-mappable staging buffer.
    const LEN: u32 = 10;
    let buffer_data = DVector::from_fn(LEN as usize, |i, _| i as u32);
    let buffer = GpuVector::init(
        gpu.device(),
        &buffer_data,
        BufferUsages::STORAGE | BufferUsages::COPY_SRC,
    );
    let staging = GpuVector::uninit(
        gpu.device(),
        LEN,
        BufferUsages::COPY_DST | BufferUsages::MAP_READ,
    );
    // Encode & submit the operation to the gpu.
    let mut encoder = gpu.device().create_command_encoder(&Default::default());
    // Copy the result to the staging buffer.
    staging.copy_from(&mut encoder, &buffer);
    gpu.queue().submit(Some(encoder.finish()));
    // Read the staging buffer back and check the data round-tripped unchanged.
    let read = DVector::from(staging.read(gpu.device()).await?);
    assert_eq!(buffer_data, read);
    println!("Buffer copy & read succeeded!");
    println!("Original: {:?}", buffer_data);
    println!("Readback: {:?}", read);
    Ok(())
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore/examples/encase.rs | crates/wgcore/examples/encase.rs | #[cfg(not(feature = "derive"))]
std::compile_error!(
r#"
###############################################################
## The `derive` feature must be enabled to run this example. ##
###############################################################
"#
);
use nalgebra::Vector4;
use wgcore::gpu::GpuInstance;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::GpuVector;
use wgcore::Shader;
use wgpu::{BufferUsages, ComputePipeline};
/// Plain-old-data element with no internal padding, so it can be uploaded with `bytemuck`.
#[derive(Copy, Clone, PartialEq, Debug, Default, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
pub struct BytemuckStruct {
    // Single `f32` payload.
    value: f32,
}
/// Element whose GPU layout requires internal padding between `value` and `value2`,
/// so it must be uploaded with `encase` rather than `bytemuck`.
#[derive(Copy, Clone, PartialEq, Debug, Default, encase::ShaderType)]
#[repr(C)]
pub struct EncaseStruct {
    value: f32,
    // This implies some internal padding, so we can’t rely on bytemuck.
    // Encase will handle that properly.
    value2: Vector4<f32>,
}
/// Compute shader compiled from `encase.wgsl`; not registered as a composable module.
#[derive(Shader)]
#[shader(src = "encase.wgsl", composable = false)]
struct ShaderEncase {
    // Pipeline for the `main` entry point (the derive uses the field name as the kernel name).
    main: ComputePipeline,
}
/// Example: mixing an `encase`-encoded buffer and a `bytemuck`-encoded buffer in one dispatch.
#[async_std::main]
async fn main() -> anyhow::Result<()> {
    // Initialize the gpu device and its queue.
    //
    // Note that `GpuInstance` is just a simple helper struct for initializing the gpu resources.
    // You are free to initialize them independently if more control is needed, or reuse the ones
    // that were already created/owned by e.g., a game engine.
    let gpu = GpuInstance::new().await?;
    // Load and compile our kernel. The `from_device` function was generated by the `Shader` derive.
    // Note that its dependency on `Composable` is automatically resolved by the `Shader` derive
    // too.
    let kernel = ShaderEncase::from_device(gpu.device())?;
    // Create the buffers.
    const LEN: u32 = 1000;
    let a_data = (0..LEN)
        .map(|x| EncaseStruct {
            value: x as f32,
            value2: Vector4::repeat(x as f32 * 10.0),
        })
        .collect::<Vec<_>>();
    let b_data = (0..LEN)
        .map(|x| BytemuckStruct { value: x as f32 })
        .collect::<Vec<_>>();
    // Call `encase` instead of `init` because `EncaseStruct` isn’t `Pod`.
    // The `encase` function has a bit of overhead so bytemuck should be preferred whenever possible.
    let a_buf = GpuVector::encase(gpu.device(), &a_data, BufferUsages::STORAGE);
    let b_buf = GpuVector::init(gpu.device(), &b_data, BufferUsages::STORAGE);
    // Encode & submit the operation to the gpu.
    let mut encoder = gpu.device().create_command_encoder(&Default::default());
    let mut pass = encoder.compute_pass("test", None);
    KernelDispatch::new(gpu.device(), &mut pass, &kernel.main)
        .bind0([a_buf.buffer(), b_buf.buffer()])
        .dispatch(LEN.div_ceil(64));
    drop(pass);
    gpu.queue().submit(Some(encoder.finish()));
    Ok(())
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgcore-derive/src/lib.rs | crates/wgcore-derive/src/lib.rs | //! Derive proc-macros for `wgcore`.
extern crate proc_macro;
use darling::util::PathList;
use darling::{FromDeriveInput, FromField};
use proc_macro::TokenStream;
use quote::{quote, ToTokens};
use syn::{Data, DataStruct, Path};
/// Struct-level options of the `#[shader(...)]` attribute, parsed with `darling`.
#[derive(FromDeriveInput, Clone)]
#[darling(attributes(shader))]
struct DeriveShadersArgs {
    /// Other `Shader` types this one depends on; their `compose`, `watch_sources`
    /// and `needs_reload` are forwarded to in the generated impl.
    #[darling(default)]
    pub derive: PathList,
    /// Whether this shader is registered as a composable module (defaults to `true`).
    #[darling(default)]
    pub composable: Option<bool>,
    /// Path of the WGSL source, relative to the file declaring the struct.
    pub src: String,
    /// Optional function applied to the raw WGSL source string before compilation.
    #[darling(default)]
    pub src_fn: Option<Path>,
    /// Optional function returning the shader defs passed to `naga_oil`.
    #[darling(default)]
    pub shader_defs: Option<Path>,
    /// Path of the `wgcore` crate in the user’s namespace (defaults to `wgcore`).
    #[darling(default = "default_crate_path")]
    pub krate: Path,
}
/// Default for `DeriveShadersArgs::krate` when `#[shader(krate = ...)]` is not given.
fn default_crate_path() -> Path {
    syn::parse_quote!(wgcore)
}
/// Field-level options of the `#[shader(...)]` attribute.
#[derive(FromField, Clone)]
#[darling(attributes(shader))]
struct DeriveShadersFieldArgs {
    /// Overrides the WGSL entry-point name for this pipeline field
    /// (defaults to the field’s own name).
    #[darling(default)]
    pub kernel: Option<String>,
}
/// Implements `#[derive(Shader)]`: generates a `wgcore::shader::Shader` impl for the
/// annotated struct.
///
/// Struct-level options come from [`DeriveShadersArgs`]; each field is treated as a
/// compute pipeline loaded from the composed WGSL module, with the entry-point name
/// defaulting to the field name (overridable with `#[shader(kernel = "...")]`).
/// Only structs with named fields (or no fields) are supported.
#[proc_macro_derive(Shader, attributes(shader))]
pub fn derive_shader(item: TokenStream) -> TokenStream {
    let input = syn::parse_macro_input!(item as syn::DeriveInput);
    let struct_identifier = &input.ident;
    // Parse the struct-level `#[shader(...)]` options; surface darling errors as
    // compile errors in the user’s crate.
    let derive_shaders = match DeriveShadersArgs::from_derive_input(&input) {
        Ok(v) => v,
        Err(e) => {
            return e.write_errors().into();
        }
    };
    match &input.data {
        Data::Struct(DataStruct { fields, .. }) => {
            /*
             * Field attributes.
             */
            // One initializer per field, each loading a compute pipeline from the module.
            let mut kernels_to_build = vec![];
            let src_path = derive_shaders.src;
            for field in fields.iter() {
                let field_args = match DeriveShadersFieldArgs::from_field(field) {
                    Ok(v) => v,
                    Err(e) => {
                        return e.write_errors().into();
                    }
                };
                let ident = field.ident.as_ref().expect("unnamed fields not supported").into_token_stream();
                // Kernel entry-point name: explicit `kernel = "..."` or the field name.
                let kernel_name = field_args.kernel.map(|k| quote! { #k }).unwrap_or_else(|| quote! { stringify!(#ident) });
                let krate = &derive_shaders.krate;
                if fields.len() == 1 {
                    // Don't clone the module if there is only one field.
                    kernels_to_build.push(quote! {
                        #ident: #krate::utils::load_module(device, #kernel_name, module),
                    });
                } else {
                    kernels_to_build.push(quote! {
                        #ident: #krate::utils::load_module(device, #kernel_name, module.clone()),
                    });
                }
            }
            let krate = &derive_shaders.krate;
            // Shader defs: call the user-provided function, or use the default (empty) set.
            let shader_defs = derive_shaders.shader_defs.map(|defs| quote! { #defs() })
                .unwrap_or_else(|| quote! { Default::default() });
            let raw_src = quote! {
                // First try to find a path from the shader registry.
                // If doesn't exist in the registry, try the absolute path.
                // If it doesn't exist in the absolute path, load the embedded string.
                if let Some(path) = Self::wgsl_path() {
                    // TODO: handle error
                    std::fs::read_to_string(path).unwrap()
                } else {
                    include_str!(#src_path).to_string()
                }
            };
            // Optionally post-process the raw source with the user’s `src_fn`.
            let src = derive_shaders.src_fn.map(|f| quote! { #f(&#raw_src) })
                .unwrap_or_else(|| quote! { #raw_src });
            let naga_module = quote! {
                Self::composer().and_then(|mut c|
                    c.make_naga_module(#krate::re_exports::naga_oil::compose::NagaModuleDescriptor {
                        source: &Self::src(),
                        file_path: Self::FILE_PATH,
                        shader_defs: #shader_defs,
                        ..Default::default()
                    })
                )
            };
            // `from_device` body: compile the module once and build every pipeline field.
            // Field-less structs (pure composable shaders) just return `Self`.
            let from_device = if !kernels_to_build.is_empty() {
                quote! {
                    let module = #naga_module?;
                    Ok(Self {
                        #(
                            #kernels_to_build
                        )*
                    })
                }
            } else {
                quote! {
                    Ok(Self)
                }
            };
            /*
             * Derive shaders.
             */
            let to_derive: Vec<_> = derive_shaders
                .derive
                .iter()
                .map(|p| p.into_token_stream())
                .collect();
            let composable = derive_shaders.composable.unwrap_or(true);
            quote! {
                #[automatically_derived]
                impl #krate::shader::Shader for #struct_identifier {
                    const FILE_PATH: &'static str = #src_path;
                    fn from_device(device: &#krate::re_exports::Device) -> Result<Self, #krate::re_exports::ComposerError> {
                        #from_device
                    }
                    fn src() -> String {
                        #src
                    }
                    fn naga_module() -> Result<#krate::re_exports::wgpu::naga::Module, #krate::re_exports::ComposerError> {
                        #naga_module
                    }
                    fn wgsl_path() -> Option<std::path::PathBuf> {
                        if let Some(path) = #krate::ShaderRegistry::get().get_path::<#struct_identifier>() {
                            Some(path.clone())
                        } else {
                            // NOTE: this is a bit fragile, and won't work if the current working directory
                            // isn't the root of the workspace the binary crate is being run from.
                            // Ideally we need `proc_macro2::Span::source_file` but it is currently unstable.
                            // See: https://users.rust-lang.org/t/how-to-get-the-macro-called-file-path-in-a-rust-procedural-macro/109613/5
                            std::path::Path::new(file!())
                                .parent()?
                                .join(Self::FILE_PATH)
                                .canonicalize().ok()
                        }
                    }
                    fn compose(composer: &mut #krate::re_exports::Composer) -> Result<(), #krate::re_exports::ComposerError> {
                        use #krate::composer::ComposerExt;
                        // Compose dependencies first, then (optionally) register this module.
                        #(
                            #to_derive::compose(composer)?;
                        )*
                        if #composable {
                            composer
                                .add_composable_module_once(#krate::re_exports::ComposableModuleDescriptor {
                                    source: &Self::src(),
                                    file_path: Self::FILE_PATH,
                                    shader_defs: #shader_defs,
                                    ..Default::default()
                                })?;
                        }
                        Ok(())
                    }
                    /*
                     * Hot reloading.
                     */
                    fn watch_sources(state: &mut #krate::hot_reloading::HotReloadState) -> #krate::re_exports::notify::Result<()> {
                        #(
                            #to_derive::watch_sources(state)?;
                        )*
                        if let Some(path) = Self::wgsl_path() {
                            state.watch_file(&path)?;
                        }
                        Ok(())
                    }
                    fn needs_reload(state: &#krate::hot_reloading::HotReloadState) -> bool {
                        // A reload is needed if this shader or any dependency changed.
                        #(
                            if #to_derive::needs_reload(state) {
                                return true;
                            }
                        )*
                        Self::wgsl_path()
                            .map(|path| state.file_changed(&path))
                            .unwrap_or_default()
                    }
                }
            }
        }
        _ => unimplemented!(),
    }
    .into()
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/lib.rs | crates/wgrapier/src/lib.rs | //! GPU-accelerated rigid-body physics engine built on WebGPU/WGSL.
//!
//! **wgrapier** provides a high-performance physics simulation system that runs entirely on the GPU,
//! enabling massively parallel physics computation for thousands of rigid bodies. It is designed to
//! work seamlessly across platforms including web and desktop WebGPU.
//!
//! # See Also
//!
//! - [`wgparry`]: GPU collision detection library used by wgrapier.
//! - [`rapier`]: CPU-based physics engine that this crate is based on.
//! - [`wgcore`]: Foundation crate providing shader composition and GPU utilities.
#![doc = include_str!("../README.md")]
#![warn(missing_docs)]
#![allow(clippy::result_large_err)]
#![allow(clippy::too_many_arguments)]
#[cfg(feature = "dim2")]
pub extern crate rapier2d as rapier;
#[cfg(feature = "dim3")]
pub extern crate rapier3d as rapier;
#[cfg(feature = "dim2")]
pub extern crate wgparry2d as wgparry;
#[cfg(feature = "dim3")]
pub extern crate wgparry3d as wgparry;
pub mod dynamics;
pub mod pipeline;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/pipeline.rs | crates/wgrapier/src/pipeline.rs | //! Physics simulation pipeline orchestrating broad-phase, narrow-phase, and constraint solving.
//!
//! This module provides the high-level physics pipeline that coordinates all stages of a physics
//! simulation step on the GPU. The pipeline manages collision detection, contact generation,
//! constraint solving, and integration.
use crate::dynamics::body::{GpuLocalMassProperties, GpuVelocity, GpuWorldMassProperties};
use crate::dynamics::{
prefix_sum::{PrefixSumWorkspace, WgPrefixSum},
ColoringArgs, GpuImpulseJointSet, GpuSimParams, GpuTwoBodyConstraint,
GpuTwoBodyConstraintBuilder, JointSolverArgs, SolverArgs, WarmstartArgs, WgColoring,
WgJointSolver, WgMpropsUpdate, WgSolver, WgWarmstart,
};
use crate::wgparry::{
broad_phase::{Lbvh, WgBruteForceBroadPhase, WgNarrowPhase},
queries::GpuIndexedContact,
shapes::GpuShape,
};
use naga_oil::compose::ComposerError;
use nalgebra::Vector4;
use rapier::dynamics::{ImpulseJointSet, RigidBodySet};
use rapier::geometry::ColliderSet;
use std::collections::HashMap;
use std::time::Duration;
use wgcore::gpu::GpuInstance;
use wgcore::indirect::DispatchIndirectArgs;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::{GpuScalar, GpuVector};
use wgcore::timestamps::GpuTimestamps;
use wgcore::Shader;
use wgparry::broad_phase::LbvhState;
use wgparry::math::{GpuSim, Point};
use wgparry::shapes::ShapeBuffers;
use wgpu::{BufferUsages, Device};
/// Profiling data gathered while executing one physics step.
///
/// Collects wall-clock durations measured on the CPU, GPU timestamps, and iteration
/// counters for the individual pipeline stages, so a simulation step can be profiled
/// and tuned.
#[derive(Default, Copy, Clone, Debug)]
pub struct RunStats {
    /// How many colors the constraint graph-coloring produced for parallel solving.
    pub num_colors: u32,
    /// Elapsed time between the beginning of the step and the readback of the
    /// collision-pair count from the GPU.
    pub start_to_pairs_count_time: Duration,
    /// Wall-clock time spent running the graph-coloring algorithm.
    pub coloring_time: Duration,
    /// How many iterations the coloring algorithm needed to converge.
    pub coloring_iterations: u32,
    /// Wall-clock time spent in the fallback coloring method (when the primary one failed).
    pub coloring_fallback_time: Duration,
    /// Full simulation time for the step, GPU-to-CPU readbacks included.
    pub total_simulation_time_with_readback: Duration,
    /// GPU timestamp of the mass-properties update stage.
    pub timestamp_update_mass_props: f64,
    /// GPU timestamp of the broad-phase collision-detection stage.
    pub timestamp_broad_phase: f64,
    /// GPU timestamp of the narrow-phase contact-generation stage.
    pub timestamp_narrow_phase: f64,
    /// GPU timestamp of the constraint-solver preparation stage.
    pub timestamp_solver_prep: f64,
    /// GPU timestamp of the constraint-solver solve stage.
    pub timestamp_solver_solve: f64,
}
impl RunStats {
    /// Total simulation time (readbacks included) expressed in milliseconds.
    pub fn total_simulation_time_ms(&self) -> f32 {
        let seconds = self.total_simulation_time_with_readback.as_secs_f32();
        seconds * 1000.0
    }
}
/// GPU-resident physics simulation state containing all rigid bodies, shapes, and solver data.
///
/// This structure holds all the buffers needed for a complete physics simulation on the GPU:
/// - Rigid body poses, velocities, and mass properties
/// - Collision shapes and contact data
/// - Constraints and solver state
/// - Auxiliary data structures (LBVH, prefix sum workspace, etc.)
///
/// The state can be initialized from CPU-side Rapier data structures and then updated
/// entirely on the GPU each frame.
pub struct GpuPhysicsState {
    // Global simulation parameters (timestep, solver configuration).
    sim_params: GpuScalar<GpuSimParams>,
    // Per-body state: world-space poses, local/world mass properties, velocities.
    poses: GpuVector<GpuSim>,
    local_mprops: GpuVector<GpuLocalMassProperties>,
    mprops: GpuVector<GpuWorldMassProperties>,
    vels: GpuVector<GpuVelocity>,
    // Scratch velocity buffers used by the constraint solver.
    solver_vels: GpuVector<GpuVelocity>,
    solver_vels_out: GpuVector<GpuVelocity>,
    solver_vels_inc: GpuVector<GpuVelocity>,
    // Shared vertex/index pools referenced by mesh-like shapes.
    vertex_buffers: GpuVector<Point<f32>>,
    index_buffers: GpuVector<u32>,
    // One shape per body, plus the body count (also as indirect-dispatch args).
    shapes: GpuVector<GpuShape>,
    num_shapes: GpuScalar<u32>,
    num_shapes_indirect: GpuScalar<[u32; 3]>,
    // Broad-phase output: candidate pairs, their count, and indirect-dispatch args.
    collision_pairs: GpuVector<[u32; 2]>,
    collision_pairs_len: GpuScalar<u32>,
    // CPU-mappable copy of the pair count (read back each step for buffer resizing).
    collision_pairs_len_staging: GpuScalar<u32>,
    collision_pairs_indirect: GpuScalar<DispatchIndirectArgs>,
    // Narrow-phase output: contact manifolds and their count.
    contacts: GpuVector<GpuIndexedContact>,
    contacts_len: GpuScalar<u32>,
    contacts_indirect: GpuScalar<DispatchIndirectArgs>,
    // Double-buffered contact constraints: `new_*` is built this step, `old_*` is kept
    // from the previous step for impulse warmstarting.
    new_constraints: GpuVector<GpuTwoBodyConstraint>,
    new_constraint_builders: GpuVector<GpuTwoBodyConstraintBuilder>,
    new_constraints_counts: GpuVector<u32>,
    new_body_constraint_ids: GpuVector<u32>,
    old_constraints: GpuVector<GpuTwoBodyConstraint>,
    old_constraint_builders: GpuVector<GpuTwoBodyConstraintBuilder>,
    old_constraints_counts: GpuVector<u32>,
    old_body_constraint_ids: GpuVector<u32>,
    // Graph-coloring state used to group constraints for parallel solving.
    constraints_colors: GpuVector<u32>,
    colored: GpuVector<u32>,
    constraints_rands: GpuVector<u32>,
    curr_color: GpuScalar<u32>,
    uncolored: GpuScalar<u32>,
    // CPU-mappable copy of the uncolored-constraint count.
    uncolored_staging: GpuScalar<u32>,
    // Broad-phase acceleration structure, joints, and prefix-sum scratch space.
    lbvh: LbvhState,
    joints: GpuImpulseJointSet,
    prefix_sum_workspace: PrefixSumWorkspace,
    // Debug AABB outputs; only written by the (currently disabled) brute-force broad-phase.
    #[allow(dead_code)]
    debug_aabb_mins: GpuVector<Vector4<f32>>,
    #[allow(dead_code)]
    debug_aabb_maxs: GpuVector<Vector4<f32>>,
}
impl GpuPhysicsState {
    /// Creates a new GPU physics state from CPU-side Rapier data structures.
    ///
    /// This method extracts rigid body and collider data from Rapier's CPU representations
    /// and uploads them to GPU buffers. Each collider is treated as a separate rigid body
    /// in the GPU simulation.
    ///
    /// # Parameters
    ///
    /// - `device`: The WebGPU device used to allocate GPU buffers.
    /// - `bodies`: The set of rigid bodies from Rapier.
    /// - `colliders`: The set of colliders from Rapier.
    /// - `impulse_joints`: The set of impulse joints from Rapier, re-indexed against the
    ///   per-collider body ids built here.
    /// - `use_jacobi`: If `true`, initializes the simulation parameters for the Jacobi
    ///   solver (`GpuSimParams::jacobi()`); otherwise for the TGS-soft solver.
    ///
    /// # Panics
    ///
    /// Panics if any rigid body has more than one collider attached, as this is not currently supported.
    ///
    /// # GPU Memory Allocation
    ///
    /// This method allocates significant GPU memory for:
    /// - Body poses, velocities, and mass properties.
    /// - Collision shapes and contact buffers.
    /// - Constraint solver data structures.
    /// - LBVH acceleration structure.
    pub fn from_rapier(
        device: &Device,
        bodies: &RigidBodySet,
        colliders: &ColliderSet,
        impulse_joints: &ImpulseJointSet,
        use_jacobi: bool,
    ) -> Self {
        let mut rb_poses = Vec::new();
        let mut rb_local_mprops = Vec::new();
        let mut rb_mprops = Vec::new();
        let mut shapes = Vec::new();
        let mut shape_buffers = ShapeBuffers::default();
        // Maps a Rapier body handle to its index in the GPU arrays (used to remap joints).
        let mut body_ids = HashMap::new();
        for (_, co) in colliders.iter() {
            let parent = co.parent().map(|h| &bodies[h]);
            if let Some(parent) = parent {
                assert_eq!(
                    parent.colliders().len(),
                    1,
                    "Only bodies with exactly one collider are supported."
                );
            }
            let mut local_mprops = GpuLocalMassProperties::default();
            let mut mprops = GpuWorldMassProperties {
                com: parent.map(|body| *body.translation()).unwrap_or_default(), // TODO: is this still needed?
                ..Default::default()
            };
            // Non-dynamic (or parentless) colliders get zero inverse mass/inertia so the
            // solver treats them as immovable.
            if parent.map(|b| !b.is_dynamic()).unwrap_or(true) {
                local_mprops.inv_mass.fill(0.0);
                local_mprops.inv_principal_inertia = nalgebra::zero();
                mprops.inv_mass.fill(0.0);
                mprops.inv_inertia = nalgebra::zero();
            }
            if let Some(h) = co.parent() {
                let id = rb_poses.len();
                body_ids.insert(h, id as u32);
            }
            rb_local_mprops.push(local_mprops);
            rb_mprops.push(mprops);
            shapes.push(
                GpuShape::from_parry(co.shape(), &mut shape_buffers).expect("Unsupported shape"),
            );
            #[cfg(feature = "dim2")]
            rb_poses.push(GpuSim::from(*co.position()));
            #[cfg(feature = "dim3")]
            rb_poses.push(GpuSim::from_isometry(*co.position(), 1.0));
        }
        // NOTE: wgpu doesn’t like empty storage buffer bindings.
        // So if the vertex/index buffers are empty, add some dummy value instead of leaving
        // them empty. This won’t have any performance impact.
        if shape_buffers.vertices.is_empty() {
            shape_buffers.vertices.push(Point::origin());
        }
        if shape_buffers.indices.is_empty() {
            shape_buffers.indices.extend_from_slice(&[0; 3]);
        }
        let vertex_buffers =
            GpuVector::encase(device, &shape_buffers.vertices, BufferUsages::STORAGE);
        let index_buffers = GpuVector::init(device, &shape_buffers.indices, BufferUsages::STORAGE);
        let joints = GpuImpulseJointSet::from_rapier(device, impulse_joints, &body_ids);
        let num_bodies = rb_poses.len();
        let rb_vels = vec![GpuVelocity::default(); num_bodies];
        let storage: BufferUsages = BufferUsages::STORAGE | BufferUsages::COPY_SRC;
        let shapes = GpuVector::init(device, &shapes, storage);
        let num_shapes = GpuScalar::init(device, num_bodies as u32, BufferUsages::UNIFORM);
        // Indirect dispatch args: one workgroup of 64 threads per 64 bodies.
        let num_shapes_indirect = GpuScalar::init(
            device,
            [num_bodies.div_ceil(64) as u32, 1, 1],
            BufferUsages::STORAGE | BufferUsages::INDIRECT,
        );
        const DEFAULT_CONTACT_COUNTS: u32 = 1024; // NOTE: this will be resized automatically.
        let collision_pairs = GpuVector::uninit(device, DEFAULT_CONTACT_COUNTS, storage);
        let collision_pairs_len =
            GpuScalar::uninit(device, BufferUsages::STORAGE | BufferUsages::COPY_SRC);
        let collision_pairs_len_staging =
            GpuScalar::uninit(device, BufferUsages::MAP_READ | BufferUsages::COPY_DST);
        let collision_pairs_indirect =
            GpuScalar::uninit(device, BufferUsages::STORAGE | BufferUsages::INDIRECT);
        let contacts = GpuVector::uninit_encased(device, DEFAULT_CONTACT_COUNTS, storage);
        let contacts_len = GpuScalar::uninit(device, storage);
        let contacts_indirect =
            GpuScalar::uninit(device, BufferUsages::STORAGE | BufferUsages::INDIRECT);
        let old_constraints = GpuVector::uninit_encased(device, DEFAULT_CONTACT_COUNTS, storage);
        let old_constraint_builders =
            GpuVector::uninit_encased(device, DEFAULT_CONTACT_COUNTS, storage);
        let new_constraints = GpuVector::uninit_encased(device, DEFAULT_CONTACT_COUNTS, storage);
        let new_constraint_builders =
            GpuVector::uninit_encased(device, DEFAULT_CONTACT_COUNTS, storage);
        let constraints_colors = GpuVector::uninit_encased(device, DEFAULT_CONTACT_COUNTS, storage);
        let colored = GpuVector::uninit_encased(device, DEFAULT_CONTACT_COUNTS, storage);
        let constraints_rands = GpuVector::uninit_encased(device, DEFAULT_CONTACT_COUNTS, storage);
        let old_constraints_counts = GpuVector::uninit_encased(device, num_bodies as u32, storage);
        let new_constraints_counts = GpuVector::uninit_encased(device, num_bodies as u32, storage);
        // Two entries per constraint (one per body involved).
        let old_body_constraint_ids =
            GpuVector::uninit_encased(device, DEFAULT_CONTACT_COUNTS * 2, storage);
        let new_body_constraint_ids =
            GpuVector::uninit_encased(device, DEFAULT_CONTACT_COUNTS * 2, storage);
        let mut sim_params = if use_jacobi {
            GpuSimParams::jacobi()
        } else {
            GpuSimParams::tgs_soft()
        };
        // Each solver iteration integrates a sub-step of the full timestep.
        sim_params.dt /= sim_params.num_solver_iterations as f32;
        Self {
            sim_params: GpuScalar::init(
                device,
                sim_params,
                BufferUsages::STORAGE | BufferUsages::UNIFORM,
            ),
            vels: GpuVector::encase(device, &rb_vels, storage),
            solver_vels: GpuVector::encase(device, &rb_vels, storage),
            solver_vels_out: GpuVector::encase(device, &rb_vels, storage),
            solver_vels_inc: GpuVector::encase(device, &rb_vels, storage),
            joints,
            local_mprops: GpuVector::encase(device, &rb_local_mprops, storage),
            mprops: GpuVector::encase(device, &rb_mprops, storage),
            poses: GpuVector::init(
                device,
                &rb_poses,
                BufferUsages::STORAGE | BufferUsages::COPY_SRC,
            ),
            vertex_buffers,
            index_buffers,
            shapes,
            num_shapes,
            num_shapes_indirect,
            collision_pairs,
            collision_pairs_len,
            collision_pairs_len_staging,
            collision_pairs_indirect,
            contacts,
            contacts_len,
            contacts_indirect,
            old_constraints,
            old_constraint_builders,
            old_constraints_counts,
            new_constraints,
            new_constraint_builders,
            new_constraints_counts,
            constraints_colors,
            colored,
            constraints_rands,
            curr_color: GpuScalar::init(
                device,
                0,
                BufferUsages::STORAGE
                    | BufferUsages::UNIFORM
                    | BufferUsages::COPY_DST
                    | BufferUsages::COPY_SRC,
            ),
            uncolored: GpuScalar::init(
                device,
                0,
                BufferUsages::STORAGE | BufferUsages::COPY_DST | BufferUsages::COPY_SRC,
            ),
            uncolored_staging: GpuScalar::init(
                device,
                0,
                BufferUsages::MAP_READ | BufferUsages::COPY_DST,
            ),
            old_body_constraint_ids,
            new_body_constraint_ids,
            prefix_sum_workspace: PrefixSumWorkspace::default(),
            debug_aabb_mins: GpuVector::uninit(device, num_bodies as u32, storage),
            debug_aabb_maxs: GpuVector::uninit(device, num_bodies as u32, storage),
            lbvh: LbvhState::new(device).unwrap(),
        }
    }
    /// Returns a reference to the GPU buffer containing rigid body poses.
    ///
    /// The poses are represented as similarity transformations (position + rotation + scale)
    /// in world space.
    pub fn poses(&self) -> &GpuVector<GpuSim> {
        &self.poses
    }
    /// The set of joints part of the simulation.
    pub fn joints(&self) -> &GpuImpulseJointSet {
        &self.joints
    }
    /// Returns a reference to the GPU buffer containing collision shapes.
    ///
    /// Each shape corresponds to one rigid body in the simulation.
    pub fn shapes(&self) -> &GpuVector<GpuShape> {
        &self.shapes
    }
}
/// The main GPU physics pipeline coordinating all simulation stages.
///
/// This structure contains all the compute shaders needed to run a complete physics simulation
/// on the GPU. It orchestrates the following stages in each simulation step:
///
/// 1. **Gravity application**: Updates velocities with gravitational forces.
/// 2. **Broad-phase**: Uses LBVH to find potentially colliding pairs.
/// 3. **Narrow-phase**: Generates detailed contact information for collision pairs.
/// 4. **Constraint preparation**: Converts contacts into solver constraints.
/// 5. **Graph coloring**: Colors constraints to enable parallel solving.
/// 6. **Constraint solving**: Iteratively solves constraints using TGS or PGS.
/// 7. **Integration**: Updates poses based on solved velocities.
pub struct GpuPhysicsPipeline {
    // Mass-properties update kernel (named `gravity`; presumably it also applies
    // gravity — confirm against the WGSL source).
    gravity: WgMpropsUpdate,
    // Brute-force broad-phase kept for debugging; the LBVH below is used instead.
    #[allow(dead_code)]
    broad_phase: WgBruteForceBroadPhase,
    // Contact generation for candidate pairs.
    narrow_phase: WgNarrowPhase,
    // Contact-constraint preparation and solving.
    solver: WgSolver,
    // Joint-constraint solving.
    joint_solver: WgJointSolver,
    // Parallel prefix-sum used when preparing constraints.
    prefix_sum: WgPrefixSum,
    // Linear BVH broad-phase (tree build + pair finding).
    lbvh: Lbvh,
    // Constraint graph coloring (primary + fallback methods).
    coloring: WgColoring,
    // Transfers warmstart impulses from the previous step's constraints.
    warmstart: WgWarmstart,
}
impl GpuPhysicsPipeline {
/// Creates a new physics pipeline from a WebGPU device.
///
/// This method compiles all the compute shaders needed for the physics simulation and
/// creates the compute pipelines.
///
/// # Parameters
///
/// - `device`: The WebGPU device used for shader compilation
///
/// # Returns
///
/// Returns `Ok(Self)` if all shaders compiled successfully, or a [`ComposerError`]
/// if shader compilation failed.
pub fn from_device(device: &Device) -> Result<Self, ComposerError> {
    // NOTE: the first shader that fails to compile aborts construction via `?`.
    Ok(Self {
        gravity: WgMpropsUpdate::from_device(device)?,
        broad_phase: WgBruteForceBroadPhase::from_device(device)?,
        narrow_phase: WgNarrowPhase::from_device(device)?,
        solver: WgSolver::from_device(device)?,
        joint_solver: WgJointSolver::from_device(device)?,
        prefix_sum: WgPrefixSum::from_device(device)?,
        lbvh: Lbvh::from_device(device)?,
        coloring: WgColoring::from_device(device)?,
        warmstart: WgWarmstart::from_device(device)?,
    })
}
/// Executes one physics simulation timestep on the GPU.
///
/// This method runs the complete physics pipeline:
/// 1. Update world-space mass-properties.
/// 2. Builds LBVH and finds collision pairs (broad-phase).
/// 3. Generates contact manifolds (narrow-phase).
/// 4. Prepares solver constraints from contacts.
/// 5. Colors constraints for parallel solving.
/// 6. Solves constraints iteratively using TGS.
/// 7. Integrates velocities to update poses.
///
/// # Buffer Resizing
///
/// If the number of collision pairs exceeds buffer capacity, this method automatically
/// allocates larger buffers (next power of two) and re-runs the broad-phase.
pub async fn step(
&self,
gpu: &GpuInstance,
state: &mut GpuPhysicsState,
mut timestamps: Option<&mut GpuTimestamps>,
use_jacobi: bool,
) -> RunStats {
let mut stats = RunStats::default();
let t_phase1 = web_time::Instant::now();
let mut encoder = gpu.device().create_command_encoder(&Default::default());
let mut pass = encoder.compute_pass("step_simulation", timestamps.as_deref_mut());
KernelDispatch::new(gpu.device(), &mut pass, &self.gravity.main)
.bind0([
state.mprops.buffer(),
state.local_mprops.buffer(),
state.poses.buffer(),
])
.dispatch(state.poses.len().div_ceil(64) as u32);
drop(pass);
let mut pass = encoder.compute_pass("lbvh", timestamps.as_deref_mut());
// state.broad_phase.dispatch(
// gpu.device(),
// &mut pass,
// state.poses.len() as u32,
// &state.poses,
// &state.shapes,
// &state.num_shapes,
// &state.collision_pairs,
// &state.collision_pairs_len,
// &state.collision_pairs_indirect,
// &state.debug_aabb_mins,
// &state.debug_aabb_maxs,
// );
self.lbvh.update_tree(
gpu.device(),
&mut pass,
&mut state.lbvh,
state.poses.len() as u32,
&state.poses,
&state.vertex_buffers,
&state.shapes,
&state.num_shapes,
);
self.lbvh.find_pairs(
gpu.device(),
&mut pass,
&mut state.lbvh,
state.poses.len() as u32,
&state.num_shapes,
&state.collision_pairs,
&state.collision_pairs_len,
&state.collision_pairs_indirect,
);
drop(pass);
state
.collision_pairs_len_staging
.copy_from(&mut encoder, &state.collision_pairs_len);
gpu.queue().submit(Some(encoder.finish()));
let mut num_collision_pairs = [0u32];
state
.collision_pairs_len_staging
.read_to(gpu.device(), &mut num_collision_pairs)
.await
.unwrap();
let num_collision_pairs = num_collision_pairs[0];
stats.start_to_pairs_count_time = t_phase1.elapsed();
let mut encoder = gpu.device().create_command_encoder(&Default::default());
// TODO PERF: since we are reading the num_collision_pairs anyway for the sake of buffer resizing,
// we might as well just use this for dispatch instead of doing indirect dispatch
// (and thus remove `collision_pairs_indirect`).
if num_collision_pairs >= state.collision_pairs.len() as u32 {
let storage: BufferUsages = BufferUsages::STORAGE | BufferUsages::COPY_SRC;
// The collision buffers are too small, resize them.
let desired_len = num_collision_pairs.next_power_of_two();
// println!(
// "REALLOCATING BUFFERS. Need {}, found {}, allocating: {}",
// num_collision_pairs,
// state.collision_pairs.len(),
// desired_len,
// );
// TODO: encapsulate that somewhere
state.collision_pairs = GpuVector::uninit(gpu.device(), desired_len, storage);
state.contacts = GpuVector::uninit_encased(gpu.device(), desired_len, storage);
state.old_constraints = GpuVector::uninit_encased(gpu.device(), desired_len, storage);
state.old_constraint_builders =
GpuVector::uninit_encased(gpu.device(), desired_len, storage);
state.old_body_constraint_ids =
GpuVector::uninit_encased(gpu.device(), desired_len * 2, storage);
state.new_constraints = GpuVector::uninit_encased(gpu.device(), desired_len, storage);
state.new_constraint_builders =
GpuVector::uninit_encased(gpu.device(), desired_len, storage);
state.new_body_constraint_ids =
GpuVector::uninit_encased(gpu.device(), desired_len * 2, storage);
state.constraints_colors =
GpuVector::uninit_encased(gpu.device(), desired_len, storage);
state.colored = GpuVector::uninit_encased(gpu.device(), desired_len, storage);
state.constraints_rands = GpuVector::uninit_encased(gpu.device(), desired_len, storage);
// Re-run the broad-phase with the correct buffer lengths.
let mut pass = encoder.compute_pass("lbvh-after-resize", None);
self.lbvh.find_pairs(
gpu.device(),
&mut pass,
&mut state.lbvh,
state.poses.len() as u32,
&state.num_shapes,
&state.collision_pairs,
&state.collision_pairs_len,
&state.collision_pairs_indirect,
);
drop(pass);
}
let mut pass = encoder.compute_pass("narrow phase", timestamps.as_deref_mut());
self.narrow_phase.dispatch(
gpu.device(),
&mut pass,
state.poses.len() as u32,
&state.poses,
&state.shapes,
&state.vertex_buffers,
&state.index_buffers,
&state.collision_pairs,
&state.collision_pairs_len,
&state.collision_pairs_indirect,
&state.contacts,
&state.contacts_len,
&state.contacts_indirect,
);
drop(pass);
let mut pass = encoder.compute_pass("jacobi prep", timestamps.as_deref_mut());
let mut solver_args = SolverArgs {
num_colliders: state.poses.len() as u32,
contacts: &state.contacts,
contacts_len: &state.contacts_len,
contacts_len_indirect: &state.contacts_indirect,
constraints: &state.new_constraints,
constraint_builders: &state.new_constraint_builders,
sim_params: &state.sim_params,
colliders_len: &state.num_shapes,
colliders_len_indirect: &state.num_shapes_indirect,
poses: &state.poses,
vels: &state.vels,
solver_vels: &state.solver_vels,
solver_vels_out: &state.solver_vels_out,
solver_vels_inc: &state.solver_vels_inc,
mprops: &state.mprops,
local_mprops: &state.local_mprops,
body_constraint_counts: &state.new_constraints_counts,
body_constraint_ids: &state.new_body_constraint_ids,
constraints_colors: &state.constraints_colors,
curr_color: &state.curr_color,
prefix_sum: &self.prefix_sum,
num_colors: 0,
};
let joint_solver_args = JointSolverArgs {
sim_params: &state.sim_params,
poses: &state.poses,
mprops: &state.mprops,
local_mprops: &state.local_mprops,
joints: &state.joints,
solver_vels: &state.solver_vels,
};
self.solver.prepare(
gpu.device(),
&mut pass,
solver_args,
&mut state.prefix_sum_workspace,
);
// NOTE: if webgpu allowed to (but it doesn’t), we could run this kernel completely in parallel of the graph coloring.
let warmstart_args = WarmstartArgs {
contacts_len: &state.contacts_len,
old_body_constraint_counts: &state.old_constraints_counts,
old_constraint_builders: &state.old_constraint_builders,
old_body_constraint_ids: &state.old_body_constraint_ids,
old_constraints: &state.old_constraints,
new_constraints: &state.new_constraints,
new_constraint_builders: &state.new_constraint_builders,
contacts_len_indirect: &state.contacts_indirect,
};
if !use_jacobi {
self.warmstart
.transfer_warmstart_impulses(gpu.device(), &mut pass, warmstart_args);
}
drop(pass);
gpu.queue().submit(Some(encoder.finish()));
// self.slow_verify_collision_pair_lists(gpu, state).await;
// NOTE: jacobi doesn’t need graph coloring.
if !use_jacobi {
let coloring_args = ColoringArgs {
contacts_len_indirect: &state.contacts_indirect,
body_constraint_counts: &state.new_constraints_counts,
body_constraint_ids: &state.new_body_constraint_ids,
constraints: &state.new_constraints,
constraints_colors: &state.constraints_colors,
constraints_rands: &state.constraints_rands,
curr_color: &state.curr_color,
uncolored: &state.uncolored,
uncolored_staging: &state.uncolored_staging,
contacts_len: &state.contacts_len,
colored: &state.colored,
};
if let Some(num_colors) = self
.coloring
.dispatch_topo_gc(gpu, coloring_args, &mut stats)
.await
{
solver_args.num_colors = num_colors;
} else {
solver_args.num_colors = self
.coloring
.dispatch_luby(gpu, coloring_args, &mut stats)
.await;
}
stats.num_colors = solver_args.num_colors;
}
// // gpu.queue().submit(Some(encoder.finish()));
// println!("Found collision pairs: {}", num_collision_pairs);
// return;
let mut encoder = gpu.device().create_command_encoder(&Default::default());
let mut pass = encoder.compute_pass("solve", timestamps);
self.solver.solve_tgs(
gpu.device(),
&mut pass,
&self.joint_solver,
solver_args,
joint_solver_args,
use_jacobi,
);
drop(pass);
gpu.queue().submit(Some(encoder.finish()));
// println!("Simulation time: {}.", t0.elapsed().as_secs_f32() * 1000.0);
// Swap buffers.
std::mem::swap(&mut state.old_constraints, &mut state.new_constraints);
std::mem::swap(
&mut state.old_constraint_builders,
&mut state.new_constraint_builders,
);
std::mem::swap(
&mut state.old_body_constraint_ids,
&mut state.new_body_constraint_ids,
);
std::mem::swap(
&mut state.old_constraints_counts,
&mut state.new_constraints_counts,
);
stats
}
    /// Debugging helper to verify collision pair lists on the CPU.
    ///
    /// This method reads back all collision data from the GPU and validates the constraint
    /// graph structure, checking that constraints are properly associated with bodies.
    /// It's useful for debugging broad-phase or constraint solver issues.
    ///
    /// It also prints diagnostics: the first constraint id, total constraint and
    /// collision-pair counts, bodies with unusually many constraints, and any NaN poses.
    ///
    /// # Performance Warning
    ///
    /// This method performs extensive CPU-GPU synchronization and should only be used
    /// for debugging, never in production code.
    #[allow(dead_code)] // Very helpful piece of code for debugging the broad-phase's result.
    async fn slow_verify_collision_pair_lists(
        &self,
        gpu: &GpuInstance,
        state: &mut GpuPhysicsState,
    ) {
        // Read everything back to the CPU (each `slow_read` is a full GPU sync).
        let new_poses = state.poses.slow_read(gpu).await;
        let num_constraints = state.contacts_len.slow_read(gpu).await[0];
        let num_collision_pairs = state.collision_pairs_len.slow_read(gpu).await[0];
        let ids = state.new_body_constraint_ids.slow_read(gpu).await;
        let mut counts = state.new_constraints_counts.slow_read(gpu).await;
        println!("First constraint id: {}", ids[0]);
        println!("Num constraints: {}", num_constraints);
        println!("Num collision pairs: {}", num_collision_pairs);

        {
            // `counts` holds cumulative per-body constraint counts; prepending a 0 makes
            // each `windows(2)` pair `[start, end)` the id-range of one body's constraints.
            counts.insert(0, 0);
            let graph: HashMap<usize, Vec<u32>> = counts
                .windows(2)
                .enumerate()
                .map(|(body_id, rng)| {
                    let idx = ids[rng[0] as usize..rng[1] as usize].to_vec();
                    (body_id, idx)
                })
                .collect();
            // Inverse map: constraint id -> bodies referencing it.
            let mut c2b = vec![vec![]; num_constraints as usize];

            println!("Num constraints: {}", num_constraints);

            for (bid, constraints) in graph.iter() {
                for cid in constraints {
                    c2b[*cid as usize].push(*bid);
                    // A two-body constraint must be referenced by at most two bodies.
                    assert!(
                        c2b[*cid as usize].len() <= 2,
                        "Constraint {} involves: {:?}",
                        *cid,
                        &c2b[*cid as usize]
                    );
                }
            }

            // let mut colors = vec![-1; num_constraints as usize];
            //
            // for cid in 0..num_constraints {}

            // Report bodies with an unusually high constraint count — presumably a sign
            // of a broad-phase or coloring problem worth inspecting (TODO confirm threshold).
            for (k, w) in counts.windows(2).enumerate() {
                if w[1] - w[0] > 64 {
                    println!(
                        "Significant count for body {}: [{},{}] = {}. pose: {:?}",
                        k,
                        w[0],
                        w[1],
                        w[1] - w[0],
                        new_poses[k]
                    );
                }
            }
        }

        for pos in &new_poses {
            #[allow(clippy::eq_op)] // We want to check for nans.
            if pos != pos {
                println!("###### Incorrect new pos: {:?}", pos);
            }
        }
    }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/dynamics/coloring.rs | crates/wgrapier/src/dynamics/coloring.rs | //! Graph coloring algorithms for parallel constraint solving.
//!
//! This module implements two graph coloring algorithms that enable parallel constraint solving
//! on the GPU:
//!
//! # TOPO-GC (Topological Graph Coloring)
//!
//! A fast coloring algorithm that typically produces fewer colors and converges
//! in fewer iterations. This is the primary algorithm used by default.
//!
//! **Algorithm**: Iteratively assigns colors to constraints based on local topology. Conflicts
//! are detected and resolved in each iteration until convergence.
//!
//! **Advantages**:
//! - Fast convergence (typically < 10 iterations).
//! - Produces fewer colors (better parallelism for constraints resolution).
//!
//! **Disadvantages**:
//! - May fail to converge for highly complex constraint graphs (if too many colors are needed).
//! - Falls back to Luby if it doesn't converge within iteration limit.
//!
//! # Luby's Algorithm
//!
//! A randomized coloring algorithm used as a fallback when TOPO-GC fails or for very complex
//! constraint graphs.
//!
//! **Algorithm**: Each constraint randomly selects itself or neighbors in each iteration.
//! Selected constraints that don't conflict get the same color.
//!
//! **Advantages**:
//! - Always converges (probabilistically).
//! - Handles arbitrary constraint graphs.
//!
//! **Disadvantages**:
//! - Slower convergence.
//! - May produce more colors (less parallelism for constraints resolution).
use crate::dynamics::{GpuTwoBodyConstraint, WgBody, WgConstraint, WgSimParams};
use crate::pipeline::RunStats;
use wgcore::gpu::GpuInstance;
use wgcore::indirect::DispatchIndirectArgs;
use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
use wgcore::tensor::{GpuScalar, GpuVector};
use wgcore::Shader;
use wgparry::{dim_shader_defs, substitute_aliases};
use wgpu::{BufferAddress, ComputePipeline};
/// GPU shaders for constraint graph coloring.
///
/// Contains compute pipelines for both TOPO-GC and Luby's algorithm.
///
/// NOTE(review): the `Shader` derive presumably resolves each pipeline field to the
/// WGSL entry point of the same name in `coloring.wgsl` — confirm before renaming fields.
#[derive(Shader)]
#[shader(
    derive(WgSimParams, WgBody, WgConstraint),
    src = "coloring.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs",
    composable = false
)]
pub struct WgColoring {
    /// Initializes state for Luby's algorithm.
    reset_luby: ComputePipeline,
    /// One iteration of Luby's coloring.
    step_graph_coloring_luby: ComputePipeline,
    /// Initializes state for TOPO-GC algorithm.
    reset_topo_gc: ComputePipeline,
    /// Resets the completion flag for TOPO-GC iteration.
    reset_completion_flag_topo_gc: ComputePipeline,
    /// One iteration of TOPO-GC coloring.
    step_graph_coloring_topo_gc: ComputePipeline,
    /// Detects and fixes conflicts in TOPO-GC coloring.
    fix_conflicts_topo_gc: ComputePipeline,
}
/// Arguments for graph coloring dispatch.
///
/// Contains all GPU buffers needed by the coloring algorithms.
#[derive(Copy, Clone)]
pub struct ColoringArgs<'a> {
    /// Indirect dispatch arguments based on contact count.
    pub contacts_len_indirect: &'a GpuScalar<DispatchIndirectArgs>,
    /// Number of constraints per body.
    pub body_constraint_counts: &'a GpuVector<u32>,
    /// Constraint IDs associated with each body.
    pub body_constraint_ids: &'a GpuVector<u32>,
    /// The constraints to be colored.
    pub constraints: &'a GpuVector<GpuTwoBodyConstraint>,
    /// Output: color assigned to each constraint.
    pub constraints_colors: &'a GpuVector<u32>,
    /// Random values for Luby's algorithm.
    pub constraints_rands: &'a GpuVector<u32>,
    /// Current color being assigned.
    pub curr_color: &'a GpuScalar<u32>,
    /// For Luby: count of remaining uncolored constraints. For TOPO-GC: the value read
    /// back after each batch of iterations (zero while coloring is still in progress,
    /// and used as the max color reached once it becomes nonzero).
    pub uncolored: &'a GpuScalar<u32>,
    /// Staging buffer for reading uncolored count on CPU.
    pub uncolored_staging: &'a GpuScalar<u32>,
    /// Total number of contacts.
    pub contacts_len: &'a GpuScalar<u32>,
    /// Buffer tracking which constraints are colored.
    pub colored: &'a GpuVector<u32>,
}
impl WgColoring {
    #[allow(dead_code)]
    const WORKGROUP_SIZE: u32 = 64;

    /// Executes Luby's randomized graph coloring algorithm.
    ///
    /// This method runs Luby's algorithm iteratively until all constraints are colored.
    /// Each iteration assigns one color to a subset of constraints.
    ///
    /// # Parameters
    ///
    /// - `gpu`: GPU instance for command submission
    /// - `args`: Coloring arguments containing constraint graph buffers
    /// - `stats`: Statistics structure to record coloring performance
    ///
    /// # Returns
    ///
    /// The total number of colors used. **Note**: Colors are 1-indexed, so valid
    /// color indices are `[1..result]`.
    ///
    /// # CPU-GPU Synchronization
    ///
    /// This method requires CPU-GPU synchronization after each iteration to check
    /// if any constraints remain uncolored, which can add overhead.
    pub async fn dispatch_luby<'a>(
        &self,
        gpu: &GpuInstance,
        args: ColoringArgs<'a>,
        stats: &mut RunStats,
    ) -> u32 {
        let t0 = web_time::Instant::now();
        let queue = gpu.queue();
        let device = gpu.device();

        // Reset per-constraint colors and random values before the first iteration.
        let mut encoder = device.create_command_encoder(&Default::default());
        let mut pass = encoder.compute_pass("coloring_reset", None);
        KernelDispatch::new(device, &mut pass, &self.reset_luby)
            .bind_at(
                0,
                [
                    (args.constraints_colors.buffer(), 3),
                    (args.constraints_rands.buffer(), 4),
                    (args.contacts_len.buffer(), 7),
                ],
            )
            .dispatch_indirect(args.contacts_len_indirect.buffer())

;
        drop(pass);
        queue.submit(Some(encoder.finish()));

        let mut num_colors = 0;

        // Each loop iteration assigns one color; stop once nothing is left uncolored.
        for color in 1u32.. {
            let mut encoder = device.create_command_encoder(&Default::default());
            let mut pass = encoder.compute_pass("coloring", None);
            queue.write_buffer(args.curr_color.buffer(), 0, bytemuck::cast_slice(&[color]));
            queue.write_buffer(args.uncolored.buffer(), 0, bytemuck::cast_slice(&[0u32]));
            KernelDispatch::new(device, &mut pass, &self.step_graph_coloring_luby)
                .bind0([
                    args.body_constraint_counts.buffer(),
                    args.body_constraint_ids.buffer(),
                    args.constraints.buffer(),
                    args.constraints_colors.buffer(),
                    args.constraints_rands.buffer(),
                    args.curr_color.buffer(),
                    args.uncolored.buffer(),
                    args.contacts_len.buffer(),
                ])
                .dispatch_indirect(args.contacts_len_indirect.buffer());
            drop(pass);

            // Copy the `uncolored` counter to a staging buffer so it can be mapped on the CPU.
            encoder.copy_buffer_to_buffer(
                args.uncolored.buffer(),
                0,
                args.uncolored_staging.buffer(),
                0,
                size_of::<u32>() as BufferAddress,
            );
            queue.submit(Some(encoder.finish()));

            let mut uncolored = [0u32];
            args.uncolored_staging
                .read_to(gpu.device(), &mut uncolored)
                .await
                .unwrap();

            if uncolored[0] == 0 {
                // Colors 1..=color were assigned; return `color + 1` so iterating over
                // `0..num_colors` covers every 1-based color index.
                num_colors = color + 1;
                break;
            }
        }

        stats.num_colors = num_colors;
        stats.coloring_fallback_time = t0.elapsed();
        num_colors
    }

    /// Executes the TOPO-GC (Topological Graph Coloring) algorithm.
    ///
    /// TOPO-GC is the primary coloring algorithm, typically faster and producing fewer colors
    /// than Luby. It may fail to converge for very complex graphs, in which case it returns
    /// `None` and the caller should fall back to [`dispatch_luby`](Self::dispatch_luby).
    ///
    /// # Parameters
    ///
    /// - `gpu`: GPU instance for command submission
    /// - `args`: Coloring arguments containing constraint graph buffers
    /// - `stats`: Statistics structure to record coloring performance
    ///
    /// # Returns
    ///
    /// - `Some(num_colors)` if coloring succeeded. **Note**: Colors are 1-indexed.
    /// - `None` if the algorithm failed to converge (exceeded iteration limit).
    ///
    /// # Performance Optimization
    ///
    /// To reduce CPU-GPU synchronization overhead, this method batches 10 iterations
    /// per readback. In the future, the batch size could be dynamically adjusted based on previous
    /// frame's convergence rate for better performance.
    pub async fn dispatch_topo_gc<'a>(
        &self,
        gpu: &GpuInstance,
        args: ColoringArgs<'a>,
        stats: &mut RunStats,
    ) -> Option<u32> {
        let t0 = web_time::Instant::now();
        let queue = gpu.queue();
        let device = gpu.device();

        // Reset per-constraint colors and the "colored" flags.
        let mut encoder = device.create_command_encoder(&Default::default());
        let mut pass = encoder.compute_pass("coloring_reset", None);
        KernelDispatch::new(device, &mut pass, &self.reset_topo_gc)
            .bind_at(
                0,
                [
                    (args.constraints_colors.buffer(), 3),
                    (args.contacts_len.buffer(), 7),
                ],
            )
            .bind_at(1, [(args.colored.buffer(), 1)])
            .dispatch_indirect(args.contacts_len_indirect.buffer());
        drop(pass);
        queue.submit(Some(encoder.finish()));

        let mut num_loops = 0;
        let mut max_color = [0u32];

        loop {
            num_loops += 1;

            // Give up after too many batches; the caller falls back to Luby.
            if num_loops > 64 {
                stats.coloring_time = t0.elapsed();
                return None;
            }

            let mut encoder = device.create_command_encoder(&Default::default());
            let mut pass = encoder.compute_pass("coloring", None);

            // PERF: we queue multiple passes directly to reduce the frequency when we have to read
            //       the stop flag on the CPU side (it has a huge overhead, not entirely sure why).
            // TODO PERF: we could auto-adjust the number of inner loops based on the last frame’s
            //            color count. That way we can reduce to a minimum the number of times we
            //            end up having to run more than a single inner loop before convergence.
            for _ in 0..10 {
                KernelDispatch::new(device, &mut pass, &self.reset_completion_flag_topo_gc)
                    .bind0([])
                    .bind_at(1, [(args.uncolored.buffer(), 0)])
                    .dispatch(1);
                KernelDispatch::new(device, &mut pass, &self.step_graph_coloring_topo_gc)
                    .bind_at(
                        0,
                        [
                            (args.body_constraint_counts.buffer(), 0),
                            (args.body_constraint_ids.buffer(), 1),
                            (args.constraints.buffer(), 2),
                            (args.constraints_colors.buffer(), 3),
                            (args.contacts_len.buffer(), 7),
                        ],
                    )
                    .bind_at(
                        1,
                        [(args.uncolored.buffer(), 0), (args.colored.buffer(), 1)],
                    )
                    .dispatch_indirect(args.contacts_len_indirect.buffer());
                KernelDispatch::new(device, &mut pass, &self.fix_conflicts_topo_gc)
                    .bind_at(
                        0,
                        [
                            (args.body_constraint_counts.buffer(), 0),
                            (args.body_constraint_ids.buffer(), 1),
                            (args.constraints.buffer(), 2),
                            (args.constraints_colors.buffer(), 3),
                            (args.contacts_len.buffer(), 7),
                        ],
                    )
                    .bind_at(
                        1,
                        [(args.uncolored.buffer(), 0), (args.colored.buffer(), 1)],
                    )
                    .dispatch_indirect(args.contacts_len_indirect.buffer());
            }

            drop(pass);

            // Read back the completion value; it stays 0 until coloring converged.
            encoder.copy_buffer_to_buffer(
                args.uncolored.buffer(),
                0,
                args.uncolored_staging.buffer(),
                0,
                size_of::<u32>() as BufferAddress,
            );
            queue.submit(Some(encoder.finish()));

            args.uncolored_staging
                .read_to(gpu.device(), &mut max_color)
                .await
                .unwrap();

            if max_color[0] != 0 {
                break;
            }
        }

        stats.coloring_time = t0.elapsed();
        stats.num_colors = max_color[0]; // Don’t add 1 for the color count.
        stats.coloring_iterations = num_loops;
        Some(max_color[0] + 1) // NOTE: color indices are 1-based.
    }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/dynamics/solver.rs | crates/wgrapier/src/dynamics/solver.rs | //! GPU-parallel constraint solver using graph coloring.
//!
//! This module implements a constraint-based physics solver that runs entirely on the GPU.
//! It uses graph coloring to enable parallel constraint solving while avoiding data races.
//!
//! # Solver Variants
//!
//! The module supports two main solver algorithms:
//!
//! - `Soft-TGS` (the same approach as Rapier and other engines like Box2D). This operates by splitting the simulation
//! timestep into smaller substeps in order to lower errors caused by nonlinearities (e.g. rotations). Each substep
//! is solved with a single PGS iteration (with bias) followed by position update, followed by another PGS iteration
//! (without bias).
//! - `Soft-Jacobi`: this is similar to `Soft-TGS` but using a pseudo-Jacobi solver instead of PGS. It is "Jacobi-like"
//! because, instead of solving each constraint independently in parallel, each **body** is solved
//! in parallel. This means that each thread will solve all the constraints affecting a given body
//! independently. Note that this technically violates Newton’s third law. However, vanilla Jacobi
//! proved to be entirely useless (too unstable) so we made this compromise.
use crate::dynamics::constraint::{GpuTwoBodyConstraint, GpuTwoBodyConstraintBuilder};
use crate::dynamics::joint::{JointSolverArgs, WgJointSolver};
use crate::dynamics::prefix_sum::{PrefixSumWorkspace, WgPrefixSum};
use crate::dynamics::{GpuLocalMassProperties, GpuVelocity, GpuWorldMassProperties, WgConstraint};
use crate::dynamics::{GpuSimParams, WgBody, WgSimParams};
use wgcore::indirect::DispatchIndirectArgs;
use wgcore::kernel::KernelDispatch;
use wgcore::tensor::{GpuScalar, GpuVector};
use wgcore::Shader;
use wgebra::geometry::WgInv;
use wgparry::math::GpuSim;
use wgparry::queries::GpuIndexedContact;
use wgparry::{dim_shader_defs, substitute_aliases};
use wgpu::{ComputePass, ComputePipeline, Device};
/// GPU shader bundle for the constraint solver.
///
/// This shader contains all the compute pipelines needed for constraint-based physics solving,
/// including constraint initialization, sorting, solving, and integration.
///
/// NOTE(review): the `Shader` derive presumably resolves each pipeline field to the
/// WGSL entry point of the same name in `solver.wgsl` — confirm before renaming fields.
#[derive(Shader)]
#[shader(
    derive(WgSimParams, WgBody, WgConstraint, WgInv),
    src = "solver.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs",
    composable = false
)]
pub struct WgSolver {
    /// Initializes constraints from contact manifolds.
    init_constraints: ComputePipeline,
    /// Sorts constraints by body pairs for cache coherency.
    sort_constraints: ComputePipeline,
    /// Updates nonlinear constraint terms during substeps.
    update_constraints: ComputePipeline,
    /// Clears solver velocities and constraint counts.
    cleanup: ComputePipeline,
    /// Applies warmstart impulses from previous frame.
    warmstart: ComputePipeline,
    /// Applies warmstart impulses from previous frame, without relying on graph coloring.
    warmstart_without_colors: ComputePipeline,
    /// Jacobi iteration step (parallel, lock-free).
    step_jacobi: ComputePipeline,
    /// Gauss-Seidel iteration step (sequential per color).
    step_gauss_seidel: ComputePipeline,
    /// Initializes solver velocity increments.
    init_solver_vels_inc: ComputePipeline,
    /// Applies accumulated solver velocity increments.
    apply_solver_vels_inc: ComputePipeline,
    /// Integrates positions from velocities.
    integrate: ComputePipeline,
    /// Writes solver velocities back to body storage.
    finalize: ComputePipeline,
    /// Removes CFM and bias terms for velocity-only solving.
    remove_cfm_and_bias: ComputePipeline,
    /// Resets the current color index to 0.
    reset_color: ComputePipeline,
    /// Increments the current color index.
    inc_color: ComputePipeline,
}
/// Arguments for constraint solver dispatch.
///
/// This structure bundles all the GPU buffers and parameters needed for running
/// the constraint solver. It's used by both the [`WgSolver::prepare`] and
/// [`WgSolver::solve_tgs`] methods.
#[derive(Copy, Clone)]
pub struct SolverArgs<'a> {
    /// Total number of colliders in the simulation.
    pub num_colliders: u32,
    /// Contact manifolds generated by narrow-phase.
    pub contacts: &'a GpuVector<GpuIndexedContact>,
    /// Number of contacts.
    pub contacts_len: &'a GpuScalar<u32>,
    /// Indirect dispatch arguments based on contact count.
    pub contacts_len_indirect: &'a GpuScalar<DispatchIndirectArgs>,
    /// Solver constraints (output from constraint initialization).
    pub constraints: &'a GpuVector<GpuTwoBodyConstraint>,
    /// Builder data for initializing constraints.
    pub constraint_builders: &'a GpuVector<GpuTwoBodyConstraintBuilder>,
    /// Global simulation parameters.
    pub sim_params: &'a GpuScalar<GpuSimParams>,
    /// Number of colliders (as buffer).
    pub colliders_len: &'a GpuScalar<u32>,
    /// Indirect dispatch arguments for collider count.
    pub colliders_len_indirect: &'a GpuScalar<[u32; 3]>,
    /// Rigid body poses.
    pub poses: &'a GpuVector<GpuSim>,
    /// Rigid body velocities.
    pub vels: &'a GpuVector<GpuVelocity>,
    /// Solver working velocities.
    pub solver_vels: &'a GpuVector<GpuVelocity>,
    /// Solver output velocities (Jacobi only).
    pub solver_vels_out: &'a GpuVector<GpuVelocity>,
    /// Accumulated velocity increments during substeps.
    pub solver_vels_inc: &'a GpuVector<GpuVelocity>,
    /// World-space mass properties.
    pub mprops: &'a GpuVector<GpuWorldMassProperties>,
    /// Local-space mass properties.
    pub local_mprops: &'a GpuVector<GpuLocalMassProperties>,
    /// Number of constraints per body.
    pub body_constraint_counts: &'a GpuVector<u32>,
    /// Constraint IDs associated with each body.
    pub body_constraint_ids: &'a GpuVector<u32>,
    /// Color assigned to each constraint by graph coloring.
    pub constraints_colors: &'a GpuVector<u32>,
    /// Current color being processed.
    pub curr_color: &'a GpuScalar<u32>,
    /// Prefix sum shader for building constraint ranges.
    pub prefix_sum: &'a WgPrefixSum,
    /// Total number of colors from graph coloring. Color indices are 1-based; the
    /// solver runs `0..num_colors` color iterations per step.
    pub num_colors: u32,
}
impl WgSolver {
    const WORKGROUP_SIZE: u32 = 64;

    /// Prepares constraints for solving.
    ///
    /// This method:
    /// 1. Clears solver velocities and constraint counts
    /// 2. Initializes constraints from contact manifolds
    /// 3. Performs prefix sum to build body-to-constraint mapping
    /// 4. Sorts constraints by body pairs
    ///
    /// # Parameters
    ///
    /// - `device`: The WebGPU device
    /// - `pass`: Active compute pass for dispatching kernels
    /// - `args`: Solver arguments containing all necessary buffers
    /// - `prefix_sum_workspace`: Workspace for the prefix sum algorithm
    ///
    /// # GPU Performance
    ///
    /// Uses indirect dispatches based on the number of contacts, allowing the GPU
    /// to handle variable workloads efficiently.
    pub fn prepare<'a>(
        &self,
        device: &Device,
        pass: &mut ComputePass,
        args: SolverArgs<'a>,
        prefix_sum_workspace: &'a mut PrefixSumWorkspace,
    ) {
        KernelDispatch::new(device, pass, &self.cleanup)
            .bind_at(
                0,
                [
                    (args.solver_vels.buffer(), 3),
                    (args.body_constraint_counts.buffer(), 5),
                ],
            )
            .bind_at(
                1,
                [
                    (args.vels.buffer(), 2),
                    (args.mprops.buffer(), 3),
                    (args.colliders_len.buffer(), 4),
                ],
            )
            .dispatch(args.num_colliders.div_ceil(Self::WORKGROUP_SIZE));

        // Init constraints.
        KernelDispatch::new(device, pass, &self.init_constraints)
            .bind_at(
                0,
                [
                    (args.contacts.buffer(), 0),
                    (args.contacts_len.buffer(), 1),
                    (args.constraints.buffer(), 2),
                    (args.body_constraint_counts.buffer(), 5),
                ],
            )
            .bind_at(
                1,
                [
                    (args.sim_params.buffer(), 0),
                    (args.poses.buffer(), 1),
                    (args.vels.buffer(), 2),
                    (args.mprops.buffer(), 3),
                    (args.constraint_builders.buffer(), 7),
                ],
            )
            .dispatch_indirect(args.contacts_len_indirect.buffer());

        // Turn per-body constraint counts into cumulative offsets for the id lists.
        args.prefix_sum.dispatch(
            device,
            pass,
            prefix_sum_workspace,
            args.body_constraint_counts,
        );

        KernelDispatch::new(device, pass, &self.sort_constraints)
            .bind_at(
                0,
                [
                    (args.contacts.buffer(), 0),
                    (args.contacts_len.buffer(), 1),
                    (args.body_constraint_counts.buffer(), 5),
                    (args.body_constraint_ids.buffer(), 6),
                ],
            )
            .bind_at(1, [(args.mprops.buffer(), 3)])
            .dispatch_indirect(args.contacts_len_indirect.buffer());
    }

    /// Solves constraints using the TGS (Temporal Gauss-Seidel) algorithm.
    ///
    /// TGS is a constraint solver that provides better stability and convergence
    /// than traditional PGS. It uses multiple substeps with intermediate position integration
    /// and separate phases for bias (penetration correction) and velocity solving.
    ///
    /// # Algorithm Steps (per substep)
    ///
    /// 1. Apply accumulated velocity increments.
    /// 2. Update nonlinear constraint terms based on new positions.
    /// 3. Warmstart: apply previous frame's impulses.
    /// 4. Solve with bias: correct penetrations.
    /// 5. Integrate positions.
    /// 6. Solve without bias: solve for velocities only.
    pub fn solve_tgs<'a>(
        &self,
        device: &Device,
        pass: &mut ComputePass,
        joint_solver: &WgJointSolver,
        mut args: SolverArgs<'a>,
        joint_args: JointSolverArgs<'a>,
        use_jacobi: bool,
    ) {
        let num_substeps = 4; // TODO: make this configurable.

        /*
         * Init solver vel increments.
         */
        KernelDispatch::new(device, pass, &self.init_solver_vels_inc)
            .bind_at(0, [])
            .bind_at(
                1,
                [
                    (args.sim_params.buffer(), 0),
                    (args.mprops.buffer(), 3),
                    (args.colliders_len.buffer(), 4),
                    (args.solver_vels_inc.buffer(), 6),
                ],
            )
            .dispatch((args.vels.len() as u32).div_ceil(Self::WORKGROUP_SIZE));

        joint_solver.init(device, pass, &joint_args);

        for _ in 0..num_substeps {
            /*
             * Apply solver velocities increments.
             */
            KernelDispatch::new(device, pass, &self.apply_solver_vels_inc)
                .bind_at(0, [(args.solver_vels.buffer(), 3)])
                .bind_at(
                    1,
                    [
                        (args.colliders_len.buffer(), 4),
                        (args.solver_vels_inc.buffer(), 6),
                    ],
                )
                .dispatch((args.vels.len() as u32).div_ceil(Self::WORKGROUP_SIZE));

            /*
             * Update nonlinear terms.
             */
            KernelDispatch::new(device, pass, &self.update_constraints)
                .bind_at(
                    0,
                    [
                        (args.contacts_len.buffer(), 1),
                        (args.constraints.buffer(), 2),
                    ],
                )
                .bind_at(
                    1,
                    [
                        (args.sim_params.buffer(), 0),
                        (args.poses.buffer(), 1),
                        (args.constraint_builders.buffer(), 7),
                    ],
                )
                .dispatch_indirect(args.contacts_len_indirect.buffer());

            joint_solver.update(device, pass, &joint_args);

            /*
             * Warmstart.
             */
            if !use_jacobi {
                const WARMSTART_WITHOUT_COLORS: bool = false;
                if WARMSTART_WITHOUT_COLORS {
                    KernelDispatch::new(device, pass, &self.warmstart_without_colors)
                        .bind_at(
                            0,
                            [
                                (args.constraints.buffer(), 2),
                                (args.solver_vels.buffer(), 3),
                                (args.body_constraint_counts.buffer(), 5),
                                (args.body_constraint_ids.buffer(), 6),
                            ],
                        )
                        .bind_at(1, [(args.colliders_len.buffer(), 4)])
                        .dispatch((args.vels.len() as u32).div_ceil(Self::WORKGROUP_SIZE));
                } else {
                    // One warmstart dispatch per color; `curr_color` selects the active color.
                    KernelDispatch::new(device, pass, &self.reset_color)
                        .bind_at(0, [(args.curr_color.buffer(), 8)])
                        .dispatch(1);
                    for _ in 0..args.num_colors {
                        KernelDispatch::new(device, pass, &self.warmstart)
                            .bind_at(
                                0,
                                [
                                    (args.contacts_len.buffer(), 1),
                                    (args.constraints.buffer(), 2),
                                    (args.solver_vels.buffer(), 3),
                                    (args.constraints_colors.buffer(), 7),
                                    (args.curr_color.buffer(), 8),
                                ],
                            )
                            .dispatch_indirect(args.contacts_len_indirect.buffer());
                        KernelDispatch::new(device, pass, &self.inc_color)
                            .bind_at(0, [(args.curr_color.buffer(), 8)])
                            .dispatch(1);
                    }
                }
            }

            /*
             * Solve with bias.
             */
            joint_solver.solve(device, pass, &joint_args, true);

            if !use_jacobi {
                KernelDispatch::new(device, pass, &self.reset_color)
                    .bind_at(0, [(args.curr_color.buffer(), 8)])
                    .dispatch(1);
                for _ in 0..args.num_colors {
                    KernelDispatch::new(device, pass, &self.step_gauss_seidel)
                        .bind_at(
                            0,
                            [
                                (args.contacts_len.buffer(), 1),
                                (args.constraints.buffer(), 2),
                                (args.solver_vels.buffer(), 3),
                                (args.constraints_colors.buffer(), 7),
                                (args.curr_color.buffer(), 8),
                            ],
                        )
                        .dispatch_indirect(args.contacts_len_indirect.buffer());
                    KernelDispatch::new(device, pass, &self.inc_color)
                        .bind_at(0, [(args.curr_color.buffer(), 8)])
                        .dispatch(1);
                }
            } else {
                KernelDispatch::new(device, pass, &self.step_jacobi)
                    .bind_at(
                        0,
                        [
                            (args.constraints.buffer(), 2),
                            (args.solver_vels.buffer(), 3),
                            (args.solver_vels_out.buffer(), 4),
                            (args.body_constraint_counts.buffer(), 5),
                            (args.body_constraint_ids.buffer(), 6),
                        ],
                    )
                    .bind_at(1, [(args.colliders_len.buffer(), 4)])
                    .dispatch((args.vels.len() as u32).div_ceil(Self::WORKGROUP_SIZE));
                // Jacobi writes into `solver_vels_out`; ping-pong the buffers.
                std::mem::swap(&mut args.solver_vels, &mut args.solver_vels_out);
            }

            /*
             * Integrate positions only.
             */
            KernelDispatch::new(device, pass, &self.integrate)
                .bind_at(0, [(args.solver_vels.buffer(), 3)])
                .bind_at(
                    1,
                    [
                        (args.sim_params.buffer(), 0),
                        (args.poses.buffer(), 1),
                        (args.colliders_len.buffer(), 4),
                        (args.local_mprops.buffer(), 5),
                    ],
                )
                .dispatch((args.vels.len() as u32).div_ceil(Self::WORKGROUP_SIZE));

            /*
             * Solve WITHOUT bias.
             */
            joint_solver.solve(device, pass, &joint_args, false);

            KernelDispatch::new(device, pass, &self.remove_cfm_and_bias)
                .bind_at(
                    0,
                    [
                        (args.contacts_len.buffer(), 1),
                        (args.constraints.buffer(), 2),
                    ],
                )
                .dispatch_indirect(args.contacts_len_indirect.buffer());

            if !use_jacobi {
                KernelDispatch::new(device, pass, &self.reset_color)
                    .bind_at(0, [(args.curr_color.buffer(), 8)])
                    .dispatch(1);
                for _ in 0..args.num_colors {
                    KernelDispatch::new(device, pass, &self.step_gauss_seidel)
                        .bind_at(
                            0,
                            [
                                (args.contacts_len.buffer(), 1),
                                (args.constraints.buffer(), 2),
                                (args.solver_vels.buffer(), 3),
                                (args.constraints_colors.buffer(), 7),
                                (args.curr_color.buffer(), 8),
                            ],
                        )
                        .dispatch_indirect(args.contacts_len_indirect.buffer());
                    KernelDispatch::new(device, pass, &self.inc_color)
                        .bind_at(0, [(args.curr_color.buffer(), 8)])
                        .dispatch(1);
                }
            } else {
                KernelDispatch::new(device, pass, &self.step_jacobi)
                    .bind_at(
                        0,
                        [
                            (args.constraints.buffer(), 2),
                            (args.solver_vels.buffer(), 3),
                            (args.solver_vels_out.buffer(), 4),
                            (args.body_constraint_counts.buffer(), 5),
                            (args.body_constraint_ids.buffer(), 6),
                        ],
                    )
                    .bind_at(1, [(args.colliders_len.buffer(), 4)])
                    .dispatch((args.vels.len() as u32).div_ceil(Self::WORKGROUP_SIZE));
                std::mem::swap(&mut args.solver_vels, &mut args.solver_vels_out);
            }
        }

        /*
         * Writeback body velocities.
         */
        KernelDispatch::new(device, pass, &self.finalize)
            .bind_at(0, [(args.solver_vels.buffer(), 3)])
            .bind_at(
                1,
                [(args.vels.buffer(), 2), (args.colliders_len.buffer(), 4)],
            )
            .dispatch((args.vels.len() as u32).div_ceil(Self::WORKGROUP_SIZE));
    }
}
wgcore::test_shader_compilation!(WgSolver, wgcore, wgparry::dim_shader_defs());
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/dynamics/joint.rs | crates/wgrapier/src/dynamics/joint.rs | use crate::dynamics::{
GpuLocalMassProperties, GpuSimParams, GpuVelocity, GpuWorldMassProperties, WgBody, WgSimParams,
};
use encase::ShaderType;
use nalgebra::Vector2;
use rapier::dynamics::{
GenericJoint, ImpulseJoint, ImpulseJointSet, JointLimits, JointMotor, RigidBodyHandle,
};
use rapier::math::SPATIAL_DIM;
use rapier::prelude::MotorModel;
use std::collections::HashMap;
use wgcore::kernel::KernelDispatch;
use wgcore::tensor::{GpuScalar, GpuVector};
use wgcore::Shader;
use wgebra::{WgQuat, WgRot2, WgSim2, WgSim3};
use wgparry::math::{AngVector, GpuSim, Vector};
use wgparry::{dim_shader_defs, substitute_aliases};
use wgpu::{BufferUsages, ComputePass, ComputePipeline, Device};
#[cfg(feature = "dim2")]
use wgebra::GpuSim2;
#[cfg(feature = "dim3")]
use {
nalgebra::{Similarity3, Vector4},
wgebra::GpuSim3,
};
/// GPU-side mirror of a rapier [`ImpulseJoint`].
#[derive(Copy, Clone, Debug, ShaderType)]
pub(crate) struct GpuImpulseJoint {
    /// Index of the first attached body in the GPU body buffers.
    body_a: u32,
    /// Index of the second attached body in the GPU body buffers.
    body_b: u32,
    /// The joint description itself.
    data: GpuGenericJoint,
}
impl GpuImpulseJoint {
    /// Converts a rapier [`ImpulseJoint`] into its GPU representation.
    ///
    /// `body_id` maps rigid-body handles to their indices in the GPU buffers.
    ///
    /// # Panics
    ///
    /// Panics if either of the joint's body handles is missing from `body_id`.
    pub fn from_rapier(joint: &ImpulseJoint, body_id: &HashMap<RigidBodyHandle, u32>) -> Self {
        Self {
            body_a: body_id[&joint.body1],
            body_b: body_id[&joint.body2],
            data: joint.data.into(),
        }
    }
}
/// `encase`-compatible layout of a 2D similarity transform.
#[derive(Copy, Clone, Debug, ShaderType)]
#[cfg(feature = "dim2")]
struct EncasedGpuSim {
    // Real and imaginary parts of the unit complex number encoding the rotation.
    rotation: Vector2<f32>,
    // Translation vector.
    translation: Vector2<f32>,
    // Uniform scale factor.
    scale: f32,
}
#[cfg(feature = "dim2")]
impl From<GpuSim2> for EncasedGpuSim {
    /// Repacks a [`GpuSim2`] into the `encase`-friendly layout expected by the shaders.
    fn from(value: GpuSim2) -> Self {
        Self {
            // The unit complex rotation is flattened to its (re, im) components.
            rotation: [
                value.similarity.isometry.rotation.re,
                value.similarity.isometry.rotation.im,
            ]
            .into(),
            translation: value.similarity.isometry.translation.vector,
            scale: value.similarity.scaling(),
        }
    }
}
/// `encase`-compatible layout of a 3D similarity transform.
#[derive(Copy, Clone, Debug, ShaderType)]
#[cfg(feature = "dim3")]
struct EncasedGpuSim {
    // Rotation quaternion coordinates (x, y, z, w).
    rotation: Vector4<f32>,
    // Translation in xyz, uniform scale factor in w.
    translation_scale: Vector4<f32>,
}
#[cfg(feature = "dim3")]
impl From<GpuSim3> for EncasedGpuSim {
    /// Repacks a [`GpuSim3`] into the `encase`-friendly layout expected by the shaders.
    fn from(value: GpuSim3) -> Self {
        Self {
            rotation: value.isometry.rotation.coords,
            // Pack the scale into the 4th component of the translation vector.
            translation_scale: value.isometry.translation.vector.push(value.scaling()),
        }
    }
}
/// Per-joint data used to (re)build a joint constraint on the GPU.
#[derive(Copy, Clone, Debug, ShaderType)]
struct GpuJointConstraintBuilder {
    /// First attached body. NOTE(review): presumably an index into the GPU body
    /// buffers — confirm against the joint WGSL shader.
    body1: u32,
    /// Second attached body.
    body2: u32,
    /// Index of the source joint.
    joint_id: u32,
    /// The joint description.
    joint: GpuGenericJoint,
    /// Index of the constraint built from this joint.
    constraint_id: u32,
}
/// GPU-side mirror of rapier's [`GenericJoint`].
#[derive(Copy, Clone, Debug, ShaderType)]
struct GpuGenericJoint {
    /// Joint frame expressed in the first body's local space.
    local_frame_a: EncasedGpuSim,
    /// Joint frame expressed in the second body's local space.
    local_frame_b: EncasedGpuSim,
    /// Bitmask mirroring `GenericJoint::locked_axes`.
    locked_axes: u32,
    /// Bitmask mirroring `GenericJoint::limit_axes`.
    limit_axes: u32,
    /// Bitmask mirroring `GenericJoint::motor_axes`.
    motor_axes: u32,
    /// Bitmask mirroring `GenericJoint::coupled_axes`.
    coupled_axes: u32,
    /// One limit entry per spatial degree of freedom.
    limits: [GpuJointLimits; SPATIAL_DIM],
    /// One motor entry per spatial degree of freedom.
    motors: [GpuJointMotor; SPATIAL_DIM],
}
impl From<GenericJoint> for GpuGenericJoint {
fn from(value: GenericJoint) -> Self {
Self {
#[cfg(feature = "dim2")]
local_frame_a: GpuSim::from(value.local_frame1).into(),
#[cfg(feature = "dim2")]
local_frame_b: GpuSim::from(value.local_frame2).into(),
#[cfg(feature = "dim3")]
local_frame_a: Similarity3::from_isometry(value.local_frame1, 1.0).into(),
#[cfg(feature = "dim3")]
local_frame_b: Similarity3::from_isometry(value.local_frame2, 1.0).into(),
locked_axes: value.locked_axes.bits() as u32,
limit_axes: value.limit_axes.bits() as u32,
motor_axes: value.motor_axes.bits() as u32,
coupled_axes: value.coupled_axes.bits() as u32,
limits: value.limits.map(|e| e.into()),
motors: value.motors.map(|e| e.into()),
}
}
}
#[derive(Copy, Clone, Debug, ShaderType)]
/// Joint limit parameters for one degree of freedom, matching the corresponding WGSL struct.
struct GpuJointLimits {
    /// Smallest allowed value along this degree of freedom.
    min: f32,
    /// Largest allowed value along this degree of freedom.
    max: f32,
    /// Accumulated impulse applied to enforce the limit.
    impulse: f32,
}
impl From<JointLimits<f32>> for GpuJointLimits {
    /// Converts Rapier joint limits into their GPU representation.
    fn from(value: JointLimits<f32>) -> Self {
        Self {
            min: value.min,
            max: value.max,
            impulse: value.impulse,
        }
    }
}
#[derive(Copy, Clone, Debug, ShaderType)]
/// Joint motor parameters for one degree of freedom, matching the corresponding WGSL struct.
struct GpuJointMotor {
    /// Target relative velocity along this degree of freedom.
    target_vel: f32,
    /// Target relative position along this degree of freedom.
    target_pos: f32,
    /// Position-error gain of the motor.
    stiffness: f32,
    /// Velocity-error gain of the motor.
    damping: f32,
    /// Maximum force the motor may apply.
    max_force: f32,
    /// Accumulated motor impulse.
    impulse: f32,
    /// Motor model: 0 = acceleration-based, 1 = force-based.
    model: u32,
}
impl From<JointMotor> for GpuJointMotor {
    /// Converts a Rapier joint motor into its GPU representation.
    fn from(value: JointMotor) -> Self {
        Self {
            target_vel: value.target_vel,
            target_pos: value.target_pos,
            stiffness: value.stiffness,
            damping: value.damping,
            max_force: value.max_force,
            impulse: value.impulse,
            // Encode the motor model as an integer understood by the WGSL side.
            model: match value.model {
                MotorModel::AccelerationBased => 0,
                MotorModel::ForceBased => 1,
            },
        }
    }
}
#[derive(Copy, Clone, Debug, ShaderType)]
/// A solver constraint generated from one joint, matching the corresponding WGSL struct.
pub(crate) struct GpuJointConstraint {
    /// Index of the solver velocity of the first body.
    solver_vel_a: u32,
    /// Index of the solver velocity of the second body.
    solver_vel_b: u32,
    /// Inverse mass of the first body.
    im_a: Vector<f32>,
    /// Inverse mass of the second body.
    im_b: Vector<f32>,
    /// Constraint rows (at most one per spatial degree of freedom).
    elements: [GpuJointConstraintElement; SPATIAL_DIM],
    /// Number of active entries in `elements`.
    len: u32,
}
#[derive(Copy, Clone, Debug, ShaderType)]
/// A single row of a joint constraint, matching the corresponding WGSL struct.
struct GpuJointConstraintElement {
    /// Index of the joint this row was generated from.
    joint_id: u32,
    /// Accumulated impulse for this row.
    impulse: f32,
    /// Lower/upper bounds of the accumulated impulse.
    impulse_bounds: Vector2<f32>,
    /// Linear jacobian of this row.
    lin_jac: Vector<f32>,
    /// Angular jacobian for the first body.
    ang_jac_a: AngVector<f32>,
    /// Angular jacobian for the second body.
    ang_jac_b: AngVector<f32>,
    // NOTE(review): judging by the `ii_` prefix these are presumably the
    // inverse-inertia-multiplied angular jacobians — confirm against the WGSL solver.
    ii_ang_jac_a: AngVector<f32>,
    ii_ang_jac_b: AngVector<f32>,
    /// Inverse of the constraint's left-hand side.
    inv_lhs: f32,
    /// Right-hand side of the constraint equation (including bias).
    rhs: f32,
    /// Right-hand side without the bias term.
    rhs_wo_bias: f32,
    /// Constraint-force-mixing gain.
    cfm_gain: f32,
    /// Constraint-force-mixing coefficient.
    cfm_coeff: f32,
}
/// A set of impulse joints simulated on the GPU.
pub struct GpuImpulseJointSet {
    // Total number of joints in the set.
    len: u32,
    // Number of colors produced by the greedy graph coloring below.
    num_colors: u32,
    // Length of the largest color group (used to size the solve dispatches).
    max_color_group_len: u32,
    // GPU-side copy of `len` (also usable as a uniform).
    num_joints: GpuScalar<u32>,
    // Index of the color group currently being solved (updated on the GPU).
    curr_color: GpuScalar<u32>,
    // Prefix-summed color-group sizes (end offset of each group in `joints`).
    color_groups: GpuVector<u32>,
    // The joints, sorted by color.
    joints: GpuVector<GpuImpulseJoint>,
    builders: GpuVector<GpuJointConstraintBuilder>,
    constraints: GpuVector<GpuJointConstraint>,
}
impl GpuImpulseJointSet {
    /// Converts a set of Rapier joints to a set of GPU joints.
    ///
    /// This runs a greedy graph coloring on the CPU so that joints sharing a rigid-body
    /// never end up in the same color group, then sorts the joints by color. The solver
    /// can then process one color group at a time, fully in parallel within a group.
    ///
    /// `body_ids` maps Rapier rigid-body handles to their indices in the GPU body set.
    pub fn from_rapier(
        device: &Device,
        joints: &ImpulseJointSet,
        body_ids: &HashMap<RigidBodyHandle, u32>,
    ) -> Self {
        let usage = BufferUsages::STORAGE;
        let len = joints.len() as u32;
        let max_body_id = body_ids.values().copied().max().unwrap_or_default();
        // Convert joints.
        let mut unsorted_gpu_joints = vec![];
        for (_, joint) in joints.iter() {
            unsorted_gpu_joints.push(GpuImpulseJoint::from_rapier(joint, body_ids));
        }
        /*
         * Run a simple static greedy graph coloring, and group the joints.
         */
        let mut colors = vec![];
        // One bitmask per body: bit `c` is set if the body is already touched by a joint
        // of color `c`. NOTE(review): the u128 mask caps the coloring at 128 colors —
        // confirm this bound can't be exceeded in practice.
        let mut body_masks = vec![0u128; max_body_id as usize + 1];
        // Find colors: each joint takes the smallest color unused by both of its bodies.
        for joint in &unsorted_gpu_joints {
            // TODO: don’t take fixed bodies into account for the coloring.
            let a = joint.body_a as usize;
            let b = joint.body_b as usize;
            let mask = body_masks[a] | body_masks[b];
            let color = mask.trailing_ones();
            colors.push(color);
            body_masks[a] |= 1 << color;
            body_masks[b] |= 1 << color;
        }
        let num_colors = colors
            .iter()
            .copied()
            .max()
            .map(|n| n + 1)
            .unwrap_or_default();
        let mut color_groups = vec![0u32; num_colors as usize];
        // Count size of color groups.
        for color in &colors {
            color_groups[*color as usize] += 1;
        }
        let max_color_group_len = color_groups.iter().copied().max().unwrap_or_default();
        // println!(
        //     "Found {} colors. Max len: {}",
        //     num_colors, max_color_group_len
        // );
        // Prefix sum: `color_groups[i]` becomes the end offset of group `i`.
        for i in 0..color_groups.len().saturating_sub(1) {
            color_groups[i + 1] += color_groups[i];
        }
        // Bucket sort: `target` holds the (exclusive) start offset of each group,
        // advanced as joints are placed.
        let mut target = color_groups.clone();
        target.insert(0, 0);
        let mut sorted_gpu_joints = unsorted_gpu_joints.clone();
        for (joint, color) in unsorted_gpu_joints.iter().zip(colors.iter()) {
            sorted_gpu_joints[target[*color as usize] as usize] = *joint;
            target[*color as usize] += 1;
        }
        Self {
            len,
            num_colors,
            max_color_group_len,
            num_joints: GpuScalar::init(device, len, usage | BufferUsages::UNIFORM),
            curr_color: GpuScalar::init(device, 0, usage),
            color_groups: GpuVector::init(device, &color_groups, usage),
            joints: GpuVector::encase(device, &sorted_gpu_joints, usage),
            builders: GpuVector::uninit_encased(device, len, usage),
            constraints: GpuVector::uninit_encased(device, len, usage),
        }
    }
    /// Is this set empty?
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
    /// The number of joints in this set.
    pub fn len(&self) -> usize {
        self.len as usize
    }
}
#[derive(Shader)]
#[shader(
    derive(WgSim2, WgSim3),
    src = "joint.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs"
)]
/// Shader definition of joints.
///
/// Composable WGSL module (`joint.wgsl`) declaring the joint data structures.
pub struct WgJoint;
#[derive(Shader)]
#[shader(
    derive(WgJoint),
    src = "joint_constraint.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs"
)]
/// Shader definition of joint constraints.
///
/// Composable WGSL module (`joint_constraint.wgsl`) declaring the joint-constraint
/// data structures; builds on [`WgJoint`].
pub struct WgJointConstraint;
#[derive(Shader)]
#[shader(
    derive(
        WgJoint,
        WgJointConstraint,
        WgSimParams,
        WgBody,
        WgSim2,
        WgSim3,
        WgQuat,
        WgRot2
    ),
    src = "joint_constraint_builder.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs"
)]
/// A solver responsible for initializing and solving impulse-based joint constraints.
pub struct WgJointSolver {
    /// Builds the joint constraints from the joint descriptions.
    init: ComputePipeline,
    /// Refreshes the non-linear (pose-dependent) constraint terms.
    update: ComputePipeline,
    /// Applies one Projected-Gauss-Seidel step on a single color group.
    solve: ComputePipeline,
    /// Removes the bias term from the constraints' right-hand sides.
    remove_bias: ComputePipeline,
    /// Resets the current-color counter to zero.
    reset_color: ComputePipeline,
    /// Advances the current-color counter to the next color group.
    inc_color: ComputePipeline,
}
/// Arguments given to the joint solver.
pub struct JointSolverArgs<'a> {
    /// The simulation parameters.
    pub sim_params: &'a GpuScalar<GpuSimParams>,
    /// The set of joints to solve.
    pub joints: &'a GpuImpulseJointSet,
    /// The solver velocities of the rigid-bodies.
    pub solver_vels: &'a GpuVector<GpuVelocity>,
    /// Rigid body poses.
    pub poses: &'a GpuVector<GpuSim>,
    /// World-space mass properties.
    pub mprops: &'a GpuVector<GpuWorldMassProperties>,
    /// Local-space mass properties.
    pub local_mprops: &'a GpuVector<GpuLocalMassProperties>,
}
impl WgJointSolver {
    /// Number of threads per workgroup; must match the workgroup size in the WGSL kernels.
    const WORKGROUP_SIZE: u32 = 64;
    /// Generates the joint constraints for this set of joints.
    ///
    /// Dispatches one thread per joint.
    pub fn init(&self, device: &Device, pass: &mut ComputePass, args: &JointSolverArgs) {
        KernelDispatch::new(device, pass, &self.init)
            .bind_at(
                0,
                [
                    (args.joints.num_joints.buffer(), 0),
                    (args.joints.joints.buffer(), 1),
                    (args.joints.builders.buffer(), 2),
                    (args.joints.constraints.buffer(), 3),
                ],
            )
            .bind_at(
                1,
                [(args.poses.buffer(), 1), (args.local_mprops.buffer(), 3)],
            )
            .dispatch(args.joints.len.div_ceil(Self::WORKGROUP_SIZE))
    }
    /// Updates the non-linear terms of the joint constraints.
    ///
    /// Dispatches one thread per joint.
    pub fn update(&self, device: &Device, pass: &mut ComputePass, args: &JointSolverArgs) {
        KernelDispatch::new(device, pass, &self.update)
            .bind_at(
                0,
                [
                    (args.joints.num_joints.buffer(), 0),
                    (args.joints.builders.buffer(), 2),
                    (args.joints.constraints.buffer(), 3),
                ],
            )
            .bind_at(
                1,
                [
                    (args.sim_params.buffer(), 0),
                    (args.poses.buffer(), 1),
                    (args.mprops.buffer(), 4),
                ],
            )
            .dispatch(args.joints.len.div_ceil(Self::WORKGROUP_SIZE))
    }
    /// Apply a single Projected-Gauss-Seidel step for solving joints.
    ///
    /// This is intended to be used in the inner-loop of the TGS solver.
    /// When `use_bias` is `false`, the bias term is first removed from every
    /// constraint's right-hand side.
    pub fn solve(
        &self,
        device: &Device,
        pass: &mut ComputePass,
        args: &JointSolverArgs,
        use_bias: bool,
    ) {
        if !use_bias {
            KernelDispatch::new(device, pass, &self.remove_bias)
                .bind_at(
                    0,
                    [
                        (args.joints.num_joints.buffer(), 0),
                        (args.joints.constraints.buffer(), 3),
                    ],
                )
                .dispatch(args.joints.len.div_ceil(Self::WORKGROUP_SIZE))
        }
        KernelDispatch::new(device, pass, &self.reset_color)
            .bind_at(0, [(args.joints.curr_color.buffer(), 4)])
            .dispatch(1);
        // Solve the constraints one color group at a time. Constraints within the same
        // color never share a body, so each group can be solved without data races.
        for _ in 0..args.joints.num_colors {
            KernelDispatch::new(device, pass, &self.solve)
                .bind_at(
                    0,
                    [
                        (args.joints.constraints.buffer(), 3),
                        (args.joints.curr_color.buffer(), 4),
                        (args.joints.color_groups.buffer(), 5),
                    ],
                )
                .bind_at(1, [(args.solver_vels.buffer(), 2)])
                // TODO PERF: figure out a way to dispatch a number of threads that fits
                //            more tightly the size of the current color.
                .dispatch(
                    args.joints
                        .max_color_group_len
                        .div_ceil(Self::WORKGROUP_SIZE),
                );
            KernelDispatch::new(device, pass, &self.inc_color)
                .bind_at(0, [(args.joints.curr_color.buffer(), 4)])
                .dispatch(1);
        }
    }
}
wgcore::test_shader_compilation!(WgJoint, wgcore, wgparry::dim_shader_defs());
wgcore::test_shader_compilation!(WgJointConstraint, wgcore, wgparry::dim_shader_defs());
wgcore::test_shader_compilation!(WgJointSolver, wgcore, wgparry::dim_shader_defs());
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/dynamics/warmstart.rs | crates/wgrapier/src/dynamics/warmstart.rs | //! Warmstarting mechanism for constraint solver temporal coherence.
//!
//! Warmstarting reuses impulses from the previous simulation frame to initialize the
//! constraint solver, significantly improving convergence speed and stability. This exploits
//! temporal coherence - the observation that adjacent frames in a simulation tend to have
//! similar contact configurations and required impulses.
//!
//! # How It Works
//!
//! 1. After solving constraints in frame N, impulse accumulators are stored.
//! 2. In frame N+1, new contacts are matched against old contacts.
//! 3. Matching contacts inherit their previous impulses as starting guesses.
//! 4. The solver converges faster since it starts closer to the solution.
use crate::dynamics::WgConstraint;
use crate::dynamics::{GpuTwoBodyConstraint, GpuTwoBodyConstraintBuilder};
use wgcore::indirect::DispatchIndirectArgs;
use wgcore::kernel::KernelDispatch;
use wgcore::tensor::{GpuScalar, GpuVector};
use wgcore::Shader;
use wgparry::{dim_shader_defs, substitute_aliases};
use wgpu::{ComputePass, ComputePipeline, Device};
/// GPU shader for transferring warmstart impulses between frames.
///
/// This shader matches new contacts against old contacts and transfers impulse
/// accumulators when a match is found. See the module-level documentation for a
/// description of the warmstarting mechanism.
#[derive(Shader)]
#[shader(
    derive(WgConstraint),
    src = "warmstart.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs",
    composable = false
)]
pub struct WgWarmstart {
    /// Compute pipeline that matches contacts and transfers impulses.
    transfer_warmstart_impulses: ComputePipeline,
}
/// Arguments for warmstart dispatch.
///
/// Contains buffers for both old (previous frame) and new (current frame) constraint data.
/// The `old_*` buffers are read-only inputs; the `new_*` constraints receive the
/// transferred impulses.
#[derive(Copy, Clone)]
pub struct WarmstartArgs<'a> {
    /// Number of contacts in current frame.
    pub contacts_len: &'a GpuScalar<u32>,
    /// Constraint counts per body from previous frame.
    pub old_body_constraint_counts: &'a GpuVector<u32>,
    /// Constraint IDs per body from previous frame.
    pub old_body_constraint_ids: &'a GpuVector<u32>,
    /// Solver constraints from previous frame.
    pub old_constraints: &'a GpuVector<GpuTwoBodyConstraint>,
    /// Constraint builders from previous frame.
    pub old_constraint_builders: &'a GpuVector<GpuTwoBodyConstraintBuilder>,
    /// Solver constraints for current frame (to be warmstarted).
    pub new_constraints: &'a GpuVector<GpuTwoBodyConstraint>,
    /// Constraint builders for current frame.
    pub new_constraint_builders: &'a GpuVector<GpuTwoBodyConstraintBuilder>,
    /// Indirect dispatch arguments based on contact count.
    pub contacts_len_indirect: &'a GpuScalar<DispatchIndirectArgs>,
}
impl WgWarmstart {
    /// Transfers warmstart impulses from old constraints to new constraints.
    ///
    /// This method dispatches a compute shader that searches for matching contacts
    /// between the previous and current frames. When a match is found, the impulse
    /// accumulator from the old contact is copied to the new contact.
    ///
    /// The dispatch is indirect, sized by `args.contacts_len_indirect`.
    ///
    /// # Parameters
    ///
    /// - `device`: The WebGPU device.
    /// - `pass`: Active compute pass for dispatching.
    /// - `args`: Warmstart arguments containing old and new constraint buffers.
    pub fn transfer_warmstart_impulses(
        &self,
        device: &Device,
        pass: &mut ComputePass,
        // NOTE: the explicit `<'a>` lifetime parameter was redundant (clippy::needless_lifetimes);
        // the anonymous lifetime expresses the same constraint.
        args: WarmstartArgs<'_>,
    ) {
        KernelDispatch::new(device, pass, &self.transfer_warmstart_impulses)
            .bind0([
                args.contacts_len.buffer(),
                args.old_body_constraint_counts.buffer(),
                args.old_body_constraint_ids.buffer(),
                args.old_constraints.buffer(),
                args.old_constraint_builders.buffer(),
                args.new_constraints.buffer(),
                args.new_constraint_builders.buffer(),
            ])
            .dispatch_indirect(args.contacts_len_indirect.buffer());
    }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/dynamics/mod.rs | crates/wgrapier/src/dynamics/mod.rs | //! Rigid-body dynamics: forces, velocities, constraints, and solvers.
//!
//! This module contains all the components needed for simulating rigid-body dynamics on the GPU:
//!
//! # Core Components
//!
//! - **body**: Rigid body definitions including mass properties, velocities, and poses
//! - **constraint**: Contact constraints and their data structures for collision resolution
//! - **gravity**: Gravity force application
//! - **sim_params**: Global simulation parameters (timestep, solver iterations, etc.)
//!
//! # Constraint Solver
//!
//! - **solver**: GPU-parallel constraint solver using graph coloring
//! - **coloring**: Graph coloring algorithms (TOPO-GC and Luby) for parallelizing constraint solving
//! - **warmstart**: Warmstarting mechanism to reuse impulses from previous frame
//!
//! # Utilities
//!
//! - **[`prefix_sum`]**: GPU prefix sum algorithm used for parallel data compaction
//!
//! # Physics Concepts
//!
//! ## Constraint-Based Dynamics
//!
//! The physics simulation uses a constraint-based approach where contacts between bodies
//! are modeled as constraints that restrict relative motion. Each constraint generates
//! impulses that modify body velocities to resolve penetrations and simulate friction.
//!
//! ## Parallel Solving with Graph Coloring
//!
//! To solve constraints in parallel on the GPU, a graph coloring algorithm assigns colors
//! to constraints such that no two constraints of the same color share a body. This allows
//! all constraints with the same color to be solved simultaneously without data races.
//!
//! ## TGS (Total Gauss-Seidel) Solver
//!
//! The default solver uses a variation of Gauss-Seidel iteration with substeps and
//! bias/no-bias phases for improved stability and convergence.
pub use body::{
BodyDesc, GpuBodySet, GpuForce, GpuLocalMassProperties, GpuVelocity, GpuWorldMassProperties,
WgBody,
};
pub use coloring::{ColoringArgs, WgColoring};
pub use constraint::{
GpuTwoBodyConstraint, GpuTwoBodyConstraintBuilder, GpuTwoBodyConstraintInfos, WgConstraint,
};
pub use joint::{GpuImpulseJointSet, JointSolverArgs, WgJointSolver};
pub use mprops_update::WgMpropsUpdate;
pub use sim_params::{GpuSimParams, WgSimParams};
pub use solver::{SolverArgs, WgSolver};
pub use warmstart::{WarmstartArgs, WgWarmstart};
pub mod body;
mod coloring;
mod constraint;
mod joint;
mod mprops_update;
pub mod prefix_sum;
mod sim_params;
mod solver;
mod warmstart;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/dynamics/mprops_update.rs | crates/wgrapier/src/dynamics/mprops_update.rs | //! Rigid-bodies world-space mass properties calculation.
use crate::dynamics::{WgBody, WgSimParams};
use wgcore::Shader;
use wgparry::{dim_shader_defs, substitute_aliases};
use wgpu::ComputePipeline;
/// GPU shader for updating the world-space mass properties of rigid-bodies.
///
/// Recomputes each body's world-space mass properties from its local-space mass
/// properties and its current pose.
#[derive(Shader)]
#[shader(
    derive(WgBody, WgSimParams),
    src = "mprops_update.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs",
    composable = false
)]
pub struct WgMpropsUpdate {
    /// Compute pipeline for the mass-properties update kernel.
    ///
    /// Expected bind group layout:
    /// - Group 0: World mass properties, local mass properties, poses (storage buffers)
    pub main: ComputePipeline,
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/dynamics/body.rs | crates/wgrapier/src/dynamics/body.rs | //! Rigid-body definitions, mass properties, velocities, and GPU storage.
//!
//! This module provides the core data structures for representing rigid bodies on the GPU,
//! including their poses, velocities, forces, and mass properties. It also provides
//! [`GpuBodySet`] for managing collections of rigid bodies in GPU memory.
use encase::ShaderType;
use num_traits::Zero;
use rapier::geometry::ColliderHandle;
use rapier::prelude::MassProperties;
use rapier::{
dynamics::{RigidBodyHandle, RigidBodySet},
geometry::ColliderSet,
};
use wgcore::tensor::GpuVector;
use wgcore::Shader;
use wgebra::{WgQuat, WgSim2, WgSim3};
use wgparry::math::{AngVector, AngularInertia, GpuSim, Point, Vector};
use wgparry::shapes::{GpuShape, ShapeBuffers};
use wgparry::{dim_shader_defs, substitute_aliases};
use wgpu::{BufferUsages, Device};
#[cfg(feature = "dim3")]
use nalgebra::Vector4;
#[cfg(feature = "dim3")]
use wgebra::GpuSim3;
#[derive(ShaderType, Copy, Clone, PartialEq)]
#[repr(C)]
/// Linear and angular forces with a layout compatible with the corresponding WGSL struct.
pub struct GpuForce {
    /// The linear part of the force.
    pub linear: Vector<f32>,
    /// The angular part of the force (aka. the torque).
    pub angular: AngVector<f32>,
}
#[derive(ShaderType, Copy, Clone, PartialEq, Default, Debug)]
#[repr(C)]
/// Linear and angular velocities with a layout compatible with the corresponding WGSL struct.
pub struct GpuVelocity {
    /// The linear (translational) velocity.
    pub linear: Vector<f32>,
    /// The angular (rotational) velocity (a scalar in 2D, a vector in 3D).
    pub angular: AngVector<f32>,
}
#[derive(ShaderType, Copy, Clone, PartialEq)]
#[repr(C)]
/// Rigid-body mass-properties in local space, with a layout compatible with the corresponding WGSL struct.
pub struct GpuLocalMassProperties {
    /// Square root of inverse principal inertia (scalar in 2D).
    #[cfg(feature = "dim2")]
    pub inv_principal_inertia: f32,
    // Quaternion coordinates of the principal inertia local frame (filled from
    // `principal_inertia_local_frame.coords`, see `From<MassProperties>` below).
    // NOTE(review): the `inv_` prefix suggests the inverse rotation — confirm what
    // the WGSL side expects.
    #[cfg(feature = "dim3")]
    inv_ref_frame: Vector4<f32>,
    /// Square root of inverse principal inertia tensor (3D vector in 3D).
    #[cfg(feature = "dim3")]
    pub inv_principal_inertia: nalgebra::Vector3<f32>,
    /// The inverse mass (replicated on each lane).
    pub inv_mass: Vector<f32>,
    /// The center-of-mass.
    pub com: Vector<f32>, // ShaderType isn’t implemented for Point
}
#[derive(ShaderType, Copy, Clone, PartialEq)]
#[repr(C)]
/// Rigid-body mass-properties in world space, with a layout compatible with the corresponding WGSL struct.
pub struct GpuWorldMassProperties {
    /// The inverse angular inertia tensor.
    pub inv_inertia: AngularInertia<f32>,
    /// The inverse mass (replicated on each lane).
    pub inv_mass: Vector<f32>,
    /// The center-of-mass.
    pub com: Vector<f32>, // ShaderType isn’t implemented for Point
}
impl From<MassProperties> for GpuLocalMassProperties {
    /// Converts Rapier local-space mass properties into their GPU representation.
    fn from(props: MassProperties) -> Self {
        GpuLocalMassProperties {
            inv_principal_inertia: props.inv_principal_inertia,
            #[cfg(feature = "dim3")]
            inv_ref_frame: props.principal_inertia_local_frame.coords,
            // The scalar inverse mass is replicated on every lane of the vector.
            inv_mass: Vector::repeat(props.inv_mass),
            com: props.local_com.coords,
        }
    }
}
impl Default for GpuLocalMassProperties {
    /// Unit mass properties: inverse mass and inverse principal inertia of 1,
    /// identity principal frame, center-of-mass at the origin.
    fn default() -> Self {
        GpuLocalMassProperties {
            #[rustfmt::skip]
            #[cfg(feature = "dim2")]
            inv_principal_inertia: 1.0,
            #[cfg(feature = "dim3")]
            inv_ref_frame: Vector4::w(),
            #[cfg(feature = "dim3")]
            inv_principal_inertia: Vector::repeat(1.0),
            inv_mass: Vector::repeat(1.0),
            com: Vector::zeros(),
        }
    }
}
impl Default for GpuWorldMassProperties {
    /// Unit mass properties: inverse mass of 1, identity inverse inertia,
    /// center-of-mass at the origin.
    fn default() -> Self {
        GpuWorldMassProperties {
            #[rustfmt::skip]
            #[cfg(feature = "dim2")]
            inv_inertia: 1.0,
            #[cfg(feature = "dim3")]
            inv_inertia: AngularInertia::identity(),
            inv_mass: Vector::repeat(1.0),
            com: Vector::zeros(),
        }
    }
}
/// A set of rigid-bodies stored on the gpu.
pub struct GpuBodySet {
    // Number of rigid-bodies in the set.
    len: u32,
    shapes_data: Vec<GpuShape>, // TODO: exists only for convenience in the MPM simulation.
    // World-space mass properties of each body.
    pub(crate) mprops: GpuVector<GpuWorldMassProperties>,
    // Local-space mass properties of each body.
    pub(crate) local_mprops: GpuVector<GpuLocalMassProperties>,
    // Linear/angular velocities of each body.
    pub(crate) vels: GpuVector<GpuVelocity>,
    // World-space pose of each body.
    pub(crate) poses: GpuVector<GpuSim>,
    // TODO: support other shape types.
    // TODO: support a shape with a shift relative to the body.
    pub(crate) shapes: GpuVector<GpuShape>,
    // TODO: it’s a bit weird that we store the vertex buffer but not the
    //       index buffer. This is because our only use-case currently
    //       is from wgsparkl which has its own way of storing indices.
    pub(crate) shapes_local_vertex_buffers: GpuVector<Point<f32>>,
    // Shape vertices in world space (initialized from the local-space vertices).
    pub(crate) shapes_vertex_buffers: GpuVector<Point<f32>>,
    pub(crate) shapes_vertex_collider_id: GpuVector<u32>, // NOTE: this is a bit of a hack for wgsparkl
}
#[derive(Copy, Clone)]
/// Helper struct for defining a rigid-body to be added to a [`GpuBodySet`].
pub struct BodyDesc {
    /// The rigid-body’s mass-properties in local-space.
    pub local_mprops: GpuLocalMassProperties,
    /// The rigid-body’s mass-properties in world-space.
    pub mprops: GpuWorldMassProperties,
    /// The rigid-body’s linear and angular velocities.
    pub vel: GpuVelocity,
    /// The rigid-body’s world-space pose.
    pub pose: GpuSim,
    /// The rigid-body’s shape.
    pub shape: GpuShape,
}
impl Default for BodyDesc {
    /// A unit-mass body at the origin, at rest, with a unit cube
    /// (cuboid with half-extents 0.5) as its shape.
    fn default() -> Self {
        Self {
            local_mprops: Default::default(),
            mprops: Default::default(),
            vel: Default::default(),
            pose: Default::default(),
            shape: GpuShape::cuboid(Vector::repeat(0.5)),
        }
    }
}
/// Coupling mode between a GPU body and the physics simulation.
///
/// This controls whether a body is affected by physics forces or acts as a kinematic body.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Default)]
pub enum BodyCoupling {
    /// One-way coupling: the body affects other bodies but is not affected by them.
    ///
    /// This is useful for kinematic bodies that move independently of physics forces.
    OneWay,
    /// Two-way coupling: the body both affects and is affected by other bodies.
    ///
    /// This is the standard mode for dynamic rigid bodies, and the default.
    #[default]
    TwoWays,
}
/// Associates a Rapier body/collider pair with a coupling mode.
///
/// Used when creating a [`GpuBodySet`] from Rapier data structures to specify
/// which bodies should have two-way vs one-way coupling.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct BodyCouplingEntry {
    /// The Rapier rigid body handle.
    pub body: RigidBodyHandle,
    /// The Rapier collider handle.
    pub collider: ColliderHandle,
    /// The coupling mode for this body.
    pub mode: BodyCoupling,
}
impl GpuBodySet {
    /// Returns `true` if this set contains no rigid bodies.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
    /// Returns the number of rigid bodies in this set.
    pub fn len(&self) -> u32 {
        self.len
    }
    /// Creates a new GPU body set from Rapier rigid bodies and colliders.
    ///
    /// Only the body/collider pairs listed in `coupling` are uploaded. Bodies that are
    /// not dynamic, or that use [`BodyCoupling::OneWay`], get zero (infinite-mass)
    /// local mass-properties so the solver never changes their velocities.
    ///
    /// # Parameters
    ///
    /// - `device`: The WebGPU device for allocating GPU buffers.
    /// - `bodies`: Rapier rigid body set.
    /// - `colliders`: Rapier collider set.
    /// - `coupling`: the body/collider pairs to upload, with their coupling mode.
    ///
    /// # Panics
    ///
    /// Panics if a collider has a shape type unsupported by [`GpuShape::from_parry`].
    pub fn from_rapier(
        device: &Device,
        bodies: &RigidBodySet,
        colliders: &ColliderSet,
        coupling: &[BodyCouplingEntry], // Only relevant to wgsparkl
    ) -> Self {
        let mut shape_buffers = ShapeBuffers::default();
        let mut gpu_bodies = vec![];
        let mut pt_collider_ids = vec![];
        for (co_id, coupling) in coupling.iter().enumerate() {
            let co = &colliders[coupling.collider];
            let rb = &bodies[coupling.body];
            let prev_len = shape_buffers.vertices.len();
            let shape = GpuShape::from_parry(co.shape(), &mut shape_buffers)
                .expect("Unsupported shape type");
            // Tag every vertex appended by this shape with the collider's index.
            for _ in prev_len..shape_buffers.vertices.len() {
                pt_collider_ids.push(co_id as u32);
            }
            let zero_mprops = MassProperties::zero();
            let two_ways_coupling = rb.is_dynamic() && coupling.mode == BodyCoupling::TwoWays;
            let desc = BodyDesc {
                vel: GpuVelocity {
                    linear: *rb.linvel(),
                    #[allow(clippy::clone_on_copy)] // Needed for 2D/3D switch.
                    angular: rb.angvel().clone(),
                },
                #[cfg(feature = "dim2")]
                pose: (*rb.position()).into(),
                #[cfg(feature = "dim3")]
                pose: GpuSim3::from_isometry(*rb.position(), 1.0),
                shape,
                // One-way coupled (or non-dynamic) bodies get zero mass-properties so
                // they are unaffected by impulses.
                local_mprops: if two_ways_coupling {
                    rb.mass_properties().local_mprops.into()
                } else {
                    zero_mprops.into()
                },
                mprops: Default::default(),
            };
            gpu_bodies.push(desc);
        }
        Self::new(device, &gpu_bodies, &pt_collider_ids, &shape_buffers)
    }
    /// Create a set of `bodies` on the gpu.
    ///
    /// `pt_collider_ids` maps each vertex of `shape_buffers` to the index of the body
    /// it belongs to (see [`GpuBodySet::shapes_vertex_collider_id`]).
    pub fn new(
        device: &Device,
        bodies: &[BodyDesc],
        pt_collider_ids: &[u32],
        shape_buffers: &ShapeBuffers,
    ) -> Self {
        // Split the array-of-structs into one array per field so each can be uploaded
        // into its own GPU buffer.
        #[allow(clippy::type_complexity)]
        let (local_mprops, (mprops, (vels, (poses, shapes_data)))): (
            Vec<_>,
            (Vec<_>, (Vec<_>, (Vec<_>, Vec<_>))),
        ) = bodies
            .iter()
            .copied()
            // NOTE: Looks silly, but we can’t just collect into (Vec, Vec, Vec).
            .map(|b| (b.local_mprops, (b.mprops, (b.vel, (b.pose, b.shape)))))
            .collect();
        // TODO: (api design) how can we let the user pick the buffer usages?
        Self {
            len: bodies.len() as u32,
            mprops: GpuVector::encase(device, &mprops, BufferUsages::STORAGE),
            local_mprops: GpuVector::encase(device, &local_mprops, BufferUsages::STORAGE),
            vels: GpuVector::encase(
                device,
                &vels,
                BufferUsages::STORAGE | BufferUsages::COPY_DST,
            ),
            poses: GpuVector::init(
                device,
                &poses,
                BufferUsages::STORAGE | BufferUsages::COPY_DST | BufferUsages::COPY_SRC,
            ),
            shapes: GpuVector::init(device, &shapes_data, BufferUsages::STORAGE),
            shapes_local_vertex_buffers: GpuVector::encase(
                device,
                &shape_buffers.vertices,
                BufferUsages::STORAGE,
            ),
            shapes_vertex_buffers: GpuVector::encase(
                device,
                // TODO: init in world-space directly?
                &shape_buffers.vertices,
                BufferUsages::STORAGE,
            ),
            shapes_vertex_collider_id: GpuVector::init(
                device,
                pt_collider_ids,
                BufferUsages::STORAGE,
            ),
            shapes_data,
        }
    }
    /// GPU storage buffer containing the poses of every rigid-body.
    pub fn poses(&self) -> &GpuVector<GpuSim> {
        &self.poses
    }
    /// GPU storage buffer containing the velocities of every rigid-body.
    pub fn vels(&self) -> &GpuVector<GpuVelocity> {
        &self.vels
    }
    /// GPU storage buffer containing the world-space mass-properties of every rigid-body.
    pub fn mprops(&self) -> &GpuVector<GpuWorldMassProperties> {
        &self.mprops
    }
    /// GPU storage buffer containing the local-space mass-properties of every rigid-body.
    pub fn local_mprops(&self) -> &GpuVector<GpuLocalMassProperties> {
        &self.local_mprops
    }
    /// GPU storage buffer containing the shape of every rigid-body.
    pub fn shapes(&self) -> &GpuVector<GpuShape> {
        &self.shapes
    }
    /// Returns the GPU buffer containing shape vertices in world-space.
    ///
    /// This buffer is updated each frame as bodies move.
    pub fn shapes_vertex_buffers(&self) -> &GpuVector<Point<f32>> {
        &self.shapes_vertex_buffers
    }
    /// Returns the GPU buffer containing shape vertices in local-space.
    ///
    /// These are the original vertex positions before transformation.
    pub fn shapes_local_vertex_buffers(&self) -> &GpuVector<Point<f32>> {
        &self.shapes_local_vertex_buffers
    }
    /// Returns the GPU buffer mapping each vertex to its collider ID.
    ///
    /// This is used by wgsparkl for particle-body interactions.
    pub fn shapes_vertex_collider_id(&self) -> &GpuVector<u32> {
        &self.shapes_vertex_collider_id
    }
    /// Returns a CPU-side slice of the shape data.
    ///
    /// Useful for accessing shape information without GPU readback.
    pub fn shapes_data(&self) -> &[GpuShape] {
        &self.shapes_data
    }
}
#[derive(Shader)]
#[shader(
    derive(WgQuat, WgSim3, WgSim2),
    src = "body.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs"
)]
/// Shader defining structs related to rigid-bodies, as well as functions to compute point velocities
/// and update world-space mass-properties.
///
/// Composable WGSL module (`body.wgsl`); the struct layouts must match the
/// `Gpu*` types defined in this module.
pub struct WgBody;
// TODO: this test won’t pass due to the lack of `substitute_aliases`
// and `dim_shader_defs` in the macro. Figure out a way to make this work.
// wgcore::test_shader_compilation!(WgBody);
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/dynamics/prefix_sum.rs | crates/wgrapier/src/dynamics/prefix_sum.rs | //! GPU parallel prefix sum (scan) algorithm.
//!
//! This module implements an efficient parallel prefix sum on the GPU using a work-efficient
//! algorithm. Prefix sum is a fundamental parallel primitive used throughout the physics engine.
//!
//! # What is Prefix Sum?
//!
//! Given an input array `[a₀, a₁, a₂, ..., aₙ]`, the prefix sum produces:
//! `[0, a₀, a₀+a₁, a₀+a₁+a₂, ..., a₀+a₁+...+aₙ₋₁]`
//!
//! Note the special variant used here: a 0 is prepended as the first element, which is useful
//! for computing array indices and offsets.
use nalgebra::DVector;
use wgcore::kernel::KernelDispatch;
use wgcore::tensor::GpuVector;
use wgcore::Shader;
use wgpu::{BufferUsages, ComputePass, ComputePipeline, Device};
/// GPU shader for parallel prefix sum.
///
/// This shader implements a work-efficient parallel scan algorithm optimized for GPUs.
/// See the module-level documentation for the exact (exclusive-scan) variant computed.
#[derive(Shader)]
#[shader(src = "prefix_sum.wgsl", composable = false)]
pub struct WgPrefixSum {
    /// Main prefix sum kernel (both up-sweep and down-sweep).
    prefix_sum: ComputePipeline,
    /// Kernel for adding partial sums from coarser levels.
    add_data_grp: ComputePipeline,
}
impl WgPrefixSum {
    /// Number of threads per workgroup; the shader hardcodes its shared-memory size
    /// to this value.
    const THREADS: u32 = 256;
    /// Dispatches the prefix sum algorithm on GPU data.
    ///
    /// Runs a multi-level scan: each level scans fixed-size groups and writes the
    /// per-group totals into the next (coarser) level, then the coarse results are
    /// added back into the finer levels.
    ///
    /// # Parameters
    ///
    /// - `device`: The WebGPU device
    /// - `pass`: Active compute pass
    /// - `workspace`: Workspace containing auxiliary buffers (resized automatically if needed)
    /// - `data`: Input/output buffer (modified in-place)
    ///
    /// # Panics
    ///
    /// Panics if `THREADS` is not 256, as the shared memory size is hardcoded in the shader.
    pub fn dispatch(
        &self,
        device: &Device,
        pass: &mut ComputePass,
        workspace: &mut PrefixSumWorkspace,
        data: &GpuVector<u32>,
    ) {
        // If this assert fails, the kernel launches below must be changed because we are using
        // a fixed size for the shared memory currently.
        assert_eq!(
            Self::THREADS,
            256,
            "Internal error: prefix sum assumes a thread count equal to 256"
        );
        workspace.reserve(device, data.len() as u32);
        let ngroups0 = workspace.stages[0].buffer.len() as u32;
        let aux0 = &workspace.stages[0].buffer;
        // Level 0: scan `data` in groups, writing group totals into the first aux buffer.
        KernelDispatch::new(device, pass, &self.prefix_sum)
            .bind0([data.buffer(), aux0.buffer()])
            .dispatch(ngroups0);
        // Up-sweep: scan each aux level, writing its group totals into the next level.
        for i in 0..workspace.num_stages - 1 {
            let ngroups = workspace.stages[i + 1].buffer.len() as u32;
            let buf = workspace.stages[i].buffer.buffer();
            let aux = workspace.stages[i + 1].buffer.buffer();
            KernelDispatch::new(device, pass, &self.prefix_sum)
                .bind0([buf, aux])
                .dispatch(ngroups);
        }
        // Down-sweep: propagate the coarse sums back into the finer aux levels.
        if workspace.num_stages > 2 {
            for i in (0..workspace.num_stages - 2).rev() {
                let ngroups = workspace.stages[i + 1].buffer.len() as u32;
                let buf = workspace.stages[i].buffer.buffer();
                let aux = workspace.stages[i + 1].buffer.buffer();
                KernelDispatch::new(device, pass, &self.add_data_grp)
                    .bind0([buf, aux])
                    .dispatch(ngroups);
            }
        }
        // Finally, add the level-0 group sums back into `data`.
        if workspace.num_stages > 1 {
            KernelDispatch::new(device, pass, &self.add_data_grp)
                .bind0([data.buffer(), aux0.buffer()])
                .dispatch(ngroups0);
        }
    }
    /// CPU reference implementation of the prefix sum algorithm.
    ///
    /// This method computes the same result as the GPU version but on the CPU.
    /// Useful for testing and verification. Empty vectors are left unchanged.
    ///
    /// # Parameters
    ///
    /// - `v`: Input/output vector (modified in-place)
    pub fn eval_cpu(&self, v: &mut DVector<u32>) {
        // Nothing to do for an empty vector. This also avoids the arithmetic underflow
        // of `v.len() - 1` (and the out-of-bounds `v[0] = 0`) the original code hit
        // when `v` was empty.
        if v.is_empty() {
            return;
        }
        // Inclusive scan.
        for i in 0..v.len() - 1 {
            v[i + 1] += v[i];
        }
        // NOTE: we actually have a special variant of the prefix-sum
        //       where the result is as if a 0 was prepended to the input vector
        //       (an exclusive scan): shift everything right by one.
        for i in (1..v.len()).rev() {
            v[i] = v[i - 1];
        }
        v[0] = 0;
    }
}
/// One stage in the multi-level prefix sum hierarchy.
///
/// Each stage stores the per-workgroup partial sums of the level below it.
struct PrefixSumStage {
    /// Maximum number of elements this stage can handle.
    ///
    /// This is the allocation size of `buffer` at creation time; `reserve`
    /// only reallocates when a larger capacity is needed.
    capacity: u32,
    /// GPU buffer for storing partial sums at this level.
    buffer: GpuVector<u32>,
}
/// Workspace containing auxiliary buffers for hierarchical prefix sum.
///
/// The workspace maintains a hierarchy of buffers for the multi-level scan algorithm.
/// It automatically resizes when the input data size changes.
#[derive(Default)]
pub struct PrefixSumWorkspace {
    /// Auxiliary buffers, one per scan level (coarser levels at higher indices).
    stages: Vec<PrefixSumStage>,
    /// Number of stages actually used for the current input size.
    ///
    /// May be smaller than `stages.len()` if the workspace was previously
    /// reserved for a larger input (buffers are kept but not all are used).
    num_stages: usize,
}
impl PrefixSumWorkspace {
    /// Creates a new empty workspace.
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates a workspace pre-allocated for a specific buffer size.
    ///
    /// # Parameters
    ///
    /// - `device`: The WebGPU device for allocating buffers
    /// - `buffer_len`: Size of the data buffer that will be scanned
    pub fn with_capacity(device: &Device, buffer_len: u32) -> Self {
        let mut result = Self::new();
        result.reserve(device, buffer_len);
        result
    }

    /// Ensures the workspace has sufficient capacity for a given buffer size.
    ///
    /// Resizes auxiliary buffers if needed. This is called automatically by [`WgPrefixSum::dispatch`].
    ///
    /// # Parameters
    ///
    /// - `device`: The WebGPU device for allocating buffers
    /// - `buffer_len`: Size of the data buffer that will be scanned
    pub fn reserve(&mut self, device: &Device, buffer_len: u32) {
        // The `.max(1)` guards the degenerate `buffer_len == 0` case: since
        // `0u32.div_ceil(n) == 0`, the `while stage_len != 1` loops below
        // would otherwise never terminate.
        let mut stage_len = buffer_len.div_ceil(WgPrefixSum::THREADS).max(1);

        if self.stages.is_empty() || self.stages[0].capacity < stage_len {
            // Reinitialize the auxiliary buffers.
            self.stages.clear();

            while stage_len != 1 {
                let buffer = GpuVector::init(
                    device,
                    DVector::<u32>::zeros(stage_len as usize),
                    BufferUsages::STORAGE,
                );
                self.stages.push(PrefixSumStage {
                    capacity: stage_len,
                    buffer,
                });
                stage_len = stage_len.div_ceil(WgPrefixSum::THREADS);
            }

            // The last stage always has only 1 element.
            self.stages.push(PrefixSumStage {
                capacity: 1,
                buffer: GpuVector::init(device, DVector::<u32>::zeros(1), BufferUsages::STORAGE),
            });
            self.num_stages = self.stages.len();
        } else if self.stages[0].buffer.len() as u32 != stage_len {
            // The stages have big enough buffers, but we need to adjust the
            // number of stages actually used for this (smaller) input.
            self.num_stages = 0;

            while stage_len != 1 {
                self.num_stages += 1;
                stage_len = stage_len.div_ceil(WgPrefixSum::THREADS);
            }

            // The last stage always has only 1 element.
            self.num_stages += 1;
        }
    }
}
#[cfg(test)]
mod test {
    use super::{PrefixSumWorkspace, WgPrefixSum};
    use nalgebra::DVector;
    use wgcore::gpu::GpuInstance;
    use wgcore::kernel::CommandEncoderExt;
    use wgcore::tensor::GpuVector;
    use wgcore::Shader;
    use wgpu::BufferUsages;

    /// Checks that the GPU prefix sum matches the CPU reference implementation
    /// (`eval_cpu`) on several input patterns.
    #[futures_test::test]
    #[serial_test::serial]
    async fn gpu_prefix_sum() {
        // 15071 is not a multiple of the 256-thread workgroup size, so the scan
        // needs more than one stage and a partially-filled final workgroup.
        const LEN: u32 = 15071;

        let gpu = GpuInstance::new().await.unwrap();
        let prefix_sum = WgPrefixSum::from_device(gpu.device()).unwrap();
        // Input patterns: all ones, a ramp, and random values bounded so the
        // total sum stays well below u32::MAX.
        let inputs = vec![
            DVector::<u32>::from_fn(LEN as usize, |_, _| 1),
            DVector::<u32>::from_fn(LEN as usize, |i, _| i as u32),
            DVector::<u32>::new_random(LEN as usize).map(|e| e % 10_000),
        ];

        for v_cpu in inputs {
            let mut encoder = gpu.device().create_command_encoder(&Default::default());
            let v_gpu = GpuVector::init(
                gpu.device(),
                &v_cpu,
                BufferUsages::STORAGE | BufferUsages::COPY_SRC,
            );
            // CPU-mappable buffer used to read the result back.
            let staging = GpuVector::uninit(
                gpu.device(),
                v_cpu.len() as u32,
                BufferUsages::MAP_READ | BufferUsages::COPY_DST,
            );
            let mut workspace = PrefixSumWorkspace::with_capacity(gpu.device(), v_cpu.len() as u32);

            let mut pass = encoder.compute_pass("test", None);
            prefix_sum.dispatch(gpu.device(), &mut pass, &mut workspace, &v_gpu);
            // End the compute pass so the buffer copy below can be encoded.
            drop(pass);

            staging.copy_from(&mut encoder, &v_gpu);

            let t0 = web_time::Instant::now();
            gpu.queue().submit(Some(encoder.finish()));
            let gpu_result = staging.read(gpu.device()).await.unwrap();
            println!("Gpu time: {}", t0.elapsed().as_secs_f32());

            let mut cpu_result = v_cpu.clone();
            let t0 = web_time::Instant::now();
            prefix_sum.eval_cpu(&mut cpu_result);
            println!("Cpu time: {}", t0.elapsed().as_secs_f32());

            // println!("input: {:?}", v_cpu);
            // println!("cpu output: {:?}", cpu_result);
            // println!("gpu output: {:?}", gpu_result);
            assert_eq!(DVector::from(gpu_result), cpu_result);
        }
    }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/dynamics/constraint.rs | crates/wgrapier/src/dynamics/constraint.rs | //! Contact constraints for collision resolution.
//!
//! This module defines the constraint structures used to resolve collisions between rigid bodies.
//! Constraints are generated from contact manifolds and solved iteratively to compute impulses
//! that prevent penetration and simulate friction.
use encase::ShaderType;
use wgcore::Shader;
use wgparry::math::{AngVector, Vector};
use wgparry::{dim_shader_defs, queries::WgContact, substitute_aliases};
#[cfg(feature = "dim3")]
use nalgebra::Vector2;
/// WGSL shader defining constraint structures and helper functions.
///
/// This shader can be imported by constraint solver shaders to access constraint
/// data structures and computation helpers.
///
/// The WGSL source lives in `constraint.wgsl`; type aliases are substituted and
/// dimension-specific shader defs are applied when the source is loaded.
#[derive(Shader)]
#[shader(
    derive(WgContact),
    src = "constraint.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs"
)]
pub struct WgConstraint;
/// Number of friction tangent directions per contact point (1 in 2D).
#[cfg(feature = "dim2")]
const SUB_LEN: usize = 1;
/// Maximum number of contact points per manifold constraint (2 in 2D).
#[cfg(feature = "dim2")]
const MAX_CONSTRAINTS_PER_MANIFOLD: usize = 2;
/// Number of friction tangent directions per contact point (2 in 3D).
#[cfg(feature = "dim3")]
const SUB_LEN: usize = 2;
/// Maximum number of contact points per manifold constraint (4 in 3D).
#[cfg(feature = "dim3")]
const MAX_CONSTRAINTS_PER_MANIFOLD: usize = 4;
/// Geometric information for one contact point within a constraint.
///
/// This structure stores the contact geometry and relative velocities needed
/// to build the constraint's solver representation.
#[derive(Copy, Clone, PartialEq, Debug, ShaderType)]
pub struct GpuTwoBodyConstraintInfos {
    /// Relative tangential velocity at the contact point.
    pub tangent_vel: Vector<f32>,
    /// Relative normal velocity at the contact point.
    pub normal_vel: f32,
    /// Contact point in body A's local space.
    pub local_pt_a: Vector<f32>,
    /// Contact point in body B's local space.
    pub local_pt_b: Vector<f32>,
    /// Penetration distance (negative for separation).
    ///
    /// NOTE(review): double-check this sign convention against the WGSL solver;
    /// rapier itself typically uses `dist < 0` for penetration.
    pub dist: f32,
}
/// Builder structure containing geometric info for all contact points in a manifold.
///
/// This is an intermediate representation used during constraint initialization.
/// It stores the raw contact geometry before computing the solver representation.
/// The layout is mirrored on the GPU through the `ShaderType` derive.
#[derive(Copy, Clone, PartialEq, Debug, ShaderType)]
pub struct GpuTwoBodyConstraintBuilder {
    /// Array of contact point information (up to [`MAX_CONSTRAINTS_PER_MANIFOLD`]
    /// points: 2 in 2D, 4 in 3D).
    pub infos: [GpuTwoBodyConstraintInfos; MAX_CONSTRAINTS_PER_MANIFOLD],
}
/// A two-body contact constraint ready for iterative solving.
///
/// This structure contains all the precomputed data needed by the constraint solver,
/// including effective masses, Jacobian terms, impulse accumulators, and target
/// velocities (right-hand sides).
///
/// Each constraint can contain multiple contact points (up to 2 in 2D, 4 in 3D),
/// with both normal and tangent components for each point.
#[derive(Copy, Clone, PartialEq, Debug, ShaderType)]
pub struct GpuTwoBodyConstraint {
    /// Contact normal direction for body A.
    pub dir_a: Vector<f32>,
    /// First tangent direction for friction (3D only).
    #[cfg(feature = "dim3")]
    pub tangent_a: Vector<f32>,
    /// Inverse mass of body A.
    pub im_a: Vector<f32>,
    /// Inverse mass of body B.
    pub im_b: Vector<f32>,
    /// Constraint force mixing factor for regularization.
    pub cfm_factor: f32,
    /// Friction cone limit (max tangential impulse).
    pub limit: f32,
    /// Index of body A in the solver arrays.
    pub solver_body_a: u32,
    /// Index of body B in the solver arrays.
    pub solver_body_b: u32,
    /// Array of constraint elements, one per contact point.
    ///
    /// Only the first [`Self::len`] entries are active.
    pub elements: [GpuTwoBodyConstraintElement; MAX_CONSTRAINTS_PER_MANIFOLD],
    /// Number of active contact points in this constraint.
    pub len: u32,
}
/// One element (contact point) within a two-body constraint.
///
/// Contains both the normal and tangent parts for a single contact point.
/// See [`GpuTwoBodyConstraintNormalPart`] and [`GpuTwoBodyConstraintTangentPart`].
#[derive(Copy, Clone, PartialEq, Debug, ShaderType)]
pub struct GpuTwoBodyConstraintElement {
    /// Normal constraint component (prevents penetration).
    pub normal_part: GpuTwoBodyConstraintNormalPart,
    /// Tangent constraint component (applies friction).
    pub tangent_part: GpuTwoBodyConstraintTangentPart,
}
/// Normal (non-penetration) constraint data for one contact point.
///
/// This stores the Jacobian terms, effective mass, impulse accumulators,
/// and right-hand side for the normal constraint.
///
/// The `ii_` field-name prefix denotes a Jacobian term pre-multiplied by the
/// body's inverse angular inertia.
#[derive(Copy, Clone, PartialEq, Debug, ShaderType)]
pub struct GpuTwoBodyConstraintNormalPart {
    /// Angular Jacobian component for body A.
    pub torque_dir_a: AngVector<f32>,
    /// Angular Jacobian component for body A, multiplied by the inverse angular inertia.
    pub ii_torque_dir_a: AngVector<f32>,
    /// Angular Jacobian component for body B.
    pub torque_dir_b: AngVector<f32>,
    /// Angular Jacobian component for body B, multiplied by the inverse angular inertia.
    pub ii_torque_dir_b: AngVector<f32>,
    /// Right-hand side with bias term (used in stabilization phase).
    pub rhs: f32,
    /// Right-hand side without bias term (used in velocity solving phase).
    pub rhs_wo_bias: f32,
    /// Current iteration's impulse.
    pub impulse: f32,
    /// Accumulated impulse from all iterations (for clamping and warmstart).
    pub impulse_accumulator: f32,
    /// Effective mass (inverse of the constraint's mass matrix).
    pub r: f32,
}
/// Tangent (friction) constraint data for one contact point.
///
/// This stores the Jacobian terms, effective masses, impulse accumulators,
/// and right-hand sides for the friction constraints. In 2D there is one
/// tangent direction; in 3D there are two.
///
/// The `ii_` field-name prefix denotes a Jacobian term pre-multiplied by the
/// body's inverse angular inertia. Note that the impulse-related fields use
/// different (but equivalently sized) representations in 2D (`[f32; 1]`) and
/// 3D (`Vector2<f32>`).
#[derive(Copy, Clone, PartialEq, Debug, ShaderType)]
pub struct GpuTwoBodyConstraintTangentPart {
    /// Angular Jacobian components for body A (one or two directions).
    pub torque_dir_a: [AngVector<f32>; SUB_LEN],
    /// Angular Jacobian components for body A (one or two directions) multiplied by the angular inertia tensor.
    pub ii_torque_dir_a: [AngVector<f32>; SUB_LEN],
    /// Angular Jacobian components for body B (one or two directions).
    pub torque_dir_b: [AngVector<f32>; SUB_LEN],
    /// Angular Jacobian components for body B (one or two directions) multiplied by the angular inertia tensor.
    pub ii_torque_dir_b: [AngVector<f32>; SUB_LEN],
    /// Right-hand sides with bias terms.
    pub rhs: [f32; SUB_LEN],
    /// Right-hand sides without bias terms.
    pub rhs_wo_bias: [f32; SUB_LEN],
    /// Current iteration's impulses (2D: 1 direction).
    #[cfg(feature = "dim2")]
    pub impulse: [f32; 1],
    /// Accumulated impulses (2D: 1 direction).
    #[cfg(feature = "dim2")]
    pub impulse_accumulator: [f32; 1],
    /// Effective masses (2D: 1 value).
    #[cfg(feature = "dim2")]
    pub r: [f32; 1],
    /// Current iteration's impulses (3D: 2 directions).
    #[cfg(feature = "dim3")]
    pub impulse: Vector2<f32>,
    /// Accumulated impulses (3D: 2 directions).
    #[cfg(feature = "dim3")]
    pub impulse_accumulator: Vector2<f32>,
    /// Effective masses (3D: 3 values for 2x2 symmetric matrix).
    #[cfg(feature = "dim3")]
    pub r: [f32; 3],
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src/dynamics/sim_params.rs | crates/wgrapier/src/dynamics/sim_params.rs | //! Simulation parameters controlling physics behavior and solver settings.
//!
//! This module defines the parameters that control how the physics engine behaves,
//! including timestep length, solver iterations, contact compliance, and various
//! tolerances and thresholds.
use wgcore::Shader;
/// WGSL shader defining simulation parameter structures.
///
/// This shader can be imported by other shaders that need access to simulation parameters.
/// The WGSL source lives in `sim_params.wgsl`.
#[derive(Shader)]
#[shader(src = "sim_params.wgsl")]
pub struct WgSimParams;
/// Simulation parameters for a physics timestep.
///
/// This structure controls all aspects of the physics simulation including:
/// - Timestep length
/// - Contact and joint constraint compliance (via spring parameters)
/// - Solver iteration counts
/// - Error tolerances and prediction distances
/// - Warmstarting coefficient
///
/// The parameters use a physically-based spring model for constraint regularization,
/// controlled by natural frequency and damping ratio values. This provides more
/// intuitive tuning compared to raw compliance values.
///
/// # Memory Layout
///
/// This struct is `Pod` and `Zeroable`, making it safe to upload directly to GPU
/// uniform buffers. The `#[repr(C)]` ensures a consistent memory layout.
///
/// NOTE(review): the field order presumably has to match the corresponding
/// struct declared in `sim_params.wgsl` — keep both in sync when editing.
#[derive(Copy, Clone, Debug, PartialEq, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
pub struct GpuSimParams {
    /// The timestep length (default: `1.0 / 60.0`).
    pub dt: f32,
    /// > 0: the damping ratio used by the springs for contact constraint stabilization.
    ///
    /// Larger values make the constraints more compliant (allowing more visible
    /// penetrations before stabilization).
    /// (default `5.0`).
    pub contact_damping_ratio: f32,
    /// > 0: the natural frequency used by the springs for contact constraint regularization.
    ///
    /// Increasing this value will make it so that penetrations get fixed more quickly at the
    /// expense of potential jitter effects due to overshooting. In order to make the simulation
    /// look stiffer, it is recommended to increase the [`Self::contact_damping_ratio`] instead of this
    /// value.
    /// (default: `30.0`).
    pub contact_natural_frequency: f32,
    /// > 0: the natural frequency used by the springs for joint constraint regularization.
    ///
    /// Increasing this value will make it so that penetrations get fixed more quickly.
    /// (default: `1.0e6`).
    pub joint_natural_frequency: f32,
    /// The fraction of critical damping applied to the joint for constraints regularization.
    ///
    /// Larger values make the constraints more compliant (allowing more joint
    /// drift before stabilization).
    /// (default `1.0`).
    pub joint_damping_ratio: f32,
    /// The coefficient in `[0, 1]` applied to warmstart impulses, i.e., impulses that are used as the
    /// initial solution (instead of 0) at the next simulation step.
    ///
    /// This should generally be set to 1.
    ///
    /// (default `1.0`).
    pub warmstart_coefficient: f32,
    /// The approximate size of most dynamic objects in the scene.
    ///
    /// This value is used internally to estimate some length-based tolerance. In particular, the
    /// values `allowed_linear_error`, `max_corrective_velocity`,
    /// and `prediction_distance` are scaled by this value implicitly.
    ///
    /// This value can be understood as the number of units-per-meter in your physical world compared
    /// to a human-sized world in meter. For example, in a 2d game, if your typical object size is 100
    /// pixels, set the [`Self::length_unit`] parameter to 100.0. The physics engine will interpret
    /// it as if 100 pixels is equivalent to 1 meter in its various internal threshold.
    /// (default `1.0`).
    pub length_unit: f32,
    /// Amount of penetration the engine won’t attempt to correct (default: `0.001m`).
    ///
    /// This value is implicitly scaled by [`GpuSimParams::length_unit`].
    pub normalized_allowed_linear_error: f32,
    /// Maximum amount of penetration the solver will attempt to resolve in one timestep (default: `10.0`).
    ///
    /// This value is implicitly scaled by [`GpuSimParams::length_unit`].
    pub normalized_max_corrective_velocity: f32,
    /// The maximal distance separating two objects that will generate predictive contacts (default: `0.002m`).
    ///
    /// This value is implicitly scaled by [`GpuSimParams::length_unit`].
    pub normalized_prediction_distance: f32,
    /// The number of solver iterations run by the constraints solver for calculating forces (default: `4`).
    pub num_solver_iterations: u32,
}
impl GpuSimParams {
    /// The inverse of the time-stepping length, i.e. the steps per seconds (Hz).
    ///
    /// Returns zero if `self.dt` is zero.
    #[inline(always)]
    pub fn inv_dt(&self) -> f32 {
        if self.dt == 0.0 {
            0.0
        } else {
            self.dt.recip()
        }
    }

    /// Sets the inverse time-stepping length (i.e. the frequency).
    ///
    /// This automatically recomputes `self.dt`, mapping a zero frequency to a
    /// zero timestep.
    #[inline]
    pub fn set_inv_dt(&mut self, inv_dt: f32) {
        self.dt = if inv_dt == 0.0 { 0.0 } else { inv_dt.recip() };
    }

    /// Initialize the simulation parameters with settings matching the TGS-soft solver
    /// with warmstarting.
    ///
    /// This is the default configuration, equivalent to [`GpuSimParams::default()`].
    pub fn tgs_soft() -> Self {
        Self {
            // 60 Hz timestep.
            dt: 1.0 / 60.0,
            // Contact spring model: stiff stabilization, strongly damped.
            contact_damping_ratio: 5.0,
            contact_natural_frequency: 30.0,
            // Joint spring model: essentially rigid, critically damped.
            joint_natural_frequency: 1.0e6,
            joint_damping_ratio: 1.0,
            // Full warmstarting.
            warmstart_coefficient: 1.0,
            num_solver_iterations: 4,
            // Length-based tolerances, all implicitly scaled by `length_unit`.
            length_unit: 1.0,
            normalized_allowed_linear_error: 0.001,
            normalized_max_corrective_velocity: 10.0,
            normalized_prediction_distance: 0.002,
        }
    }

    /// Initializes the integration parameters for a Jacobi solver.
    pub fn jacobi() -> Self {
        let mut params = Self::tgs_soft();
        // Jacobi tends to already be overly energetic without warmstart.
        params.warmstart_coefficient = 0.0;
        params
    }
}
impl Default for GpuSimParams {
    /// Defaults to the TGS-soft solver configuration, see [`GpuSimParams::tgs_soft`].
    fn default() -> Self {
        Self::tgs_soft()
    }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src_testbed/lib.rs | crates/wgrapier/src_testbed/lib.rs | #![allow(clippy::result_large_err)]
#![allow(clippy::too_many_arguments)]
#[cfg(feature = "dim2")]
use rapier2d as rapier;
#[cfg(feature = "dim3")]
use rapier3d as rapier;
#[cfg(feature = "dim2")]
use wgrapier2d as wgrapier;
#[cfg(feature = "dim3")]
use wgrapier3d as wgrapier;
mod backend;
mod graphics;
mod ui;
use backend::{BackendType, CpuBackend, GpuBackend, PhysicsBackend};
use graphics::{setup_graphics, update_instances, RenderContext};
use ui::{render_compiling_message, render_ui, PhysicsContext, RunState};
#[cfg(feature = "dim2")]
use kiss3d::camera::FixedView;
use kiss3d::light::Light;
#[cfg(feature = "dim3")]
use kiss3d::prelude::PlanarFixedView;
#[cfg(feature = "dim2")]
use kiss3d::prelude::Sidescroll;
use kiss3d::window::Window;
use rapier::geometry::ColliderSet;
use rapier::prelude::{ImpulseJointSet, RigidBodySet};
use wgcore::gpu::GpuInstance;
use wgrapier::pipeline::{GpuPhysicsPipeline, RunStats};
/// The CPU-side initial state of a simulation scene.
///
/// Demo builders return this; it is then handed to either the CPU or the GPU backend.
pub struct SimulationState {
    /// The rigid-bodies of the scene.
    pub bodies: RigidBodySet,
    /// The colliders attached to the rigid-bodies.
    pub colliders: ColliderSet,
    /// The impulse-based joints between bodies.
    pub impulse_joints: ImpulseJointSet,
}
/// A list of named demos: `(demo name, builder function)` pairs.
pub type SimulationBuilders = Vec<(&'static str, fn() -> SimulationState)>;
/// The demo application: owns the demo list, backend selection, and run state.
pub struct Testbed {
    /// The available demos, as `(name, builder)` pairs.
    builders: SimulationBuilders,
    /// Index into `builders` of the currently displayed demo.
    selected_demo: usize,
    /// The physics backend (CPU or GPU) currently selected.
    backend_type: BackendType,
    /// Whether the simulation is running, paused, or single-stepping.
    run_state: RunState,
    /// Timing statistics from the last simulation step.
    run_stats: RunStats,
    /// Human-readable GPU initialization failure message, if any.
    gpu_init_error: Option<String>,
    /// Cached GPU pipeline to avoid recompilation when switching demos
    cached_gpu_pipeline: Option<GpuPhysicsPipeline>,
}
impl Testbed {
    /// Creates a testbed from a list of named demo builders.
    ///
    /// Starts paused, on the first demo, with the GPU (non-Jacobi) backend selected.
    pub fn from_builders(builders: SimulationBuilders) -> Self {
        Self {
            builders,
            selected_demo: 0,
            backend_type: BackendType::Gpu { use_jacobi: false },
            run_state: RunState::Paused,
            run_stats: RunStats::default(),
            gpu_init_error: None,
            cached_gpu_pipeline: None,
        }
    }

    /// Overrides the physics backend selected initially.
    pub fn with_backend(mut self, backend_type: BackendType) -> Self {
        self.backend_type = backend_type;
        self
    }

    /// Runs the testbed's main loop until the window is closed.
    ///
    /// Sets up the window, cameras, physics backend (falling back to the CPU
    /// backend if GPU initialization fails), and graphics, then renders the
    /// UI and steps the simulation each frame.
    pub async fn run(mut self) {
        let mut window = Window::new("wgrapier demos");
        window.set_light(Light::StickToCamera);

        // Set up cameras first so we can render the "compiling" message
        #[cfg(feature = "dim2")]
        let (mut camera2d, mut camera3d) = {
            let mut sidescroll = Sidescroll::default();
            sidescroll.look_at(nalgebra::Point2::new(0.0, 100.0), 7.5);
            (sidescroll, FixedView::new())
        };
        #[cfg(feature = "dim3")]
        let (mut camera2d, mut camera3d) = {
            let arc_ball = kiss3d::prelude::ArcBall::new(
                nalgebra::Point3::new(-100.0, 100.0, -100.0),
                nalgebra::Point3::new(0.0, 40.0, 0.0),
            );
            (PlanarFixedView::new(), arc_ball)
        };

        // Try to initialize GPU, fallback to CPU if it fails
        let gpu = match GpuInstance::without_gl().await {
            Ok(gpu) => Some(gpu),
            Err(e) => {
                // GPU initialization failed, force CPU backend
                self.gpu_init_error = Some(format!(
                    "GPU backend not available, initialization failed:\n\"{}\"\n",
                    e
                ));
                self.backend_type = BackendType::Cpu;
                None
            }
        };

        // Check if we need to compile shaders (GPU backend without cached pipeline).
        let needs_shader_compilation = matches!(self.backend_type, BackendType::Gpu { .. })
            && self.cached_gpu_pipeline.is_none();

        // Render a "compiling shaders" message before doing the actual compilation.
        // The app will freeze during the compilation, so we need to draw this before.
        if needs_shader_compilation {
            // Don’t run a single render pass. It can take a few frames for the window/canvas to
            // show up so we don’t want the app to freeze before the message is actually visible.
            for _ in 0..100 {
                window
                    .render_with_cameras(&mut camera3d, &mut camera2d)
                    .await;
                render_compiling_message(&mut window);
            }
        }

        // Build the initial demo, its physics backend, and its render nodes.
        let phys = (self.builders[0].1)();
        let mut physics = setup_physics(
            gpu.as_ref(),
            &phys,
            self.backend_type,
            &mut self.gpu_init_error,
            &mut self.cached_gpu_pipeline,
        )
        .await;
        let mut render_ctx = setup_graphics(&mut window, &phys).await;

        while window
            .render_with_cameras(&mut camera3d, &mut camera2d)
            .await
        {
            let ui_res = render_ui(
                &mut window,
                &self.builders,
                &mut self.selected_demo,
                &mut self.backend_type,
                &mut self.run_state,
                &self.run_stats,
                &mut physics,
                gpu.as_ref(),
                &self.gpu_init_error,
            );

            // Switching demos: rebuild physics and graphics from scratch.
            if let Some(new_demo) = ui_res.new_selected_demo {
                self.selected_demo = new_demo;
                let phys = (self.builders[new_demo].1)();
                render_ctx.clear();

                // Extract pipeline from current GPU backend if present
                if let PhysicsBackend::Gpu(gpu_backend) = physics.backend {
                    self.cached_gpu_pipeline = Some(gpu_backend.into_pipeline());
                }

                physics = setup_physics(
                    gpu.as_ref(),
                    &phys,
                    self.backend_type,
                    &mut self.gpu_init_error,
                    &mut self.cached_gpu_pipeline,
                )
                .await;
                render_ctx = setup_graphics(&mut window, &phys).await;
            }

            self.step_simulation(gpu.as_ref(), &mut physics, &mut render_ctx)
                .await;
        }
    }

    /// Advances the simulation by one step (unless paused) and refreshes the
    /// rendering instances with the new body poses.
    async fn step_simulation(
        &mut self,
        gpu: Option<&GpuInstance>,
        physics: &mut PhysicsContext,
        render_ctx: &mut RenderContext,
    ) {
        if self.run_state != RunState::Paused {
            self.run_stats = physics.backend.step(gpu, physics.timestamps.as_mut()).await;
        }

        // In single-step mode, pause again right after one step.
        if self.run_state == RunState::Step {
            self.run_state = RunState::Paused;
        }

        // Update instances using set_instances for efficient rendering
        update_instances(render_ctx, &physics.backend);
    }
}
/// Creates the physics backend for the given simulation state.
///
/// Builds the requested backend; if GPU backend creation fails, falls back to
/// the CPU backend and stores a human-readable message in `gpu_error`.
///
/// # Parameters
///
/// - `gpu`: the GPU instance, if one could be created. Must be `Some` whenever
///   a GPU backend is requested.
/// - `phys`: the initial simulation state (bodies, colliders, joints).
/// - `backend_type`: the backend requested by the user/UI.
/// - `gpu_error`: output slot for a GPU failure message.
/// - `cached_pipeline`: a previously compiled GPU pipeline to reuse (taken if present).
async fn setup_physics(
    gpu: Option<&GpuInstance>,
    phys: &SimulationState,
    backend_type: BackendType,
    gpu_error: &mut Option<String>,
    cached_pipeline: &mut Option<GpuPhysicsPipeline>,
) -> PhysicsContext {
    // The CPU backend gets its own clone of the scene (used both for the
    // explicit CPU backend and as the GPU-failure fallback).
    let cpu_backend = || {
        PhysicsBackend::Cpu(CpuBackend::new(SimulationState {
            bodies: phys.bodies.clone(),
            colliders: phys.colliders.clone(),
            impulse_joints: phys.impulse_joints.clone(),
        }))
    };

    let backend = match backend_type {
        BackendType::Gpu { use_jacobi } => {
            // The caller guarantees `gpu` is `Some` when a GPU backend is requested
            // (it forces `BackendType::Cpu` when GPU initialization failed).
            let gpu = gpu.unwrap();

            if let Some(pipeline) = cached_pipeline.take() {
                // Fast path: reuse existing pipeline (no shader recompilation).
                let gpu_backend = GpuBackend::with_pipeline(gpu, pipeline, phys, use_jacobi).await;
                PhysicsBackend::Gpu(gpu_backend)
            } else {
                // Slow path: compile shaders for the first time.
                match GpuBackend::try_new(gpu, phys, use_jacobi).await {
                    Ok(gpu_backend) => PhysicsBackend::Gpu(gpu_backend),
                    Err(e) => {
                        // GPU backend creation failed, fall back to CPU.
                        *gpu_error = Some(format!(
                            "GPU backend initialization failed: {}. Using CPU backend.",
                            e
                        ));
                        cpu_backend()
                    }
                }
            }
        }
        BackendType::Cpu => cpu_backend(),
    };

    PhysicsContext::new(backend)
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src_testbed/graphics.rs | crates/wgrapier/src_testbed/graphics.rs | #[cfg(feature = "dim2")]
use rapier2d as rapier;
#[cfg(feature = "dim3")]
use rapier3d as rapier;
use crate::backend::PhysicsBackend;
use crate::SimulationState;
use kiss3d::nalgebra::Point3;
#[cfg(feature = "dim3")]
use kiss3d::prelude::InstanceData;
#[cfg(feature = "dim3")]
use kiss3d::scene::SceneNode;
use kiss3d::window::Window;
use rapier::math::DIM;
use rapier::parry::shape::ShapeType;
use std::collections::HashMap;
use std::ops::MulAssign;
#[cfg(feature = "dim2")]
use {
kiss3d::{
prelude::{PlanarInstanceData, PlanarSceneNode},
resource::PlanarMesh,
},
nalgebra::{Point2, UnitComplex, Vector2},
rapier::parry::bounding_volume::Aabb,
std::cell::RefCell,
std::rc::Rc,
};
/// Static per-collider rendering info for one instance of an instanced scene node.
pub struct InstancedNodeEntry {
    /// Index of the collider in the collider set (and in the backend's pose array).
    pub index: usize,
    /// RGBA color of this instance.
    pub color: [f32; 4],
    /// Per-axis scale applied to the unit-sized base mesh.
    pub scale: [f32; DIM],
}
/// A scene node rendered with instancing, plus the per-instance data fed to it.
pub struct InstancedNode {
    /// The kiss3d scene node (2D variant).
    #[cfg(feature = "dim2")]
    pub node: PlanarSceneNode,
    /// The kiss3d scene node (3D variant).
    #[cfg(feature = "dim3")]
    pub node: SceneNode,
    /// Static per-instance info (collider index, color, scale).
    pub entries: Vec<InstancedNodeEntry>,
    /// Scratch buffer of per-frame instance data uploaded to the renderer.
    #[cfg(feature = "dim2")]
    pub data: Vec<PlanarInstanceData>,
    /// Scratch buffer of per-frame instance data uploaded to the renderer.
    #[cfg(feature = "dim3")]
    pub data: Vec<InstanceData>,
}
/// Rendering state for the current demo: all of its instanced scene nodes.
pub struct RenderContext {
    /// One node per shared shape type plus one per unique-geometry collider.
    pub instances: Vec<InstancedNode>,
}
impl RenderContext {
    /// Detaches every instanced node from the scene graph and empties the list.
    pub fn clear(&mut self) {
        self.instances
            .drain(..)
            .for_each(|mut instanced| instanced.node.unlink());
    }
}
/// Set up a simple scene using instancing for efficient rendering.
///
/// Shapes that can share a unit-sized base mesh (balls, cuboids, cylinders,
/// cones, capsules) are grouped into one instanced node per shape type, while
/// shapes with unique geometry (convex polygons/polyhedra, trimeshes,
/// polylines) each get their own "singleton" node.
pub async fn setup_graphics(window: &mut Window, phys: &SimulationState) -> RenderContext {
    let fixed_color = Point3::new(0.6, 0.6, 0.6);
    let mut instances = HashMap::new();
    let mut singletons = vec![];

    for (i, (_, co)) in phys.colliders.iter().enumerate() {
        let shape = co.shape();
        // Colliders without a parent body are treated as fixed too.
        let is_fixed = co.parent().map(|h| phys.bodies[h].is_fixed()) != Some(false);
        let color = if is_fixed {
            fixed_color
        } else {
            // Per-shape base color (0-255 RGB), darkened in 5 steps based on the
            // collider index for some visual variety, then normalized to [0, 1].
            let coeff = (1.0 - 0.15 * (i % 5) as f32) / 255.0;
            match shape.shape_type() {
                ShapeType::Ball => Point3::new(55.0, 126.0, 184.0) * coeff,
                ShapeType::Cuboid => Point3::new(55.0, 126.0, 34.0) * coeff,
                #[cfg(feature = "dim3")]
                ShapeType::Cylinder => Point3::new(140.0, 86.0, 75.0) * coeff,
                #[cfg(feature = "dim3")]
                ShapeType::Cone => Point3::new(255.0, 217.0, 47.0) * coeff,
                ShapeType::Capsule => Point3::new(204.0, 121.0, 167.0) * coeff,
                #[cfg(feature = "dim3")]
                ShapeType::ConvexPolyhedron => Point3::new(228.0, 26.0, 28.0) * coeff,
                _ => Point3::new(255.0, 127.0, 0.0) * coeff,
            }
        };

        match shape.shape_type() {
            ShapeType::Ball => {
                let instanced_node = instances.entry(ShapeType::Ball).or_insert_with(|| {
                    #[cfg(feature = "dim2")]
                    let node = window.add_circle(0.5);
                    #[cfg(feature = "dim3")]
                    let node = {
                        // NOTE: the default kiss3d sphere is a bit slow to render when we have
                        // 100K+ instances because it uses a lot of subdivision. Create one
                        // with lower details.
                        let lowres_sphere = kiss3d::procedural::sphere(1.0, 10, 10, true);
                        window.add_render_mesh(lowres_sphere, nalgebra::Vector3::repeat(1.0))
                    };
                    InstancedNode {
                        node,
                        entries: vec![],
                        data: vec![],
                    }
                });
                let ball = shape.as_ball().unwrap();
                instanced_node.entries.push(InstancedNodeEntry {
                    index: i,
                    color: [color.x, color.y, color.z, 1.0],
                    scale: [ball.radius * 2.0; DIM],
                })
            }
            ShapeType::Cuboid => {
                let instanced_node = instances.entry(ShapeType::Cuboid).or_insert_with(|| {
                    #[cfg(feature = "dim2")]
                    let node = window.add_rectangle(1.0, 1.0);
                    #[cfg(feature = "dim3")]
                    let node = window.add_cube(1.0, 1.0, 1.0);
                    InstancedNode {
                        node,
                        entries: vec![],
                        data: vec![],
                    }
                });
                let cuboid = shape.as_cuboid().unwrap();
                instanced_node.entries.push(InstancedNodeEntry {
                    index: i,
                    color: [color.x, color.y, color.z, 1.0],
                    scale: (cuboid.half_extents * 2.0).into(),
                })
            }
            #[cfg(feature = "dim3")]
            ShapeType::Cylinder => {
                let instanced_node = instances.entry(ShapeType::Cylinder).or_insert_with(|| {
                    let node = window.add_cylinder(1.0, 1.0);
                    InstancedNode {
                        node,
                        entries: vec![],
                        data: vec![],
                    }
                });
                let cyl = shape.as_cylinder().unwrap();
                instanced_node.entries.push(InstancedNodeEntry {
                    index: i,
                    color: [color.x, color.y, color.z, 1.0],
                    scale: [cyl.radius, cyl.half_height * 2.0, cyl.radius],
                })
            }
            #[cfg(feature = "dim3")]
            ShapeType::Cone => {
                let instanced_node = instances.entry(ShapeType::Cone).or_insert_with(|| {
                    let node = window.add_cone(1.0, 1.0);
                    InstancedNode {
                        node,
                        entries: vec![],
                        data: vec![],
                    }
                });
                let c = shape.as_cone().unwrap();
                instanced_node.entries.push(InstancedNodeEntry {
                    index: i,
                    color: [color.x, color.y, color.z, 1.0],
                    scale: [c.radius, c.half_height * 2.0, c.radius],
                })
            }
            ShapeType::Capsule => {
                let instanced_node = instances.entry(ShapeType::Capsule).or_insert_with(|| {
                    #[cfg(feature = "dim2")]
                    let node = window.add_planar_capsule(0.5, 1.0);
                    #[cfg(feature = "dim3")]
                    let node = window.add_capsule(0.5, 1.0);
                    InstancedNode {
                        node,
                        entries: vec![],
                        data: vec![],
                    }
                });
                let c = shape.as_capsule().unwrap();
                instanced_node.entries.push(InstancedNodeEntry {
                    index: i,
                    color: [color.x, color.y, color.z, 1.0],
                    #[cfg(feature = "dim2")]
                    scale: [c.radius * 2.0, c.segment.length()],
                    #[cfg(feature = "dim3")]
                    scale: [c.radius * 2.0, c.segment.length(), c.radius * 2.0],
                })
            }
            #[cfg(feature = "dim2")]
            ShapeType::ConvexPolygon => {
                // Unique geometry: one dedicated (singleton) node per collider.
                let poly = shape.as_convex_polygon().unwrap();
                let node = window
                    .add_convex_polygon(poly.points().to_vec(), nalgebra::Vector2::repeat(1.0));
                let mut singleton = InstancedNode {
                    node,
                    entries: vec![],
                    data: vec![],
                };
                singleton.entries.push(InstancedNodeEntry {
                    index: i,
                    color: [color.x, color.y, color.z, 1.0],
                    scale: [1.0, 1.0],
                });
                singletons.push(singleton);
            }
            #[cfg(feature = "dim3")]
            ShapeType::ConvexPolyhedron => {
                use kiss3d::procedural::RenderMesh;
                // Unique geometry: one dedicated (singleton) node per collider.
                let poly = shape.as_convex_polyhedron().unwrap();
                let (vtx, idx) = poly.to_trimesh();
                let trimesh = rapier::parry::shape::TriMesh::new(vtx, idx).unwrap();
                let mut render = RenderMesh::from(trimesh);
                // Use face normals as vertex normals for flat shading.
                render.replicate_vertices();
                render.recompute_normals();
                let node = window.add_render_mesh(render, nalgebra::Vector3::repeat(1.0));
                let mut singleton = InstancedNode {
                    node,
                    entries: vec![],
                    data: vec![],
                };
                singleton.entries.push(InstancedNodeEntry {
                    index: i,
                    color: [color.x, color.y, color.z, 1.0],
                    scale: [1.0, 1.0, 1.0],
                });
                singletons.push(singleton);
            }
            #[cfg(feature = "dim3")]
            ShapeType::TriMesh => {
                use kiss3d::procedural::RenderMesh;
                // Unique geometry: one dedicated (singleton) node per collider.
                let trimesh = shape.as_trimesh().unwrap();
                let mut render = RenderMesh::from(trimesh.clone());
                // Use face normals as vertex normals for flat shading.
                render.replicate_vertices();
                render.recompute_normals();
                let node = window.add_render_mesh(render, nalgebra::Vector3::repeat(1.0));
                let mut singleton = InstancedNode {
                    node,
                    entries: vec![],
                    data: vec![],
                };
                singleton.entries.push(InstancedNodeEntry {
                    index: i,
                    color: [color.x, color.y, color.z, 1.0],
                    scale: [1.0; DIM],
                });
                singletons.push(singleton);
            }
            #[cfg(feature = "dim2")]
            ShapeType::Polyline => {
                let polyline = shape.as_polyline().unwrap();
                let mut vtx = vec![];
                let mut idx = vec![];
                // The polyline itself has no thickness, so render each segment
                // as a thin rotated rectangle.
                for segment in polyline.segments() {
                    let thickness = 0.2;
                    let center = nalgebra::center(&segment.a, &segment.b);
                    let length = nalgebra::distance(&segment.a, &segment.b);
                    let rot =
                        UnitComplex::rotation_between(&Vector2::y(), &segment.scaled_direction());
                    let aabb = Aabb::from_half_extents(
                        Point2::origin(),
                        Vector2::new(thickness, length / 2.0),
                    );
                    let (mut seg_vtx, mut seg_idx) = aabb.to_trimesh();
                    seg_vtx.iter_mut().for_each(|pt| {
                        *pt = rot * *pt + center.coords;
                    });
                    // Offset the segment's indices by the vertices already accumulated.
                    seg_idx.iter_mut().for_each(|ii| {
                        ii[0] += vtx.len() as u32;
                        ii[1] += vtx.len() as u32;
                        ii[2] += vtx.len() as u32;
                    });
                    vtx.extend_from_slice(&seg_vtx);
                    idx.extend(seg_idx.iter().map(|ii| Point3::from(ii.map(|i| i as u16))));
                }
                let node = window.add_planar_mesh(
                    Rc::new(RefCell::new(PlanarMesh::new(vtx, idx, None, false))),
                    Vector2::repeat(1.0),
                );
                let mut singleton = InstancedNode {
                    node,
                    entries: vec![],
                    data: vec![],
                };
                singleton.entries.push(InstancedNodeEntry {
                    index: i,
                    color: [color.x, color.y, color.z, 1.0],
                    scale: [1.0; DIM],
                });
                singletons.push(singleton);
            }
            _ => todo!(),
        }
    }

    RenderContext {
        instances: (instances.into_values().chain(singletons.into_iter())).collect(),
    }
}
/// Update rendering instances with current physics poses.
///
/// For every instanced node, rebuilds its per-instance data (position,
/// scaled rotation matrix, color) from the latest backend poses, then
/// uploads the refreshed data to the renderer.
pub fn update_instances(render_ctx: &mut RenderContext, physics_backend: &PhysicsBackend) {
    for group in &mut render_ctx.instances {
        group.data.clear();
        for entry in &group.entries {
            let pose = physics_backend.poses()[entry.index];
            #[cfg(feature = "dim2")]
            {
                // 2D: the rotation becomes a 2x2 matrix whose columns are
                // scaled by the per-entry scale factors.
                let position = pose.similarity.isometry.translation.vector.into();
                let mut deformation = pose
                    .similarity
                    .isometry
                    .rotation
                    .to_rotation_matrix()
                    .into_inner();
                for k in 0..2 {
                    deformation.column_mut(k).mul_assign(entry.scale[k]);
                }
                group.data.push(PlanarInstanceData {
                    position,
                    deformation,
                    color: entry.color,
                });
            }
            #[cfg(feature = "dim3")]
            {
                // 3D: same idea with a 3x3 rotation matrix.
                let position = pose.isometry.translation.vector.into();
                let mut deformation = pose.isometry.rotation.to_rotation_matrix().into_inner();
                for k in 0..3 {
                    deformation.column_mut(k).mul_assign(entry.scale[k]);
                }
                group.data.push(InstanceData {
                    position,
                    deformation,
                    color: entry.color,
                });
            }
        }
        group.node.set_instances(&group.data);
    }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src_testbed/ui.rs | crates/wgrapier/src_testbed/ui.rs | #[cfg(feature = "dim2")]
use wgrapier2d as wgrapier;
#[cfg(feature = "dim3")]
use wgrapier3d as wgrapier;
use crate::backend::{BackendType, PhysicsBackend};
use crate::SimulationBuilders;
use kiss3d::egui::CollapsingHeader;
use kiss3d::window::Window;
use wgcore::gpu::GpuInstance;
use wgcore::timestamps::GpuTimestamps;
use wgrapier::pipeline::RunStats;
/// Capacity of the GPU timestamp query pool used for profiling.
// NOTE(review): 20 presumably covers the 5 timed phases (start/end pairs)
// with headroom — confirm against the pipeline's query usage.
const TIMESTAMP_QUERIES_CAPACITY: u32 = 20;
/// Execution state of the simulation loop, driven by the UI buttons.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum RunState {
    /// Step the simulation continuously.
    Running,
    /// Do not step until the user resumes.
    Paused,
    /// Advance a single step (the main loop presumably returns to `Paused`
    /// afterwards — handled outside this module).
    Step,
}
/// Outputs of [`render_ui`] that the caller must act upon.
#[derive(Default, Copy, Clone)]
pub struct UiInteractions {
    // `Some(index)` when the user selected a demo (or requested a restart),
    // meaning the scene at `index` must be (re)loaded.
    pub new_selected_demo: Option<usize>,
}
/// Bundles the active physics backend with optional GPU timestamp queries.
pub struct PhysicsContext {
    // The active physics backend (CPU or GPU).
    pub backend: PhysicsBackend,
    // Timestamp queries for GPU profiling; `None` when profiling is disabled.
    pub timestamps: Option<GpuTimestamps>,
}
impl PhysicsContext {
pub fn new(backend: PhysicsBackend) -> Self {
Self {
backend,
timestamps: None,
}
}
}
/// Draws a placeholder "Settings" window warning the user that shader
/// compilation is in progress (the app is unresponsive meanwhile).
pub fn render_compiling_message(window: &mut Window) {
    const MESSAGE: &str = "⏳ COMPILING SHADERS...\nThe app will freeze for a few seconds.\n\nIf nothing happens after a minute or two, check the dev console for an error.";
    window.draw_ui(|ctx| {
        kiss3d::egui::Window::new("Settings").show(ctx, |ui| {
            ui.colored_label(kiss3d::egui::Color32::from_rgb(100, 180, 255), MESSAGE);
        });
    });
}
/// Draws the testbed settings window and returns the interactions the caller
/// must handle (e.g. loading a different demo).
///
/// The window lets the user pick a demo, switch between the CPU and GPU
/// backends, toggle the Jacobi solver (3D only), inspect run statistics and
/// GPU timings, and control the run state (play/pause/step/restart).
pub fn render_ui(
    window: &mut Window,
    builders: &SimulationBuilders,
    selected_demo: &mut usize,
    backend_type: &mut BackendType,
    run_state: &mut RunState,
    run_stats: &RunStats,
    physics: &mut PhysicsContext,
    gpu: Option<&GpuInstance>,
    gpu_init_error: &Option<String>,
) -> UiInteractions {
    let mut result = UiInteractions::default();
    window.draw_ui(|ctx| {
        kiss3d::egui::Window::new("Settings").show(ctx, |ui| {
            // Display GPU error message if present
            if let Some(error_msg) = gpu_init_error {
                ui.colored_label(
                    kiss3d::egui::Color32::from_rgb(255, 50, 50),
                    format!("⚠ {}", error_msg),
                );
                ui.separator();
            }
            // Warn when a GPU step (including readback) exceeds 100ms: this is
            // usually a sign that the integrated GPU is selected instead of the
            // discrete one.
            if matches!(backend_type, BackendType::Gpu { .. }) && run_stats.total_simulation_time_with_readback.as_secs_f32() > 0.1 {
                ui.colored_label(
                    kiss3d::egui::Color32::from_rgb(255, 165, 0),
                    // `gpu` is `Option<&GpuInstance>` (Copy), so `unwrap` reads it
                    // without consuming; the previous `as_ref()` was redundant.
                    #[cfg(not(target_arch = "wasm32"))]
                    format!("⚠ running slow? If you have both an integrated and discrete GPU, ensure the discrete GPU is in use.\nSelected GPU: \"{}\"",
                        gpu.unwrap().adapter().get_info().name),
                    // No interpolation needed on wasm: pass the literal directly.
                    #[cfg(target_arch = "wasm32")]
                    "⚠ running slow? If you have both an integrated and discrete GPU, ensure your browser runs exclusively on the discrete GPU.",
                );
                ui.separator();
            }
            let mut changed = false;
            kiss3d::egui::ComboBox::from_label("selected sample")
                .selected_text(builders[*selected_demo].0)
                .show_ui(ui, |ui| {
                    for (i, (name, _)) in builders.iter().enumerate() {
                        changed = ui.selectable_value(selected_demo, i, *name).changed() || changed;
                    }
                });
            if changed {
                result.new_selected_demo = Some(*selected_demo);
            }
            ui.separator();
            // Backend selection
            ui.label("Physics Backend:");
            ui.horizontal(|ui| {
                let mut backend_changed = false;
                // The GPU radio button only shows when a GPU was initialized.
                if gpu.is_some()
                    && ui
                        .radio(
                            matches!(*backend_type, BackendType::Gpu { .. }),
                            "GPU (wgrapier)",
                        )
                        .clicked()
                    && !matches!(*backend_type, BackendType::Gpu { .. })
                {
                    *backend_type = BackendType::Gpu { use_jacobi: false };
                    backend_changed = true;
                }
                if ui
                    .radio(*backend_type == BackendType::Cpu, "CPU (rapier)")
                    .clicked()
                    && *backend_type != BackendType::Cpu
                {
                    *backend_type = BackendType::Cpu;
                    backend_changed = true;
                }
                if backend_changed {
                    // Reset the scene.
                    result.new_selected_demo = Some(*selected_demo);
                }
            });
            // NOTE: The jacobi solver is pretty much useless for the 2D version so we
            // don’t even let the user enable it.
            #[cfg(feature = "dim3")]
            if let BackendType::Gpu { use_jacobi } = backend_type {
                if ui
                    .checkbox(use_jacobi, "Jacobi solver (more perf, less stable)")
                    .changed()
                {
                    // Reset the simulation with the new solver.
                    result.new_selected_demo = Some(*selected_demo);
                }
            }
            ui.separator();
            ui.label(format!("Bodies count: {}", physics.backend.num_bodies()));
            ui.label(format!("Joints count: {}", physics.backend.num_joints()));
            if *backend_type == BackendType::Cpu {
                ui.label(format!(
                    "Total: {:.2}ms - {} fps",
                    run_stats.total_simulation_time_ms(),
                    (1000.0f32 / run_stats.total_simulation_time_ms()).round()
                ));
            } else if matches!(*backend_type, BackendType::Gpu { .. }) {
                // Timestamp queries are opt-in since they have a runtime cost.
                let mut timestamps_enabled = physics.timestamps.is_some();
                if ui
                    .checkbox(&mut timestamps_enabled, "enable timestamp queries")
                    .changed()
                {
                    if timestamps_enabled {
                        let gpu = gpu.unwrap();
                        physics.timestamps =
                            Some(GpuTimestamps::new(gpu.device(), TIMESTAMP_QUERIES_CAPACITY));
                    } else {
                        physics.timestamps = None;
                    }
                }
                if let Some(gpu) = gpu {
                    ui.collapsing("GPU infos", |ui| {
                        ui.label(format!("{:#?}", gpu.adapter().get_info()));
                    });
                }
                CollapsingHeader::new(format!(
                    "Total phys. runtime: {:.2}ms - {} fps",
                    run_stats.total_simulation_time_ms(),
                    (1000.0f32 / run_stats.total_simulation_time_ms()).round()
                ))
                .id_salt("total")
                .show(ui, |ui| {
                    ui.label(format!("num_colors: {}", run_stats.num_colors));
                    ui.label(format!(
                        "constraints_coloring_time: {:.2}",
                        run_stats.coloring_time.as_secs_f32() * 1000.0
                    ));
                    ui.label(format!(
                        "coloring_iterations: {} x 10",
                        run_stats.coloring_iterations
                    ));
                    ui.label(format!(
                        "start_to_pairs_count_time: {:.2}",
                        run_stats.start_to_pairs_count_time.as_secs_f32() * 1000.0
                    ));
                    ui.label(format!(
                        "coloring_fallback_time: {:.2}",
                        run_stats.coloring_fallback_time.as_secs_f32() * 1000.0
                    ));
                });
                ui.collapsing("timestamp queries", |ui| {
                    ui.label(format!(
                        "timestamp_update_mass_props: {:.2}",
                        run_stats.timestamp_update_mass_props
                    ));
                    ui.label(format!(
                        "timestamp_broad_phase: {:.2}",
                        run_stats.timestamp_broad_phase
                    ));
                    ui.label(format!(
                        "timestamp_narrow_phase: {:.2}",
                        run_stats.timestamp_narrow_phase
                    ));
                    ui.label(format!(
                        "timestamp_solver_prep: {:.2}",
                        run_stats.timestamp_solver_prep
                    ));
                    ui.label(format!(
                        "timestamp_solver_solve: {:.2}",
                        run_stats.timestamp_solver_solve
                    ));
                });
            }
            // Play/pause/step/restart controls.
            ui.horizontal(|ui| {
                let play_pause_label = if *run_state == RunState::Running {
                    "Pause"
                } else {
                    "Play"
                };
                if ui.button(play_pause_label).clicked() {
                    if *run_state == RunState::Running {
                        *run_state = RunState::Paused;
                    } else {
                        *run_state = RunState::Running;
                    }
                }
                if ui.button("Step").clicked() {
                    *run_state = RunState::Step;
                }
                if ui.button("Restart").clicked() {
                    result.new_selected_demo = Some(*selected_demo);
                }
            });
        });
    });
    result
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src_testbed/backend/gpu.rs | crates/wgrapier/src_testbed/backend/gpu.rs | #[cfg(feature = "dim2")]
use wgrapier2d as wgrapier;
#[cfg(feature = "dim3")]
use wgrapier3d as wgrapier;
use super::SimulationBackend;
use crate::SimulationState;
use wgcore::gpu::GpuInstance;
use wgcore::tensor::GpuVector;
use wgcore::timestamps::GpuTimestamps;
use wgpu::BufferUsages;
use wgrapier::pipeline::{GpuPhysicsPipeline, GpuPhysicsState, RunStats};
use wgrapier::wgparry::math::GpuSim;
/// GPU-based physics backend using wgrapier
pub struct GpuBackend {
    // Compiled compute pipelines driving the GPU simulation step.
    pipeline: GpuPhysicsPipeline,
    // GPU-resident physics state (bodies, colliders, joints, ...).
    state: GpuPhysicsState,
    // Staging buffer (MAP_READ | COPY_DST) used to read poses back from the GPU.
    poses_staging: GpuVector<GpuSim>,
    // CPU-side copy of the latest poses, refreshed after each step for rendering.
    poses_cache: Vec<GpuSim>,
    // Whether the Jacobi solver is used instead of the default solver.
    use_jacobi: bool,
}
impl GpuBackend {
    /// Attempts to create a new GPU backend, returning an error if initialization fails.
    ///
    /// This method can fail if:
    /// - Shader compilation fails
    /// - GPU device doesn't support required features
    /// - Memory allocation fails
    pub async fn try_new(
        gpu: &GpuInstance,
        phys: &SimulationState,
        use_jacobi: bool,
    ) -> Result<Self, String> {
        let pipeline = GpuPhysicsPipeline::from_device(gpu.device())
            .map_err(|e| format!("Failed to compile shaders: {}", e))?;
        // Delegate the (infallible) state construction to `with_pipeline` so
        // the initialization logic lives in a single place instead of being
        // duplicated here.
        Ok(Self::with_pipeline(gpu, pipeline, phys, use_jacobi).await)
    }
    /// Creates a new GPU backend with a pre-compiled pipeline.
    ///
    /// This is faster than [`try_new`](Self::try_new) when switching demos because
    /// it reuses the existing pipeline instead of recompiling shaders.
    pub async fn with_pipeline(
        gpu: &GpuInstance,
        pipeline: GpuPhysicsPipeline,
        phys: &SimulationState,
        use_jacobi: bool,
    ) -> Self {
        // Upload the rapier scene into GPU buffers.
        let state = GpuPhysicsState::from_rapier(
            gpu.device(),
            &phys.bodies,
            &phys.colliders,
            &phys.impulse_joints,
            use_jacobi,
        );
        // Readback buffer for body poses, mapped after every step.
        let poses_staging = GpuVector::uninit(
            gpu.device(),
            state.poses().len() as u32,
            BufferUsages::MAP_READ | BufferUsages::COPY_DST,
        );
        // Prime the CPU-side cache so rendering works before the first step.
        let poses_cache = state.poses().slow_read(gpu).await;
        Self {
            pipeline,
            state,
            poses_staging,
            poses_cache,
            use_jacobi,
        }
    }
    /// Extracts the pipeline from this backend, consuming it.
    ///
    /// Useful for reusing the pipeline when switching demos.
    pub fn into_pipeline(self) -> GpuPhysicsPipeline {
        self.pipeline
    }
    /// Creates a new GPU backend, panicking if initialization fails.
    ///
    /// Use [`try_new`](Self::try_new) for error handling.
    #[allow(dead_code)]
    pub async fn new(gpu: &GpuInstance, phys: &SimulationState, use_jacobi: bool) -> Self {
        Self::try_new(gpu, phys, use_jacobi).await.unwrap()
    }
}
impl SimulationBackend for GpuBackend {
    fn poses(&self) -> &[GpuSim] {
        &self.poses_cache
    }
    fn num_bodies(&self) -> usize {
        self.poses_cache.len()
    }
    fn num_joints(&self) -> usize {
        self.state.joints().len()
    }
    /// Runs one GPU simulation step, then reads poses (and, when enabled,
    /// timestamp-query results) back to the CPU.
    async fn step(
        &mut self,
        gpu: Option<&GpuInstance>,
        mut timestamps: Option<&mut GpuTimestamps>,
    ) -> RunStats {
        // The GPU backend requires a GPU instance; `None` is a caller bug.
        let gpu = gpu.unwrap();
        if let Some(timestamps) = &mut timestamps {
            timestamps.clear();
        }
        let t0 = web_time::Instant::now();
        let mut run_stats = self
            .pipeline
            .step(
                gpu,
                &mut self.state,
                timestamps.as_deref_mut(),
                self.use_jacobi,
            )
            .await;
        // Read back poses and timestamps from GPU
        let mut encoder = gpu.device().create_command_encoder(&Default::default());
        if let Some(timestamps) = &mut timestamps {
            timestamps.resolve(&mut encoder);
        }
        self.poses_staging
            .copy_from(&mut encoder, self.state.poses());
        gpu.queue().submit(Some(encoder.finish()));
        self.poses_cache
            .resize(self.poses_staging.len() as usize, Default::default());
        self.poses_staging
            .read_to(gpu.device(), &mut self.poses_cache)
            .await
            .unwrap();
        run_stats.total_simulation_time_with_readback = t0.elapsed();
        let timestamps = if let Some(timestamps) = &mut timestamps {
            timestamps
                .wait_for_results_ms_async(gpu.queue(), gpu.device())
                .await
                .unwrap()
        } else {
            vec![]
        };
        // Each phase timing is the difference of a (start, end) timestamp pair.
        // Only fill the stats when all five expected pairs were recorded;
        // the previous code indexed `timings[0..=4]` behind a mere
        // `!timestamps.is_empty()` check, which would panic on a short result.
        let timings = timestamps
            .chunks_exact(2)
            .map(|t| t[1] - t[0])
            .collect::<Vec<_>>();
        if timings.len() >= 5 {
            run_stats.timestamp_update_mass_props = timings[0];
            run_stats.timestamp_broad_phase = timings[1];
            run_stats.timestamp_narrow_phase = timings[2];
            run_stats.timestamp_solver_prep = timings[3];
            run_stats.timestamp_solver_solve = timings[4];
        }
        run_stats
    }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src_testbed/backend/mod.rs | crates/wgrapier/src_testbed/backend/mod.rs | mod cpu;
mod gpu;
pub use cpu::CpuBackend;
pub use gpu::GpuBackend;
#[cfg(feature = "dim2")]
use wgrapier2d as wgrapier;
#[cfg(feature = "dim3")]
use wgrapier3d as wgrapier;
use wgcore::gpu::GpuInstance;
use wgcore::timestamps::GpuTimestamps;
use wgrapier::pipeline::RunStats;
use wgrapier::wgparry::math::GpuSim;
/// Which physics implementation the testbed runs.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum BackendType {
    /// Reference CPU implementation (rapier).
    Cpu,
    /// GPU implementation (wgrapier); `use_jacobi` selects the Jacobi solver.
    Gpu { use_jacobi: bool },
}
/// Trait for physics simulation backends (CPU or GPU)
pub trait SimulationBackend {
    /// Get the current poses for rendering
    fn poses(&self) -> &[GpuSim];
    /// Number of simulated bodies reported for the UI.
    fn num_bodies(&self) -> usize;
    /// Number of impulse joints reported for the UI.
    fn num_joints(&self) -> usize;
    /// Step the simulation
    ///
    /// `gpu` must be `Some` for GPU backends (the CPU backend ignores it);
    /// `timestamps`, when provided, collects GPU profiling data.
    async fn step(
        &mut self,
        gpu: Option<&GpuInstance>,
        timestamps: Option<&mut GpuTimestamps>,
    ) -> RunStats;
}
/// Runtime-selectable physics backend: CPU (rapier) or GPU (wgrapier).
#[allow(clippy::large_enum_variant)]
pub enum PhysicsBackend {
    /// CPU implementation backed by rapier.
    Cpu(CpuBackend),
    /// GPU implementation backed by wgrapier.
    Gpu(GpuBackend),
}
// Every method below simply dispatches to the active backend's
// `SimulationBackend` implementation.
impl PhysicsBackend {
    /// Steps the active backend once. See [`SimulationBackend::step`].
    pub async fn step(
        &mut self,
        gpu: Option<&GpuInstance>,
        timestamps: Option<&mut GpuTimestamps>,
    ) -> RunStats {
        match self {
            PhysicsBackend::Cpu(backend) => backend.step(gpu, timestamps).await,
            PhysicsBackend::Gpu(backend) => backend.step(gpu, timestamps).await,
        }
    }
    /// Latest body poses, in the GPU pose format, for rendering.
    pub fn poses(&self) -> &[GpuSim] {
        match self {
            PhysicsBackend::Cpu(backend) => backend.poses(),
            PhysicsBackend::Gpu(backend) => backend.poses(),
        }
    }
    /// Number of simulated bodies reported by the active backend.
    pub fn num_bodies(&self) -> usize {
        match self {
            PhysicsBackend::Cpu(backend) => backend.num_bodies(),
            PhysicsBackend::Gpu(backend) => backend.num_bodies(),
        }
    }
    /// Number of impulse joints reported by the active backend.
    pub fn num_joints(&self) -> usize {
        match self {
            PhysicsBackend::Cpu(backend) => backend.num_joints(),
            PhysicsBackend::Gpu(backend) => backend.num_joints(),
        }
    }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/src_testbed/backend/cpu.rs | crates/wgrapier/src_testbed/backend/cpu.rs | #[cfg(feature = "dim2")]
use rapier2d as rapier;
#[cfg(feature = "dim3")]
use rapier3d as rapier;
#[cfg(feature = "dim2")]
use wgrapier2d as wgrapier;
#[cfg(feature = "dim3")]
use wgrapier3d as wgrapier;
use super::SimulationBackend;
use crate::SimulationState;
#[cfg(feature = "dim2")]
use nalgebra::Similarity2;
#[cfg(feature = "dim3")]
use nalgebra::Similarity3;
#[cfg(feature = "dim3")]
use rapier::dynamics::FrictionModel;
use rapier::dynamics::{CCDSolver, IntegrationParameters, IslandManager};
use rapier::geometry::{BroadPhaseBvh, ColliderSet, NarrowPhase};
use rapier::prelude::{ImpulseJointSet, MultibodyJointSet, PhysicsPipeline, RigidBodySet};
use wgcore::gpu::GpuInstance;
use wgcore::timestamps::GpuTimestamps;
use wgrapier::pipeline::RunStats;
use wgrapier::wgparry::math::GpuSim;
/// CPU-based physics backend using rapier
pub struct CpuBackend {
    // Standard rapier simulation structures.
    pipeline: PhysicsPipeline,
    integration_parameters: IntegrationParameters,
    islands: IslandManager,
    broad_phase: BroadPhaseBvh,
    narrow_phase: NarrowPhase,
    bodies: RigidBodySet,
    colliders: ColliderSet,
    impulse_joints: ImpulseJointSet,
    multibody_joints: MultibodyJointSet,
    ccd_solver: CCDSolver,
    // Collider poses converted to the GPU pose format, refreshed after each
    // step so rendering consumes a uniform representation for both backends.
    // NOTE(review): the cache holds one pose per *collider*, so `num_bodies`
    // actually reports the collider count — confirm this matches the GPU side.
    poses_cache: Vec<GpuSim>,
}
impl CpuBackend {
    /// Builds a CPU backend from the initial simulation state.
    ///
    /// Takes ownership of the bodies, colliders and joints in `phys` and
    /// primes the pose cache so rendering works before the first step.
    pub fn new(phys: SimulationState) -> Self {
        // Cache the initial collider poses for rendering; the cache is
        // refreshed after every `step`.
        // (A previous version also built a `shapes_cache` of cloned shared
        // shapes here that was never stored nor read — dead work, removed.)
        let mut poses_cache = Vec::new();
        for (_, co) in phys.colliders.iter() {
            #[cfg(feature = "dim2")]
            {
                poses_cache.push(GpuSim {
                    similarity: Similarity2::from_isometry(*co.position(), 1.0),
                    padding: Default::default(),
                });
            }
            #[cfg(feature = "dim3")]
            {
                poses_cache.push(Similarity3::from_isometry(*co.position(), 1.0));
            }
        }
        #[allow(unused_mut)] // mut not needed in 2D but needed in 3d.
        let mut params = IntegrationParameters::default();
        // NOTE: to keep the comparison fair, use the same friction model as the GPU version
        // (the GPU doesn’t implement twist friction yet).
        #[cfg(feature = "dim3")]
        {
            params.friction_model = FrictionModel::Coulomb;
        }
        Self {
            pipeline: PhysicsPipeline::new(),
            integration_parameters: params,
            islands: IslandManager::new(),
            broad_phase: BroadPhaseBvh::new(),
            narrow_phase: NarrowPhase::new(),
            bodies: phys.bodies,
            colliders: phys.colliders,
            impulse_joints: phys.impulse_joints,
            multibody_joints: MultibodyJointSet::new(),
            ccd_solver: CCDSolver::new(),
            poses_cache,
        }
    }
}
impl SimulationBackend for CpuBackend {
    fn poses(&self) -> &[GpuSim] {
        self.poses_cache.as_slice()
    }
    fn num_bodies(&self) -> usize {
        self.poses_cache.len()
    }
    fn num_joints(&self) -> usize {
        self.impulse_joints.len()
    }
    /// Runs one rapier step, then refreshes the GPU-format pose cache.
    /// The GPU instance and timestamp queries are irrelevant on the CPU path.
    async fn step(
        &mut self,
        _gpu: Option<&GpuInstance>,
        _timestamps: Option<&mut GpuTimestamps>,
    ) -> RunStats {
        let start = web_time::Instant::now();
        let gravity = rapier::math::Vector::y() * -9.81;
        self.pipeline.step(
            &gravity,
            &self.integration_parameters,
            &mut self.islands,
            &mut self.broad_phase,
            &mut self.narrow_phase,
            &mut self.bodies,
            &mut self.colliders,
            &mut self.impulse_joints,
            &mut self.multibody_joints,
            &mut self.ccd_solver,
            &(),
            &(),
        );
        // Only the solver call itself counts towards the reported time.
        let elapsed = start.elapsed();
        // Rebuild the pose cache from the updated collider positions.
        self.poses_cache.clear();
        #[cfg(feature = "dim2")]
        self.poses_cache
            .extend(self.colliders.iter().map(|(_, co)| GpuSim {
                similarity: Similarity2::from_isometry(*co.position(), 1.0),
                padding: Default::default(),
            }));
        #[cfg(feature = "dim3")]
        self.poses_cache.extend(
            self.colliders
                .iter()
                .map(|(_, co)| Similarity3::from_isometry(*co.position(), 1.0)),
        );
        RunStats {
            total_simulation_time_with_readback: elapsed,
            ..Default::default()
        }
    }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples2d/all_examples2.rs | crates/wgrapier/crates/examples2d/all_examples2.rs | #![allow(dead_code)]
use std::cmp::Ordering;
use wgrapier_testbed2d::{SimulationState, Testbed};
mod balls2;
mod boxes2;
mod boxes_and_balls2;
mod joint_ball2;
mod joint_fixed2;
mod joint_prismatic2;
mod polyline2;
mod primitives2;
mod pyramid2;
/// Returns the demo name passed after a `--example` command-line flag, if any.
///
/// Scans the process arguments for `--example`; the demo name is the argument
/// that immediately follows it. Returns `None` when the flag is absent or is
/// the last argument.
fn demo_name_from_command_line() -> Option<String> {
    let mut args = std::env::args();
    while let Some(arg) = args.next() {
        // `String == &str` compares directly; no need to re-slice with `&arg[..]`.
        if arg == "--example" {
            return args.next();
        }
    }
    None
}
#[cfg(target_arch = "wasm32")]
/// Returns the demo name encoded in the page URL (wasm builds).
/// Currently disabled: always returns `None`.
fn demo_name_from_url() -> Option<String> {
    None
    // let window = stdweb::web::window();
    // let hash = window.location()?.search().ok()?;
    // Some(hash[1..].to_string())
}
#[cfg(not(target_arch = "wasm32"))]
/// Native builds have no URL to parse: always returns `None`.
fn demo_name_from_url() -> Option<String> {
    None
}
#[kiss3d::main]
pub async fn main() {
    // All available demos, as (name, builder) pairs.
    let mut builders: Vec<(_, fn() -> SimulationState)> = vec![
        ("Balls", balls2::init_world),
        ("Boxes", boxes2::init_world),
        ("Boxes & balls", boxes_and_balls2::init_world),
        ("Pyramid", pyramid2::init_world),
        ("Primitives", primitives2::init_world),
        ("Polyline", polyline2::init_world),
        ("Joints (spherical)", joint_ball2::init_world),
        ("Joints (prismatic)", joint_prismatic2::init_world),
        ("Joints (fixed)", joint_fixed2::init_world),
    ];
    // Lexicographic sort, with stress tests (names starting with '(') moved
    // to the end of the list.
    builders.sort_by(|a, b| match (a.0.starts_with('('), b.0.starts_with('(')) {
        (false, true) => Ordering::Less,
        (true, false) => Ordering::Greater,
        _ => a.0.cmp(b.0),
    });
    Testbed::from_builders(builders).run().await
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples2d/joint_ball2.rs | crates/wgrapier/crates/examples2d/joint_ball2.rs | use rapier2d::prelude::*;
use wgrapier_testbed2d::SimulationState;
/// Builds a large "net": a numk x numi grid of balls linked by revolute
/// joints, hanging from fixed bodies along parts of its top row.
pub fn init_world() -> SimulationState {
    /*
     * World
     */
    let mut bodies = RigidBodySet::new();
    let mut colliders = ColliderSet::new();
    let mut impulse_joints = ImpulseJointSet::new();
    /*
     * Create the balls
     */
    // Build the rigid body.
    let rad = 0.4;
    let numi = 350; // Num vertical nodes.
    let numk = 350; // Num horizontal nodes.
    let shift = 1.0;
    // Handles are pushed in (k, i) iteration order; the joint code below
    // relies on this ordering to find a node's neighbors.
    let mut body_handles = Vec::new();
    for k in 0..numk {
        for i in 0..numi {
            let fk = k as f32;
            let fi = i as f32;
            // Pin the top row (i == 0) at the left and right fifths so the
            // net hangs; every other node is dynamic.
            let status = if i == 0 && (k < numk / 5 || k >= (4 * numk) / 5) {
                RigidBodyType::Fixed
            } else {
                RigidBodyType::Dynamic
            };
            let rigid_body =
                RigidBodyBuilder::new(status).translation(vector![fk * shift, -fi * shift]);
            let child_handle = bodies.insert(rigid_body);
            let collider = ColliderBuilder::ball(rad);
            colliders.insert_with_parent(collider, child_handle, &mut bodies);
            // Vertical joint.
            // The last pushed handle is node (k, i - 1), i.e. the node above.
            if i > 0 {
                let parent_handle = *body_handles.last().unwrap();
                let joint = RevoluteJointBuilder::new().local_anchor2(point![0.0, shift]);
                impulse_joints.insert(parent_handle, child_handle, joint, true);
            }
            // Horizontal joint.
            // Node (k - 1, i) sits exactly `numi` pushes before the current one.
            if k > 0 {
                let parent_index = body_handles.len() - numi;
                let parent_handle = body_handles[parent_index];
                let joint = RevoluteJointBuilder::new().local_anchor2(point![-shift, 0.0]);
                impulse_joints.insert(parent_handle, child_handle, joint, true);
            }
            body_handles.push(child_handle);
        }
    }
    /*
     * Set up the testbed.
     */
    SimulationState {
        bodies,
        colliders,
        impulse_joints,
    }
    // testbed.look_at(point![numk as f32 * rad, numi as f32 * -rad], 5.0);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples2d/pyramid2.rs | crates/wgrapier/crates/examples2d/pyramid2.rs | use rapier2d::prelude::*;
use wgrapier_testbed2d::SimulationState;
/// Builds four side-by-side triangular pyramids of boxes resting on a flat
/// ground, used as a stacking stress test.
pub fn init_world() -> SimulationState {
    /*
     * World
     */
    let mut bodies = RigidBodySet::new();
    let mut colliders = ColliderSet::new();
    let impulse_joints = ImpulseJointSet::new();
    let _multibody_joints = MultibodyJointSet::new();
    /*
     * Ground
     */
    let ground_size = 500.0;
    let ground_thickness = 1.0;
    let ground_handle = bodies.insert(RigidBodyBuilder::fixed());
    colliders.insert_with_parent(
        ColliderBuilder::cuboid(ground_size, ground_thickness),
        ground_handle,
        &mut bodies,
    );
    /*
     * Create the cubes.
     */
    let num = 200;
    let rad = 0.5;
    let shiftx = rad * 2.0 + 0.1;
    let shifty = rad * 2.0;
    let centerx = shiftx * (num as f32) / 2.0;
    let centery = shifty / 2.0 + ground_thickness;
    for pyramid in 0..4 {
        for row in 0usize..num {
            // Each row holds one box fewer than the row below it.
            for col in row..num {
                let fcol = col as f32;
                let frow = row as f32;
                let x = (frow * shiftx / 2.0) + (fcol - frow) * shiftx - centerx
                    + (pyramid as f32 - 1.5) * rad * 2.5 * num as f32;
                let y = frow * shifty + centery;
                let handle =
                    bodies.insert(RigidBodyBuilder::dynamic().translation(vector![x, y]));
                colliders.insert_with_parent(
                    ColliderBuilder::cuboid(rad, rad),
                    handle,
                    &mut bodies,
                );
            }
        }
    }
    /*
     * Set up the testbed.
     */
    SimulationState {
        bodies,
        colliders,
        impulse_joints,
    }
    // testbed.look_at(point![0.0, 2.5], 5.0);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples2d/joint_prismatic2.rs | crates/wgrapier/crates/examples2d/joint_prismatic2.rs | use rapier2d::prelude::*;
use wgrapier_testbed2d::SimulationState;
/// Builds a 38 x 300 grid of hanging chains: each chain starts from a fixed
/// cube and links `num` dynamic cubes below it with limited prismatic joints.
pub fn init_world() -> SimulationState {
    /*
     * World
     */
    let mut bodies = RigidBodySet::new();
    let mut colliders = ColliderSet::new();
    let mut impulse_joints = ImpulseJointSet::new();
    /*
     * Create the balls
     */
    // Build the rigid body.
    let rad = 0.4;
    let num = 10; // Links per chain.
    let shift = 1.0;
    for l in 0..38 {
        let y = l as f32 * shift * (num as f32 + 2.0) * 2.0;
        for j in 0..300 {
            let x = j as f32 * shift * 4.0;
            // Fixed anchor cube at the top of the chain.
            let ground = RigidBodyBuilder::fixed().translation(vector![x, y]);
            let mut curr_parent = bodies.insert(ground);
            let collider = ColliderBuilder::cuboid(rad, rad);
            colliders.insert_with_parent(collider, curr_parent, &mut bodies);
            for i in 0..num {
                // Stack the links downward, one `shift` apart (shadows `y`).
                let y = y - (i + 1) as f32 * shift;
                let density = 1.0;
                let rigid_body = RigidBodyBuilder::dynamic().translation(vector![x, y]);
                let curr_child = bodies.insert(rigid_body);
                let collider = ColliderBuilder::cuboid(rad, rad).density(density);
                colliders.insert_with_parent(collider, curr_child, &mut bodies);
                // Alternate the slide axis between the two diagonals so the
                // chain zig-zags as it slides.
                let axis = if i % 2 == 0 {
                    UnitVector::new_normalize(vector![1.0, 1.0])
                } else {
                    UnitVector::new_normalize(vector![-1.0, 1.0])
                };
                // Translational joint along `axis`, limited to [-1.5, 1.5].
                let prism = PrismaticJointBuilder::new(axis)
                    .local_anchor2(point![0.0, shift])
                    .limits([-1.5, 1.5]);
                impulse_joints.insert(curr_parent, curr_child, prism, true);
                curr_parent = curr_child;
            }
        }
    }
    /*
     * Set up the testbed.
     */
    SimulationState {
        bodies,
        colliders,
        impulse_joints,
    }
    // testbed.look_at(point![80.0, 80.0], 15.0);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples2d/boxes2.rs | crates/wgrapier/crates/examples2d/boxes2.rs | use rapier2d::prelude::*;
use wgrapier_testbed2d::SimulationState;
/// Drops a large grid of small boxes into an open box made of a floor and
/// two vertical walls.
pub fn init_world() -> SimulationState {
    /*
     * World
     */
    let mut bodies = RigidBodySet::new();
    let mut colliders = ColliderSet::new();
    let impulse_joints = ImpulseJointSet::new();
    let _multibody_joints = MultibodyJointSet::new();
    /*
     * Ground: floor plus two quarter-turn side walls.
     */
    let ground_size = 150.0;
    let floor = bodies.insert(RigidBodyBuilder::fixed());
    colliders.insert_with_parent(
        ColliderBuilder::cuboid(ground_size, 1.5),
        floor,
        &mut bodies,
    );
    // Right wall first, then left, to preserve insertion order.
    for sign in [1.0f32, -1.0] {
        let wall = bodies.insert(
            RigidBodyBuilder::fixed()
                .rotation(std::f32::consts::FRAC_PI_2)
                .translation(vector![sign * ground_size, ground_size * 1.2]),
        );
        colliders.insert_with_parent(
            ColliderBuilder::cuboid(ground_size * 1.2, 1.5),
            wall,
            &mut bodies,
        );
    }
    /*
     * Create the cubes: a num x (num * 4) grid; every other row is slightly
     * offset horizontally so the stack topples into a pile.
     */
    let num = 124;
    let rad = 0.5;
    let shift = rad * 2.0 + 0.2;
    let centerx = shift * (num as f32) / 2.0;
    let centery = shift / 2.0;
    for col in 0..num {
        for row in 0usize..num * 4 {
            let x = col as f32 * shift - centerx + (row % 2) as f32 * 0.2;
            let y = row as f32 * shift + centery + 20.0;
            let handle = bodies.insert(RigidBodyBuilder::dynamic().translation(vector![x, y]));
            colliders.insert_with_parent(ColliderBuilder::cuboid(rad, rad), handle, &mut bodies);
        }
    }
    /*
     * Set up the testbed.
     */
    SimulationState {
        bodies,
        colliders,
        impulse_joints,
    }
    // testbed.look_at(point![0.0, 50.0], 10.0);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples2d/polyline2.rs | crates/wgrapier/crates/examples2d/polyline2.rs | use rapier2d::prelude::*;
use wgrapier_testbed2d::SimulationState;
/// Drops a grid of mixed shapes onto a wavy polyline ground whose endpoints
/// are raised to act as containment walls.
pub fn init_world() -> SimulationState {
    /*
     * World
     */
    let mut bodies = RigidBodySet::new();
    let mut colliders = ColliderSet::new();
    let impulse_joints = ImpulseJointSet::new();
    let _multibody_joints = MultibodyJointSet::new();
    /*
     * Ground
     */
    // Cosine-profile floor sampled at `nsubdivs` points, with both endpoints
    // raised to y = 240 so shapes cannot escape sideways.
    let ground_size = 200.0;
    let nsubdivs = 40;
    let step_size = ground_size / (nsubdivs as f32);
    let mut points = Vec::new();
    points.push(point![-ground_size / 2.0, 240.0]);
    for i in 1..nsubdivs - 1 {
        let x = -ground_size / 2.0 + i as f32 * step_size;
        let y = (i as f32 / nsubdivs as f32 * 10.0).cos() * 20.0;
        points.push(point![x, y]);
    }
    points.push(point![ground_size / 2.0, 240.0]);
    let rigid_body = RigidBodyBuilder::fixed();
    let handle = bodies.insert(rigid_body);
    let collider = ColliderBuilder::polyline(points, None);
    colliders.insert_with_parent(collider, handle, &mut bodies);
    /*
     * Create the cubes
     */
    // Shape type cycles with the row index: cuboid, capsule, convex polygon
    // (odd columns only), ball.
    let num = 100;
    let rad = 0.5;
    let shift = rad * 2.0 + 0.4;
    let centerx = shift * (num as f32) / 2.0;
    let centery = shift / 2.0;
    // Seeded RNG so the convex polygons are identical on every run.
    let mut rng = oorandom::Rand32::new(0);
    for i in 0..num {
        for j in 0usize..num * 3 {
            let x = i as f32 * shift - centerx + (j % 2) as f32 * 0.2;
            let y = j as f32 * shift + centery + 20.0;
            // Build the rigid body.
            let rigid_body = RigidBodyBuilder::dynamic().translation(vector![x, y]);
            let handle = bodies.insert(rigid_body);
            let collider = match j % 4 {
                0 => ColliderBuilder::cuboid(rad, rad),
                1 => ColliderBuilder::capsule_y(rad, rad),
                2 => {
                    // NOTE(review): this `continue` runs AFTER the body was
                    // inserted above, leaving a collider-less dynamic body in
                    // the set — likely unintended; consider selecting the
                    // shape before inserting the body.
                    if i % 2 == 0 {
                        continue;
                    }
                    // Make a convex polygon.
                    let mut points = Vec::new();
                    let scale = 2.0;
                    for _ in 0..10 {
                        let pt = Point::new(rng.rand_float() - 0.5, rng.rand_float() - 0.5);
                        points.push(pt * scale);
                    }
                    // TODO: align the collider’s local origin to its center-of-mass.
                    // wgrapier currently doesn’t support misaligned center-of-masses.
                    // Workaround: shift every point by the hull’s center-of-mass
                    // so the local origin coincides with it.
                    let shape = SharedShape::convex_hull(&points).unwrap();
                    let mprops = shape.mass_properties(1.0);
                    points
                        .iter_mut()
                        .for_each(|pt| *pt -= mprops.local_com.coords);
                    ColliderBuilder::convex_hull(&points).unwrap()
                }
                _ => ColliderBuilder::ball(rad),
            };
            colliders.insert_with_parent(collider, handle, &mut bodies);
        }
    }
    /*
     * Set up the testbed.
     */
    SimulationState {
        bodies,
        colliders,
        impulse_joints,
    }
    // testbed.look_at(point![0.0, 50.0], 10.0);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples2d/balls2.rs | crates/wgrapier/crates/examples2d/balls2.rs | use rapier2d::prelude::*;
use wgrapier_testbed2d::SimulationState;
/// Drops a large grid of balls into an open box made of a floor and two
/// vertical walls.
pub fn init_world() -> SimulationState {
    /*
     * World
     */
    let mut bodies = RigidBodySet::new();
    let mut colliders = ColliderSet::new();
    let impulse_joints = ImpulseJointSet::new();
    let _multibody_joints = MultibodyJointSet::new();
    /*
     * Ground: floor plus two quarter-turn side walls.
     */
    let ground_size = 150.0;
    let floor = bodies.insert(RigidBodyBuilder::fixed());
    colliders.insert_with_parent(ColliderBuilder::cuboid(ground_size, 1.5), floor, &mut bodies);
    let right_wall = bodies.insert(
        RigidBodyBuilder::fixed()
            .rotation(std::f32::consts::FRAC_PI_2)
            .translation(vector![ground_size, ground_size * 1.2]),
    );
    colliders.insert_with_parent(
        ColliderBuilder::cuboid(ground_size * 1.2, 1.5),
        right_wall,
        &mut bodies,
    );
    let left_wall = bodies.insert(
        RigidBodyBuilder::fixed()
            .rotation(std::f32::consts::FRAC_PI_2)
            .translation(vector![-ground_size, ground_size * 1.2]),
    );
    colliders.insert_with_parent(
        ColliderBuilder::cuboid(ground_size * 1.2, 1.5),
        left_wall,
        &mut bodies,
    );
    /*
     * Create the balls: a num x (num * 4) grid; every other row is slightly
     * offset horizontally so the stack topples into a pile.
     */
    let num = 124;
    let rad = 0.5;
    let shift = rad * 2.0 + 0.2;
    let centerx = shift * (num as f32) / 2.0;
    let centery = shift / 2.0;
    for col in 0..num {
        for row in 0usize..num * 4 {
            let x = col as f32 * shift - centerx + (row % 2) as f32 * 0.2;
            let y = row as f32 * shift + centery + 20.0;
            let handle = bodies.insert(RigidBodyBuilder::dynamic().translation(vector![x, y]));
            colliders.insert_with_parent(ColliderBuilder::ball(rad), handle, &mut bodies);
        }
    }
    /*
     * Set up the testbed.
     */
    SimulationState {
        bodies,
        colliders,
        impulse_joints,
    }
    // testbed.look_at(point![0.0, 50.0], 10.0);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples2d/boxes_and_balls2.rs | crates/wgrapier/crates/examples2d/boxes_and_balls2.rs | use rapier2d::prelude::*;
use wgrapier_testbed2d::SimulationState;
pub fn init_world() -> SimulationState {
/*
* World
*/
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
let impulse_joints = ImpulseJointSet::new();
let _multibody_joints = MultibodyJointSet::new();
/*
* Ground
*/
let ground_size = 150.0;
let rigid_body = RigidBodyBuilder::fixed();
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(ground_size, 1.5);
colliders.insert_with_parent(collider, handle, &mut bodies);
let rigid_body = RigidBodyBuilder::fixed()
.rotation(std::f32::consts::FRAC_PI_2)
.translation(vector![ground_size, ground_size * 1.2]);
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(ground_size * 1.2, 1.5);
colliders.insert_with_parent(collider, handle, &mut bodies);
let rigid_body = RigidBodyBuilder::fixed()
.rotation(std::f32::consts::FRAC_PI_2)
.translation(vector![-ground_size, ground_size * 1.2]);
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(ground_size * 1.2, 1.5);
colliders.insert_with_parent(collider, handle, &mut bodies);
/*
* Create the cubes
*/
let num = 124;
let rad = 0.5;
let shift = rad * 2.0 + 0.2;
let centerx = shift * (num as f32) / 2.0;
let centery = shift / 2.0;
for i in 0..num {
for j in 0usize..num * 4 {
let x = i as f32 * shift - centerx + (j % 2) as f32 * 0.2;
let y = j as f32 * shift + centery + 20.0;
// Build the rigid body.
let rigid_body = RigidBodyBuilder::dynamic().translation(vector![x, y]);
let handle = bodies.insert(rigid_body);
let collider = if j % 2 == 0 {
ColliderBuilder::cuboid(rad, rad)
} else {
ColliderBuilder::ball(rad)
};
colliders.insert_with_parent(collider, handle, &mut bodies);
}
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![0.0, 50.0], 10.0);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples2d/joint_fixed2.rs | crates/wgrapier/crates/examples2d/joint_fixed2.rs | use rapier2d::prelude::*;
use wgrapier_testbed2d::SimulationState;
pub fn init_world() -> SimulationState {
/*
* World
*/
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
let mut impulse_joints = ImpulseJointSet::new();
/*
* Create the balls
*/
// Build the rigid body.
let rad = 0.4;
let num = 50; // Num vertical nodes.
let shift = 1.0;
let mut body_handles = Vec::new();
for xx in 0..8 {
let x = xx as f32 * shift * (num as f32 + 2.0);
for yy in 0..8 {
let y = yy as f32 * shift * (num as f32 + 4.0);
for k in 0..num {
for i in 0..num {
let fk = k as f32;
let fi = i as f32;
let status = if k == 0 {
RigidBodyType::Fixed
} else {
RigidBodyType::Dynamic
};
let rigid_body = RigidBodyBuilder::new(status)
.translation(vector![x + fk * shift, y - fi * shift]);
let child_handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::ball(rad);
colliders.insert_with_parent(collider, child_handle, &mut bodies);
// Vertical joint.
if i > 0 {
let parent_handle = *body_handles.last().unwrap();
let joint = FixedJointBuilder::new()
.local_frame2(Isometry::translation(0.0, shift));
impulse_joints.insert(parent_handle, child_handle, joint, true);
}
// Horizontal joint.
if k > 0 {
let parent_index = body_handles.len() - num;
let parent_handle = body_handles[parent_index];
let joint = FixedJointBuilder::new()
.local_frame2(Isometry::translation(-shift, 0.0));
impulse_joints.insert(parent_handle, child_handle, joint, true);
}
body_handles.push(child_handle);
}
}
}
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![50.0, 50.0], 5.0);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples2d/primitives2.rs | crates/wgrapier/crates/examples2d/primitives2.rs | use rapier2d::prelude::*;
use wgrapier_testbed2d::SimulationState;
pub fn init_world() -> SimulationState {
/*
* World
*/
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
let impulse_joints = ImpulseJointSet::new();
let _multibody_joints = MultibodyJointSet::new();
/*
* Ground
*/
let ground_size = 150.0;
let rigid_body = RigidBodyBuilder::fixed();
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(ground_size, 1.5);
colliders.insert_with_parent(collider, handle, &mut bodies);
let rigid_body = RigidBodyBuilder::fixed()
.rotation(std::f32::consts::FRAC_PI_2)
.translation(vector![ground_size, ground_size * 1.2]);
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(ground_size * 1.2, 1.5);
colliders.insert_with_parent(collider, handle, &mut bodies);
let rigid_body = RigidBodyBuilder::fixed()
.rotation(std::f32::consts::FRAC_PI_2)
.translation(vector![-ground_size, ground_size * 1.2]);
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(ground_size * 1.2, 1.5);
colliders.insert_with_parent(collider, handle, &mut bodies);
/*
* Create the cubes
*/
let num = 124;
let rad = 0.5;
let shift = rad * 2.0 + 0.4;
let centerx = shift * (num as f32) / 2.0;
let centery = shift / 2.0;
let mut rng = oorandom::Rand32::new(0);
for i in 0..num {
for j in 0usize..num * 4 {
let x = i as f32 * shift - centerx + (j % 2) as f32 * 0.2;
let y = j as f32 * shift + centery + 20.0;
// Build the rigid body.
let rigid_body = RigidBodyBuilder::dynamic().translation(vector![x, y]);
let handle = bodies.insert(rigid_body);
let collider = match j % 4 {
0 => ColliderBuilder::cuboid(rad, rad),
1 => ColliderBuilder::capsule_y(rad, rad),
2 => {
if i % 2 == 0 {
continue;
}
// Make a convex polygon.
let mut points = Vec::new();
let scale = 2.0;
for _ in 0..10 {
let pt = Point::new(rng.rand_float() - 0.5, rng.rand_float() - 0.5);
points.push(pt * scale);
}
// TODO: align the collider’s local origin to its center-of-mass.
// wgrapier currently doesn’t support misaligned center-of-masses.
let shape = SharedShape::convex_hull(&points).unwrap();
let mprops = shape.mass_properties(1.0);
points
.iter_mut()
.for_each(|pt| *pt -= mprops.local_com.coords);
ColliderBuilder::convex_hull(&points).unwrap()
}
_ => ColliderBuilder::ball(rad),
};
colliders.insert_with_parent(collider, handle, &mut bodies);
}
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![0.0, 50.0], 10.0);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/many_pyramids3.rs | crates/wgrapier/crates/examples3d/many_pyramids3.rs | use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
fn create_pyramid(
bodies: &mut RigidBodySet,
colliders: &mut ColliderSet,
offset: Vector<f32>,
stack_height: usize,
rad: f32,
) {
let shift = rad * 2.0;
for i in 0usize..stack_height {
for j in i..stack_height {
let fj = j as f32;
let fi = i as f32;
let x = (fi * shift / 2.0) + (fj - fi) * shift;
let y = fi * shift;
// Build the rigid body.
let rigid_body = RigidBodyBuilder::dynamic().translation(vector![x, y, 0.0] + offset);
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(rad, rad, rad);
colliders.insert_with_parent(collider, handle, bodies);
}
}
}
pub fn init_world() -> SimulationState {
/*
* World
*/
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
let impulse_joints = ImpulseJointSet::new();
let _multibody_joints = MultibodyJointSet::new();
let rad = 0.5;
let pyramid_count = 40;
let spacing = 4.0;
/*
* Ground
*/
let ground_size = 100.0;
let ground_height = 0.1;
let rigid_body = RigidBodyBuilder::fixed().translation(vector![0.0, -ground_height, 0.0]);
let ground_handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(
ground_size,
ground_height,
pyramid_count as f32 * spacing / 2.0 + ground_size,
);
colliders.insert_with_parent(collider, ground_handle, &mut bodies);
/*
* Create the cubes
*/
for pyramid_index in 0..pyramid_count {
let bottomy = rad;
create_pyramid(
&mut bodies,
&mut colliders,
vector![
0.0,
bottomy,
(pyramid_index as f32 - pyramid_count as f32 / 2.0) * spacing
],
60,
rad,
);
create_pyramid(
&mut bodies,
&mut colliders,
vector![
-75.0,
bottomy,
(pyramid_index as f32 - pyramid_count as f32 / 2.0) * spacing
],
60,
rad,
);
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![100.0, 100.0, 100.0], Point::origin());
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/all_examples3.rs | crates/wgrapier/crates/examples3d/all_examples3.rs | #![allow(dead_code)]
use inflector::Inflector;
use std::cmp::Ordering;
use wgrapier_testbed3d::{SimulationState, Testbed};
mod balls3;
mod boxes3;
mod boxes_and_balls3;
mod joint_ball3;
mod joint_fixed3;
mod joint_prismatic3;
mod joint_revolute3;
mod keva3;
mod many_pyramids3;
mod primitives3;
mod pyramid3;
mod trimesh3;
enum Command {
Run(String),
List,
RunAll,
}
fn parse_command_line() -> Command {
let mut args = std::env::args();
while let Some(arg) = args.next() {
if &arg[..] == "--example" {
return Command::Run(args.next().unwrap_or_default());
} else if &arg[..] == "--list" {
return Command::List;
}
}
Command::RunAll
}
#[allow(clippy::type_complexity)]
pub fn demo_builders() -> Vec<(&'static str, fn() -> SimulationState)> {
let mut builders: Vec<(_, fn() -> SimulationState)> = vec![
("Balls", balls3::init_world),
("Boxes", boxes3::init_world),
("Boxes & balls", boxes_and_balls3::init_world),
("Primitives", primitives3::init_world),
("Pyramid", pyramid3::init_world),
("Many pyramids", many_pyramids3::init_world),
("Keva tower", keva3::init_world),
("Joints (Spherical)", joint_ball3::init_world),
("Joints (Fixed)", joint_fixed3::init_world),
("Joints (Prismatic)", joint_prismatic3::init_world),
("Joints (Revolute)", joint_revolute3::init_world),
("Trimesh", trimesh3::init_world),
];
// Lexicographic sort, with stress tests moved at the end of the list.
builders.sort_by(|a, b| match (a.0.starts_with('('), b.0.starts_with('(')) {
(true, true) | (false, false) => a.0.cmp(b.0),
(true, false) => Ordering::Greater,
(false, true) => Ordering::Less,
});
builders
}
#[kiss3d::main]
pub async fn main() {
let command = parse_command_line();
let builders = demo_builders();
match command {
Command::Run(demo) => {
if let Some(i) = builders
.iter()
.position(|builder| builder.0.to_camel_case().as_str() == demo.as_str())
{
Testbed::from_builders(vec![builders[i]]).run().await
} else {
eprintln!("Invalid example to run provided: '{demo}'");
}
}
Command::RunAll => Testbed::from_builders(builders).run().await,
Command::List => {
for builder in &builders {
println!("{}", builder.0.to_camel_case())
}
}
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/keva3.rs | crates/wgrapier/crates/examples3d/keva3.rs | use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
pub fn build_block(
bodies: &mut RigidBodySet,
colliders: &mut ColliderSet,
half_extents: Vector<f32>,
shift: Vector<f32>,
(mut numx, numy, mut numz): (usize, usize, usize),
) {
let dimensions = [half_extents.xyz(), half_extents.zyx()];
let block_width = 2.0 * half_extents.z * numx as f32;
let block_height = 2.0 * half_extents.y * numy as f32;
let spacing = (half_extents.z * numx as f32 - half_extents.x) / (numz as f32 - 1.0);
let mut color0 = [0.7, 0.5, 0.9];
let mut color1 = [0.6, 1.0, 0.6];
for i in 0..numy {
std::mem::swap(&mut numx, &mut numz);
let dim = dimensions[i % 2];
let y = dim.y * i as f32 * 2.0;
for j in 0..numx {
let x = if i % 2 == 0 {
spacing * j as f32 * 2.0
} else {
dim.x * j as f32 * 2.0
};
for k in 0..numz {
let z = if i % 2 == 0 {
dim.z * k as f32 * 2.0
} else {
spacing * k as f32 * 2.0
};
// Build the rigid body.
let rigid_body = RigidBodyBuilder::dynamic().translation(vector![
x + dim.x + shift.x,
y + dim.y + shift.y,
z + dim.z + shift.z
]);
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(dim.x, dim.y, dim.z);
colliders.insert_with_parent(collider, handle, bodies);
// testbed.set_initial_body_color(handle, color0);
std::mem::swap(&mut color0, &mut color1);
}
}
}
// Close the top.
let dim = half_extents.zxy();
for i in 0..(block_width / (dim.x * 2.0)) as usize {
for j in 0..(block_width / (dim.z * 2.0)) as usize {
// Build the rigid body.
let rigid_body = RigidBodyBuilder::dynamic().translation(vector![
i as f32 * dim.x * 2.0 + dim.x + shift.x,
dim.y + shift.y + block_height,
j as f32 * dim.z * 2.0 + dim.z + shift.z
]);
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(dim.x, dim.y, dim.z);
colliders.insert_with_parent(collider, handle, bodies);
// testbed.set_initial_body_color(handle, color0);
std::mem::swap(&mut color0, &mut color1);
}
}
}
pub fn init_world() -> SimulationState {
/*
* World
*/
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
let impulse_joints = ImpulseJointSet::new();
/*
* Ground
*/
let ground_size = 70.0;
let ground_height = 2.0;
let rigid_body = RigidBodyBuilder::fixed().translation(vector![0.0, -ground_height, 0.0]);
let handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(ground_size, ground_height, ground_size);
colliders.insert_with_parent(collider, handle, &mut bodies);
/*
* Create the cubes
*/
let half_extents = vector![0.02, 0.1, 0.4] / 2.0 * 10.0;
let mut block_height = 0.0;
// These should only be set to odd values otherwise
// the blocks won't align in the nicest way.
let numy = [0, 9, 13, 17, 21, 41];
for i in (1..=5).rev() {
let numx = (i as f32 * 2.5).ceil() as usize;
let numy = numy[i];
let numz = numx * 3 + 1;
let block_width = numx as f32 * half_extents.z * 2.0;
build_block(
&mut bodies,
&mut colliders,
half_extents,
vector![-block_width / 2.0, block_height, -block_width / 2.0],
(numx, numy, numz),
);
block_height += numy as f32 * half_extents.y * 2.0 + half_extents.x * 2.0;
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![100.0, 100.0, 100.0], Point::origin());
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/joint_prismatic3.rs | crates/wgrapier/crates/examples3d/joint_prismatic3.rs | use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
pub fn init_world() -> SimulationState {
/*
* World
*/
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
let mut impulse_joints = ImpulseJointSet::new();
let rad = 0.4;
let num = 10;
let shift = 1.0;
for m in 0..20 {
let z = m as f32 * shift * (num as f32 + 2.0);
for l in 0..20 {
let y = l as f32 * shift * (num as f32) * 2.0;
for j in 0..30 {
let x = j as f32 * shift * 4.0;
let ground = RigidBodyBuilder::fixed().translation(vector![x, y, z]);
let mut curr_parent = bodies.insert(ground);
let collider = ColliderBuilder::cuboid(rad, rad, rad);
colliders.insert_with_parent(collider, curr_parent, &mut bodies);
for i in 0..num {
let z = z + (i + 1) as f32 * shift;
let density = 1.0;
let rigid_body = RigidBodyBuilder::dynamic().translation(vector![x, y, z]);
let curr_child = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(rad, rad, rad).density(density);
colliders.insert_with_parent(collider, curr_child, &mut bodies);
let axis = if i % 2 == 0 {
UnitVector::new_normalize(vector![1.0, 1.0, 0.0])
} else {
UnitVector::new_normalize(vector![-1.0, 1.0, 0.0])
};
let prism = PrismaticJointBuilder::new(axis)
.local_anchor2(point![0.0, 0.0, -shift])
.limits([-2.0, 0.0]);
impulse_joints.insert(curr_parent, curr_child, prism, true);
curr_parent = curr_child;
}
}
}
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![262.0, 63.0, 124.0], point![101.0, 4.0, -3.0]);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/primitives3.rs | crates/wgrapier/crates/examples3d/primitives3.rs | use rapier3d::na::Vector3;
use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
pub fn init_world() -> SimulationState {
const NXZ: isize = 20;
const NY: isize = 40;
let mut bodies = RigidBodySet::default();
let mut colliders = ColliderSet::default();
let impulse_joints = ImpulseJointSet::default();
/*
* Falling dynamic objects.
*/
let mut rng = oorandom::Rand32::new(0);
for j in 0..NY {
let max_ik = NXZ / 2;
for i in -max_ik..max_ik {
for k in -max_ik..max_ik {
let x = i as f32 * 1.1 + j as f32 * 0.01;
let y = j as f32 * 1.6 + 1.0;
let z = k as f32 * 1.1 + j as f32 * 0.01;
let pos = Vector3::new(x, y, z);
let body = bodies.insert(RigidBodyBuilder::dynamic().translation(pos));
let collider = match j % 6 {
0 => ColliderBuilder::cylinder(0.5, 0.5),
1 => ColliderBuilder::cuboid(0.5, 0.5, 0.5),
2 => ColliderBuilder::cone(0.5, 0.5),
3 => ColliderBuilder::capsule_y(0.4, 0.4),
4 => ColliderBuilder::ball(0.5),
_ => {
if i % 2 == 0 || k % 2 == 0 {
continue;
}
// Make a convex polyhedron.
let mut points = Vec::new();
let scale = 2.0;
for _ in 0..10 {
let pt = Point::new(
rng.rand_float() - 0.5,
rng.rand_float() - 0.5,
rng.rand_float() - 0.5,
);
points.push(pt * scale);
}
// TODO: align the collider’s local origin to its center-of-mass.
// wgrapier currently doesn’t support misaligned center-of-masses.
let shape = SharedShape::convex_hull(&points).unwrap();
let mprops = shape.mass_properties(1.0);
points
.iter_mut()
.for_each(|pt| *pt -= mprops.local_com.coords);
ColliderBuilder::convex_hull(&points).unwrap()
}
};
colliders.insert_with_parent(collider, body, &mut bodies);
}
}
}
/*
* Floor made of large cuboids.
*/
{
let thick = NXZ as f32 * 1.3;
let height = 8.0;
let walls = [
(
Vector3::new(0.0, -0.5, 0.0),
Vector3::new(thick, 0.5, thick),
),
(
Vector3::new(thick, height, 0.0),
Vector3::new(0.5, height, thick),
),
(
Vector3::new(-thick, height, 0.0),
Vector3::new(0.5, height, thick),
),
(
Vector3::new(0.0, height, thick),
Vector3::new(thick, height, 0.5),
),
(
Vector3::new(0.0, height, -thick),
Vector3::new(thick, height, 0.5),
),
];
for (wall_pos, wall_sz) in walls {
colliders.insert(
ColliderBuilder::cuboid(wall_sz.x, wall_sz.y, wall_sz.z).translation(wall_pos),
);
}
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![100.0, 100.0, 100.0], Point::origin());
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/balls3.rs | crates/wgrapier/crates/examples3d/balls3.rs | use rapier3d::na::Vector3;
use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
pub fn init_world() -> SimulationState {
const NXZ: isize = 30;
const NY: isize = 70;
let mut bodies = RigidBodySet::default();
let mut colliders = ColliderSet::default();
let impulse_joints = ImpulseJointSet::default();
/*
* Falling dynamic objects.
*/
for j in 0..NY {
let max_ik = NXZ / 2;
for i in -max_ik..max_ik {
for k in -max_ik..max_ik {
let x = i as f32 * 1.1 + j as f32 * 0.01;
let y = j as f32 * 1.1;
let z = k as f32 * 1.1;
let pos = Vector3::new(x, y, z);
let body = bodies.insert(RigidBodyBuilder::dynamic().translation(pos));
colliders.insert_with_parent(ColliderBuilder::ball(0.5), body, &mut bodies);
}
}
}
/*
* Floor made of large cuboids.
*/
{
let thick = NXZ as f32 * 1.3;
let height = 7.0;
let walls = [
(
Vector3::new(0.0, -0.5, 0.0),
Vector3::new(thick, 0.5, thick),
),
(
Vector3::new(thick, height, 0.0),
Vector3::new(0.5, height, thick),
),
(
Vector3::new(-thick, height, 0.0),
Vector3::new(0.5, height, thick),
),
(
Vector3::new(0.0, height, thick),
Vector3::new(thick, height, 0.5),
),
(
Vector3::new(0.0, height, -thick),
Vector3::new(thick, height, 0.5),
),
];
for (wall_pos, wall_sz) in walls {
colliders.insert(
ColliderBuilder::cuboid(wall_sz.x, wall_sz.y, wall_sz.z).translation(wall_pos),
);
}
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![100.0, 100.0, 100.0], Point::origin());
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/boxes_and_balls3.rs | crates/wgrapier/crates/examples3d/boxes_and_balls3.rs | use rapier3d::na::Vector3;
use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
pub fn init_world() -> SimulationState {
const NXZ: isize = 30;
const NY: isize = 70;
let mut bodies = RigidBodySet::default();
let mut colliders = ColliderSet::default();
let impulse_joints = ImpulseJointSet::default();
/*
* Falling dynamic objects.
*/
for j in 0..NY {
let max_ik = NXZ / 2;
for i in -max_ik..max_ik {
for k in -max_ik..max_ik {
let x = i as f32 * 1.1 + j as f32 * 0.01;
let y = j as f32 * 1.1;
let z = k as f32 * 1.1 + j as f32 * 0.01;
let pos = Vector3::new(x, y, z);
let body = bodies.insert(RigidBodyBuilder::dynamic().translation(pos));
let collider = if j % 2 == 0 {
ColliderBuilder::cuboid(0.5, 0.5, 0.5)
} else {
ColliderBuilder::ball(0.5)
};
colliders.insert_with_parent(collider, body, &mut bodies);
}
}
}
/*
* Floor made of large cuboids.
*/
{
let thick = NXZ as f32 * 1.3;
let height = 8.0;
let walls = [
(
Vector3::new(0.0, -0.5, 0.0),
Vector3::new(thick, 0.5, thick),
),
(
Vector3::new(thick, height, 0.0),
Vector3::new(0.5, height, thick),
),
(
Vector3::new(-thick, height, 0.0),
Vector3::new(0.5, height, thick),
),
(
Vector3::new(0.0, height, thick),
Vector3::new(thick, height, 0.5),
),
(
Vector3::new(0.0, height, -thick),
Vector3::new(thick, height, 0.5),
),
];
for (wall_pos, wall_sz) in walls {
colliders.insert(
ColliderBuilder::cuboid(wall_sz.x, wall_sz.y, wall_sz.z).translation(wall_pos),
);
}
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![100.0, 100.0, 100.0], Point::origin());
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/joint_revolute3.rs | crates/wgrapier/crates/examples3d/joint_revolute3.rs | use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
pub fn init_world() -> SimulationState {
/*
* World
*/
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
let mut impulse_joints = ImpulseJointSet::new();
let rad = 0.4;
let num = 10;
let shift = 2.0;
let nk = 10;
let nj = 50;
for k in 0..nk {
for l in 0..4 {
let y = l as f32 * shift * (num as f32) * 3.0;
for j in 0..nj {
let x = (j as f32 - nj as f32 / 2.0) * shift * 4.0;
let z = (k as f32 - nk as f32 / 2.0) * num as f32 * shift * 2.1;
let ground = RigidBodyBuilder::fixed().translation(vector![x, y, z]);
let mut curr_parent = bodies.insert(ground);
let collider = ColliderBuilder::cuboid(rad, rad, rad);
colliders.insert_with_parent(collider, curr_parent, &mut bodies);
for i in 0..num {
// Create four bodies.
let z = z + i as f32 * shift * 2.0 + shift;
let positions = [
Isometry::translation(x, y, z),
Isometry::translation(x + shift, y, z),
Isometry::translation(x + shift, y, z + shift),
Isometry::translation(x, y, z + shift),
];
let mut handles = [curr_parent; 4];
for k in 0..4 {
let density = 1.0;
let rigid_body = RigidBodyBuilder::dynamic().pose(positions[k]);
handles[k] = bodies.insert(rigid_body);
let collider = ColliderBuilder::cuboid(rad, rad, rad).density(density);
colliders.insert_with_parent(collider, handles[k], &mut bodies);
}
// Setup four impulse_joints.
let x = Vector::x_axis();
let z = Vector::z_axis();
let revs = [
RevoluteJointBuilder::new(z).local_anchor2(point![0.0, 0.0, -shift]),
RevoluteJointBuilder::new(x).local_anchor2(point![-shift, 0.0, 0.0]),
RevoluteJointBuilder::new(z).local_anchor2(point![0.0, 0.0, -shift]),
RevoluteJointBuilder::new(x).local_anchor2(point![shift, 0.0, 0.0]),
];
impulse_joints.insert(curr_parent, handles[0], revs[0], true);
impulse_joints.insert(handles[0], handles[1], revs[1], true);
impulse_joints.insert(handles[1], handles[2], revs[2], true);
impulse_joints.insert(handles[2], handles[3], revs[3], true);
curr_parent = handles[3];
}
}
}
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![478.0, 83.0, 228.0], point![134.0, 83.0, -116.0]);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/boxes3.rs | crates/wgrapier/crates/examples3d/boxes3.rs | use rapier3d::na::Vector3;
use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
pub fn init_world() -> SimulationState {
const NXZ: isize = 30;
const NY: isize = 70;
let mut bodies = RigidBodySet::default();
let mut colliders = ColliderSet::default();
let impulse_joints = ImpulseJointSet::default();
/*
* Falling dynamic objects.
*/
for j in 0..NY {
let max_ik = NXZ / 2;
for i in -max_ik..max_ik {
for k in -max_ik..max_ik {
let x = i as f32 * 1.1 + j as f32 * 0.01;
let y = j as f32 * 1.1 + 0.6;
let z = k as f32 * 1.1 + j as f32 * 0.01;
let pos = Vector3::new(x, y, z);
let body = bodies.insert(RigidBodyBuilder::dynamic().translation(pos));
colliders.insert_with_parent(
ColliderBuilder::cuboid(0.5, 0.5, 0.5),
body,
&mut bodies,
);
}
}
}
/*
* Floor made of large cuboids.
*/
{
let thick = NXZ as f32 * 1.5;
let height = 12.0;
let walls = [
(
Vector3::new(0.0, -0.5, 0.0),
Vector3::new(thick, 0.5, thick),
),
(
Vector3::new(thick, height, 0.0),
Vector3::new(0.5, height, thick),
),
(
Vector3::new(-thick, height, 0.0),
Vector3::new(0.5, height, thick),
),
(
Vector3::new(0.0, height, thick),
Vector3::new(thick, height, 0.5),
),
(
Vector3::new(0.0, height, -thick),
Vector3::new(thick, height, 0.5),
),
];
for (wall_pos, wall_sz) in walls {
colliders.insert(
ColliderBuilder::cuboid(wall_sz.x, wall_sz.y, wall_sz.z).translation(wall_pos),
);
}
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![100.0, 100.0, 100.0], Point::origin());
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/joint_fixed3.rs | crates/wgrapier/crates/examples3d/joint_fixed3.rs | use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
pub fn init_world() -> SimulationState {
/*
* World
*/
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
let mut impulse_joints = ImpulseJointSet::new();
let rad = 0.4;
let num = 10;
let shift = 1.0;
let mut body_handles = Vec::new();
for m in 0..10 {
let z = m as f32 * shift * (num as f32 + 2.0);
for l in 0..10 {
let y = l as f32 * shift * 3.0;
for j in 0..10 {
let x = j as f32 * shift * (num as f32) * 2.0;
for k in 0..num {
for i in 0..num {
let fk = k as f32;
let fi = i as f32;
// NOTE: the num - 2 test is to avoid two consecutive
// fixed bodies. Because physx will crash if we add
// a joint between these.
let status = if i == 0 && (k % 4 == 0 && k != num - 2 || k == num - 1) {
RigidBodyType::Fixed
} else {
RigidBodyType::Dynamic
};
let rigid_body = RigidBodyBuilder::new(status).translation(vector![
x + fk * shift,
y,
z + fi * shift
]);
let child_handle = bodies.insert(rigid_body);
let collider = ColliderBuilder::ball(rad);
colliders.insert_with_parent(collider, child_handle, &mut bodies);
// Vertical joint.
if i > 0 {
let parent_handle = *body_handles.last().unwrap();
let joint =
FixedJointBuilder::new().local_anchor2(point![0.0, 0.0, -shift]);
impulse_joints.insert(parent_handle, child_handle, joint, true);
}
// Horizontal joint.
if k > 0 {
let parent_index = body_handles.len() - num;
let parent_handle = body_handles[parent_index];
let joint =
FixedJointBuilder::new().local_anchor2(point![-shift, 0.0, 0.0]);
impulse_joints.insert(parent_handle, child_handle, joint, true);
}
body_handles.push(child_handle);
}
}
}
}
}
/*
* Set up the testbed.
*/
SimulationState {
bodies,
colliders,
impulse_joints,
}
// testbed.look_at(point![-38.0, 14.0, 108.0], point![46.0, 12.0, 23.0]);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/trimesh3.rs | crates/wgrapier/crates/examples3d/trimesh3.rs | use rapier3d::na::Vector3;
use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
/// Builds the trimesh demo scene: a tall grid of falling dynamic bodies with
/// varied collider shapes, dropped onto a trimesh floor generated from a
/// heightfield with a raised rim.
pub fn init_world() -> SimulationState {
    // Grid dimensions of the falling bodies: NXZ × NXZ columns, NY layers.
    const NXZ: isize = 20;
    const NY: isize = 40;
    let mut bodies = RigidBodySet::default();
    let mut colliders = ColliderSet::default();
    let impulse_joints = ImpulseJointSet::default();
    /*
     * Falling dynamic objects.
     */
    // Fixed seed so the randomly generated convex shapes are reproducible.
    let mut rng = oorandom::Rand32::new(0);
    for j in 0..NY {
        let max_ik = NXZ / 2;
        for i in -max_ik..max_ik {
            for k in -max_ik..max_ik {
                // Small per-layer offset (j * 0.01) avoids perfectly aligned stacks.
                let x = i as f32 * 1.1 + j as f32 * 0.01;
                let y = j as f32 * 1.6 + 2.0;
                let z = k as f32 * 1.1 + j as f32 * 0.01;
                let pos = Vector3::new(x, y, z);
                let body = bodies.insert(RigidBodyBuilder::dynamic().translation(pos));
                // Cycle through collider shapes based on the layer index.
                let collider = match j % 6 {
                    0 => ColliderBuilder::cylinder(0.5, 0.5),
                    1 => ColliderBuilder::cuboid(0.5, 0.5, 0.5),
                    2 => ColliderBuilder::cone(0.5, 0.5),
                    3 => ColliderBuilder::capsule_y(0.4, 0.4),
                    4 => ColliderBuilder::ball(0.5),
                    _ => {
                        // Only generate convex polyhedra on odd (i, k) cells;
                        // even cells are skipped entirely (no RNG consumed).
                        if i % 2 == 0 || k % 2 == 0 {
                            continue;
                        }
                        // Make a convex polyhedron.
                        let mut points = Vec::new();
                        let scale = 2.0;
                        for _ in 0..10 {
                            let pt = Point::new(
                                rng.rand_float() - 0.5,
                                rng.rand_float() - 0.5,
                                rng.rand_float() - 0.5,
                            );
                            points.push(pt * scale);
                        }
                        // TODO: align the collider’s local origin to its center-of-mass.
                        // wgrapier currently doesn’t support misaligned center-of-masses.
                        let shape = SharedShape::convex_hull(&points).unwrap();
                        let mprops = shape.mass_properties(1.0);
                        // Recenter the sample points on the hull's center-of-mass,
                        // then rebuild the hull from the shifted points.
                        points
                            .iter_mut()
                            .for_each(|pt| *pt -= mprops.local_com.coords);
                        ColliderBuilder::convex_hull(&points).unwrap()
                    }
                };
                colliders.insert_with_parent(collider, body, &mut bodies);
            }
        }
    }
    /*
     * A trimesh floor.
     */
    let ground_size = vector![100.0, 1.0, 100.0];
    let nsubdivs = 20;
    // Border samples get height 10.0, forming a rim that keeps bodies from
    // rolling off; the interior is a gentle sin/cos landscape.
    let heights = DMatrix::from_fn(nsubdivs + 1, nsubdivs + 1, |i, j| {
        if i == 0 || i == nsubdivs || j == 0 || j == nsubdivs {
            10.0
        } else {
            let x = i as f32 * ground_size.x / (nsubdivs as f32);
            let z = j as f32 * ground_size.z / (nsubdivs as f32);
            // NOTE: make sure we use the sin/cos from simba to ensure
            // cross-platform determinism of the example when the
            // enhanced_determinism feature is enabled.
            x.sin() + z.cos()
        }
    });
    // Here we will build our trimesh from the mesh representation of an
    // heightfield.
    let heightfield = HeightField::new(heights, ground_size);
    let (vertices, indices) = heightfield.to_trimesh();
    let rigid_body = RigidBodyBuilder::fixed();
    let handle = bodies.insert(rigid_body);
    // MERGE_DUPLICATE_VERTICES collapses the duplicate vertices produced by
    // `to_trimesh` into a proper shared-vertex mesh.
    let collider = ColliderBuilder::trimesh_with_flags(
        vertices,
        indices,
        TriMeshFlags::MERGE_DUPLICATE_VERTICES,
    )
    .unwrap();
    colliders.insert_with_parent(collider, handle, &mut bodies);
    /*
     * Set up the testbed.
     */
    SimulationState {
        bodies,
        colliders,
        impulse_joints,
    }
    // testbed.look_at(point![100.0, 100.0, 100.0], Point::origin());
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/joint_ball3.rs | crates/wgrapier/crates/examples3d/joint_ball3.rs | use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
/// Builds the ball-joint demo: a large cloth-like grid of bodies linked by
/// spherical joints, pinned at regular intervals along its border, with a
/// block of heavier boxes dropped on top.
pub fn init_world() -> SimulationState {
    /*
     * World
     */
    let mut bodies = RigidBodySet::new();
    let mut colliders = ColliderSet::new();
    let mut impulse_joints = ImpulseJointSet::new();
    let rad = 0.4;
    let ni = 200; // grid extent along z (inner loop)
    let nk = 301; // grid extent along x (outer loop)
    let shift = 1.0;
    let center = vector![nk as f32 * shift / 2.0, 0.0, ni as f32 * shift / 2.0];
    let mut body_handles = Vec::new();
    // A lot of joints. Kind of like a piece of cloth.
    for k in 0..nk {
        for i in 0..ni {
            let fk = k as f32;
            let fi = i as f32;
            // Pin every 4th node (plus the far corner) along the four borders
            // of the grid; everything else is dynamic.
            // FIX: the corner checks previously mixed up `ni` and `nk`
            // (`k == ni - 1` anchored a mid-row node; `i == nk - 1` was
            // unreachable since `i < ni < nk`).
            let status = if ((i == 0 || i == ni - 1) && (k % 4 == 0 || k == nk - 1))
                || ((k == 0 || k == nk - 1) && (i % 4 == 0 || i == ni - 1))
            {
                RigidBodyType::Fixed
            } else {
                RigidBodyType::Dynamic
            };
            let rigid_body = RigidBodyBuilder::new(status)
                .translation(vector![fk * shift, 0.0, fi * shift] - center);
            let child_handle = bodies.insert(rigid_body);
            // Cubes mark the fixed anchors; balls are the free cloth nodes.
            let collider = if status == RigidBodyType::Fixed {
                ColliderBuilder::cuboid(rad, rad, rad)
            } else {
                ColliderBuilder::ball(rad)
            };
            colliders.insert_with_parent(collider, child_handle, &mut bodies);
            // Vertical joint: link to the previous node in this column.
            if i > 0 {
                let parent_handle = *body_handles.last().unwrap();
                let joint = SphericalJointBuilder::new().local_anchor2(point![0.0, 0.0, -shift]);
                impulse_joints.insert(parent_handle, child_handle, joint, true);
            }
            // Horizontal joint: link to the same node in the previous column
            // (handles are stored column-major, so it sits `ni` entries back).
            if k > 0 {
                let parent_index = body_handles.len() - ni;
                let parent_handle = body_handles[parent_index];
                let joint = SphericalJointBuilder::new().local_anchor2(point![-shift, 0.0, 0.0]);
                impulse_joints.insert(parent_handle, child_handle, joint, true);
            }
            body_handles.push(child_handle);
        }
    }
    // Some rigid-bodies to fall on top.
    let nj = 10;
    let nk = nk / 3;
    let ni = ni / 6;
    let rad = rad * 2.5;
    for k in 0..nk {
        for i in 0..ni {
            for j in 0..nj {
                let body = RigidBodyBuilder::dynamic().translation(vector![
                    (k as f32 - nk as f32 / 2.0) * rad * 2.1,
                    j as f32 * rad * 2.1 + 2.0,
                    (i as f32 - ni as f32 / 2.0) * rad * 2.1,
                ]);
                let handle = bodies.insert(body);
                let collider = ColliderBuilder::cuboid(rad, rad, rad);
                colliders.insert_with_parent(collider, handle, &mut bodies);
            }
        }
    }
    /*
     * Set up the testbed.
     */
    SimulationState {
        bodies,
        colliders,
        impulse_joints,
    }
    // testbed.look_at(point![-110.0, -46.0, 170.0], point![54.0, -38.0, 29.0]);
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgrapier/crates/examples3d/pyramid3.rs | crates/wgrapier/crates/examples3d/pyramid3.rs | use rapier3d::prelude::*;
use wgrapier_testbed3d::SimulationState;
/// Stacks cuboid rigid bodies into a square pyramid centered on `offset`.
///
/// `stack_height` is the number of layers; each layer `i` is a
/// `(stack_height - i)²` grid shifted half a spacing inward, so the stack
/// tapers toward a single box. Boxes are spaced 2.5× their half-extents apart.
fn create_pyramid(
    bodies: &mut RigidBodySet,
    colliders: &mut ColliderSet,
    offset: Vector<f32>,
    stack_height: usize,
    half_extents: Vector<f32>,
) {
    // Distance between neighboring box centers on each axis.
    let shift = half_extents * 2.5;
    for layer in 0usize..stack_height {
        let fl = layer as f32;
        for row in layer..stack_height {
            for col in layer..stack_height {
                // Each layer moves up by one spacing and shifts half a
                // spacing inward on x/z; the whole base is recentered on
                // `offset`.
                let x = fl * shift.x / 2.0 + (col - layer) as f32 * shift.x + offset.x
                    - stack_height as f32 * half_extents.x;
                let y = fl * shift.y + offset.y;
                let z = fl * shift.z / 2.0 + (row - layer) as f32 * shift.z + offset.z
                    - stack_height as f32 * half_extents.z;
                // Build the rigid body and attach its box collider.
                let handle = bodies.insert(RigidBodyBuilder::dynamic().translation(vector![x, y, z]));
                colliders.insert_with_parent(
                    ColliderBuilder::cuboid(half_extents.x, half_extents.y, half_extents.z),
                    handle,
                    bodies,
                );
            }
        }
    }
}
/// Builds the pyramid demo scene: a wide fixed ground slab topped by a
/// 50-layer pyramid of dynamic unit-half-extent cubes.
pub fn init_world() -> SimulationState {
    /*
     * World
     */
    let mut bodies = RigidBodySet::new();
    let mut colliders = ColliderSet::new();
    let impulse_joints = ImpulseJointSet::new();
    /*
     * Ground
     */
    // A thin, wide box sitting just below the origin.
    let (ground_size, ground_height) = (200.0, 0.1);
    let ground_handle =
        bodies.insert(RigidBodyBuilder::fixed().translation(vector![0.0, -ground_height, 0.0]));
    colliders.insert_with_parent(
        ColliderBuilder::cuboid(ground_size, ground_height, ground_size),
        ground_handle,
        &mut bodies,
    );
    /*
     * Create the cubes
     */
    // The pyramid's base starts one half-extent above the ground.
    let cube_size = 1.0;
    create_pyramid(
        &mut bodies,
        &mut colliders,
        vector![0.0, cube_size, 0.0],
        50,
        Vector::repeat(cube_size),
    );
    /*
     * Set up the testbed.
     */
    SimulationState {
        bodies,
        colliders,
        impulse_joints,
    }
    // testbed.look_at(point![100.0, 100.0, 100.0], Point::origin());
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/test_helpers.rs | src/test_helpers.rs | #[cfg(test)]
pub mod helpers {
    //! Shared fixtures and harness glue for the language server's unit tests.
    use dashmap::DashMap;
    use serde_json::{Value, to_value};
    use std::sync::{Arc, LazyLock, Mutex};
    use tower::{Service, ServiceExt};
    use tower_lsp::{
        LspService,
        jsonrpc::{Request, Response},
        lsp_types::{
            ClientCapabilities, DidOpenTextDocumentParams, InitializeParams, Position, Range,
            TextDocumentContentChangeEvent, TextDocumentItem, TextEdit, Url,
            WindowClientCapabilities, WorkspaceFolder, notification::DidOpenTextDocument,
            request::Initialize,
        },
    };
    use crate::{Backend, LspClient, Options};
    // In-memory URIs for fake query files, one per "language" directory.
    pub static TEST_URI: LazyLock<Url> =
        LazyLock::new(|| Url::parse("file:///tmp/queries/js/test.scm").unwrap());
    pub static QUERY_TEST_URI: LazyLock<Url> =
        LazyLock::new(|| Url::parse("file:///tmp/queries/query/test.scm").unwrap());
    pub static RUST_TEST_URI: LazyLock<Url> =
        LazyLock::new(|| Url::parse("file:///tmp/queries/rust/test.scm").unwrap());
    // URIs pointing at real files inside the on-disk `test_workspace` fixture.
    pub static CPP_HIGHLIGHTS_WS_URI: LazyLock<Url> = LazyLock::new(|| {
        Url::from_file_path(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/tests/fixtures/test_workspace/queries/cpp/test.scm"
        ))
        .unwrap()
    });
    pub static FOO_HIGHLIGHTS_WS_URI: LazyLock<Url> = LazyLock::new(|| {
        Url::from_file_path(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/tests/fixtures/test_workspace/queries/foo/test.scm"
        ))
        .unwrap()
    });
    // Example query sources embedded at compile time.
    pub const SIMPLE_FILE: &str = include_str!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/tests/fixtures/example_test_files/simple.scm"
    ));
    pub const COMPLEX_FILE: &str = include_str!(concat!(
        env!("CARGO_MANIFEST_DIR"),
        "/tests/fixtures/example_test_files/complex.scm"
    ));
    // Client capabilities advertised during `initialize` in tests
    // (work-done progress enabled, everything else defaulted).
    pub static TEST_CLIENT_CAPABILITIES: LazyLock<ClientCapabilities> =
        LazyLock::new(|| ClientCapabilities {
            window: Some(WindowClientCapabilities {
                work_done_progress: Some(true),
                ..Default::default()
            }),
            ..Default::default()
        });
    /// Always test with id of 1 for simplicity
    const ID: i64 = 1;
    /// A tuple holding the document's URI and source text.
    pub type Document<'a> = (Url, &'a str);
    /// A recorded outbound message (request or notification) with its
    /// parameters serialized to JSON for easy comparison in assertions.
    #[derive(Clone, Debug, Eq, PartialEq)]
    pub struct MockRequest {
        method: String,
        params: Value,
    }
    impl MockRequest {
        /// Builds the expected record for a server-to-client request.
        pub fn from_request<R>(params: R::Params) -> Self
        where
            R: tower_lsp::lsp_types::request::Request,
        {
            Self {
                method: R::METHOD.to_string(),
                params: serde_json::to_value(params).expect("Invalid parameters"),
            }
        }
        /// Builds the expected record for a server-to-client notification.
        pub fn from_notification<R>(params: R::Params) -> Self
        where
            R: tower_lsp::lsp_types::notification::Notification,
        {
            Self {
                method: R::METHOD.to_string(),
                params: serde_json::to_value(params).expect("Invalid parameters"),
            }
        }
    }
    /// An `LspClient` that records every message the server sends instead of
    /// talking to a real editor.
    #[derive(Default)]
    pub struct MockClient {
        notifications: Mutex<Vec<MockRequest>>,
        requests: Mutex<Vec<MockRequest>>,
    }
    impl MockClient {
        // Snapshot accessors; `try_lock` is fine because tests are
        // single-threaded (`current_thread` flavor).
        pub fn get_requests(&self) -> Vec<MockRequest> {
            self.requests.try_lock().unwrap().clone()
        }
        pub fn get_notifications(&self) -> Vec<MockRequest> {
            self.notifications.try_lock().unwrap().clone()
        }
    }
    impl LspClient for MockClient {
        async fn send_notification<N>(&self, params: N::Params)
        where
            N: tower_lsp::lsp_types::notification::Notification,
        {
            self.notifications.try_lock().unwrap().push(MockRequest {
                method: N::METHOD.into(),
                params: serde_json::to_value(params).unwrap(),
            });
        }
        async fn send_request<R>(&self, params: R::Params) -> tower_lsp::jsonrpc::Result<R::Result>
        where
            R: tower_lsp::lsp_types::request::Request,
        {
            self.requests.try_lock().unwrap().push(MockRequest {
                method: R::METHOD.into(),
                params: serde_json::to_value(params).unwrap(),
            });
            // TODO: Sometimes a proper response must be returned, in which case this will give a serde
            // parse error. This should be handled.
            let response = ();
            serde_json::from_value(response.into()).map_err(|e| tower_lsp::jsonrpc::Error {
                code: tower_lsp::jsonrpc::ErrorCode::ParseError,
                message: e.to_string().into(),
                data: None,
            })
        }
    }
    /// Initialize a test server, populating it with fake documents denoted by (uri, text, symbols, fields) tuples.
    pub async fn initialize_server(
        documents: &[Document<'_>],
        options: &Options,
    ) -> LspService<Backend<MockClient>> {
        let options_value = serde_json::to_value(options).unwrap();
        let (mut service, _socket) = LspService::build(|_client| Backend {
            client: MockClient::default(),
            client_capabilities: Arc::default(),
            document_map: DashMap::default(),
            language_map: DashMap::default(),
            workspace_paths: Arc::default(),
            dependents: DashMap::default(),
            options: Arc::default(),
        })
        .finish();
        // Initialize the server
        service
            .request::<Initialize>(InitializeParams {
                capabilities: TEST_CLIENT_CAPABILITIES.clone(),
                workspace_folders: Some(vec![WorkspaceFolder {
                    name: String::from("test_workspace"),
                    uri: Url::from_file_path(concat!(
                        env!("CARGO_MANIFEST_DIR"),
                        "/tests/fixtures/test_workspace/"
                    ))
                    .unwrap(),
                }]),
                initialization_options: Some(options_value),
                ..Default::default()
            })
            .await;
        // Open the documents
        for (uri, src) in documents.iter().cloned() {
            service
                .notify::<DidOpenTextDocument>(DidOpenTextDocumentParams {
                    text_document: TextDocumentItem {
                        version: 0,
                        language_id: String::from("query"),
                        text: src.to_string(),
                        uri,
                    },
                })
                .await;
        }
        service
    }
    /// Drives an `LspService` with typed LSP requests/notifications instead
    /// of hand-built JSON-RPC messages.
    pub trait TestService {
        async fn request<R>(&mut self, request: R::Params) -> R::Result
        where
            R: tower_lsp::lsp_types::request::Request;
        async fn notify<R>(&mut self, request: R::Params)
        where
            R: tower_lsp::lsp_types::notification::Notification;
    }
    impl TestService for LspService<Backend<MockClient>> {
        async fn request<R>(&mut self, request: R::Params) -> R::Result
        where
            R: tower_lsp::lsp_types::request::Request,
        {
            let response = self
                .ready()
                .await
                .unwrap()
                .call(lsp_request_to_jsonrpc_request::<R>(request))
                .await
                .unwrap_or_else(|e| panic!("Error when attempting to call {}: {}", R::METHOD, e))
                .unwrap_or_else(|| panic!("No response from request {}", R::METHOD));
            jsonrpc_response_to_lsp_value::<R>(&response)
        }
        async fn notify<R>(&mut self, request: R::Params)
        where
            R: tower_lsp::lsp_types::notification::Notification,
        {
            self.ready()
                .await
                .unwrap()
                .call(lsp_notification_to_jsonrpc_request::<R>(request))
                .await
                .unwrap_or_else(|e| panic!("Error when attempting to call {}: {}", R::METHOD, e));
        }
    }
    // Equivalent functions exist in tower-lsp but they are private
    fn lsp_request_to_jsonrpc_request<R>(params: R::Params) -> Request
    where
        R: tower_lsp::lsp_types::request::Request,
    {
        Request::build(R::METHOD)
            .id(ID)
            .params(to_value(params).unwrap())
            .finish()
    }
    fn lsp_notification_to_jsonrpc_request<R>(params: R::Params) -> Request
    where
        R: tower_lsp::lsp_types::notification::Notification,
    {
        Request::build(R::METHOD)
            .params(to_value(params).unwrap())
            .finish()
    }
    fn jsonrpc_response_to_lsp_value<R>(response: &Response) -> R::Result
    where
        R: tower_lsp::lsp_types::request::Request,
    {
        serde_json::from_value::<R::Result>(response.result().unwrap().clone()).unwrap()
    }
    /// A simple text edit: replacement text plus the range it covers.
    #[derive(Debug, Clone)]
    pub struct TestEdit {
        pub text: String,
        pub range: Range,
    }
    impl TestEdit {
        /// Builds an edit from `(line, character)` start/end pairs.
        pub fn new(text: &str, start: (u32, u32), end: (u32, u32)) -> Self {
            Self {
                text: text.to_string(),
                range: Range {
                    start: Position {
                        line: start.0,
                        character: start.1,
                    },
                    end: Position {
                        line: end.0,
                        character: end.1,
                    },
                },
            }
        }
    }
    impl From<&TestEdit> for TextDocumentContentChangeEvent {
        fn from(val: &TestEdit) -> Self {
            Self {
                range: Some(val.range),
                range_length: None,
                text: val.text.clone(),
            }
        }
    }
    impl From<&TestEdit> for TextEdit {
        fn from(val: &TestEdit) -> Self {
            Self {
                range: val.range,
                new_text: val.text.clone(),
            }
        }
    }
}
#[cfg(test)]
mod test {
    //! Sanity tests for the `helpers` module itself.
    use std::{
        collections::{BTreeMap, HashMap},
        ops::Deref,
    };
    use tower_lsp::lsp_types::Url;
    use pretty_assertions::assert_eq;
    use rstest::rstest;
    use crate::{
        Options,
        test_helpers::helpers::{
            COMPLEX_FILE, SIMPLE_FILE, TEST_CLIENT_CAPABILITIES, TEST_URI, initialize_server,
        },
    };
    use super::helpers::Document;
    // Two cases: no documents with default options, and two documents with
    // custom `valid_captures` options.
    #[rstest]
    #[case(&[], &Options::default())]
    #[case(&[(
        TEST_URI.clone(),
        SIMPLE_FILE,
    ),
    (
        Url::parse("file:///tmp/queries/css/test.scm").unwrap(),
        COMPLEX_FILE,
    )],
    &Options {
        valid_captures: HashMap::from([(String::from("test"), BTreeMap::from([(String::from("variable"), String::from("A common variable"))]))]),
        ..Default::default()
    }
    )]
    #[tokio::test(flavor = "current_thread")]
    async fn initialize_server_helper(
        #[case] documents: &[Document<'_>],
        #[case] options: &Options,
    ) {
        // Act
        let service = initialize_server(documents, options).await;
        // Assert
        let backend = service.inner();
        // Serialize and re-serialize to populate the required fields that are added at
        // deserialization time (default language retrieval regexes, `not-` predicates)
        let options =
            &serde_json::from_value::<Options>(serde_json::to_value(options).unwrap()).unwrap();
        let actual_options = backend.options.read().await;
        assert_eq!(&*actual_options, options);
        assert_eq!(backend.document_map.len(), documents.len());
        for (uri, source) in documents {
            let doc = backend.document_map.get(uri).unwrap();
            // Both the rope and the parse tree must reflect the opened text.
            assert_eq!(doc.rope.to_string(), (*source).to_string());
            assert_eq!(
                doc.tree
                    .root_node()
                    .utf8_text((*source).to_string().as_bytes())
                    .unwrap(),
                (*source).to_string()
            );
        }
        assert_eq!(
            &*backend.client_capabilities.deref().read().await,
            &*TEST_CLIENT_CAPABILITIES
        );
    }
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/lib.rs | src/lib.rs | use std::{
collections::{BTreeMap, HashMap},
env,
fmt::{Display, Write as _},
sync::LazyLock,
};
use regex::Regex;
#[cfg(feature = "schema")]
use schemars::JsonSchema;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
// Matches `queries/<lang>/<file>.scm` paths; capture 1 is the language name.
static LANGUAGE_REGEX_1: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"queries/([^/]+)/[^/]+\.scm$").unwrap());
// Matches `tree-sitter-<lang>/queries/<file>.scm`; capture 1 is the language name.
static LANGUAGE_REGEX_2: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"tree-sitter-([^/]+)/queries/[^/]+\.scm$").unwrap());
/// A type specification for a predicate.
///
/// This is the raw, user-written form; `add_prefixes` expands it into plain
/// `Predicate`s, materializing the `not-`/`any-` variants.
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
#[cfg_attr(feature = "schema", derive(JsonSchema))]
struct PredicateAux {
    /// A short description of the predicate (in Markdown format).
    description: String,
    /// The list of valid parameter types.
    parameters: Vec<PredicateParameter>,
    /// Whether this predicate supports a `not-` prefixed variant. Defaults to `true`.
    #[serde(default = "default_true")]
    not: bool,
    /// Whether this predicate supports a `any-` prefixed variant. Defaults to `false`.
    #[serde(default)]
    any: bool,
}
/// Serde `default = "…"` helper — serde takes a function path, not a literal.
const fn default_true() -> bool {
    true
}
/// Deserializes the raw predicate table and materializes the implied
/// prefixed variants.
///
/// Pass 1 copies each entry (with `any` cleared) and, for `any: true`
/// entries, adds an extra `any-<name>` entry. Pass 2 then adds a
/// `not-<name>` entry (with an inverted description) for every entry whose
/// `not` flag is set — including the freshly created `any-` entries, which
/// inherit the base predicate's `not` flag.
fn add_prefixes<'de, D>(deserializer: D) -> Result<BTreeMap<String, Predicate>, D::Error>
where
    D: Deserializer<'de>,
{
    let raw = BTreeMap::<String, PredicateAux>::deserialize(deserializer)?;
    let mut valid_predicates = BTreeMap::new();
    for (name, pred) in raw {
        valid_predicates.insert(
            name.clone(),
            PredicateAux {
                description: pred.description.clone(),
                parameters: pred.parameters.clone(),
                not: pred.not,
                any: false,
            },
        );
        if pred.any {
            let description = format!(
                "Like `#{name}?`, but for quantified patterns only one captured node must match. `#{name}?` is defined as follows:\n\n{}",
                pred.description
            );
            valid_predicates.insert(
                format!("any-{name}"),
                PredicateAux {
                    description,
                    parameters: pred.parameters,
                    not: pred.not,
                    any: false,
                },
            );
        }
    }
    Ok(valid_predicates
        .into_iter()
        .flat_map(|(name, pred)| {
            let it = if pred.not {
                let pref_name = format!("not-{name}");
                let pref_pred = Predicate {
                    parameters: pred.parameters.clone(),
                    description: format!(
                        "The inverse of `#{name}?`, which is defined as follows:\n\n{}",
                        pred.description
                    ),
                };
                vec![
                    (
                        name,
                        Predicate {
                            description: pred.description,
                            parameters: pred.parameters,
                        },
                    ),
                    (pref_name, pref_pred),
                ]
            } else {
                vec![(
                    name,
                    Predicate {
                        description: pred.description,
                        parameters: pred.parameters,
                    },
                )]
            };
            it.into_iter()
        })
        .collect())
}
/// Configuration options for the language server.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
#[cfg_attr(feature = "schema", derive(JsonSchema))]
pub struct Options {
    /// A list of strings representing directories to search for parsers, of the form
    /// `<lang>.(so|dll|dylib)` or `tree-sitter-<lang>.wasm`.
    ///
    /// Supports environment variable expansion of the form `${VAR}`.
    #[serde(default, deserialize_with = "deserialize_and_expand")]
    pub parser_install_directories: Vec<String>,
    /// A map of parser aliases.
    #[serde(default)]
    pub parser_aliases: BTreeMap<String, String>,
    /// A list of patterns to aid the LSP in finding a language, given a file path.
    /// Patterns must have one capture group which represents the language name. Ordered
    /// from highest to lowest precedence.
    ///
    /// The built-in defaults are always appended after any user patterns.
    #[serde(default = "default_regexes", deserialize_with = "add_default_regexes")]
    pub language_retrieval_patterns: Vec<SerializableRegex>,
    /// A map from query file name to valid captures. Valid captures are represented as a map from
    /// capture name (sans `@`) to a short (markdown format) description. Note that captures
    /// prefixed with an underscore are always permissible.
    #[serde(default)]
    pub valid_captures: HashMap<String, BTreeMap<String, String>>,
    /// A map of predicate names (sans `#` and `?`) to parameter specifications.
    ///
    /// `not-`/`any-` variants are generated at deserialization time by
    /// `add_prefixes`.
    #[serde(default, deserialize_with = "add_prefixes")]
    #[cfg_attr(feature = "schema", schemars(schema_with = "prefixes_schema"))]
    pub valid_predicates: BTreeMap<String, Predicate>,
    /// A map of directive names (sans `#` and `!`) to parameter specifications.
    #[serde(default)]
    pub valid_directives: BTreeMap<String, Predicate>,
    /// Options related to diagnostics
    #[serde(default)]
    pub diagnostic_options: DiagnosticOptions,
    /// Options related to formatting
    #[serde(default)]
    pub formatting_options: FormattingOptions,
    /// An inclusive range of ABI versions supported by your tool. The end of the range must be
    /// greater than or equal to the start.
    pub supported_abi_versions: Option<std::ops::RangeInclusive<u32>>,
}
impl Default for Options {
    /// All-empty configuration, except that the built-in language-retrieval
    /// regexes are always present.
    fn default() -> Self {
        Self {
            parser_install_directories: Vec::default(),
            parser_aliases: BTreeMap::default(),
            language_retrieval_patterns: default_regexes(),
            valid_captures: HashMap::default(),
            valid_predicates: BTreeMap::default(),
            valid_directives: BTreeMap::default(),
            diagnostic_options: DiagnosticOptions::default(),
            formatting_options: FormattingOptions::default(),
            supported_abi_versions: None,
        }
    }
}
/// JSON schema for `valid_predicates`: exposes the raw (pre-expansion)
/// `PredicateAux` table, since that is the shape users actually write.
#[cfg(feature = "schema")]
fn prefixes_schema(gen_: &mut schemars::r#gen::SchemaGenerator) -> schemars::schema::Schema {
    let raw = <BTreeMap<String, PredicateAux>>::json_schema(gen_).into_object();
    raw.into()
}
/// Options related to diagnostics
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)]
#[cfg_attr(feature = "schema", derive(JsonSchema))]
pub struct DiagnosticOptions {
    /// The style for predicate string arguments
    #[serde(default)]
    pub string_argument_style: StringArgumentStyle,
    /// Whether to warn on `_`-prefixed captures which are not referenced by a predicate or directive
    /// (default `true`)
    #[serde(default = "default_true")]
    pub warn_unused_underscore_captures: bool,
}
impl Default for DiagnosticOptions {
fn default() -> Self {
Self {
string_argument_style: StringArgumentStyle::default(),
warn_unused_underscore_captures: true,
}
}
}
/// Options related to formatting
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Default, Copy)]
#[cfg_attr(feature = "schema", derive(JsonSchema))]
pub struct FormattingOptions {
    /// Whether to use dot-prefixed predicates; i.e., prefer the form `.foo?` to the form `#foo?`.
    /// Defaults to `false`.
    #[serde(default)]
    pub dot_prefix_predicates: bool,
}
/// How predicate string arguments should be quoted (used by diagnostics).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Default, Clone, Copy)]
#[cfg_attr(feature = "schema", derive(JsonSchema))]
#[serde(rename_all = "snake_case")]
pub enum StringArgumentStyle {
    /// String arguments can be quoted or unquoted (default)
    #[default]
    None,
    /// String arguments must be quoted
    PreferQuoted,
    /// String arguments should be unquoted, when possible
    PreferUnquoted,
}
/// A type specification for a predicate or directive (the fully-expanded
/// form; see `PredicateAux` for the raw user-written form).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Default, Clone)]
#[cfg_attr(feature = "schema", derive(JsonSchema))]
pub struct Predicate {
    /// A short description of the predicate (in Markdown format).
    pub description: String,
    /// The list of valid parameter types.
    pub parameters: Vec<PredicateParameter>,
}
/// A parameter type reference.
///
/// Parameters can be one or both of two types (a capture or a string), and can be required,
/// optional, or "variadic" (there can be zero-to-many of them).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Default)]
#[cfg_attr(feature = "schema", derive(JsonSchema))]
pub struct PredicateParameter {
    /// An optional description of this parameter.
    pub description: Option<String>,
    /// The type of this parameter. Can be `capture`, `string`, or `any` (either a capture or a
    /// string).
    #[serde(rename = "type")]
    pub type_: PredicateParameterType,
    /// The arity of the predicate parameter. Must be `"required"`, `"optional"`, or `"variadic"`.
    #[serde(default)]
    pub arity: PredicateParameterArity,
    /// An optional constraint for the parameter. Only applies when it is a `string`.
    #[serde(default)]
    pub constraint: ParameterConstraint,
}
/// Constraint for a predicate parameter.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Default)]
#[cfg_attr(feature = "schema", derive(JsonSchema))]
#[serde(rename_all = "snake_case")]
pub enum ParameterConstraint {
    /// Enforce no constraint.
    #[default]
    None,
    /// Enforce that the parameter matches a named node kind.
    NamedNode,
    /// Enforce that the parameter is an integer.
    Integer,
    /// Enforce that the parameter is one of the given values.
    Enum(Vec<String>),
}
impl Display for ParameterConstraint {
    /// Human-readable form used in diagnostics/hover text.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Static names skip the formatting machinery; only `Enum` needs it.
        match self {
            Self::None => f.write_str("none"),
            Self::NamedNode => f.write_str("named node"),
            Self::Integer => f.write_str("integer"),
            Self::Enum(values) => write!(f, "`{values:?}`"),
        }
    }
}
/// The type of the predicate parameter.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Default)]
#[cfg_attr(feature = "schema", derive(JsonSchema))]
#[serde(rename_all = "lowercase")]
pub enum PredicateParameterType {
    /// Must be a capture (e.g. `@variable`).
    Capture,
    /// Must be a string (e.g. `foo`).
    String,
    /// Can be either a capture or a string.
    #[default]
    Any,
}
impl Display for PredicateParameterType {
    /// Human-readable form used in diagnostics/hover text.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Self::Capture => "capture",
            Self::String => "string",
            Self::Any => "any",
        })
    }
}
/// The arity of the predicate parameter.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Default)]
#[cfg_attr(feature = "schema", derive(JsonSchema))]
#[serde(rename_all = "lowercase")]
pub enum PredicateParameterArity {
    /// A regular, required parameter.
    #[default]
    Required,
    /// A parameter which can be omitted. Must only be followed by other optional parameters.
    Optional,
    /// A parameter which can appear zero-to-many times. Must be the last parameter if present.
    Variadic,
}
impl Display for PredicateParameterArity {
    /// Human-readable form used in diagnostics/hover text.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Self::Required => "required",
            Self::Optional => "optional",
            Self::Variadic => "variadic",
        })
    }
}
/// Expand environment variables written in `${VAR}` syntax.
///
/// Each `${NAME}` is replaced by the value of the `NAME` environment
/// variable; references to unset (or non-Unicode) variables are left
/// untouched. An unterminated `${…` is reproduced verbatim — previously it
/// was incorrectly treated as if it were closed, which either expanded the
/// trailing text as a variable or invented a closing `}` in the output.
fn expand_env_vars(input: &str) -> String {
    let mut result = String::with_capacity(input.len());
    let mut chars = input.chars().peekable();
    while let Some(c) = chars.next() {
        if c == '$' && chars.peek() == Some(&'{') {
            chars.next(); // consume '{'
            let mut var_name = String::new();
            let mut closed = false;
            while let Some(ch) = chars.next() {
                if ch == '}' {
                    closed = true;
                    break;
                }
                var_name.push(ch);
            }
            if !closed {
                // Unterminated `${…`: keep the input text exactly as written.
                result.push_str("${");
                result.push_str(&var_name);
            } else if let Ok(val) = env::var(&var_name) {
                result.push_str(&val);
            } else {
                // Leave untouched if not found
                result.push_str("${");
                result.push_str(&var_name);
                result.push('}');
            }
        } else {
            result.push(c);
        }
    }
    result
}
/// Deserializes a list of strings, expanding `${VAR}` environment-variable
/// references in each entry.
fn deserialize_and_expand<'de, D>(deserializer: D) -> Result<Vec<String>, D::Error>
where
    D: Deserializer<'de>,
{
    let raw = Vec::<String>::deserialize(deserializer)?;
    Ok(raw.into_iter().map(|s| expand_env_vars(&s)).collect())
}
/// The built-in language-retrieval patterns (always active).
fn default_regexes() -> Vec<SerializableRegex> {
    vec![
        LANGUAGE_REGEX_1.clone().into(),
        LANGUAGE_REGEX_2.clone().into(),
    ]
}
/// Deserializes user-supplied language-retrieval patterns, then appends the
/// built-in defaults so user patterns keep higher precedence.
fn add_default_regexes<'de, D>(deserializer: D) -> Result<Vec<SerializableRegex>, D::Error>
where
    D: Deserializer<'de>,
{
    let mut raw = Vec::<SerializableRegex>::deserialize(deserializer)?;
    // Always provide these defaults
    raw.append(&mut default_regexes());
    Ok(raw)
}
/// A `Regex` wrapper that can be (de)serialized as its pattern string and
/// compared for equality.
#[derive(Debug, Clone)]
pub struct SerializableRegex(Regex);
impl SerializableRegex {
    /// Runs the wrapped regex against `haystack`, returning the capture
    /// groups of the first match, if any.
    #[must_use]
    pub fn captures<'h>(&self, haystack: &'h str) -> Option<regex::Captures<'h>> {
        self.0.captures_at(haystack, 0)
    }
}
impl From<Regex> for SerializableRegex {
    fn from(value: Regex) -> Self {
        Self(value)
    }
}
impl Serialize for SerializableRegex {
    // Serialized as the bare pattern string.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(self.0.as_str())
    }
}
impl<'de> Deserialize<'de> for SerializableRegex {
    // Compiles the pattern at deserialization time; an invalid pattern
    // surfaces as a deserialization error.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        Regex::new(&s)
            .map(SerializableRegex)
            .map_err(serde::de::Error::custom)
    }
}
impl PartialEq for SerializableRegex {
    // Compiled `Regex` values have no equality; compare source patterns.
    fn eq(&self, other: &Self) -> bool {
        self.0.as_str() == other.0.as_str()
    }
}
impl Eq for SerializableRegex {}
#[cfg(feature = "schema")]
impl JsonSchema for SerializableRegex {
    // Schema: a plain string with `format: regex`, since the value is
    // (de)serialized as the pattern string.
    fn json_schema(_: &mut schemars::r#gen::SchemaGenerator) -> schemars::schema::Schema {
        use schemars::schema::{InstanceType, Schema, SchemaObject, StringValidation};
        Schema::Object(SchemaObject {
            instance_type: Some(InstanceType::String.into()),
            format: Some(String::from("regex")),
            string: Some(Box::new(StringValidation::default())),
            metadata: Some(Box::new(schemars::schema::Metadata {
                description: Some(String::from(
                    "A regular expression string (compiled at deserialization time)",
                )),
                ..Default::default()
            })),
            ..Default::default()
        })
    }
    fn schema_name() -> String {
        String::from("Regex")
    }
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/util.rs | src/util.rs | use std::{
cell::RefCell,
fs::{self},
path::{Path, PathBuf},
sync::LazyLock,
};
use regex::Regex;
use ropey::Rope;
use serde_json::Value;
use streaming_iterator::StreamingIterator;
use tower_lsp::{
LanguageServer,
lsp_types::{
DocumentDiagnosticParams, DocumentDiagnosticReport, DocumentDiagnosticReportKind,
DocumentDiagnosticReportResult, NumberOrString, PartialResultParams, Position,
ProgressToken, PublishDiagnosticsParams, Range, RelatedFullDocumentDiagnosticReport,
TextDocumentContentChangeEvent, TextDocumentIdentifier, Url, WorkDoneProgressCreateParams,
WorkDoneProgressParams, notification::PublishDiagnostics, request::WorkDoneProgressCreate,
},
};
use tracing::{error, warn};
use tree_sitter::{
InputEdit, Language, Node, Parser, Point, Query, QueryCapture, QueryCursor, TextProvider, Tree,
WasmStore,
};
use crate::{Backend, DocumentData, ENGINE, ImportedUri, LspClient, Options, QUERY_LANGUAGE};
/// Query matching every `(capture)` node in a query file.
pub static CAPTURES_QUERY: LazyLock<Query> =
    LazyLock::new(|| Query::new(&QUERY_LANGUAGE, "(capture) @cap").unwrap());
/// Matches a `; inherits: lang1,lang2` modeline; capture 1 is the language list.
pub static INHERITS_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"^;+\s*inherits: ([a-zA-Z0-9\-_,]+)").unwrap());
/// Matches a `; format-ignore` modeline comment.
pub static FORMAT_IGNORE_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"^;+\s*(format-ignore)").unwrap());
thread_local! {
    // One query parser per thread, initialized once and reused for every
    // parse on that thread.
    static QUERY_PARSER: RefCell<Parser> = {
        let mut parser = Parser::new();
        parser.set_language(&QUERY_LANGUAGE).expect("Query language should load");
        parser.into()
    };
}
/// Parse the text in the rope as Tree-sitter query source code.
///
/// Passing the previously parsed `old_tree` enables incremental re-parsing
/// after the rope (and tree) were edited.
pub fn parse(rope: &Rope, old_tree: Option<&Tree>) -> Tree {
    QUERY_PARSER.with_borrow_mut(|parser| {
        let len_bytes = rope.len_bytes();
        parser
            .parse_with_options(
                // Feed the parser rope chunks on demand: return the slice of
                // the chunk starting at the requested byte, or an empty slice
                // once past the end of the text.
                &mut |byte, _| {
                    if byte <= len_bytes {
                        let (chunk, start_byte, _, _) = rope.chunk_at_byte(byte);
                        &chunk.as_bytes()[byte - start_byte..]
                    } else {
                        &[]
                    }
                },
                old_tree,
                None,
            )
            .expect("Parsing should have completed")
    })
}
/// Conversions from an LSP position into the index spaces used internally
/// (rope char indices, byte offsets, Tree-sitter points).
pub trait PosUtil {
    fn to_char_idx(&self, rope: &Rope) -> usize;
    fn to_byte_offset(&self, rope: &Rope) -> usize;
    fn to_ts_point(&self, rope: &Rope) -> Point;
    /// The character at this position.
    fn char(&self, rope: &Rope) -> char {
        rope.char(self.to_char_idx(rope))
    }
}
impl PosUtil for Position {
    // LSP columns count UTF-16 code units within the line, so the column is
    // converted via the rope's UTF-16 indexing before going back to chars.
    fn to_char_idx(&self, rope: &Rope) -> usize {
        let row_char_idx = rope.line_to_char(self.line as usize);
        let row_cu = rope.char_to_utf16_cu(row_char_idx);
        rope.utf16_cu_to_char(row_cu + self.character as usize)
    }
    /// Returns the starting byte of the character if the position is in the middle of a character.
    fn to_byte_offset(&self, rope: &Rope) -> usize {
        rope.char_to_byte(self.to_char_idx(rope))
    }
    fn to_ts_point(&self, rope: &Rope) -> Point {
        self.to_byte_offset(rope).to_ts_point(rope)
    }
}
/// Extracting the text covered by an LSP range.
pub trait RangeUtil {
    fn text(&self, rope: &Rope) -> String;
}
impl RangeUtil for Range {
    // Slice the rope between the range's start/end char indices.
    fn text(&self, rope: &Rope) -> String {
        rope.slice(self.start.to_char_idx(rope)..self.end.to_char_idx(rope))
            .to_string()
    }
}
/// Conversions from a byte offset into LSP positions and Tree-sitter points.
pub trait ByteUtil {
    fn to_lsp_pos(&self, rope: &Rope) -> Position;
    fn to_ts_point(&self, rope: &Rope) -> Point;
}
impl ByteUtil for usize {
    // LSP character columns are UTF-16 code units, measured from the start
    // of the line.
    fn to_lsp_pos(&self, rope: &Rope) -> Position {
        let line_idx = rope.byte_to_line(*self);
        let line_utf16_cu_idx = {
            let char_idx = rope.line_to_char(line_idx);
            rope.char_to_utf16_cu(char_idx)
        };
        let character_utf16_cu_idx = {
            let char_idx = rope.byte_to_char(*self);
            rope.char_to_utf16_cu(char_idx)
        };
        let line = line_idx as u32;
        let character = (character_utf16_cu_idx - line_utf16_cu_idx) as u32;
        Position { line, character }
    }
    // Tree-sitter point columns are byte offsets within the line.
    fn to_ts_point(&self, rope: &Rope) -> Point {
        let line = rope.byte_to_line(*self);
        let char = self - rope.line_to_byte(line);
        Point {
            row: line,
            column: char,
        }
    }
}
/// Conversion from a Tree-sitter point back into an LSP position.
trait PointUtil {
    fn to_lsp_pos(&self, rope: &Rope) -> Position;
}
impl PointUtil for Point {
    fn to_lsp_pos(&self, rope: &Rope) -> Position {
        // Point columns are byte-based, so go through the byte offset.
        let offset = rope.line_to_byte(self.row) + self.column;
        offset.to_lsp_pos(rope)
    }
}
/// Gets the `(capture)` node at the cursor, if any.
pub fn get_current_capture_node(root: Node, point: Point) -> Option<Node> {
    let node = root.named_descendant_for_point_range(point, point)?;
    if node.kind() == "capture" {
        return Some(node);
    }
    // The cursor may land on an inner child of the capture; accept the
    // immediate parent when it is the capture node.
    match node.parent() {
        Some(parent) if parent.kind() == "capture" => Some(parent),
        _ => None,
    }
}
/// Adapter that lets tree-sitter queries read node text directly from a
/// [`Rope`] without materializing the document as one contiguous string.
pub struct TextProviderRope<'a>(pub &'a Rope);
impl<'a> TextProvider<&'a [u8]> for &'a TextProviderRope<'a> {
    type I = ChunksBytes<'a>;
    fn text(&mut self, node: tree_sitter::Node) -> Self::I {
        // Yield the rope chunks overlapping the node's byte range.
        ChunksBytes(self.0.byte_slice(node.byte_range()).chunks())
    }
}
/// Iterator over a rope slice's chunks, presented as byte slices.
pub struct ChunksBytes<'a>(ropey::iter::Chunks<'a>);
impl<'a> Iterator for ChunksBytes<'a> {
    type Item = &'a [u8];
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next().map(str::as_bytes)
    }
}
/// Iterate over all captured nodes that have the same kind and source text as
/// `node`, i.e. other references to the same capture name.
pub fn get_references<'a>(
    root: &'a Node,
    node: &'a Node,
    query: &'a Query,
    cursor: &'a mut QueryCursor,
    provider: &'a TextProviderRope,
    rope: &'a Rope,
) -> impl Iterator<Item = Node<'a>> + 'a {
    cursor
        // NOTE(review): the `unwrap` assumes `node` lies under some child of
        // `root` so `child_with_descendant` is `Some` — confirm callers
        // never pass `root` itself here.
        .matches(query, root.child_with_descendant(*node).unwrap(), provider)
        .map_deref(|match_| {
            match_.captures.iter().filter_map(|cap| {
                // A reference is any capture matching both kind and text.
                if cap.node.kind() == node.kind() && cap.node.text(rope) == node.text(rope) {
                    Some(cap.node)
                } else {
                    None
                }
            })
        })
        .flatten()
}
pub fn node_is_or_has_ancestor(root: Node, node: Node, kind: &str) -> bool {
let mut optional_current_node = root.child_with_descendant(node);
while let Some(unwrapped_current_node) = optional_current_node {
if unwrapped_current_node.kind() == kind {
return true;
}
optional_current_node = unwrapped_current_node.child_with_descendant(node);
}
false
}
/// Apply an LSP text edit to `rope`: the characters covered by `range` are
/// replaced with `new_text`.
pub fn edit_rope(rope: &mut Rope, range: Range, new_text: &str) {
    // Resolve both endpoints before mutating, then delete the old span and
    // insert the replacement at its former start.
    let (start, end) = (range.start.to_char_idx(rope), range.end.to_char_idx(rope));
    rope.remove(start..end);
    if new_text.is_empty() {
        return;
    }
    rope.insert(start, new_text);
}
/// Conversion of an LSP document change into a tree-sitter [`InputEdit`].
pub trait TextDocChangeUtil {
    /// Build the tree-sitter edit describing this change, relative to the
    /// rope's contents *before* the change is applied.
    fn to_tsedit(&self, rope: &Rope) -> InputEdit;
}
impl TextDocChangeUtil for TextDocumentContentChangeEvent {
    fn to_tsedit(&self, rope: &Rope) -> InputEdit {
        let text = self.text.as_str();
        let text_end_byte_count = text.len();
        // A change without a range replaces the whole document.
        // NOTE(review): `rope.len_bytes() - 1` underflows (panics) for an
        // empty rope — confirm this path cannot be reached with an empty
        // document.
        let range = self.range.unwrap_or_else(|| {
            let start = Position::new(0, 0);
            let end = (rope.len_bytes() - 1).to_lsp_pos(rope);
            Range { start, end }
        });
        let start_position = range.start.to_ts_point(rope);
        let start_byte = range.start.to_byte_offset(rope);
        let old_end_position = range.end.to_ts_point(rope);
        let old_end_byte = range.end.to_byte_offset(rope);
        let new_end_byte = start_byte + text_end_byte_count;
        let new_end_position = {
            if new_end_byte >= rope.len_bytes() {
                // The edit extends past the current end of the document, so
                // the new end point is derived from the inserted text itself
                // rather than from the (pre-edit) rope.
                let line_idx = text.lines().count();
                let line_byte_idx = ropey::str_utils::line_to_byte_idx(text, line_idx);
                let row = rope.len_lines() + line_idx;
                let column = text_end_byte_count - line_byte_idx;
                Point { row, column }
            } else {
                new_end_byte.to_ts_point(rope)
            }
        };
        InputEdit {
            start_byte,
            old_end_byte,
            new_end_byte,
            start_position,
            old_end_position,
            new_end_position,
        }
    }
}
/// Platform-specific shared-library extensions probed when loading parsers.
const DYLIB_EXTENSIONS: [&str; 3] = [".so", ".dll", ".dylib"];
/// Get the language name of a URI, following user-specified language aliases.
pub fn get_language_name(uri: &Url, options: &Options) -> Option<String> {
    // Take the first retrieval pattern that matches the URI; if that pattern
    // has no first capture group, the lookup fails (no fallback to later
    // patterns, matching the original behavior).
    let caps = options
        .language_retrieval_patterns
        .iter()
        .find_map(|re| re.captures(uri.as_str()))?;
    let raw_name = caps.get(1)?.as_str();
    // Follow a user-configured alias when one exists for the captured name.
    Some(match options.parser_aliases.get(raw_name) {
        Some(alias) => alias.clone(),
        None => raw_name.to_owned(),
    })
}
/// Get the language name of a file without following aliases.
pub fn get_language_name_raw(path: &Path, options: &Options) -> Option<String> {
    let canonical = path.canonicalize().ok()?;
    let path_str = canonical.to_string_lossy();
    // Unlike `get_language_name`, patterns that match but lack a first
    // capture group do not end the search here.
    options
        .language_retrieval_patterns
        .iter()
        .find_map(|re| {
            let capture = re.captures(&path_str)?.get(1)?;
            Some(capture.as_str().to_owned())
        })
}
/// Get the language object of the given name.
///
/// Each configured install directory is searched first for a native dynamic
/// library (`<name>.<ext>`, falling back to `tree-sitter-<name>.<ext>`), and
/// then for a WASM module.
pub fn get_language(name: &str, options: &Options) -> Option<Language> {
    // Return test language objects for mocks
    #[cfg(test)]
    if name == "rust" {
        use crate::RUST_LANGUAGE;
        return Some(RUST_LANGUAGE.clone());
    } else if name == "query" {
        return Some(QUERY_LANGUAGE.clone());
    }
    let directories = &options.parser_install_directories;
    // Exported constructor symbol, e.g. `tree_sitter_foo_bar` for "foo-bar".
    let language_fn_name = format!("tree_sitter_{}", name.replace('-', "_"));
    for directory in directories {
        for dylib_extension in DYLIB_EXTENSIONS {
            let object_name = format!("{name}{dylib_extension}");
            let mut library_path = Path::new(directory).join(&object_name);
            // SAFETY-relevant: loading an arbitrary dylib runs its
            // initializers; paths come from the user's own configuration.
            if let Ok(library) = unsafe {
                libloading::Library::new(&library_path).or_else(|_| {
                    let prefixed_object_name = format!("tree-sitter-{object_name}");
                    library_path.set_file_name(prefixed_object_name);
                    libloading::Library::new(library_path)
                })
            } {
                let language = unsafe {
                    let language_fn: libloading::Symbol<unsafe extern "C" fn() -> Language> =
                        library
                            .get(language_fn_name.as_bytes())
                            .expect("Failed to load symbol")
                    language_fn()
                };
                // Deliberately leak the library handle: the returned
                // `Language` points into the loaded object, which must never
                // be unloaded.
                std::mem::forget(library);
                return Some(language);
            }
        }
        if let Some(lang) = get_language_object_wasm(name, directory) {
            return Some(lang);
        }
    }
    None
}
/// Load a parser for `name` from a WASM module in `directory`, trying both
/// `<name>.wasm` and `tree-sitter-<name>.wasm` file names.
fn get_language_object_wasm(name: &str, directory: &String) -> Option<Language> {
    let object_name = format!("{name}.wasm");
    // Each load gets a fresh store backed by the shared wasmtime engine.
    let mut language_store = WasmStore::new(&ENGINE).ok()?;
    let mut library_path = Path::new(directory).join(&object_name);
    if let Ok(wasm) = fs::read(&library_path).or_else(|_| {
        let prefixed_object_name = format!("tree-sitter-{object_name}");
        library_path.set_file_name(prefixed_object_name);
        fs::read(library_path)
    }) {
        let lang = language_store.load_language(name.replace('-', "_").as_str(), &wasm);
        return match lang {
            Err(err) => {
                warn!("Error loading language {name}: {err}");
                None
            }
            Ok(lang) => Some(lang),
        };
    }
    None
}
/// Rope- and LSP-aware helpers on tree-sitter nodes.
pub trait NodeUtil {
    /// Get the document text of this node.
    fn text(&self, rope: &Rope) -> String;
    /// Get the LSP range spanning the node's range.
    fn lsp_range(&self, rope: &Rope) -> Range;
}
impl NodeUtil for Node<'_> {
    /// Extract this node's source text from the rope.
    fn text(&self, rope: &Rope) -> String {
        rope.byte_slice(self.byte_range()).to_string()
    }
    /// Convert this node's tree-sitter span to an LSP range.
    fn lsp_range(&self, rope: &Rope) -> Range {
        let start = self.start_position().to_lsp_pos(rope);
        let end = self.end_position().to_lsp_pos(rope);
        Range { start, end }
    }
}
/// Search each workspace root, and then its ancestors, for a
/// `.tsqueryrc.json` file, returning the first one that parses.
fn get_first_valid_file_config(workspace_uris: Vec<PathBuf>) -> Option<Options> {
    for mut path in workspace_uris {
        let mut config_path = path.join(".tsqueryrc.json");
        loop {
            // NOTE(review): `options` is inferred as `Option<Options>` here
            // (forced by `return options` below), so a file containing JSON
            // `null` parses successfully and ends the whole search with
            // `None` — confirm that is intended.
            if config_path.is_file()
                && let Some(options) = fs::read_to_string(&config_path)
                    .ok()
                    .and_then(|data| serde_json::from_str(&data).ok())
            {
                return options;
            }
            // Traverse up the file tree in search of a config file
            path = match path.parent() {
                Some(parent) => parent.into(),
                None => break,
            };
            config_path = path.join(".tsqueryrc.json");
        }
    }
    None
}
/// Initialize the server's configuration from the client's initialization
/// options, then let a workspace `.tsqueryrc.json` (if any) take precedence.
pub async fn set_configuration_options<C: LspClient>(
    backend: &Backend<C>,
    init_options: Option<Value>,
    workspace_uris: Vec<PathBuf>,
) {
    let mut options = backend.options.write().await;
    // Reset to defaults so settings from a previous configuration round
    // don't leak through.
    *options = Options::default();
    if let Some(init_options) = init_options {
        if let Ok(parsed_options) = serde_json::from_value::<Options>(init_options) {
            *options = parsed_options;
        } else {
            warn!("Unable to parse configuration settings!");
        }
    }
    // A file config replaces everything except the install directories,
    // which are merged.
    if let Some(mut file_options) = get_first_valid_file_config(workspace_uris) {
        // Merge parser_install_directories, since these are dependent on the local user's
        // installation paths
        let mut config_file_install_dirs = options.parser_install_directories.clone();
        config_file_install_dirs.append(&mut file_options.parser_install_directories);
        file_options.parser_install_directories = config_file_install_dirs;
        *options = file_options;
    }
}
/// Return the file stem (basename without extension) of a file URI, if the
/// URI maps to a file path at all.
pub fn uri_to_basename(uri: &Url) -> Option<String> {
    let path = uri.to_file_path().ok()?;
    let stem = path.file_stem()?;
    Some(stem.to_string_lossy().into_owned())
}
/// Return the innermost capture at the given position, if any.
pub fn capture_at_pos<'t>(
    tree: &'t Tree,
    rope: &Rope,
    query: &Query,
    point: Point,
) -> Option<QueryCapture<'t>> {
    let provider = TextProviderRope(rope);
    let mut cursor = QueryCursor::new();
    // Restrict matching to the single column at the cursor.
    let mut p2 = point;
    p2.column += 1;
    cursor.set_point_range(point..p2);
    let mut matches = cursor.matches(query, tree.root_node(), &provider);
    let mut innermost_capture = None;
    while let Some(match_) = matches.next() {
        for capture in match_.captures {
            // Matches in range may still carry captures that don't contain
            // the cursor; skip those.
            if capture.node.start_position() > point || capture.node.end_position() <= point {
                continue;
            }
            // Later captures overwrite earlier ones; the last survivor is
            // treated as the innermost (presumably relying on capture order
            // going outer to inner — confirm against tree-sitter docs).
            innermost_capture = Some(*capture);
        }
    }
    innermost_capture
}
/// Recursively collect all `.scm` files under the given directories,
/// honoring ignore rules via `ignore::Walk`.
#[cfg(test)]
pub fn get_scm_files(directories: &[PathBuf]) -> impl Iterator<Item = PathBuf> {
    let mut files: Vec<_> = directories
        .iter()
        .flat_map(|directory| {
            ignore::Walk::new(directory)
                .filter_map(|e| e.ok())
                .filter(|e| {
                    e.file_type().is_some_and(|ft| ft.is_file())
                        && e.path().extension().is_some_and(|ext| ext == "scm")
                })
                .map(|e| e.path().to_owned())
        })
        .collect();
    // When testing, sort files to prevent flakiness
    files.sort();
    files.into_iter()
}
/// Recursively collect all `.scm` files under the given directories,
/// honoring ignore rules via `ignore::Walk`. Order follows the directory
/// walk (the test build sorts instead, for determinism).
#[cfg(not(test))]
pub fn get_scm_files(directories: &[PathBuf]) -> impl Iterator<Item = PathBuf> {
    directories.iter().flat_map(|directory| {
        ignore::Walk::new(directory)
            .filter_map(|e| e.ok())
            .filter(|e| {
                e.file_type().is_some_and(|ft| ft.is_file())
                    && e.path().extension().is_some_and(|ext| ext == "scm")
            })
            .map(|e| e.path().to_owned())
    })
}
/// Collect URIs of query files named `<query_type>.scm` (e.g.
/// `highlights.scm`) whose paths resolve to `language_name`.
pub fn get_file_uris(
    dirs: &[PathBuf],
    options: &Options,
    language_name: &str,
    query_type: &str,
) -> Vec<Url> {
    let mut urls = Vec::new();
    for scm_file in get_scm_files(dirs) {
        // Keep files whose stem matches the query type and whose path maps
        // to the requested language via the retrieval patterns.
        if scm_file.file_stem().is_some_and(|stem| stem == query_type)
            && let Some(lang_name) = get_language_name_raw(&scm_file, options)
            && lang_name.as_str() == language_name
        {
            // NOTE(review): `from_file_path` errors for relative paths; this
            // unwrap assumes walked paths are absolute — confirm.
            urls.push(Url::from_file_path(&scm_file).unwrap());
        }
    }
    urls
}
/// Returns a list of URIs corresponding to the modules in the `; inherits: ` chain. `None` if the
/// module could not be found.
pub fn get_imported_uris(
    workspace_dirs: &[PathBuf],
    options: &Options,
    uri: &Url,
    rope: &Rope,
    tree: &Tree,
) -> Vec<ImportedUri> {
    let mut uris = Vec::new();
    // The inherits directive must be a comment on the very first line.
    let Some(start_comment) = tree
        .root_node()
        .child(0)
        .filter(|node| node.kind() == "comment" && node.start_position().row == 0)
    else {
        return uris;
    };
    let comment_text = start_comment.text(rope);
    let Some(modules) = INHERITS_REGEX
        .captures(&comment_text)
        .and_then(|c| c.get(1))
    else {
        return uris;
    };
    // Imported modules share this document's query type (its file stem).
    let Some(query_name) = uri_to_basename(uri) else {
        return uris;
    };
    // Track each module's byte-column span within the comment so callers can
    // report locations.
    let mut byte_offset = (start_comment.start_byte() + modules.start()) as u32;
    for module in modules.as_str().split(',') {
        let (start, end) = (byte_offset, byte_offset + module.len() as u32);
        // Skip past the separating comma for the next module.
        byte_offset = end + 1;
        if module.is_empty() {
            uris.push(ImportedUri::new(start, end, module.to_string(), None));
            continue;
        }
        let module_uris = get_file_uris(workspace_dirs, options, module, &query_name);
        if module_uris.len() > 1 {
            warn!(
                "Imported module {module} has more than one associated file location, analyzing the first one"
            );
        }
        uris.push(ImportedUri::new(
            start,
            end,
            module.to_string(),
            module_uris.first().cloned(),
        ));
    }
    uris
}
/// Check whether `sub` is a subsequence of `main`: every character of `sub`
/// appears in `main` in the same relative order, with gaps allowed.
pub fn is_subsequence(sub: &str, main: &str) -> bool {
    let mut main_chars = main.chars();
    // `any` consumes `main_chars` up to and including the matched character,
    // so each following needle character must be found strictly after it.
    sub.chars().all(|needle| main_chars.any(|c| c == needle))
}
/// Find the `; inherits:` module entry under the cursor, if the cursor sits
/// on the first line inside one of the listed module names.
pub fn get_imported_module_under_cursor(
    document: &DocumentData,
    position: Position,
) -> Option<&ImportedUri> {
    // The inherits directive only ever appears on the first line.
    if position.line != 0 {
        return None;
    }
    let tree = &document.tree;
    let rope = &document.rope;
    let ts_point = position.to_ts_point(rope);
    let comment_node = tree
        .root_node()
        .descendant_for_point_range(ts_point, ts_point)
        .filter(|node| node.kind() == "comment")?;
    let node_text = comment_node.text(rope);
    let modules = INHERITS_REGEX.captures(&node_text).and_then(|c| c.get(1))?;
    let cursor_offset = position.to_byte_offset(rope);
    // Walk the comma-separated list, tracking each entry's byte span, until
    // one contains the cursor.
    let mut comment_offset = comment_node.start_byte() + modules.start();
    let module_name = modules.as_str().split(',').find_map(|module| {
        let end = comment_offset + module.len();
        let cursor_in_module = cursor_offset >= comment_offset && cursor_offset < end;
        // Advance past this module and its trailing comma.
        comment_offset = end + 1;
        cursor_in_module.then(|| module.to_string())
    })?;
    document
        .imported_uris
        .iter()
        .find(|import| import.name == module_name)
}
/// Push diagnostics to the client (only if it does not support pull diagnostics).
pub async fn push_diagnostics<C: LspClient>(backend: &Backend<C>, uri: Url) {
    // Only push when the client did not advertise pull-diagnostic support.
    if backend
        .client_capabilities
        .read()
        .await
        .text_document
        .as_ref()
        .and_then(|td| td.diagnostic.as_ref())
        .is_none()
        // WARNING: This is *NOT* the same as `.get().as_deref().cloned()`!!!! That will still
        // allow deadlocks to occur while this code will not!!! This is because as_deref() keeps the
        // borrowed ref in scope, in the backround, until the cloned value is dropped. When using
        // map(), the ref is dropped within the map closure and thus deadlocks are prevented. Many
        // Bothans died to bring us this information.
        && let Some(document) = backend.document_map.get(&uri).map(|doc| doc.clone())
    {
        let version = document.version;
        // Reuse the pull-diagnostics handler to compute the report.
        let Ok(diagnostics) = backend
            .diagnostic(DocumentDiagnosticParams {
                text_document: TextDocumentIdentifier { uri: uri.clone() },
                identifier: None,
                previous_result_id: None,
                partial_result_params: PartialResultParams::default(),
                work_done_progress_params: WorkDoneProgressParams::default(),
            })
            .await
        else {
            warn!("Error retrieving push diagnostics for {uri}");
            return;
        };
        if let DocumentDiagnosticReportResult::Report(DocumentDiagnosticReport::Full(
            RelatedFullDocumentDiagnosticReport {
                related_documents,
                full_document_diagnostic_report,
            },
        )) = diagnostics
        {
            backend
                .client
                .send_notification::<PublishDiagnostics>(PublishDiagnosticsParams::new(
                    uri,
                    full_document_diagnostic_report.items,
                    version,
                ))
                .await;
            // Also publish reports for related documents the server tracks.
            for (uri, report) in related_documents.unwrap_or_default() {
                if let DocumentDiagnosticReportKind::Full(report) = report
                    && let Some(version) =
                        backend.document_map.get(&uri).and_then(|doc| doc.version)
                {
                    backend
                        .client
                        .send_notification::<PublishDiagnostics>(PublishDiagnosticsParams::new(
                            uri,
                            report.items,
                            Some(version),
                        ))
                        .await;
                }
            }
        }
    }
}
/// Generate a fresh UUID string (used for progress tokens).
#[cfg(not(test))]
fn make_uuid() -> String {
    uuid::Uuid::new_v4().to_string()
}
/// Deterministic UUID stand-in so tests are reproducible.
#[cfg(test)]
fn make_uuid() -> String {
    "00000000-1111-2222-3333-444444444444".to_string()
}
/// Return the given progress token, or create one and issue a new work done request (if the client
/// supports them) if it does not exist.
pub async fn get_work_done_token<C: LspClient>(
    backend: &Backend<C>,
    work_done_token: Option<ProgressToken>,
) -> Option<ProgressToken> {
    if let Some(token) = work_done_token {
        Some(token)
    } else if backend
        .client_capabilities
        // NOTE(review): `try_read().expect(..)` panics if the capabilities
        // lock is held for writing at this moment — confirm capabilities are
        // truly only written during initialization.
        .try_read()
        .expect("Client capabilities should only be set once")
        .window
        .as_ref()
        .is_some_and(|w| w.work_done_progress == Some(true))
    {
        // Client supports server-initiated progress: mint a token and ask
        // the client to create the progress UI for it.
        let token = NumberOrString::String(make_uuid());
        if let Err(error) = backend
            .client
            .send_request::<WorkDoneProgressCreate>(WorkDoneProgressCreateParams {
                token: token.clone(),
            })
            .await
        {
            error!("{error}");
            None
        } else {
            Some(token)
        }
    } else {
        None
    }
}
/// Remove unnecessary backslashes from the given string content.
///
/// A backslash is kept only when it introduces one of the escape sequences
/// valid in query string literals (`\"`, `\\`, `\n`, `\r`, `\t`, `\0`);
/// any other escaped character is emitted without its backslash. A trailing
/// lone backslash is dropped entirely.
pub fn remove_unnecessary_escapes(input: &str) -> String {
    // The output can never be longer than the input, so reserve the full
    // length up front to avoid incremental reallocation while pushing.
    let mut result = String::with_capacity(input.len());
    let mut chars = input.chars();
    while let Some(c) = chars.next() {
        if c == '\\' {
            match chars.next() {
                // Recognized escape sequence: keep the backslash.
                Some(char @ ('\"' | '\\' | 'n' | 'r' | 't' | '0')) => {
                    result.push('\\');
                    result.push(char);
                }
                // Unnecessary escape: emit the character alone.
                Some(char) => {
                    result.push(char);
                }
                // Trailing backslash with nothing to escape: drop it.
                None => {}
            }
        } else {
            result.push(c);
        }
    }
    result
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/main.rs | src/main.rs | use clap::{Parser, Subcommand};
use cli::{
check::check_directories, format::format_directories, lint::lint_directories,
profile::profile_directories,
};
use core::fmt;
use std::{
collections::{BTreeSet, HashMap, HashSet},
env,
fs::{self},
path::{Path, PathBuf},
str,
sync::{Arc, LazyLock, RwLock},
};
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
use ts_query_ls::{FormattingOptions, Options};
use dashmap::DashMap;
use ropey::Rope;
use tower_lsp::{
Client, LanguageServer, LspService, Server,
jsonrpc::Result,
lsp_types::{
ClientCapabilities, CodeActionKind, CodeActionOptions, CodeActionParams,
CodeActionProviderCapability, CodeActionResponse, CompletionOptions, CompletionParams,
CompletionResponse, DiagnosticOptions, DiagnosticServerCapabilities,
DidChangeConfigurationParams, DidChangeTextDocumentParams, DidCloseTextDocumentParams,
DidOpenTextDocumentParams, DidSaveTextDocumentParams, DocumentDiagnosticParams,
DocumentDiagnosticReportResult, DocumentFormattingParams, DocumentHighlight,
DocumentHighlightParams, DocumentRangeFormattingParams, DocumentSymbolParams,
DocumentSymbolResponse, GotoDefinitionParams, GotoDefinitionResponse, Hover, HoverParams,
HoverProviderCapability, InitializeParams, InitializeResult, Location, OneOf,
ReferenceParams, RenameParams, SelectionRange, SelectionRangeParams,
SelectionRangeProviderCapability, SemanticTokenModifier, SemanticTokenType,
SemanticTokensFullOptions, SemanticTokensLegend, SemanticTokensOptions,
SemanticTokensParams, SemanticTokensRangeParams, SemanticTokensRangeResult,
SemanticTokensResult, SemanticTokensServerCapabilities, ServerCapabilities,
SymbolInformation, TextDocumentSyncCapability, TextDocumentSyncKind, TextEdit, Url,
WorkDoneProgressOptions, WorkspaceEdit, WorkspaceSymbolOptions, WorkspaceSymbolParams,
},
};
use tree_sitter::{Language, Tree, wasmtime::Engine};
use handlers::{
code_action, completion, diagnostic, did_change, did_change_configuration, did_close, did_open,
did_save, document_highlight, document_symbol, formatting, goto_definition, hover, initialize,
references, rename, selection_range, semantic_tokens, shutdown, workspace_symbol,
};
use logging::LspLogLayer;
mod cli;
mod handlers;
mod logging;
mod test_helpers;
mod util;
/// Static description of every LSP feature this server advertises to the
/// client at initialization.
static SERVER_CAPABILITIES: LazyLock<ServerCapabilities> = LazyLock::new(|| ServerCapabilities {
    text_document_sync: Some(TextDocumentSyncCapability::Kind(
        TextDocumentSyncKind::INCREMENTAL,
    )),
    diagnostic_provider: Some(DiagnosticServerCapabilities::Options(DiagnosticOptions {
        identifier: Some(String::from("ts_query_ls")),
        inter_file_dependencies: true,
        ..Default::default()
    })),
    code_action_provider: Some(CodeActionProviderCapability::Options(CodeActionOptions {
        code_action_kinds: Some(vec![CodeActionKind::QUICKFIX]),
        ..Default::default()
    })),
    references_provider: Some(OneOf::Left(true)),
    rename_provider: Some(OneOf::Left(true)),
    definition_provider: Some(OneOf::Left(true)),
    document_formatting_provider: Some(OneOf::Left(true)),
    document_range_formatting_provider: Some(OneOf::Left(true)),
    completion_provider: Some(CompletionOptions {
        // Characters that start query-specific constructs (captures,
        // strings, escapes, nodes, predicates/directives, ...).
        trigger_characters: Some(
            ["@", "\"", "\\", "(", "/", ".", "#", "!"]
                .map(ToOwned::to_owned)
                .into(),
        ),
        ..CompletionOptions::default()
    }),
    document_highlight_provider: Some(OneOf::Left(true)),
    semantic_tokens_provider: Some(SemanticTokensServerCapabilities::SemanticTokensOptions(
        SemanticTokensOptions {
            legend: SemanticTokensLegend {
                token_types: vec![
                    SemanticTokenType::INTERFACE,
                    SemanticTokenType::VARIABLE,
                    SemanticTokenType::NAMESPACE,
                    SemanticTokenType::KEYWORD,
                ],
                token_modifiers: vec![SemanticTokenModifier::DEFAULT_LIBRARY],
            },
            full: Some(SemanticTokensFullOptions::Bool(true)),
            range: Some(true),
            ..Default::default()
        },
    )),
    hover_provider: Some(HoverProviderCapability::Simple(true)),
    document_symbol_provider: Some(OneOf::Left(true)),
    selection_range_provider: Some(SelectionRangeProviderCapability::Simple(true)),
    workspace_symbol_provider: Some(OneOf::Right(WorkspaceSymbolOptions {
        work_done_progress_options: WorkDoneProgressOptions {
            work_done_progress: Some(true),
        },
        resolve_provider: None,
    })),
    ..Default::default()
});
/// Shared wasmtime engine used for all WASM parser loads.
static ENGINE: LazyLock<Engine> = LazyLock::new(Engine::default);
/// The tree-sitter query language itself.
static QUERY_LANGUAGE: LazyLock<Language> = LazyLock::new(|| tree_sitter_tsquery::LANGUAGE.into());
/// Rust grammar used as a mock target language in tests.
#[cfg(test)]
static RUST_LANGUAGE: LazyLock<Language> = LazyLock::new(|| tree_sitter_rust::LANGUAGE.into());
/// A grammar symbol: either a named node kind or an anonymous token.
#[derive(PartialEq, Eq, Hash, Clone, PartialOrd, Ord, Debug)]
struct SymbolInfo {
    // Node kind or token text.
    label: String,
    // True for named nodes (displayed as `(label)`), false for anonymous
    // tokens (displayed as `"label"`); see the `Display` impl.
    named: bool,
}
impl fmt::Display for SymbolInfo {
    /// Render named nodes as `(label)` and anonymous tokens as `"label"`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.named {
            write!(f, "({})", self.label)
        } else {
            write!(f, "\"{}\"", self.label)
        }
    }
}
/// A module referenced from a `; inherits:` directive, along with its
/// location on the first line of the document.
#[derive(Clone)]
struct ImportedUri {
    /// The start column, in bytes.
    start_col: u32,
    /// The end column, in bytes.
    end_col: u32,
    /// The name of the module.
    name: String,
    /// The URI of the associated document, if it exists.
    uri: Option<Url>,
}
impl ImportedUri {
    /// Bundle the byte-column span, module name, and resolved URI (if any).
    const fn new(start_col: u32, end_col: u32, name: String, uri: Option<Url>) -> Self {
        Self {
            start_col,
            end_col,
            name,
            uri,
        }
    }
}
/// Per-document state tracked by the server.
#[derive(Clone)]
struct DocumentData {
    /// The document's text content.
    rope: Rope,
    /// The document's parsed CST.
    tree: Tree,
    /// Document version. `None` if the document has not been opened by the editor (i.e., it was
    /// constructed because an open document imports it).
    version: Option<i32>,
    /// The query language name for the document, if it exists.
    language_name: Option<String>,
    /// The modules imported by this document.
    imported_uris: Vec<ImportedUri>,
}
/// Cached information about a loaded tree-sitter language.
#[derive(Clone, Debug)]
struct LanguageData {
    // Resolved language name.
    name: String,
    // Grammar symbols, kept both as a set (fast membership checks) and a
    // Vec (stable iteration order).
    symbols_set: HashSet<SymbolInfo>,
    symbols_vec: Vec<SymbolInfo>,
    // Field names, likewise in both set and Vec form.
    fields_set: HashSet<String>,
    fields_vec: Vec<String>,
    // Presumably maps each supertype to its set of subtypes — confirm at the
    // construction site.
    supertype_map: HashMap<SymbolInfo, BTreeSet<SymbolInfo>>,
    language: Language,
}
/// Abstraction over the LSP client connection, so `Backend` can be generic
/// over the client implementation.
trait LspClient: Send + Sync + 'static {
    /// Send a typed request to the client and await its response.
    fn send_request<R>(
        &self,
        params: R::Params,
    ) -> impl std::future::Future<Output = tower_lsp::jsonrpc::Result<R::Result>> + Send
    where
        R: tower_lsp::lsp_types::request::Request,
        <R as tower_lsp::lsp_types::request::Request>::Params: Send;
    /// Send a one-way notification to the client.
    fn send_notification<N>(
        &self,
        params: N::Params,
    ) -> impl std::future::Future<Output = ()> + std::marker::Send
    where
        N: tower_lsp::lsp_types::notification::Notification,
        <N as tower_lsp::lsp_types::notification::Notification>::Params: Send;
}
// Trivial forwarding to the inherent `tower_lsp::Client` methods of the same
// names.
impl LspClient for Client {
    async fn send_request<R>(&self, params: R::Params) -> tower_lsp::jsonrpc::Result<R::Result>
    where
        R: tower_lsp::lsp_types::request::Request,
        <R as tower_lsp::lsp_types::request::Request>::Params: Send,
    {
        self.send_request::<R>(params).await
    }
    async fn send_notification<N>(&self, params: N::Params)
    where
        N: tower_lsp::lsp_types::notification::Notification,
        <N as tower_lsp::lsp_types::notification::Notification>::Params: Send,
    {
        self.send_notification::<N>(params).await;
    }
}
/// Shared server state handed to every request handler.
struct Backend<C: LspClient> {
    // Handle used to send requests/notifications back to the editor.
    client: C,
    // Capabilities advertised by the client at initialization.
    client_capabilities: Arc<tokio::sync::RwLock<ClientCapabilities>>,
    // Open (or imported) documents keyed by URI.
    document_map: DashMap<Url, DocumentData>,
    // Loaded language data keyed by language name.
    language_map: DashMap<String, Arc<LanguageData>>,
    /// A map from URI -> URIs that depend on that URI
    dependents: DashMap<Url, HashSet<Url>>,
    // Server configuration (initialization options merged with file config).
    options: Arc<tokio::sync::RwLock<Options>>,
    // Workspace root paths (presumably from the client's workspace folders —
    // confirm in the initialize handler).
    workspace_paths: Arc<RwLock<Vec<PathBuf>>>,
}
// Thin dispatch layer: every LSP request/notification is delegated to the
// matching function in the `handlers` module.
#[tower_lsp::async_trait]
impl<C: LspClient> LanguageServer for Backend<C> {
    async fn initialize(&self, params: InitializeParams) -> Result<InitializeResult> {
        initialize::initialize(self, params).await
    }
    async fn did_change_configuration(&self, params: DidChangeConfigurationParams) {
        did_change_configuration::did_change_configuration(self, params).await;
    }
    async fn shutdown(&self) -> Result<()> {
        Ok(shutdown::shutdown(self))
    }
    async fn goto_definition(
        &self,
        params: GotoDefinitionParams,
    ) -> Result<Option<GotoDefinitionResponse>> {
        Ok(goto_definition::goto_definition(self, &params))
    }
    async fn did_open(&self, params: DidOpenTextDocumentParams) {
        did_open::did_open(self, params).await;
    }
    async fn did_close(&self, params: DidCloseTextDocumentParams) {
        did_close::did_close(self, &params);
    }
    async fn did_save(&self, params: DidSaveTextDocumentParams) {
        did_save::did_save(self, params);
    }
    async fn did_change(&self, params: DidChangeTextDocumentParams) {
        did_change::did_change(self, params).await;
    }
    async fn references(&self, params: ReferenceParams) -> Result<Option<Vec<Location>>> {
        Ok(references::references(self, &params))
    }
    async fn document_highlight(
        &self,
        params: DocumentHighlightParams,
    ) -> Result<Option<Vec<DocumentHighlight>>> {
        Ok(document_highlight::document_highlight(self, &params))
    }
    async fn rename(&self, params: RenameParams) -> Result<Option<WorkspaceEdit>> {
        rename::rename(self, &params)
    }
    async fn completion(&self, params: CompletionParams) -> Result<Option<CompletionResponse>> {
        completion::completion(self, params).await
    }
    async fn formatting(&self, params: DocumentFormattingParams) -> Result<Option<Vec<TextEdit>>> {
        Ok(formatting::formatting(self, &params).await)
    }
    async fn range_formatting(
        &self,
        params: DocumentRangeFormattingParams,
    ) -> Result<Option<Vec<TextEdit>>> {
        Ok(formatting::range_formatting(self, &params).await)
    }
    async fn semantic_tokens_full(
        &self,
        params: SemanticTokensParams,
    ) -> Result<Option<SemanticTokensResult>> {
        Ok(semantic_tokens::semantic_tokens_full(self, &params))
    }
    async fn semantic_tokens_range(
        &self,
        params: SemanticTokensRangeParams,
    ) -> Result<Option<SemanticTokensRangeResult>> {
        Ok(semantic_tokens::semantic_tokens_range(self, &params))
    }
    async fn hover(&self, params: HoverParams) -> Result<Option<Hover>> {
        hover::hover(self, params).await
    }
    async fn document_symbol(
        &self,
        params: DocumentSymbolParams,
    ) -> Result<Option<DocumentSymbolResponse>> {
        Ok(document_symbol::document_symbol(self, &params))
    }
    async fn symbol(
        &self,
        params: WorkspaceSymbolParams,
    ) -> Result<Option<Vec<SymbolInformation>>> {
        workspace_symbol::symbol(self, params).await
    }
    async fn diagnostic(
        &self,
        params: DocumentDiagnosticParams,
    ) -> Result<DocumentDiagnosticReportResult> {
        diagnostic::diagnostic(self, params).await
    }
    async fn code_action(&self, params: CodeActionParams) -> Result<Option<CodeActionResponse>> {
        Ok(code_action::code_action(self, params))
    }
    async fn selection_range(
        &self,
        params: SelectionRangeParams,
    ) -> Result<Option<Vec<SelectionRange>>> {
        Ok(selection_range::selection_range(self, &params))
    }
}
// Top-level CLI arguments; with no subcommand, `main` runs the LSP server
// over stdio instead.
#[derive(Parser)]
#[command(
    name = "ts_query_ls",
    version = env!("CARGO_PKG_VERSION"),
    about = "LSP implementation for Tree-sitter's query files"
)]
struct Arguments {
    #[command(subcommand)]
    commands: Option<Commands>,
}
// NOTE: the `///` comments below double as clap's CLI help text, so they are
// user-visible strings — edit with care.
#[derive(Subcommand)]
enum Commands {
    /// Format the query files in the given directories.
    Format {
        /// List of directories to format.
        directories: Vec<PathBuf>,
        /// Only check that formatting is valid, do not write.
        #[arg(long, short)]
        check: bool,
        /// String representing server's JSON configuration.
        #[arg(long)]
        config: Option<String>,
    },
    /// Check the query files in the given directories for errors. This command performs a superset
    /// of the work done by the lint command; it reads the query's language to validate query
    /// structure, node names, etc.
    Check {
        /// List of directories to check.
        directories: Vec<PathBuf>,
        /// The workspace directory where imported query modules are searched when `; inherits` is
        /// used. Defaults to the current directory.
        #[arg(long, short)]
        workspace: Option<PathBuf>,
        /// String representing server's JSON configuration.
        #[arg(long, short)]
        config: Option<String>,
        /// Check for valid formatting.
        #[arg(long, short)]
        format: bool,
        /// Apply fixes to diagnostics that have them.
        #[arg(long)]
        fix: bool,
    },
    /// Lint the query files in the given directories for errors. This differs from `check` because
    /// it does not perform a full semantic analysis (e.g. analyzing for impossible patterns), but
    /// it does validate that there are no invalid captures or predicates as specified by the
    /// configuration options. Useful when you don't (yet?) have access to the parser objects.
    Lint {
        /// List of directories to lint.
        directories: Vec<PathBuf>,
        /// The workspace directory where imported query modules are searched when `; inherits` is
        /// used. Defaults to the current directory.
        #[arg(long, short)]
        workspace: Option<PathBuf>,
        /// String representing server's JSON configuration.
        #[arg(long, short)]
        config: Option<String>,
        /// Apply fixes to diagnostics, when possible.
        #[arg(long, short)]
        fix: bool,
    },
    /// Profile each pattern in the given queries, outputting the time it takes them to compile.
    Profile {
        /// List of directories to profile.
        directories: Vec<PathBuf>,
        /// String representing server's JSON configuration.
        #[arg(long, short)]
        config: Option<String>,
        /// Whether to profile the entire query file, rather than each pattern within the query.
        #[arg(long, short)]
        per_file: bool,
    },
}
/// Return the given config string, falling back to the contents of
/// `.tsqueryrc.json` in the current directory. Exits the process with status
/// 1 when neither source is available.
fn get_config_str(config: Option<String>) -> String {
    config.unwrap_or_else(|| {
        // No explicit config: try the conventional config file next to us.
        fs::read_to_string(Path::new(".tsqueryrc.json")).unwrap_or_else(|_| {
            eprintln!("No config parameter given, and no .tsqueryrc.json found");
            std::process::exit(1);
        })
    })
}
#[tokio::main]
async fn main() {
    let args = Arguments::parse();
    // CLI subcommands run to completion and exit the process; only the bare
    // invocation falls through to start the language server.
    match args.commands {
        Some(Commands::Format {
            directories,
            check,
            config,
        }) => {
            // Formatting options come from --config, else the config file,
            // else defaults.
            let fmt_options = if let Some(config) = config {
                let Ok(options) = serde_json::from_str::<Options>(&config) else {
                    eprintln!("Could not parse the provided configuration");
                    std::process::exit(1);
                };
                options.formatting_options
            } else if let Ok(config) = fs::read_to_string(Path::new(".tsqueryrc.json")) {
                let Ok(options) = serde_json::from_str::<Options>(&config) else {
                    eprintln!("Could not parse the configuration file");
                    std::process::exit(1);
                };
                options.formatting_options
            } else {
                FormattingOptions::default()
            };
            std::process::exit(format_directories(&directories, check, fmt_options).await);
        }
        Some(Commands::Check {
            directories,
            workspace,
            config,
            format,
            fix,
        }) => {
            let config_str = get_config_str(config);
            std::process::exit(
                check_directories(&directories, config_str, workspace, format, fix).await,
            );
        }
        Some(Commands::Lint {
            directories,
            workspace,
            config,
            fix,
        }) => {
            let config_str = get_config_str(config);
            std::process::exit(lint_directories(&directories, config_str, workspace, fix).await)
        }
        Some(Commands::Profile {
            directories,
            per_file,
            config,
        }) => {
            let config_str = get_config_str(config);
            profile_directories(&directories, config_str, per_file).await;
            std::process::exit(0);
        }
        None => {}
    }
    // No subcommand: serve the LSP protocol over stdio, wiring tracing
    // output through the client via the LSP log layer.
    let stdin = tokio::io::stdin();
    let stdout = tokio::io::stdout();
    let options = Arc::new(Options::default().into());
    let (service, socket) = LspService::build(|client| {
        let lsp_layer = LspLogLayer::new(client.clone());
        tracing_subscriber::registry().with(lsp_layer).init();
        Backend {
            client,
            document_map: DashMap::default(),
            language_map: DashMap::default(),
            workspace_paths: Arc::default(),
            client_capabilities: Arc::default(),
            dependents: DashMap::default(),
            options,
        }
    })
    .finish();
    Server::new(stdin, stdout, socket).serve(service).await;
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/logging.rs | src/logging.rs | use tower_lsp::Client;
use tower_lsp::lsp_types::MessageType;
use tracing::field::Field;
use tracing::{Event, Level, Subscriber};
use tracing_subscriber::layer::{Context, Layer};
use tracing_subscriber::registry::LookupSpan;
/// `tracing` layer that forwards log events to the LSP client as
/// `window/logMessage` notifications.
pub struct LspLogLayer(Client);
impl LspLogLayer {
    /// Wrap the given LSP client.
    pub const fn new(client: Client) -> Self {
        Self(client)
    }
}
impl<S> Layer<S> for LspLogLayer
where
    S: Subscriber + for<'a> LookupSpan<'a>,
{
    fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) {
        let level = *event.metadata().level();
        // Map tracing levels onto LSP message types; TRACE has no LSP
        // equivalent and is silently dropped.
        let message_type = match level {
            Level::ERROR => MessageType::ERROR,
            Level::WARN => MessageType::WARNING,
            Level::INFO => MessageType::INFO,
            Level::DEBUG => MessageType::LOG,
            _ => return,
        };
        // Extract the log message from the event
        let mut visitor = MessageVisitor::default();
        event.record(&mut visitor);
        let mut message = visitor.0;
        // Fall back to the event's Debug output when no `message` field is
        // present.
        if message.is_empty() {
            message = format!("{event:?}");
        }
        // `log_message` is async but `on_event` is not; send on a spawned
        // task so emitting a log never blocks the caller.
        tokio::spawn({
            let client = self.0.clone();
            async move {
                client.log_message(message_type, message).await;
            }
        });
    }
}
// Visitor that captures the string value of an event's `message` field.
#[derive(Default)]
struct MessageVisitor(String);
impl tracing::field::Visit for MessageVisitor {
    // Records only the `message` field, using its Debug formatting; every
    // other field is ignored.
    fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) {
        if field.name() == "message" {
            self.0 = format!("{value:?}");
        }
    }
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/document_symbol.rs | src/handlers/document_symbol.rs | use tower_lsp::lsp_types::{
DocumentSymbol, DocumentSymbolParams, DocumentSymbolResponse, SymbolKind,
};
use tracing::warn;
use tree_sitter::{QueryCursor, StreamingIterator};
use crate::{
Backend, LspClient,
util::{CAPTURES_QUERY, NodeUtil, TextProviderRope},
};
/// Collects every capture name in the document (via `CAPTURES_QUERY`) as a
/// flat list of document symbols.
///
/// Returns `None` (after logging a warning) when the URI has no tracked
/// document. Each symbol's `selection_range` covers the capture identifier
/// itself, while `range` covers the enclosing pattern node.
pub fn document_symbol<C: LspClient>(
    backend: &Backend<C>,
    params: &DocumentSymbolParams,
) -> Option<DocumentSymbolResponse> {
    let uri = &params.text_document.uri;
    let mut document_symbols = vec![];
    let Some(doc) = backend.document_map.get(uri) else {
        warn!("No document found for URI: {uri} when searching for document symbols.");
        return None;
    };
    let rope = &doc.rope;
    let tree = &doc.tree;
    let provider = &TextProviderRope(rope);
    let mut cursor = QueryCursor::new();
    let mut matches = cursor.matches(&CAPTURES_QUERY, tree.root_node(), provider);
    while let Some(match_) = matches.next() {
        for capture in match_.captures {
            let capture_node = capture.node;
            let node_text = capture_node.text(rope);
            // A capture normally sits inside a pattern node; fall back to the
            // node itself instead of panicking if it is ever the tree root.
            let parent = capture_node.parent().unwrap_or(capture_node);
            document_symbols.push(DocumentSymbol {
                name: node_text,
                kind: SymbolKind::VARIABLE,
                range: parent.lsp_range(rope),
                selection_range: capture_node.lsp_range(rope),
                detail: None,
                // TODO: Structure this hierarchically
                children: None,
                tags: None,
                #[allow(deprecated)]
                deprecated: None,
            });
        }
    }
    Some(DocumentSymbolResponse::Nested(document_symbols))
}
#[cfg(test)]
mod test {
    use pretty_assertions::assert_eq;
    use rstest::rstest;
    use tower_lsp::lsp_types::{
        DocumentSymbol, DocumentSymbolParams, DocumentSymbolResponse, PartialResultParams,
        Position, Range, SymbolKind, TextDocumentIdentifier, WorkDoneProgressParams,
        request::DocumentSymbolRequest,
    };
    use crate::{
        Options,
        test_helpers::helpers::{SIMPLE_FILE, TEST_URI, TestService, initialize_server},
    };
    // Expected symbol triple: (capture name, selection_range, range).
    type DocSymbol = (String, Range, Range);
    #[rstest]
    #[case(SIMPLE_FILE, vec![(
        String::from("@constant"),
        Range {
            start: Position {
                line: 0,
                character: 14
            },
            end: Position {
                line: 0,
                character: 23
            }
        },
        Range {
            start: Position {
                line: 0,
                character: 1
            },
            end: Position {
                line: 0,
                character: 23
            }
        },
    ), (
        String::from("@constant"),
        Range {
            start: Position {
                line: 1,
                character: 10
            },
            end: Position {
                line: 1,
                character: 19
            }
        },
        Range {
            start: Position {
                line: 1,
                character: 10
            },
            end: Position {
                line: 1,
                character: 29
            }
        },
    ), (
        String::from("@constant"),
        Range {
            start: Position {
                line: 1,
                character: 20
            },
            end: Position {
                line: 1,
                character: 29
            }
        },
        Range {
            start: Position {
                line: 1,
                character: 10
            },
            end: Position {
                line: 1,
                character: 29
            }
        },
    )])]
    #[tokio::test(flavor = "current_thread")]
    async fn document_symbol(#[case] source: &str, #[case] symbols: Vec<DocSymbol>) {
        // Arrange
        let mut service =
            initialize_server(&[(TEST_URI.clone(), source)], &Options::default()).await;
        // Act
        let tokens = service
            .request::<DocumentSymbolRequest>(DocumentSymbolParams {
                text_document: TextDocumentIdentifier {
                    uri: TEST_URI.clone(),
                },
                partial_result_params: PartialResultParams::default(),
                work_done_progress_params: WorkDoneProgressParams::default(),
            })
            .await;
        // Assert
        // Build the expected flat (non-hierarchical) response from the case
        // triples; the handler currently emits no children.
        let expected = Some(DocumentSymbolResponse::Nested(
            symbols
                .iter()
                .map(|s| DocumentSymbol {
                    name: s.0.clone(),
                    selection_range: s.1,
                    range: s.2,
                    kind: SymbolKind::VARIABLE,
                    detail: None,
                    children: None,
                    tags: None,
                    #[allow(deprecated)]
                    deprecated: None,
                })
                .collect(),
        ));
        assert_eq!(expected, tokens);
    }
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/workspace_symbol.rs | src/handlers/workspace_symbol.rs | use std::fs;
use ropey::Rope;
use tower_lsp::{
jsonrpc::Result,
lsp_types::{
Location, ProgressParams, ProgressParamsValue, SymbolInformation, SymbolKind, Url,
WorkDoneProgress, WorkDoneProgressBegin, WorkDoneProgressEnd, WorkDoneProgressReport,
WorkspaceSymbolParams, notification::Progress,
},
};
use tree_sitter::{QueryCursor, StreamingIterator};
use crate::{
Backend, LspClient,
util::{
CAPTURES_QUERY, NodeUtil, TextProviderRope, get_scm_files, get_work_done_token,
is_subsequence, parse,
},
};
/// Handles the `workspace/symbol` request.
///
/// Scans every `.scm` file under the configured workspace roots, parses it,
/// and collects all query captures whose text matches `params.query` (via
/// `is_subsequence`). Progress is reported through the request's work-done
/// token, when one is available, in Begin/Report/End notifications.
pub async fn symbol<C: LspClient>(
    backend: &Backend<C>,
    params: WorkspaceSymbolParams,
) -> Result<Option<Vec<SymbolInformation>>> {
    let mut symbols = Vec::new();
    let query = params.query;
    let token =
        get_work_done_token(backend, params.work_done_progress_params.work_done_token).await;
    let dirs = backend
        .workspace_paths
        .read()
        .as_deref()
        .cloned()
        .unwrap_or_default();
    let files = get_scm_files(&dirs).collect::<Vec<_>>();
    let file_count = files.len();
    // Precomputed divisor for percentage math. Only read inside the loop
    // below, so whenever it is used `file_count` is at least 1.
    let file_count_div_100 = file_count as f64 * 0.01;
    let mut num_processed_files = 0;
    let mut progress_percent = 0;
    // Initial Begin notification, only when the client gave us a token.
    if let Some(token) = token.clone() {
        backend
            .client
            .send_notification::<Progress>(ProgressParams {
                token,
                value: ProgressParamsValue::WorkDone(WorkDoneProgress::Begin(
                    WorkDoneProgressBegin {
                        title: "Finding workspace symbols".into(),
                        percentage: Some(0),
                        message: Some(format!("0/{file_count} files indexed")),
                        cancellable: Some(false),
                    },
                )),
            })
            .await;
    }
    for path in files {
        // The file name serves as the symbol's container (e.g. "folds.scm").
        let container_name = path
            .file_name()
            .and_then(|f| f.to_str())
            .map(ToString::to_string);
        // Unreadable files or non-convertible paths are skipped silently but
        // still counted toward progress below.
        if let (Ok(content), Ok(uri)) = (fs::read_to_string(&path), Url::from_file_path(path)) {
            let rope = &Rope::from_str(&content);
            let tree = parse(rope, None);
            let provider = &TextProviderRope(rope);
            let mut cursor = QueryCursor::new();
            let mut matches = cursor.matches(&CAPTURES_QUERY, tree.root_node(), provider);
            while let Some(match_) = matches.next() {
                for capture in match_.captures {
                    let capture_node = capture.node;
                    let node_text = capture_node.text(rope);
                    // Fuzzy filter: keep captures where `query` is a
                    // subsequence of the capture text.
                    if is_subsequence(&query, &node_text) {
                        symbols.push(SymbolInformation {
                            name: node_text,
                            kind: SymbolKind::VARIABLE,
                            location: Location::new(uri.clone(), capture_node.lsp_range(rope)),
                            container_name: container_name.clone(),
                            tags: None,
                            #[allow(deprecated)]
                            deprecated: None,
                        });
                    }
                }
            }
        }
        num_processed_files += 1;
        let percentage = (num_processed_files as f64 / file_count_div_100).floor() as u32;
        // Throttle Report notifications to roughly every 5 percentage points.
        if percentage > progress_percent + 4 {
            progress_percent = percentage;
            if let Some(token) = token.clone() {
                backend
                    .client
                    .send_notification::<Progress>(ProgressParams {
                        token,
                        value: ProgressParamsValue::WorkDone(WorkDoneProgress::Report(
                            WorkDoneProgressReport {
                                percentage: Some(progress_percent),
                                message: Some(format!(
                                    "{num_processed_files}/{file_count} files indexed"
                                )),
                                cancellable: Some(false),
                            },
                        )),
                    })
                    .await;
            }
        }
    }
    // Final End notification closing out the work-done token.
    if let Some(token) = token.clone() {
        backend
            .client
            .send_notification::<Progress>(ProgressParams {
                token,
                value: ProgressParamsValue::WorkDone(WorkDoneProgress::End(WorkDoneProgressEnd {
                    message: Some(format!("{file_count}/{file_count} files indexed")),
                })),
            })
            .await;
    }
    Ok(Some(symbols))
}
#[cfg(test)]
mod test {
    use pretty_assertions::assert_eq;
    use tower_lsp::lsp_types::{
        Location, NumberOrString, PartialResultParams, Position, ProgressParams,
        ProgressParamsValue, Range, SymbolInformation, SymbolKind, Url, WorkDoneProgress,
        WorkDoneProgressCreateParams, WorkDoneProgressEnd, WorkDoneProgressParams,
        WorkDoneProgressReport, WorkspaceSymbolParams, WorkspaceSymbolResponse,
        notification::Progress,
        request::{WorkDoneProgressCreate, WorkspaceSymbolRequest},
    };
    use crate::{
        Options,
        test_helpers::helpers::{MockRequest, TestService, initialize_server},
    };
    #[tokio::test(flavor = "current_thread")]
    async fn workspace_symbol() {
        // Arrange
        let mut service = initialize_server(&[], &Options::default()).await;
        // Shorthand for building an LSP Range from line/column pairs.
        fn make_range(start_line: u32, start_col: u32, end_line: u32, end_col: u32) -> Range {
            Range::new(
                Position::new(start_line, start_col),
                Position::new(end_line, end_col),
            )
        }
        // Act: an empty query matches every capture in the fixture workspace.
        let actual_tokens = service
            .request::<WorkspaceSymbolRequest>(WorkspaceSymbolParams {
                query: String::new(),
                partial_result_params: PartialResultParams::default(),
                work_done_progress_params: WorkDoneProgressParams::default(),
            })
            .await;
        // Assert
        let cpp_folds_uri = Url::from_file_path(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/tests/fixtures/test_workspace/queries/cpp/folds.scm"
        ))
        .unwrap();
        let other_highlights_uri = Url::from_file_path(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/tests/fixtures/test_workspace/queries/other/highlights.scm"
        ))
        .unwrap();
        #[allow(deprecated)]
        let expected_tokens = Some(WorkspaceSymbolResponse::Flat(vec![
            SymbolInformation {
                name: String::from("@fold.region"),
                kind: SymbolKind::VARIABLE,
                location: Location {
                    uri: cpp_folds_uri.clone(),
                    range: make_range(0, 22, 0, 34),
                },
                deprecated: None,
                tags: None,
                container_name: Some(String::from("folds.scm")),
            },
            SymbolInformation {
                name: String::from("@fold.imports"),
                kind: SymbolKind::VARIABLE,
                location: Location {
                    uri: cpp_folds_uri,
                    range: make_range(2, 20, 2, 33),
                },
                deprecated: None,
                tags: None,
                container_name: Some(String::from("folds.scm")),
            },
            SymbolInformation {
                name: String::from("@variable"),
                kind: SymbolKind::VARIABLE,
                location: Location {
                    uri: other_highlights_uri.clone(),
                    range: make_range(5, 2, 5, 11),
                },
                deprecated: None,
                tags: None,
                container_name: Some(String::from("highlights.scm")),
            },
            SymbolInformation {
                name: String::from("@function"),
                kind: SymbolKind::VARIABLE,
                location: Location {
                    uri: other_highlights_uri.clone(),
                    range: make_range(7, 11, 7, 20),
                },
                deprecated: None,
                tags: None,
                container_name: Some(String::from("highlights.scm")),
            },
            SymbolInformation {
                name: String::from("@constant"),
                kind: SymbolKind::VARIABLE,
                location: Location {
                    uri: other_highlights_uri,
                    range: make_range(13, 2, 13, 11),
                },
                deprecated: None,
                tags: None,
                container_name: Some(String::from("highlights.scm")),
            },
        ]));
        assert_eq!(expected_tokens, actual_tokens);
        // The handler should also have created a work-done token and emitted
        // Report/End progress notifications over it.
        assert_eq!(
            service.inner().client.get_requests()[0],
            MockRequest::from_request::<WorkDoneProgressCreate>(WorkDoneProgressCreateParams {
                token: NumberOrString::String(String::from("00000000-1111-2222-3333-444444444444"))
            })
        );
        assert!(service.inner().client.get_notifications().contains(
            &MockRequest::from_notification::<Progress>(ProgressParams {
                token: NumberOrString::String(String::from("00000000-1111-2222-3333-444444444444")),
                value: ProgressParamsValue::WorkDone(WorkDoneProgress::Report(
                    WorkDoneProgressReport {
                        message: Some(String::from("3/4 files indexed")),
                        cancellable: Some(false),
                        percentage: Some(75)
                    }
                ))
            })
        ));
        assert!(service.inner().client.get_notifications().contains(
            &MockRequest::from_notification::<Progress>(ProgressParams {
                token: NumberOrString::String(String::from("00000000-1111-2222-3333-444444444444")),
                value: ProgressParamsValue::WorkDone(WorkDoneProgress::End(WorkDoneProgressEnd {
                    message: Some(String::from("4/4 files indexed"))
                }))
            })
        ));
    }
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/did_change_configuration.rs | src/handlers/did_change_configuration.rs | use tower_lsp::lsp_types::DidChangeConfigurationParams;
use crate::{Backend, LspClient, util::set_configuration_options};
/// Handles the `workspace/didChangeConfiguration` notification by re-applying
/// configuration from the pushed settings against the current workspace roots.
pub async fn did_change_configuration<C: LspClient>(
    backend: &Backend<C>,
    params: DidChangeConfigurationParams,
) {
    // Snapshot the current workspace roots so the configuration refresh
    // operates on a stable, owned list.
    let workspace_paths = backend
        .workspace_paths
        .read()
        .map(|uris| uris.to_vec())
        .unwrap_or_default();
    set_configuration_options(backend, Some(params.settings), workspace_paths).await;
}
#[cfg(test)]
mod test {
    use pretty_assertions::assert_eq;
    use regex::Regex;
    use std::collections::BTreeMap;
    use tower_lsp::lsp_types::{
        DidChangeConfigurationParams, notification::DidChangeConfiguration,
    };
    use crate::{
        Options,
        test_helpers::helpers::{TestService, initialize_server},
    };
    #[tokio::test(flavor = "current_thread")]
    async fn server_did_change_configuration() {
        // Arrange
        let mut service = initialize_server(&[], &Options::default()).await;
        // Act: push a settings payload covering aliases, install directories,
        // and a custom language-retrieval pattern.
        service
            .notify::<DidChangeConfiguration>(DidChangeConfigurationParams {
                settings: serde_json::from_str(
                    r#"
                    {
                      "parser_aliases": {
                        "ecma": "javascript",
                        "jsx": "javascript",
                        "foolang": "barlang"
                      },
                      "parser_install_directories": [
                        "/my/directory/",
                        "/tmp/tree-sitter/parsers/"
                      ],
                      "language_retrieval_patterns": [
                        "\\.ts\\-([^/]+)\\-parser\\.wasm"
                      ]
                    }
                    "#,
                )
                .unwrap(),
            })
            .await;
        // Assert: the custom pattern is prepended to the built-in defaults.
        let options = service.inner().options.read().await;
        assert_eq!(
            *options,
            Options {
                parser_aliases: BTreeMap::from([
                    ("ecma".to_string(), "javascript".to_string()),
                    ("jsx".to_string(), "javascript".to_string()),
                    ("foolang".to_string(), "barlang".to_string())
                ]),
                parser_install_directories: vec![
                    String::from("/my/directory/"),
                    String::from("/tmp/tree-sitter/parsers/"),
                ],
                language_retrieval_patterns: vec![
                    Regex::new(r"\.ts\-([^/]+)\-parser\.wasm").unwrap().into(),
                    Regex::new("queries/([^/]+)/[^/]+\\.scm$").unwrap().into(),
                    Regex::new("tree-sitter-([^/]+)/queries/[^/]+\\.scm$",)
                        .unwrap()
                        .into(),
                ],
                ..Default::default()
            }
        );
    }
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/shutdown.rs | src/handlers/shutdown.rs | use tracing::info;
use crate::{Backend, LspClient};
/// Handles the LSP `shutdown` request. The server only logs that it is
/// shutting down; no other cleanup is performed here.
pub fn shutdown<C: LspClient>(_backend: &Backend<C>) {
    info!("ts_query_ls shutdown");
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.